Example #1
    def __call__(self, x):
        # x.shape == (batchsize, 3, 128, 64)
        batchsize = x.shape[0]
        h = F.elu(self.bn1(self.conv1_1(x)))
        h = F.elu(self.bn2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 3, 2, cover_all=False)
        h = self.conv2_1(h)
        h = self.conv2_3(h)
        h = self.conv3_1(h)
        h = self.conv3_3(h)
        h = self.conv4_1(h)
        h = self.conv4_3(h)

        h = h.reshape(batchsize, -1)
        h = F.dropout(h, ratio=0.6)
        h = F.elu(self.fc1_bn(self.fc1(h)))

        # Features in rows, normalize axis 1.
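        # Logits are a scaled cosine similarity between the L2-normalized
        # features and the per-class weight vectors (a cosine-softmax classifier).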
        weights = self.mean_vectors
        features = self.ball(h)
        features = F.normalize(features, eps=1e-8)
        scale = F.softplus(self.scale)
        normalized_weight = F.normalize(weights, axis=0, eps=1e-8)
        logits = F.tile(scale[None, ], (batchsize, 1)) * \
            F.matmul(features, normalized_weight)
        return logits
Example #2
    def __call__(self, X):
        h0 = F.pad(X, ((0, 0), (0, 0), (0, 0), (37, 37)),
                   'constant')  # (1, 96, 1366) -> (1, 96, 1440)
        h1 = F.transpose(self.norm0(F.transpose(h0, axes=(0, 3, 1, 2))),
                         axes=(0, 2, 3, 1))  # is normalizing along the time axis OK?
        h1 = F.max_pooling_2d(F.elu(self.norm1(self.conv1(h1))), (2, 2),
                              stride=(2, 2))
        h1 = F.dropout(h1, ratio=0.1)
        h2 = F.max_pooling_2d(F.elu(self.norm2(self.conv2(h1))), (3, 3),
                              stride=(3, 3))
        h2 = F.dropout(h2, ratio=0.1)
        h3 = F.max_pooling_2d(F.elu(self.norm3(self.conv3(h2))), (4, 4),
                              stride=(4, 4))
        h3 = F.dropout(h3, ratio=0.1)
        h4 = F.max_pooling_2d(F.elu(self.norm4(self.conv4(h3))), (4, 4),
                              stride=(4, 4))
        h4 = F.dropout(h4, ratio=0.1)
        h4 = F.transpose(h4, axes=(0, 3, 1, 2))
        h4 = F.reshape(h4, (h4.shape[0], 15, 128))
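        # (batch, 15, 128): 15 time steps of 128-dim features for the GRUs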

        self.gru1.reset_state()  # reset hidden states per track. Is this OK?
        self.gru2.reset_state()  # reset hidden states per track. Is this OK?
        for i in range(h4.shape[1]):
            h5 = self.gru1(h4[:, i, :])
            h6 = self.gru2(h5)

        h6 = F.dropout(h6, ratio=0.3)
        h7 = F.sigmoid(self.fc1(h6))

        return h7
Example #3
    def forward(self, ws, cs, ls, dep_ts=None):
        ws = [self.emb_word(w) for w in ws]
        cs = [F.squeeze(
            F.max_pooling_2d(
                self.conv_char(F.expand_dims(self.emb_char(c), 1)),
                (int(l[0]), 1)))
            for c, l in zip(cs, ls)]
        xs_f = [F.dropout(F.concat([w, c]), 0.5) for w, c in zip(ws, cs)]
        xs_b = [x[::-1] for x in xs_f]

        _, _, hs_f = self.lstm_f(None, None, xs_f)
        _, _, hs_b = self.lstm_b(None, None, xs_b)
        hs_b = [x[::-1] for x in hs_b]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]
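        # hs: per-sentence token vectors, forward and backward LSTM states concatenated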

        dep_ys = [self.biaffine_arc(
            F.elu(F.dropout(self.arc_dep(h), 0.32)),
            F.elu(F.dropout(self.arc_head(h), 0.32))) for h in hs]

        if dep_ts is not None:
            heads = dep_ts
        else:
            heads = [F.argmax(y, axis=1) for y in dep_ys]

        cat_ys = [self.biaffine_tag(
                    F.elu(F.dropout(self.rel_dep(h), 0.32)),
                    F.elu(F.dropout(self.rel_head(
                        F.embed_id(t, h, ignore_label=IGNORE)), 0.32)))
                  for h, t in zip(hs, heads)]

        return cat_ys, dep_ys
Example #4
    def forward(self, ws, ss, ps):
        batchsize, length = ws.shape
        xp = chainer.cuda.get_array_module(ws[0])
        ws = self.emb_word(ws) # (batch, length, word_dim)
        ss = F.reshape(self.emb_suf(ss), (batchsize, length, -1))
        ps = F.reshape(self.emb_prf(ps), (batchsize, length, -1))
        hs = F.transpose(F.concat([ws, ss, ps], 2), (1, 0, 2))
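        # time-major layout: (length, batchsize, embedding dims)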
        hs = F.dropout(hs, self.dropout_ratio, train=self.train)
        hs = F.split_axis(hs, length, 0)
        hs_f = []
        hs_b = []
        self._init_state()
        for h_in_f, h_in_b in zip(hs, reversed(hs)):
            h_f = self.lstm_f2(self.lstm_f1(F.reshape(h_in_f, (-1, self.in_dim))))
            hs_f.append(h_f)
            h_b = self.lstm_b2(self.lstm_b1(F.reshape(h_in_b, (-1, self.in_dim))))
            hs_b.append(h_b)

        # In Python 3, zip() is a one-shot iterator and hs is consumed twice
        # below, so build the concatenated states as a list instead.
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, reversed(hs_b))]

        cat_ys = [self.linear_cat2(F.dropout(
            F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        dep_ys = [self.biaffine(
            F.elu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.elu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]

        return cat_ys, dep_ys
Example #5
 def __call__(self, x, test=False):
     h = F.elu(self.c0(x))  # no bn because images from the generator may be biased?
     h = F.elu(self.bn1(self.c1(h), test=test))
     h = F.elu(self.bn2(self.c2(h), test=test))
     h = F.elu(self.bn3(self.c3(h), test=test))
     return self.l4l(h)
Example #6
 def forward(self, x):
     n = x.data.shape[0]
     h = F.reshape(x, (n, 1, 28, 28))
     h = F.elu(self.conv1(h))
     h = F.elu(self.conv2(h))
     h = self.lin(h)
     return h
Example #8
    def __call__(self, x):
        h = F.elu(self.bnc1(self.c1(x)))
        h = F.elu(self.bnc2(self.c2(h)))
        h = F.elu(self.bnc3(self.c3(h)))
        h = F.elu(self.bnc4(self.c4(h)))
        h = self.c5(h)

        return h
Example #9
 def fwd(self,x):
      h_input = F.relu(self.l_input(x))
      h0 = F.elu(self.l0(h_input))
      h1 = F.elu(self.l1(h0))
      h2 = F.elu(self.l2(h1))
      h3 = F.elu(self.l3(h2))
      h_output = self.l_output(h3)
      return h_output
Example #10
 def __call__(self, z, test=False):
     # mattya's implementation does not have bn after c1
     h = F.elu(self.bn1(self.c1(z), test=test))
     h = F.elu(self.bn2(self.c2(h), test=test))
     h = F.elu(self.bn3(self.c3(h), test=test))
     h = self.l0z(h)
     print(h.shape)
     return h
Example #11
 def __call__(self, x, test=False):
     # no bn because images from the generator may be biased?
     h = F.elu(self.c0(x))
     h = F.dropout(h, ratio=0.1, train=not test)
     h = F.elu(self.bn1(self.c1(h), test=test))
     h = F.elu(self.bn2(self.c2(h), test=test))
     h = F.elu(self.bn3(self.c3(h), test=test))
     l = self.l4l(h)
     return l
Example #12
 def __call__(self, x, t):
     n = x.data.shape[0]
     h = F.reshape(x, (n, 1, 28, 28))
     h = F.elu(self.conv1(h))
     h = F.elu(self.conv2(h))
     h = self.lin(h)
     loss = F.softmax_cross_entropy(h, t)
     acc = F.accuracy(h, t)
     chainer.report({'loss': loss, 'acc': acc}, self)
     return loss
Example #13
    def __call__(self, x):
        h = F.elu(self.l0(x))

        for i in range(self.n_blocks):
            for j in range(self.block_size):
                h = getattr(self, 'c{}'.format(i * self.block_size + j))(h)
                h = F.elu(h)
            if i < self.n_blocks - 1:
                h = F.max_pooling_2d(h, ksize=2, stride=2)

        return self.ln(h)
Example #14
File: mlp.py  Project: GAIMJKP/models-2
    def forward(self, inputs):
        # Input shape: [batch_size, num_nodes, feature_dims]
        batch_size, num_nodes = inputs.shape[:2]
        inputs = inputs.reshape(batch_size * num_nodes, -1)
        # New shape: [batch_size * num_nodes, feature_dims]

        x = F.elu(self.fc1(inputs))
        x = F.dropout(x, self.dropout_prob)
        x = F.elu(self.fc2(x))
        x = self.bn(x)

        return x.reshape(batch_size, num_nodes, -1)
Example #15
    def __call__(self, x):
        h = x
        for iL in range(self.NPLayers):
            h = self.__dict__["P%d" % iL](h)
            if iL == 0:
                h = F.local_response_normalization(h)
            h = F.max_pooling_2d(F.elu(h), ksize=self.NKsize[iL + 1], cover_all=True)
        h = F.spatial_pyramid_pooling_2d(F.elu(h), 3, F.MaxPooling2D)

        h = F.dropout(F.elu(self.L1(h)), ratio=self.L1_dropout, train=self.IsTrain)
        h = F.elu(self.L2(h))
        y = h
        return y
Example #16
File: mlp.py  Project: souravsingh/models
    def forward(self, inputs):
        # Input shape: [batch_size, num_nodes, feature_dims]
        batch_size, num_nodes = inputs.shape[:2]
        inputs = inputs.reshape(batch_size * num_nodes, -1)
        # New shape: [batch_size * num_nodes, feature_dims]

        x = F.elu(self.fc1(inputs))
        x = F.dropout(x, self.dropout_prob)
        x = F.elu(self.fc2(x))
        x = self.bn(x)

        return x.reshape(batch_size, num_nodes, -1)
Example #17
    def glimpse_net(self, glimpse, location):
        # the glimpse network comprises the next three operations.
        h_glimpse = F.elu(self.emb_x(glimpse))

        # Location Encoding
        h_location = F.elu(self.emb_l(location))

        # g is the final glimpse feature vector,
        # equal to f_g(theta_g)
        g = F.elu(self.fc_loc_to_glimpse(h_location) +
                  F.reshape(self.fc_image_to_glimpse(h_glimpse),
                            (glimpse.data.shape[0], -1)))
        return g
Example #18
 def __call__(self, x, test=False):
     h = self.b1(F.elu(self.c1(x)), test=test)
     h = self.b2(F.elu(self.c2(h)), test=test)
     h = self.b3(F.elu(self.c3(h)), test=test)
     h = self.r1(h, test=test)
     h = self.r2(h, test=test)
     h = self.r3(h, test=test)
     h = self.r4(h, test=test)
     h = self.r5(h, test=test)
     h = self.b4(F.elu(self.d1(h)), test=test)
     h = self.b5(F.elu(self.d2(h)), test=test)
     y = self.d3(h)
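     # map the tanh output from [-1, 1] to the pixel range [0, 255]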
     return (F.tanh(y) + 1) * 127.5
Example #19
File: encoder.py  Project: oknaki/vae-icons
 def __call__(self, x, test=False):
     h = x
     h = F.elu(self.bn1(self.c1(h), test=test))
     h = F.max_pooling_2d(h, 2)
     h = F.elu(self.bn2(self.c2(h), test=test))
     h = F.max_pooling_2d(h, 2)
     h = F.elu(self.bn3(self.c3(h), test=test))
     h = F.max_pooling_2d(h, 2)
     h = F.elu(self.bn4(self.c4(h), test=test))
     h = F.average_pooling_2d(h, 2)
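     # two heads: c5_mu for the latent mean, c5_vr for the variance term of q(z|x)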
     mu = self.out_mu(F.sigmoid(self.c5_mu(h)))
     vr = self.out_vr(F.sigmoid(self.c5_vr(h)))
     return mu, vr
Example #20
 def __call__(self, x, test=False):
     h = self.b1(F.elu(self.c1(x)))
     h = self.b2(F.elu(self.c2(h)))
     h = self.b3(F.elu(self.c3(h)))
     h = self.r1(h)
     h = self.r2(h)
     h = self.r3(h)
     h = self.r4(h)
     h = self.r5(h)
     h = self.b4(F.elu(self.d1(h)))
     h = self.b5(F.elu(self.d2(h)))
     y = self.d3(h)
     return (F.tanh(y) + 1) * 127.5
Example #21
 def __call__(self, x):
     h = funcs.elu(self.fc1(x))
     h = funcs.elu(self.fc2(h))
     hs = funcs.elu(self.fc3(h))
     ha = funcs.elu(self.fc4(h))
     state_value = self.state_value(hs)
     advantage_value = self.advantage_value(ha)
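     # dueling network head: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)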
     advantage_mean = (funcs.sum(advantage_value, axis=1) /
                       float(self.output_size)).reshape(-1, 1)
     q_value = funcs.concat(
         [state_value for _ in range(self.output_size)],
         axis=1) + (advantage_value - funcs.concat(
             [advantage_mean for _ in range(self.output_size)], axis=1))
     return q_value
Example #22
    def forward_one_step(self, state, x_last, train=True):

        x = Variable(x_last, volatile=False)
        a = F.elu(self.conv1(x))

        l1 = F.dropout(F.elu(self.l1_x(a) + self.l1_h(state['h1'])),
                       train=train)
        c1, h1 = F.lstm(state['c1'], l1)
        l2 = F.dropout(F.elu(self.l2_h1(h1) + self.l2_h(state['h2'])),
                       train=train)
        c2, h2 = F.lstm(state['c2'], l2)

        state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2, 'x_last': x}
        return state
Example #23
    def forward_down(self, x, sample=False):
        """
        """
        h = F.elu(x)
        h = self.down1(h)
        sections = [self.z_dim, self.z_dim*2, self.z_dim*3,
                    self.z_dim*4, self.z_dim*4+self.h_dim]
        pz_mean, pz_logv, rz_mean, rz_logv, down_context, h_det = \
            F.split_axis(h, sections, axis=1)

        prior = F.gaussian(pz_mean, 2 * pz_logv)
        logps = self.gaussian_diag_logps(pz_mean, 2*pz_logv, prior)

        if sample:
            z = prior
            context = 0
            logqs = chainer.Variable(
                self.xp.zeros(logps.shape, dtype="float32"), name="logqs")
        else:
            post_mean = rz_mean + self.qz_mean
            post_logv = 2 * (rz_logv + self.qz_logv)
            posterior = F.gaussian(post_mean, post_logv)
            context = self.up_context + down_context
            logqs = self.gaussian_diag_logps(post_mean, post_logv, posterior)

            z = posterior

        # autoregressive nn
        h = self.ar1(z)
        h = h + context
        h = self.ar2(h)
        sections = [self.z_dim]
        arw_mean, arw_logv = F.split_axis(h, sections, axis=1)
        # arw_mean, arw_logv = h[0] * 0.1, h[1] * 0.1  # ??
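        # IAF-style autoregressive update: shift and rescale z; arw_logv enters
        # log q(z) as the flow's log-determinant term.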
        z = (z - 0.1*arw_mean) / F.exp(F.clip(0.1*arw_logv, -100., 100.))
        logqs += arw_logv

        kl_cost = logqs - logps
        kl_cost, kl_obj = self.kl_sum(kl_cost)

        z = F.concat([z, h_det])
        z = F.elu(z)
        z = self.down2(z)
        if self.downsample:
            output_shape = z.shape[2:]
            x = F.resize_images(x, output_shape)

        z = x + 0.1 * z
        return z, kl_obj, kl_cost
Example #24
    def __call__(self, x):
        h = F.elu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.res2(h, self.train)
        h = self.res3(h, self.train)
        h = self.res4(h, self.train)
        h = self.res5(h, self.train)
        h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)

        h = F.elu(self.conv2(h))
        h = F.dropout(h, ratio=0.5)
        h = self.conv3(h)
        h = F.reshape(h, (-1, self.num_class))

        return h
Example #25
    def self_attention(self, h, adj, step):
        attention_layer_index = 0 if self.attention_tying else step
        mask = np.sum(adj, axis=1)
        mask[mask == 0] = -10000
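        # NOTE: mask is computed but never applied; the masking line below is commented out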
        # [mb, atoms, ch] -> [mb, ch, atoms]
        mb, atoms, ch = h.shape
        h = functions.transpose(h, axes=(0, 2, 1))
        h = self.linear_transform_layer[attention_layer_index](h)
        # [mb, 1, atoms]
        f_1 = self.conv1d_layer_1[attention_layer_index](h)
        # [mb, 1, atoms] -> [mb, atoms, 1]
        f_1 = functions.transpose(f_1, axes=(0, 2, 1))
        # [mb, atoms, 1] -> [mb, atoms, atoms]
        f_1 = functions.tile(f_1, reps=(1, 1, atoms))
        # [mb, 1, atoms]
        f_2 = self.conv1d_layer_2[attention_layer_index](h)
        # [mb, 1, atoms] -> [mb, atoms, atoms]
        f_2 = functions.tile(f_2, reps=(1, atoms, 1))
        logits = f_1 + f_2
        # logits *= mask
        # [mb, atoms, atoms]
        coefs = functions.softmax(functions.leaky_relu(logits))
        coefs = functions.transpose(coefs, axes=(0, 2, 1))
        # [mb, ch, atoms] -> [mb, atoms, ch]
        h = functions.transpose(h, axes=(0, 2, 1))

        h = functions.dropout(
            h, ratio=self.dropout_rate) if self.dropout_rate != 0.0 else h
        # [mb, atoms, atoms] * [mb, atoms, ch]
        vals = functions.matmul(coefs, h)

        h = functions.elu(vals)
        return h
Example #26
    def forward(self, ws, cs):
        batchsize, length, max_word_len = cs.shape
        ws = self.emb_word(ws) # (batch, length, word_dim)
        cs = F.reshape(
            F.max_pooling_2d(
                self.conv_char(
                    F.reshape(
                        self.emb_char(cs),
                        (batchsize * length, 1, max_word_len, 50))),
                (max_word_len, 1)),
            (batchsize, length, self.char_dim))

        hs = F.transpose(F.concat([ws, cs], 2), (1, 0, 2))
        hs = F.dropout(hs, self.dropout_ratio, train=self.train)
        hs = F.split_axis(hs, length, 0)
        hs_f = []
        hs_b = []
        self._init_state()
        for h_in_f, h_in_b in zip(hs, reversed(hs)):
            h_f = self.lstm_f2(self.lstm_f1(F.reshape(h_in_f, (batchsize, -1))))
            hs_f.append(h_f)
            h_b = self.lstm_b2(self.lstm_b1(F.reshape(h_in_b, (batchsize, -1))))
            hs_b.append(h_b)

        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, reversed(hs_b))]

        cat_ys = [self.linear_cat2(F.dropout(
            F.elu(self.linear_cat1(h)), 0.5, train=self.train)) for h in hs]

        hs = [F.reshape(h, (length, -1)) for h in \
                F.split_axis(F.transpose(F.stack(hs, 2), (0, 2, 1)), batchsize, 0)]

        dep_ys = [self.biaffine(
            F.relu(F.dropout(self.linear_dep(h), 0.32, train=self.train)),
            F.relu(F.dropout(self.linear_head(h), 0.32, train=self.train))) for h in hs]
        return cat_ys, dep_ys
Example #27
    def __call__(self, x):
        h = x
        for iL in range(self.NPLayers):
            h = self.__dict__["P%d" % iL](h)
            if iL == 0: h = F.local_response_normalization(h)
            h = F.max_pooling_2d(F.elu(h),
                                 ksize=self.NKsize[iL + 1],
                                 cover_all=True)
        h = F.spatial_pyramid_pooling_2d(F.elu(h), 3, F.MaxPooling2D)

        h = F.dropout(F.elu(self.L1(h)),
                      ratio=self.L1_dropout,
                      train=self.IsTrain)
        h = F.elu(self.L2(h))
        y = h
        return y
Example #28
    def __call__(self, x, t):
        h = F.elu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        p1 = self.score_pool1(h)

        h = self.fire2(h)
        h = self.fire3(h)
        h = self.fire4(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        u4 = self.upsample_pool4(self.score_pool4(h))

        h = self.fire5(h)
        h = self.fire6(h)
        h = self.fire7(h)
        h = self.fire8(h)

        # h = F.max_pooling_2d(h, 3, stride=2)
        h = self.fire9(h)
        u9 = self.upsample_pool9(self.score_pool9(h))

        h = F.concat((p1, u4, u9), axis=1)
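        # p1, u4, u9: score maps at three scales, fused FCN-style before the final upsample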
        h = self.add_layer(h)
        h = self.upsample_final(h)

        self.h = h
        self.loss = F.softmax_cross_entropy(h, t)

        self.evaluator.preparation(h, t)
        self.accuracy = self.evaluator.get_accuracy()
        self.iou = self.evaluator.get_iou()

        return self.loss
Example #29
    def __call__(self, x, t):
        x.volatile = not self.train

        h = F.elu(self.conv1(x))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = self.fire2(h)
        h = self.fire3(h)
        h = self.fire4(h)

        h = F.max_pooling_2d(h, 2, stride=2)

        h = self.fire5(h)
        h = self.fire6(h)
        h = self.fire7(h)
        h = self.fire8(h)

        h = F.max_pooling_2d(h, 2, stride=2)
        h = self.fire9(h)

        h = F.reshape(self.conv10(h), (len(x.data), self.n_class))

        self.prob = F.softmax(h)
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        chainer.report({'loss': self.loss, 'accuracy': self.accuracy}, self)
        return self.loss
Example #30
    def glimpse_net(self, glimpse, location):
        # the glimpse network comprises the next three operations.
        h_glimpse = F.max_pooling_2d(F.elu(self.emb_x(glimpse)), 2, stride=2)

        # Location Encoding
        h_location = F.elu(self.emb_l(location))

        # g is the final glimpse feature vector
        # equal to f_g(theta_g)
        g_location = self.conv_loc_to_glimpse(h_location)
        g_glimpse = F.elu( \
                F.max_pooling_2d(self.conv_image_to_glimpse(h_glimpse), 2, stride=2))
        g_glimpse = F.reshape(g_glimpse, (glimpse.data.shape[0],-1))
        g = F.elu(g_location + g_glimpse)

        return g
Example #31
File: ggnn.py  Project: Minys233/GCN-BMP
    def masked_self_attention(self, input, adj, step):
        adj = np.sum(adj, axis=1)
        # [mb, atoms, ch]
        mb, atoms, ch = input.shape
        attention_layer_index = 0 if self.attention_tying else step
        # [mb, atoms, hidden_dim]
        h = functions.reshape(input, shape=(mb * atoms, ch))
        h = self.linear_transform_layer[attention_layer_index](h)
        h = functions.reshape(h, shape=(mb, atoms, -1))
        # [mb, atoms, atoms, 2 * hidden_dim]
        a_input = functions.concat(
            [functions.tile(h, reps=(1, 1, atoms)).reshape(mb, atoms * atoms, -1),
             functions.tile(h, reps=(1, atoms, 1))],
            axis=-1).reshape(mb, atoms, atoms, 2 * self.hidden_dim)
        a_input = functions.reshape(a_input, shape=(mb * atoms * atoms, 2 * self.hidden_dim))
        # [mb * atoms * atoms, 2 * hidden_dim] => [mb * atoms * atoms, 1] => [mb, atoms * atoms]
        e = functions.leaky_relu(
            functions.reshape(functions.squeeze(self.neural_network_layer[attention_layer_index](a_input), axis=-1),
                              shape=(mb, atoms, atoms)))

        # [mb, atoms, atoms]
        zero_vec = -9e15 * self.xp.ones_like(e, dtype=self.xp.float32)
        # [mb, atoms, atoms]
        attention = functions.where(adj > 0, e, zero_vec)

        # [mb, atoms, atoms]
        attention = functions.softmax(attention, axis=2)
        # [mb, atoms, atoms] * [mb, atoms, hidden_dim] => [mb, atoms, hidden_dim]
        h_prime = functions.matmul(attention, h)
        h_prime = functions.elu(h_prime)
        return h_prime
Example #32
    def forward(self, ws, ss, ps, dep_ts=None):
        batchsize = len(ws)
        xp = chainer.cuda.get_array_module(ws[0])
        split = scanl(lambda x, y: x + y, 0, [w.shape[0] for w in ws])[1:-1]

        wss = self.emb_word(F.hstack(ws))
        sss = F.reshape(self.emb_suf(F.vstack(ss)), (-1, 4 * self.afix_dim))
        pss = F.reshape(self.emb_prf(F.vstack(ps)), (-1, 4 * self.afix_dim))
        ins = F.dropout(F.concat([wss, sss, pss]),
                        self.dropout_ratio,
                        train=self.train)

        xs_f = list(F.split_axis(ins, split, 0))
        xs_b = [x[::-1] for x in xs_f]
        cx_f, hx_f, cx_b, hx_b = self._init_state(xp, batchsize)
        _, _, hs_f = self.lstm_f(hx_f, cx_f, xs_f, train=self.train)
        _, _, hs_b = self.lstm_b(hx_b, cx_b, xs_b, train=self.train)
        hs_b = [x[::-1] for x in hs_b]
        # ys: [(sentence length, number of category)]
        hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]

        dep_ys = [
            self.biaffine_arc(
                F.elu(F.dropout(self.arc_dep(h), 0.32, train=self.train)),
                F.elu(F.dropout(self.arc_head(h), 0.32, train=self.train)))
            for h in hs
        ]

        # if dep_ts is not None and random.random >= 0.5:
        if dep_ts is not None:
            heads = dep_ts
        else:
            heads = [F.argmax(y, axis=1) for y in dep_ys]

        heads = F.elu(F.dropout(
            self.rel_head(
                F.vstack([F.embed_id(t, h, ignore_label=IGNORE) \
                        for h, t in zip(hs, heads)])),
            0.32, train=self.train))

        childs = F.elu(
            F.dropout(self.rel_dep(F.vstack(hs)), 0.32, train=self.train))
        cat_ys = self.biaffine_tag(childs, heads)

        cat_ys = list(F.split_axis(cat_ys, split, 0))

        return cat_ys, dep_ys
Example #33
    def __call__(self, x, t=None):
        self.clear()
        x.volatile = not self.train
        t.volatile = 'AUTO'

        h = F.elu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)

        h = self.fire2(h)
        h = self.fire3(h)
        h = self.fire4(h)

        h = F.max_pooling_2d(h, 3, stride=2)

        h = self.fire5(h)
        h = self.fire6(h)
        h = self.fire7(h)
        h = self.fire8(h)

        h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
        h = F.elu(self.conv9(h))

        memory_h = chainer.Variable(h.data, volatile='AUTO')
        with chainer.no_backprop_mode():
            weight, self.memory = \
                self.apply_memory(memory_h, t, self.update_weight, self.train)

        if self.train:
            self.apply_memory.memory.data = self.memory.data

        h = F.dropout(h, ratio=0.5, train=self.train)
        h = self.conv_infer(h)
        h = F.reshape(h, (-1, self.n_class))

        h = h*weight
        self.h = h
        self.prob = F.softmax(h)

        if self.active_learn:
            t = mask_gt_for_active_learning(self.prob, t, self.xp, self.n_class)

        self.loss = F.softmax_cross_entropy(h, t)

        self.accuracy = F.accuracy(h, t)
        chainer.report({'loss': self.loss, 'accuracy': self.accuracy}, self)
        return self.loss
Example #34
    def forward_up(self, x):
        """
        """
        h = F.elu(x)
        h = self.up1(h)
        sections = [self.z_dim, self.z_dim*2, self.z_dim*2+self.h_dim]
        self.qz_mean, self.qz_logv, self.up_context, h = \
            F.split_axis(h, sections, axis=1)

        h = F.elu(h)
        h = self.up2(h)

        if self.downsample:
            output_shape = h.shape[2:]
            x = F.resize_images(x, output_shape)

        return x + 0.1 * h
Example #35
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.elu(x, alpha=self.alpha)
        self.assertEqual(y.data.dtype, numpy.float32)

        expected = self.x.copy()
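        # reference ELU: f(x) = x for x >= 0, alpha * (exp(x) - 1) for x < 0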
        for i in numpy.ndindex(self.x.shape):
            if self.x[i] < 0:
                expected[i] = self.alpha * (numpy.exp(expected[i]) - 1)

        gradient_check.assert_allclose(expected, y.data)
Example #36
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.elu(x, alpha=self.alpha)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))

        gradient_check.assert_allclose(gx, x.grad)
Example #37
 def __call__(self, x):
     h = x
     h = self.__dict__["P1_1"](F.elu(h))
     h = self.__dict__["BN1_1"](h)
     h = self.__dict__["P1_2"](F.elu(h))
     h = self.__dict__["BN1_2"](h)
     h = F.max_pooling_2d(F.elu(h), ksize=3, stride=2, cover_all=False)
     h = self.__dict__["P2_1"](h)
     h = self.__dict__["P2_2"](F.elu(h))
     h = self.__dict__["P2_2"](F.elu(h))
     h = F.max_pooling_2d(F.elu(h), ksize=3, stride=2, cover_all=False)
     h = self.__dict__["P3_1"](h)
     h = self.__dict__["P3_2"](F.elu(h))
     h = self.__dict__["P3_3"](F.elu(h))
     h = F.average_pooling_2d(F.elu(h), ksize=6)
      y = F.reshape(h, (len(h.data), self.F_unit))
     return y
Example #38
File: test_elu.py  Project: Fhrozen/chainer
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.elu(x, alpha=self.alpha)
        self.assertEqual(y.data.dtype, self.dtype)

        expected = self.x.copy()
        for i in numpy.ndindex(self.x.shape):
            if self.x[i] < 0:
                expected[i] = self.alpha * (numpy.exp(expected[i]) - 1)

        testing.assert_allclose(
            expected, y.data, **self.check_forward_options)
Example #39
File: dcgan.py  Project: re53min/TOHO_AI
    def __call__(self, x):

        h = add_noise(x)
        h = F.elu(add_noise(self.c0_0(h)))
        h = F.elu(add_noise(self.bn0_1(self.c0_1(h))))
        h = F.elu(add_noise(self.bn1_0(self.c1_0(h))))
        h = F.elu(add_noise(self.bn1_1(self.c1_1(h))))
        h = F.elu(add_noise(self.bn2_0(self.c2_0(h))))
        h = F.elu(add_noise(self.bn2_1(self.c2_1(h))))
        h = F.elu(add_noise(self.bn3_0(self.c3_0(h))))

        return self.l4(h)
Example #40
File: net.py  Project: sfl2/FaxOCR
 def __call__(self, x):
     h = self.b0(x, test=not self.train)
     h = self.b1_1(F.elu(self.c1_1(h)), test=not self.train)
     h = self.b1_2(F.elu(self.c1_2(h)), test=not self.train)
     h = F.max_pooling_2d(h, 2, stride=2)
     # NOTE: b2_2 normalizes both conv outputs below; b2_1 may have been intended
     h = self.b2_2(F.elu(self.c2_1(h)), test=not self.train)
     h = self.b2_2(F.elu(self.c2_2(h)), test=not self.train)
     h = F.max_pooling_2d(h, 2, stride=2)
     h = self.b3_1(F.elu(self.c3_1(h)), test=not self.train)
     h = self.b3_2(F.elu(self.c3_2(h)), test=not self.train)
     h = F.max_pooling_2d(h, 2, stride=2)
     h = F.dropout(F.elu(self.f1(h)), train=self.train)
     return self.f2(h)
Example #41
File: functions.py  Project: musyoku/adgm
	def __call__(self, x):
		return F.elu(x, self.alpha)
Example #42
 def __call__(self, x, test=False):
     h = F.elu(self.c0(x))  # no bn because images from the generator may be biased?
     h = F.elu(self.bn1(self.c1(h), test=test))
     h = F.elu(self.bn2(self.c2(h), test=test))
     h = F.elu(self.bn3(self.c3(h), test=test))
     return self.l4l(h)
Example #43
 def __call__(self, vector):
     vector = self.l1(vector)
     vector = F.elu(vector)
     vector = self.l2(vector)
     return vector
Example #44
 def __call__(self, sentence):
     self.fwd.reset_state()
     fwds = list(map(self.fwd, sentence))
     self.bwd.reset_state()
     bwds = list(map(self.bwd, reversed(sentence)))[::-1]  # reversed() needs a sequence, not a map iterator
     return [F.elu(self.mix(f, b)) for f, b in zip(fwds, bwds)]
Example #45
File: test_elu.py  Project: Fhrozen/chainer
 def f(x):
     y = functions.elu(x, alpha=self.alpha)
     return y * y
Example #46
File: test_elu.py  Project: asi1024/chainer
 def forward(self, inputs, device):
     x, = inputs
     return functions.elu(x, alpha=self.alpha),
Example #47
File: nn.py  Project: musyoku/lstm
	def __call__(self, x):
		return functions.elu(x, self.alpha)
Example #48
File: test_elu.py  Project: Fhrozen/chainer
 def f(x):
     return functions.elu(x, alpha=self.alpha)