Example #1
    def forward(self, x, add_noise=False):
        h = x

        if self.blur_k is None:
            k = np.asarray([1, 2, 1]).astype('f')
            k = k[:, None] * k[None, :]
            k = k / np.sum(k)
            self.blur_k = self.xp.asarray(k)[None, None, :]
        if self.enable_blur:
            h = blur(upscale2x(h), self.blur_k)
        else:
            h = upscale2x(h)
        h = self.c0(h)

        # h should be (batch, ch, size, size)
        if add_noise:
            h = self.n0(h)

        h = F.leaky_relu(self.b0(h))
        h = F.normalize(h)

        h = self.c1(h)
        if add_noise:
            h = self.n1(h)

        h = F.leaky_relu(self.b1(h))
        h = F.normalize(h)
        return h
Example #2
    def __call__(self, x, t):
        h = self.base(x, layers=['res5'])['res5']
        self.cam = h
        h = _global_average_pooling_2d(h)
        ################################################################################
        #                  ArcFace head on top of the ResNet50 backbone
        ################################################################################
        # --------------------------- cos(theta) & phi(theta) ---------------------------
        cosine = F.linear(F.normalize(h), F.normalize(self.weight)) # fc8
        sine = F.sqrt(F.clip(1.0 - F.square(cosine), 0.0, 1.0))
        phi = cosine * cos_m - sine * sin_m
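        # cos_m, sin_m, th, mm, easy_margin, and s are assumed to be ArcFace
        # hyperparameters defined at module scope (for margin m and scale s:
        # cos_m = cos(m), sin_m = sin(m), th = cos(pi - m), mm = sin(pi - m) * m).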
        if easy_margin:
            phi = F.where(cosine.data > 0, phi, cosine)
        else:
            phi = F.where(cosine.data > th, phi, cosine - mm)
        # --------------------------- convert label to one-hot ---------------------------
        one_hot = cp.eye(10)[t].astype(cp.float32)
        one_hot = Variable(one_hot)
        # ------------- torch.where-style selection: out_i = x_i if condition_i else y_i -------------
        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
        output *= s
        ################################################################################
        #h = self.fc(h)

        return output
Example #3
        def mp_matching_func(v1, v2, w):
            """
            Implementation of m = f_m(v_1, v_2, W).
            m_k = cosine(W_k \odot v_1, W_k \odot v_2)
            Similar to multi-head attention mechanism
            :param v1: (mb, N_1, hidden_dim)
            :param v2: (mb, N_1, hidden_dim) or (mb, hidden_size)
            :param w: (head, hidden_dim)
            :return: m: (mb, N_1, head)
            """
            mb, N_1, _ = v1.shape
            # w: (hidden_dim, head)
            w = F.transpose(w, axes=(1, 0))
            # w: (1, 1, hidden_dim, head)
            w = F.expand_dims(F.expand_dims(w, axis=0), axis=0)
            # v1: (mb, N_1, hidden_dim, head)
            v1 = F.tile(w, reps=(mb, N_1, 1, 1)) * F.stack([v1] * self.head, axis=3)
            if len(v2.shape) == 3:
                v2 = F.tile(w, reps=(mb, N_1, 1, 1)) * F.stack([v2] * self.head, axis=3)
            else:
                # v2: (mb, hidden_dim) -> (mb, N_1, hidden_dim) -> (mb, N_1, hidden_dim, head)
                v2 = F.tile(w, reps=(mb, N_1, 1, 1)) * F.stack([F.stack([v2] * N_1, axis=1)] * self.head, axis=3)

            # v1/v2: (mb, N_1, hidden_dim, head)
            v1_normed = F.normalize(v1, axis=2)
            v2_normed = F.normalize(v2, axis=2)
            # cosine along hidden_dim for each matching head: (mb, N_1, head)
            sim = F.sum(v1_normed * v2_normed, axis=2)
            return sim 
Example #4
    def __call__(self, *args, **kwargs):
        with chainer.no_backprop_mode():
            ndisp = int(get_arg(args[2])) if len(args) > 2 else 64
            g0 = self._to_gray(args[0])
            g1 = self._to_gray(args[1])

        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        print("layer_1 ...\n")
        x0 = self._extract_feature(g0)
        x1 = self._extract_feature(g1)

        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

        # fast mode
        if len(self._layers2) == 0:
            print("fast mode...")
            x0 = F.normalize(x0)
            x1 = F.normalize(x1)
            #v = -su.reduce_to_vol(x0, x1, ndisp)
            v = -su.reduce_to_prodvol(x0, x1, ndisp)
            print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            return v

        # accurate mode
        print("accurate mode...")
        v = su.reduce_to_vol(
            x0, x1, ndisp,
            lambda y0, y1: self._looping(F.concat(
                (y0, y1), 1), self._layers2, F.relu, F.sigmoid))
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        return v
Example #5
    def __call__(self, x):
        bs, channel, width, height = x.shape
        assert channel == self.D
        N = width * height

        # assignment
        a = self.wb(x)
        a = F.softmax(a)
        a = F.reshape(a, (bs, self.K, N))
        a = F.stack([a] * self.D, axis=1)

        # attention
        w = F.relu(self.attn(x))
        w = F.reshape(w, (bs, 1, N))
        # stack along a new axis after the batch so the reshape keeps batches separate
        w = F.stack([w] * (self.D * self.K), axis=1)
        w = F.reshape(w, (bs, self.D, self.K, N))

        x = F.reshape(x, (bs, self.D, N))
        x = F.stack([x] * self.K, axis=2)

        _c = F.broadcast_to(
            F.stack([self.c] * N, axis=2), (bs, self.D, self.K, N))

        v = F.sum(w * a * (x - _c), axis=3)

        v = F.normalize(v, axis=2)
        v = F.reshape(v, (bs, self.D * self.K))
        v = F.normalize(v, axis=1)
        return v
Example #6
    def __call__(self, x):
        # x.shape == (batchsize, 3, 128, 64)
        batchsize = x.shape[0]
        h = F.elu(self.bn1(self.conv1_1(x)))
        h = F.elu(self.bn2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 3, 2, cover_all=False)
        h = self.conv2_1(h)
        h = self.conv2_3(h)
        h = self.conv3_1(h)
        h = self.conv3_3(h)
        h = self.conv4_1(h)
        h = self.conv4_3(h)

        h = h.reshape(batchsize, -1)
        h = F.dropout(h, ratio=0.6)
        h = F.elu(self.fc1_bn(self.fc1(h)))

        # Features in rows, normalize axis 1.
        weights = self.mean_vectors
        features = self.ball(h)
        features = F.normalize(features, eps=1e-8)
        scale = F.softplus(self.scale)
        normalized_weight = F.normalize(weights, axis=0, eps=1e-8)
        logits = F.tile(scale[None, ], (batchsize, 1)) * \
            F.matmul(features, normalized_weight)
        return logits
Example #8
    def Normal(self, xp):

        tmplist = [1]

        tmp1 = self.embedE(xp.array(tmplist, 'i'))
        print(tmp1.data)

        allE = list()
        for i in range(self.e_size):
            allE.append(i)
        allEembed = self.embedE(xp.array(allE, 'i'))
        #print allEembed.data
        allEembed = F.normalize(allEembed)

        tmp2 = self.embedE(xp.array(tmplist, 'i'))
        print(tmp2.data)

        allR = list()
        for i in range(self.r_size):
            allR.append(i)
        allRembed = self.embedR(xp.array(allR, 'i'))
        #print allRembed.data
        allRembed = F.normalize(allRembed)
        #print allRembed.shape
        return 0
Example #9
 def mp_matching_func_pairwise(v1, v2, w):
     """
     Implementation of m = f_m(v_1, v_2, W).
     m_k = cosine(W_k \odot v_1, W_k \odot v_2)
     :param v1: (mb, N_1, hidden_dim)
     :param v2: (mb, N_2, hidden_dim)
     :param w: (head, hidden_dim)
     :return: sim: (mb, N_1, N_2, head)
     """
     mb, N_1, _ = v1.shape
     N_2 = v2.shape[1]
     # w: (head, hidden_dim) -> (1, head, hidden_dim) -> (1, head, 1, hidden_dim)
     w = F.expand_dims(F.expand_dims(w, axis=0), axis=2)
     # v1: (mb, head, N_1, hidden_dim)
     v1 = F.tile(w, reps=(mb, 1, N_1, 1)) * F.stack([v1] * self.head, axis=1)
     # v2: (mb, head, N_2, hidden_dim)
     v2 = F.tile(w, reps=(mb, 1, N_2, 1)) * F.stack([v2] * self.head, axis=1)
     # v1: (mb, head, N_1, hidden_dim), normalized on hidden_dim
     v1_normed = F.normalize(v1, axis=3)
     # v2: (mb, head, N_2, hidden_dim), normalized on hidden_dim
     v2_normed = F.normalize(v2, axis=3)
     # sim: (mb, head, N_1, N_2)
     sim = F.matmul(v1_normed, F.transpose(v2_normed, axes=(0, 1, 3, 2)))
     # sim: (mb, N_1, N_2, head)
     sim = F.transpose(sim, axes=(0, 2, 3, 1))
     return sim
Example #10
def cos_sim(x, y):
    """
    Takes two Variables and returns their cosine similarity.
    Not provided by Chainer itself.
    """
    norm_x = F.normalize(F.squeeze(x, axis=(1, 2)))
    norm_y = F.normalize(F.squeeze(y, axis=(1, 2)))
    return F.batch_matmul(norm_x, norm_y, transa=True)
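
A minimal usage sketch (not part of the original snippet): the (batch, 1, 1, dim) input shape is an assumption implied by the squeezed axes, and the result is one cosine similarity per batch element.

import numpy as np
from chainer import Variable

x = Variable(np.random.randn(4, 1, 1, 8).astype(np.float32))
y = Variable(np.random.randn(4, 1, 1, 8).astype(np.float32))
sim = cos_sim(x, y)            # shape (4, 1, 1)
print(sim.data.reshape(-1))    # values lie in [-1, 1]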
Example #11
def spectral_normalize(weight, init_u):
    W = weight.reshape(weight.shape[0], -1)  #C x N
    v = F.normalize(F.matmul(W, init_u, transa=True), eps=1e-12,
                    axis=0)  #N x C * C x 1 -> N x 1
    u = F.normalize(F.matmul(W, v), eps=1e-12, axis=0)  #C x N * N x 1 -> C x 1
    sigma = F.matmul(F.matmul(u, W, transa=True),
                     v)  #1 x C * C x N * N x -> 1 x 1 (spectral norm)
    return weight / sigma
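
A minimal usage sketch (an assumption, not part of the original snippet): weight is a 2-D parameter and init_u is a persistent (out_channels, 1) power-iteration vector; after one iteration the largest singular value of the result is roughly 1, and the approximation tightens if init_u is reused across calls.

import numpy as np
from chainer import Variable

W = Variable(np.random.randn(8, 16).astype(np.float32))
u0 = np.random.randn(8, 1).astype(np.float32)
W_sn = spectral_normalize(W, u0)
print(np.linalg.svd(W_sn.data, compute_uv=False)[0])  # roughly 1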
Example #12
def cos_sim(x, y):
    if len(x.shape) > 2:
        norm_x = F.normalize(F.squeeze(F.squeeze(x, axis=(2,)), axis=(2,)))
        norm_y = F.normalize(F.squeeze(F.squeeze(y, axis=(2,)), axis=(2,)))
    else:
        norm_x = F.normalize(x)
        norm_y = F.normalize(y)
    return F.batch_matmul(norm_x, norm_y, transa=True)
Example #13
 def attention_layer(self, features, features_proj, Xp):
     h = F.expand_dims(self.w_att(Xp), 1)
     features_proj = F.normalize(features_proj, axis=-1)
     h = F.normalize(h, axis=-1)
     h_att = F.relu(features_proj + F.broadcast_to(h, features_proj.shape)) # (N, self.D, self.C) + (N, 1, self.C)
     out_att = self.w(F.reshape(h_att, (-1, self.C))) # (Nxself.D, self.C) -> (Nxself.D, 1)
     out_att = F.reshape(out_att, (-1, self.D)) # (N, self.D)
     alpha = F.softmax(out_att) # (N, self.D)
     context = F.sum(features * F.broadcast_to(F.expand_dims(alpha, 1), features.shape), axis=2) # (N, self.C, self.D) * (N, 1, self.D)
     return context, alpha
Example #14
 def __call__(self, x1, x2, eps=1e-5):
     with chainer.using_config('enable_backprop', False):
         hs1 = self.predict_single(x1)
     hs2 = self.predict_single(x2)
     xp = chainer.cuda.get_array_module(x1)
     loss = chainer.Variable(xp.array(0, 'float32'))
     for h1, h2 in zip(hs1, hs2):
         h1 = cf.normalize(h1, axis=1)
         h2 = cf.normalize(h2, axis=1)
         loss += cf.sum(
             cf.square(h1 - h2)) / (h1.shape[0] * h1.shape[2] * h1.shape[3])
     return loss
Example #15
def content_based_addressing(memory, keys, strengths):
    """ (M,n_locations,width) -> (M,N,width) -> (M,N) -> (M,N,n_locations) """
    M, n_locations, width = memory.shape
    N = keys.shape[1]
    m, k, s = memory, keys, strengths
    m = F.reshape(F.normalize(F.reshape(m, (-1, width))),
                  (M, n_locations, width))
    k = F.reshape(F.normalize(F.reshape(k, (-1, width))), (M, N, width))
    t = F.scale(F.batch_matmul(k, m, transb=True), s, axis=0)
    r = F.reshape(F.softmax(F.reshape(t, (-1, n_locations))),
                  (M, N, n_locations))
    return r
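
A minimal usage sketch with toy shapes (not part of the original snippet): the returned weighting is a softmax over memory locations, so each (batch, key) row sums to one.

import numpy as np
from chainer import Variable

memory = Variable(np.random.randn(2, 5, 4).astype(np.float32))     # (M, n_locations, width)
keys = Variable(np.random.randn(2, 3, 4).astype(np.float32))       # (M, N, width)
strengths = Variable(np.random.rand(2, 3).astype(np.float32) + 1)  # (M, N)
r = content_based_addressing(memory, keys, strengths)
print(r.shape, r.data.sum(axis=2))  # (2, 3, 5), each row sums to ~1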
Example #16
    def forward(self,
                z,
                stage,
                camera_matrices,
                z2=None,
                z3=None,
                z4=None,
                theta=None):
        # z1 and z2 are for foreground, z3 and z4 are for background
        proj_mappings = list()
        for i in range(len(camera_matrices)):
            proj_mappings.append(
                self.projection.compute_proj_idcs(camera_matrices[i]))

        proj_frustrum_idcs, proj_grid_coords = list(zip(*proj_mappings))
        if not isinstance(z, Variable):
            z = Variable(z)
        if z2 is not None and not isinstance(z2, Variable):
            z2 = Variable(z2)

        w = self.mapping(z)
        voxel = self.voxel_gen(w)
        img_feature = self.deepvoxel(
            proj_frustrum_idcs,
            proj_grid_coords,
            voxel,
            return_foreground_weight=self.use_background_generator)
        if self.use_background_generator:
            novel_feats, depth, foreground_weight = img_feature
            if z3 is None:
                z3 = Variable(self.make_hidden(z.shape[0]))
                z4 = Variable(self.make_hidden(z.shape[0]))
            w3 = self.mapping(z3)
            w4 = self.mapping(z4)
            background, background_depth = self.background_generator(
                w3, w4, theta)
            novel_feats = F.normalize(novel_feats, axis=1) + \
                          F.normalize(background, axis=1) * (1 - foreground_weight)
            depth = depth + background_depth * (1 - foreground_weight)
            print(foreground_weight.array.mean(),
                  foreground_weight.array.std(), depth.array.mean(),
                  background_depth.mean(), novel_feats.array.std(),
                  background.array.std())
        else:
            novel_feats, depth = img_feature

        if z2 is None:
            z2 = self.make_hidden(z.shape[0])
        w2 = self.mapping(z2)
        novel_img = self.style_generator(novel_feats, w2, stage)
        x_fake = F.concat([novel_img, depth], axis=1)
        return x_fake
Example #17
def max_singular_value_fully_differentiable(W, Ip=1):
    """
    Apply power iteration for the weight parameter (fully differentiable version)
    """

    xp = cuda.get_array_module(W.data)
    u = xp.random.normal(size=(1, W.shape[0])).astype(dtype="f")

    for _ in range(Ip):
        _v = F.normalize(F.matmul(u, W), eps=1e-12)
        _u = F.normalize(F.matmul(_v, F.transpose(W)), eps=1e-12)
    sigma = F.sum(F.linear(_u, F.transpose(W)) * _v)
    return sigma
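
A minimal usage sketch (not part of the original snippet): with a few power iterations the returned sigma approaches the largest singular value of W.

import numpy as np
from chainer import Variable

W = Variable(np.random.randn(16, 32).astype(np.float32))
sigma = max_singular_value_fully_differentiable(W, Ip=5)
print(float(sigma.data), np.linalg.norm(W.data, ord=2))  # the two values should be close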
Example #18
 def attention(v1, v2):
     """
     Implementation of cosine-similarity-based attention mechanism
     :param v1: (mb, N_1, hidden_dim)
     :param v2: (mb, N_2, hidden_dim)
     :return: att: (mb, N_1, N_2)
     """
     # (mb, N_1, hidden_dim) -> (mb, N_1, hidden_dim)
     v1_normed = F.normalize(v1, axis=2)
     # (mb, N_2, hidden_dim) -> (mb, N_2, hidden_dim)
     v2_normed = F.normalize(v2, axis=2)
     # (mb, N_1, N_2)
     att = F.matmul(v1_normed, F.transpose(v2_normed, axes=(0, 2, 1)))
     return att
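
A minimal usage sketch (not part of the original snippet), treating attention as a standalone function: every output entry is a cosine similarity and lies in [-1, 1].

import numpy as np
from chainer import Variable

v1 = Variable(np.random.randn(2, 3, 8).astype(np.float32))  # (mb, N_1, hidden_dim)
v2 = Variable(np.random.randn(2, 5, 8).astype(np.float32))  # (mb, N_2, hidden_dim)
att = attention(v1, v2)
print(att.shape)  # (2, 3, 5)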
Example #19
    def ocr_mapping_net(self, h_ocr_act, h_act, h_ocr_rsn, h_rsn):
        h_act = F.relu(self.act_l(h_act))
        h_act = F.normalize(h_act)

        h_rsn = F.relu(self.rsn_l(h_rsn))
        h_rsn = F.normalize(h_rsn)

        h_ocr_act = F.relu(h_ocr_act)
        h_ocr_act = F.normalize(h_ocr_act)

        h_ocr_rsn = F.relu(h_ocr_rsn)
        h_ocr_rsn = F.normalize(h_ocr_rsn)

        return h_ocr_act, h_act, h_ocr_rsn, h_rsn
Example #20
    def __call__(self, x, t):
        # Deep layers
        h1 = F.max_pooling_2d(F.relu(
            self.googlenetbn.norm1(self.googlenetbn.conv1(x))),
                              3,
                              stride=2,
                              pad=1)
        h1 = F.max_pooling_2d(F.relu(
            self.googlenetbn.norm2(self.googlenetbn.conv2(h1))),
                              3,
                              stride=2,
                              pad=1)

        h1 = self.googlenetbn.inc3a(h1)
        h1 = self.googlenetbn.inc3b(h1)
        h1 = self.googlenetbn.inc3c(h1)
        h1 = self.googlenetbn.inc4a(h1)

        h1 = self.googlenetbn.inc4b(h1)
        h1 = self.googlenetbn.inc4c(h1)
        h1 = self.googlenetbn.inc4d(h1)

        h1 = self.googlenetbn.inc4e(h1)
        h1 = self.googlenetbn.inc5a(h1)
        h1 = F.average_pooling_2d(self.googlenetbn.inc5b(h1), 7)
        h1 = self.googlenetbn.loss3_fc(h1)

        h1 = F.normalize(h1)

        # Shallow layers
        h2 = F.average_pooling_2d(x, 4, stride=4, pad=2)
        h2 = F.max_pooling_2d(F.relu(self.norm_s1(self.conv_s1(h2))),
                              5,
                              stride=4,
                              pad=1)
        h3 = F.average_pooling_2d(x, 8, stride=8, pad=4)
        h3 = F.max_pooling_2d(F.relu(self.norm_s2(self.conv_s2(h3))),
                              4,
                              stride=2,
                              pad=1)

        h23 = F.concat((h2, h3), axis=1)
        h23 = F.normalize(F.reshape(h23, (x.data.shape[0], 3072)))

        h = F.concat((h1, h23), axis=1)

        h = F.normalize(F.relu(self.fc4_1(h)))
        h = self.fc4_2(h)

        return h
Example #21
def silhouette_loss(target, prediction, num_levels=5):
    batch_size = target.shape[0]
    loss_list = []
    t2 = target[:, None, :, :]
    p2 = prediction[:, None, :, :]
    for i in range(num_levels):
        if i != 0:
            t2 = cf.average_pooling_2d(t2, 2, 2)
            p2 = cf.average_pooling_2d(p2, 2, 2)
        t3 = cf.normalize(cf.reshape(t2, (batch_size, -1)))
        p3 = cf.normalize(cf.reshape(p2, (batch_size, -1)))
        loss_list.append(cf.sum(cf.square(t3 - p3)) / batch_size)
    loss = sum(loss_list)
    return loss
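
A minimal usage sketch (not part of the original snippet): inputs of spatial size 64x64 keep every pooled level non-degenerate for the default num_levels=5.

import numpy as np
from chainer import Variable

target = Variable(np.random.rand(2, 64, 64).astype(np.float32))
prediction = Variable(np.random.rand(2, 64, 64).astype(np.float32))
loss = silhouette_loss(target, prediction)
print(float(loss.data))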
Example #22
    def test(self, filename, noscore=True):
        testres = 0
        self.prepare_data(filename=filename, train=False, noscore=noscore)

        if noscore:
            pred = pd.DataFrame()
            for j in range(self.test_T):
                x_j = Variable(self.test_X[j])
                pred_j = self.predict(x_j)
                pred_j = F.reshape(pred_j, (pred_j.data.shape[0], ))
                pred = pd.concat(
                    [pred, pd.DataFrame(np.sort(pred_j.data)[::-1]).T])
            pred.to_csv("new_results.csv", index=False)
            print("save new_results.csv !")

        else:
            for j in range(self.test_T):
                sorted_idxes = np.argsort(self.test_Y[j])[::-1]
                nthres = min(self.n_thres_cand, sorted_idxes.shape[0])
                x_j = Variable(self.test_X[j][sorted_idxes[:nthres]])
                y_j = Variable(self.test_Y[j][sorted_idxes[:nthres]])
                y_j = F.reshape(y_j, (1, y_j.shape[0]))
                # normalize output score to avoid divergence
                y_j = F.normalize(y_j)
                pred_j = self.predict(x_j)
                pred_j = F.reshape(pred_j, (pred_j.data.shape[0], ))
                testres += ndcg(y_j.data, pred_j.data, self.n_thres_cand)
            print("test_ndcg:{}".format(testres / self.test_T))
Example #23
    def __call__(self, x, subtract_mean=True):
        if subtract_mean:
            x = x - self._image_mean
#        h = super(ModifiedGoogLeNet, self).__call__(
#            x, layers=['pool5'], train=train)['pool5']
#        h = self.bn_fc(h, test=not train)
#        y = self.fc(h)
#        return y
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)
        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)
        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.bn_fc(h)
        y = self.fc(h)
        if self.normalize_output:
            y = F.normalize(y)
        return y
Example #24
    def render_normal(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = cf.concat((faces, faces[:, :, ::-1]), axis=1).data

        # normal
        faces_normal = nr.vertices_to_faces(vertices, faces)

        (bs, nf) = faces_normal.shape[:2]
        faces_normal = faces_normal.reshape((bs * nf, 3, 3))
        v10 = faces_normal[:, 0] - faces_normal[:, 1]
        v12 = faces_normal[:, 2] - faces_normal[:, 1]
        normals = cf.normalize(nr.cross(v10, v12))
        normals = normals.reshape((bs, nf, 3))

        textures = normals[:, :, None, None, None, :]
        textures = cf.tile(textures, (1, 1, 2, 2, 2, 1))

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = nr.look_at(vertices, self.eye)
        elif self.camera_mode == 'look':
            vertices = nr.look(vertices, self.eye, self.camera_direction, self.up)

        # perspective transformation
        if self.perspective:
            vertices = nr.perspective(vertices, angle=self.viewing_angle)

        # rasterization
        faces = nr.vertices_to_faces(vertices, faces)
        images = nr.rasterize(
            faces, textures, self.image_size, self.anti_aliasing, self.near, self.far, self.rasterizer_eps,
            self.background_color)
        return images
Example #25
    def check_forward(self, x_data, axis):
        eps = self.eps
        x = chainer.Variable(x_data)

        y = functions.normalize(x, eps=eps, axis=axis)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        y_expect = numpy.empty_like(self.x)
        shape = self.x.shape
        indices = []
        axis_tuple = axis if isinstance(axis, tuple) else (axis,)
        for i in six.moves.range(len(shape)):
            if i not in axis_tuple:
                indices.append(six.moves.range(shape[i]))
            else:
                indices.append([slice(None)])
        indices_tuple = list(itertools.product(*indices))
        for index in indices_tuple:
            # Note: Casting back the result of `numpy.linalg.norm` to `x.dtype`
            # because old NumPy casts it to float32 when a float16 value is
            # given.
            numerator = numpy.linalg.norm(self.x[index]).astype(x.dtype) + eps
            y_expect[index] = self.x[index] / numerator
        testing.assert_allclose(y_expect, y_data, **self.check_forward_options)
Example #26
def compact_bilinear_pooling(x, randweight):
    h = F.convolution_2d(x, randweight['W1']) * F.convolution_2d(
        x, randweight['W2'])
    h = global_average_pooling_2d(h)
    h = power_normalize(h)
    h = F.normalize(h)
    return h
Example #27
    def check_forward(self, x_data, proxy_data, labels_data):
        x = chainer.Variable(x_data)
        proxy = chainer.Variable(proxy_data)

        x = F.normalize(x)
        loss = proxy_nca_loss(x, proxy, labels_data)
        self.assertEqual(loss.dtype, np.float32)
Example #29
def proxy_nca_loss(x, proxy, labels):
    """Proxy-NCA loss function.

    Args:
        x (:class:`~chainer.Variable`):
            L2 normalized anchor points whose shape is (B, D), where B is the
            batch size and D is the number of dimensions of feature vector.
        proxy (:class:`~chainer.Variable` or :class:`~chainer.Parameter`):
            Proxies whose shape is (K, D), where K is the number of classes
            in the dataset.
        labels (:class:`numpy.ndarray`):
            Class labels associated to x. The shape is (B,) and dtype is int.
            Note that the class IDs must be 0, 1, ..., K-1.

    Returns:
        :class:`~chainer.Variable`: Loss value.

    See: `No Fuss Distance Metric Learning using Proxies \
        <http://openaccess.thecvf.com/content_ICCV_2017/papers/\
        Movshovitz-Attias_No_Fuss_Distance_ICCV_2017_paper.pdf>`_
    """
    proxy = F.normalize(proxy)
    distance = squared_distance_matrix(x, proxy)
    d_posi = distance[np.arange(len(x)), labels]

    # For each row, remove one element corresponding to the positive distance
    B, K = distance.shape  # batch size and the number of classes
    mask = np.tile(np.arange(K), (B, 1)) != labels[:, None]
    d_nega = distance[mask].reshape(B, K - 1)

    log_denominator = F.logsumexp(-d_nega, axis=1)
    loss = d_posi + log_denominator
    return F.average(loss)
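
proxy_nca_loss relies on a squared_distance_matrix helper that is not shown in this snippet. A minimal sketch of such a helper, assuming it returns the (B, K) matrix of pairwise squared Euclidean distances:

import chainer.functions as F

def squared_distance_matrix(x, proxy):
    # ||x_i - p_j||^2 = ||x_i||^2 + ||p_j||^2 - 2 <x_i, p_j>, shape (B, K)
    B, K = x.shape[0], proxy.shape[0]
    x_sq = F.broadcast_to(F.sum(F.square(x), axis=1, keepdims=True), (B, K))
    p_sq = F.broadcast_to(F.sum(F.square(proxy), axis=1)[None, :], (B, K))
    inner = F.matmul(x, proxy, transb=True)
    return x_sq + p_sq - 2 * inner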
Example #30
 def __call__(self, x, subtract_mean=True):
     if subtract_mean:
         x = x - self._image_mean
     h = F.relu(self.conv1(x))
     h = F.max_pooling_2d(h, 3, stride=2)
     h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
     h = F.relu(self.conv2_reduce(h))
     h = F.relu(self.conv2(h))
     h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
     h = F.max_pooling_2d(h, 3, stride=2)
     h = self.inc3a(h)
     h = self.inc3b(h)
     h = F.max_pooling_2d(h, 3, stride=2)
     h = self.inc4a(h)
     h = self.inc4b(h)
     h = self.inc4c(h)
     h = self.inc4d(h)
     h = self.inc4e(h)
     h = F.max_pooling_2d(h, 3, stride=2)
     h = self.inc5a(h)
     h = self.inc5b(h)
     h = F.average_pooling_2d(h, 7, stride=1)
     h = self.bn_fc(h)
     y = self.fc(h)
     if self.normalize_output:
         y = F.normalize(y, axis=1)
     return y
Example #31
def update(gen, dis, optimizer_gen, optimizer_dis, x_batch, margin):
    xp = gen.xp
    batch_size = len(x_batch)

    # from generated image
    z = xp.random.normal(0, 1, (batch_size, latent_size)).astype(np.float32)
    z = z / (xp.linalg.norm(z, axis=1, keepdims=True) + 1e-12)
    x_gen = gen(z)
    total_size = np.prod(x_gen.shape)
    y_gen, h_gen = dis(x_gen)
    h_gen = F.normalize(F.reshape(h_gen, (batch_size, -1)))
    similarity = F.sum(F.matmul(h_gen, h_gen,
                                transb=True)) / (batch_size * batch_size)
    loss_gen = F.mean_squared_error(x_gen, y_gen) + 0.1 * similarity
    loss_dis = F.sum(
        F.relu(margin * margin -
               F.batch_l2_norm_squared(x_gen - y_gen))) / total_size
    # from real image
    x = xp.asarray(x_batch)
    y, h = dis(x)
    loss_dis += F.mean_squared_error(x, y)

    gen.cleargrads()
    loss_gen.backward()
    optimizer_gen.update()

    dis.cleargrads()
    loss_dis.backward()
    optimizer_dis.update()

    return float(loss_gen.data), float(loss_dis.data)
Example #32
    def forward(self, x):
        """
        h1 : (1, 64, 112, 112)
        h2 : (1, 128, 56, 56)
        h3 : (1, 256, 28, 28)
        h4 : (1, 512, 14, 14)
        h5 : (1, 512, 7, 7)

        :param x:
        :return:
        """
        h = x
        h = F.relu((self.conv1_1(h)))
        h = F.relu((self.conv1_2(h)))
        pool1 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv2_1(pool1)))
        h = F.relu((self.conv2_2(h)))
        pool2 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv3_1(pool2)))
        h = F.relu((self.conv3_2(h)))
        h = F.relu((self.conv3_3(h)))
        pool3 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv4_1(pool3)))
        h = F.relu((self.conv4_2(h)))
        h = F.relu((self.conv4_3(h)))
        pool4 = F.max_pooling_2d(h, 2, stride=2)

        if self.texture:
            h = {
                'pool1': pool1,
                'pool2': pool2,
                'pool3': pool3,
                'pool4': pool4
            }[self.texture_layer]
            if self.cbp:
                h = F.convolution_2d(h, self.W1) * F.convolution_2d(h, self.W2)
                h = global_average_pooling_2d(h)
                if self.normalize:
                    h = power_normalize(h)
                    h = F.normalize(h)

                h = self.fc8(F.dropout(h, 0.2))
                return h
            else:
                b, ch, height, width = h.data.shape
                h = F.reshape(h, (b, ch, width * height))
                h = F.batch_matmul(h, h, transb=True) / self.xp.float32(
                    width * height)
                h = self.fc8(F.dropout(h, 0.4))
                return h
        else:
            h = F.relu((self.conv5_1(pool4)))
            h = F.relu((self.conv5_2(h)))
            h = F.relu((self.conv5_3(h)))
            h = F.max_pooling_2d(h, 2, stride=2)
            h = F.dropout(F.relu(self.fc6(h)), ratio=0.5)
            h = F.dropout(F.relu(self.fc7(h)), ratio=0.5)

            h = self.fc8(h)
            return h
Example #33
    def __call__(self, x):
        if self.normalizedW is None:
            if self.norm_to_one:
                self.normalizedW = F.normalize(self.vocab_freq * self.W)
            else:
                self.normalizedW = self.norm_by_freq(self.vocab_freq)

        return embed_id.embed_id(x, self.normalizedW, ignore_label=self.ignore_label)
Example #34
    def check_eps(self, x_data):
        x = chainer.Variable(x_data)

        y = functions.normalize(x, axis=self.axis)
        self.assertEqual(y.data.dtype, numpy.float32)
        y_data = cuda.to_cpu(y.data)

        y_expect = numpy.zeros_like(self.x)
        testing.assert_allclose(y_expect, y_data)
Example #35
File: utils.py Project: kzky/works
def grad_norm_hook(optimizer):
    for p in optimizer.target.params():
        grad_data = p.grad
        shape = grad_data.shape
        reshape = (1, np.prod(shape), )

        grad = Variable(grad_data)
        grad_reshape = F.reshape(grad, reshape)
        grad_norm = F.normalize(grad_reshape)
        grad_norm_reshape = F.reshape(grad_norm, shape)

        p.grad = grad_norm_reshape.data
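
A minimal usage sketch (not part of the original snippet): after a backward pass, the hook rescales every parameter gradient to unit L2 norm.

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

model = L.Linear(3, 2)
optimizer = chainer.optimizers.SGD(lr=0.01)
optimizer.setup(model)

x = np.random.randn(4, 3).astype(np.float32)
loss = F.sum(model(x) ** 2)
model.cleargrads()
loss.backward()
grad_norm_hook(optimizer)

for p in model.params():
    print(np.linalg.norm(p.grad))  # ~1.0 for each parameter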
Example #36
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)

        y = functions.normalize(x)
        self.assertEqual(y.data.dtype, numpy.float32)
        y_data = cuda.to_cpu(y.data)

        y_expect = numpy.empty_like(self.x)
        for n in six.moves.range(len(self.x)):
            y_expect[n] = self.x[n] / numpy.linalg.norm(self.x[n])

        testing.assert_allclose(y_expect, y_data)
Example #37
def look_at(vertices, eye, at=None, up=None):
    """
    "Look at" transformation of vertices.
    """
    assert (vertices.ndim == 3)

    xp = chainer.cuda.get_array_module(vertices)
    batch_size = vertices.shape[0]
    if at is None:
        at = xp.array([0, 0, 0], 'float32')
    if up is None:
        up = xp.array([0, 1, 0], 'float32')

    if isinstance(eye, list) or isinstance(eye, tuple):
        eye = xp.array(eye, 'float32')
    if eye.ndim == 1:
        eye = cf.tile(eye[None, :], (batch_size, 1))
    if at.ndim == 1:
        at = cf.tile(at[None, :], (batch_size, 1))
    if up.ndim == 1:
        up = cf.tile(up[None, :], (batch_size, 1))

    # create new axes
    z_axis = cf.normalize(at - eye)
    x_axis = cf.normalize(neural_renderer.cross(up, z_axis))
    y_axis = cf.normalize(neural_renderer.cross(z_axis, x_axis))

    # create rotation matrix: [bs, 3, 3]
    r = cf.concat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), axis=1)
    if r.shape[0] != vertices.shape[0]:
        r = cf.broadcast_to(r, (vertices.shape[0], 3, 3))

    # apply
    # [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
    if vertices.shape != eye.shape:
        eye = cf.broadcast_to(eye[:, None, :], vertices.shape)
    vertices = vertices - eye
    vertices = cf.matmul(vertices, r, transb=True)

    return vertices
Example #38
    def __call__(self, x):
        """Normalize input and scale it.

        Args:
            x (chainer.Variable): A variable holding 4-dimensional array.
                Its :obj:`dtype` is :obj:`numpy.float32`.

        Returns:
            chainer.Variable:
            The shape and :obj:`dtype` are same as those of input.
        """

        x = F.normalize(x, eps=self.eps, axis=1)
        scale = F.broadcast_to(self.scale[:, np.newaxis, np.newaxis], x.shape)
        return x * scale
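
A minimal sketch of the enclosing link (an assumption, not part of the original snippet), with scale held as a learnable per-channel parameter as in the L2-normalization layer used by SSD-style detectors:

import numpy as np
import chainer

class Normalize(chainer.Link):

    def __init__(self, n_channel, initial=20.0, eps=1e-5):
        super(Normalize, self).__init__()
        self.eps = eps
        with self.init_scope():
            # one learnable scale per channel, broadcast over H and W in __call__
            self.scale = chainer.Parameter(
                np.full(n_channel, initial, dtype=np.float32))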
Example #39
def lighting(
        faces, textures, intensity_ambient=0.5, intensity_directional=0.5, color_ambient=(1, 1, 1),
        color_directional=(1, 1, 1), direction=(0, 1, 0)):
    xp = chainer.cuda.get_array_module(faces)
    bs, nf = faces.shape[:2]

    # arguments
    if isinstance(color_ambient, tuple) or isinstance(color_ambient, list):
        color_ambient = xp.array(color_ambient, 'float32')
    if isinstance(color_directional, tuple) or isinstance(color_directional, list):
        color_directional = xp.array(color_directional, 'float32')
    if isinstance(direction, tuple) or isinstance(direction, list):
        direction = xp.array(direction, 'float32')
    if color_ambient.ndim == 1:
        color_ambient = cf.broadcast_to(color_ambient[None, :], (bs, 3))
    if color_directional.ndim == 1:
        color_directional = cf.broadcast_to(color_directional[None, :], (bs, 3))
    if direction.ndim == 1:
        direction = cf.broadcast_to(direction[None, :], (bs, 3))

    # create light
    light = xp.zeros((bs, nf, 3), 'float32')

    # ambient light
    if intensity_ambient != 0:
        light = light + intensity_ambient * cf.broadcast_to(color_ambient[:, None, :], light.shape)

    # directional light
    if intensity_directional != 0:
        faces = faces.reshape((bs * nf, 3, 3))
        v10 = faces[:, 0] - faces[:, 1]
        v12 = faces[:, 2] - faces[:, 1]
        normals = cf.normalize(neural_renderer.cross(v10, v12))
        normals = normals.reshape((bs, nf, 3))

        if direction.ndim == 2:
            direction = cf.broadcast_to(direction[:, None, :], normals.shape)
        cos = cf.relu(cf.sum(normals * direction, axis=2))
        light = (
            light + intensity_directional * cfmath.mul(*cf.broadcast(color_directional[:, None, :], cos[:, :, None])))

    # apply
    light = cf.broadcast_to(light[:, :, None, None, None, :], textures.shape)
    textures = textures * light
    return textures
Example #40
    def _compute_loss_with_noise(self, x_l, y_l, x_u):
        x_l_0 = x_l
        x_l_grad = Variable(x_l.grad)
        shape = x_l_grad.shape
        bs = shape[0]
        d = np.prod(shape[1:])
        noise = F.reshape(F.normalize(F.reshape(x_l_grad, (bs, d))), shape)
        x_l_noise = x_l + noise * 0.1

        # label confidence loss: keep predictions consistent between the
        # perturbed input and the clean copy
        h = self.ae.encoder(x_l_noise)
        y = self.ae.mlp(h,)

        h = self.ae.encoder(x_l_0)
        y_0 = self.ae.mlp(h,)
        l_lc_l = 0
        l_lc_l += F.mean_squared_error(y_0, y)

        loss = l_lc_l * self.lambda_
        return loss
Example #41
    def check_forward(self, x_data, axis):
        eps = self.eps
        x = chainer.Variable(x_data)

        y = functions.normalize(x, eps=eps, axis=axis)
        self.assertEqual(y.data.dtype, numpy.float32)
        y_data = cuda.to_cpu(y.data)

        y_expect = numpy.empty_like(self.x)
        shape = self.x.shape
        indices = []
        for i in six.moves.range(len(shape)):
            if i != axis:
                indices.append(six.moves.range(shape[i]))
            else:
                indices.append([slice(None)])
        indices_tuple = list(itertools.product(*indices))
        for index in indices_tuple:
            numerator = numpy.linalg.norm(self.x[index]) + eps
            y_expect[index] = self.x[index] / numerator
        testing.assert_allclose(y_expect, y_data)
Example #42
 def f(x):
     return functions.normalize(x, eps=self.eps, axis=axis)