Example #1
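A pytest-style test of `F.min`: it checks that `with_index=True` returns both the minimum values and their argmin indices (verified against NumPy's `amin`/`argmin`), and that `only_index=True` returns the indices alone.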
# Imports this snippet needs; the test arguments come from pytest parametrization.
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.testing import assert_allclose


def test_min_with_index(seed, ctx, func_name, inshape, axis, keepdims):
    x = np.random.RandomState(seed).randn(*inshape).astype(np.float32)
    x = nn.Variable.from_numpy_array(x)
    # with_index=True returns both the minimum values and their argmin indices.
    with nn.context_scope(ctx), nn.auto_forward(True):
        val, idx = F.min(x, axis, keepdims, with_index=True)
    assert_allclose(val.d, np.amin(x.d, axis, keepdims=keepdims))
    shape = [a for i, a in enumerate(x.d.shape) if i not in axis] + [-1]
    assert np.all(idx.d == x.d.reshape(*shape).argmin(-1).reshape(idx.d.shape))
    # only_index=True returns the argmin indices alone.
    with nn.context_scope(ctx), nn.auto_forward(True):
        idx = F.min(x, axis, keepdims, only_index=True)
    shape = [a for i, a in enumerate(x.d.shape) if i not in axis] + [-1]
    assert np.all(idx.d == x.d.reshape(*shape).argmin(-1).reshape(idx.d.shape))
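For quick reference, a minimal standalone sketch (not part of the test above; the shape is arbitrary) of the three call styles of `F.min` the test exercises:

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array(np.random.randn(2, 3, 4).astype(np.float32))
with nn.auto_forward(True):
    val = F.min(x, axis=(1,))                        # values only
    val, idx = F.min(x, axis=(1,), with_index=True)  # values plus argmin indices
    idx = F.min(x, axis=(1,), only_index=True)       # argmin indices only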
Example #2
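One-sided Chamfer and Hausdorff distances between two point sets, computed block by block (the block size `sub_batch_size` comes from the enclosing scope) so the full pairwise-distance matrix is never materialized; `F.min` reduces each distance block row-wise.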
    def chamfer_hausdorff_oneside_dists(X0, X1):
        b0 = X0.shape[0]
        b1 = X1.shape[0]

        # Running accumulators: Chamfer sum, Hausdorff max, and row count.
        sum_ = 0
        max_ = nn.NdArray.from_numpy_array(np.array(-np.inf))
        n = 0
        for i in tqdm.tqdm(range(0, b0, sub_batch_size),
                           desc="cdist-outer-loop"):
            x0 = nn.NdArray.from_numpy_array(X0[i:i + sub_batch_size])
            norm_x0 = F.sum(x0**2.0, axis=1, keepdims=True)
            # Per-row running minimum distance, initialized to +inf.
            min_ = nn.NdArray.from_numpy_array(np.ones(x0.shape[0]) * np.inf)
            for j in tqdm.tqdm(range(0, b1, sub_batch_size),
                               desc="cdist-inner-loop"):
                x1 = nn.NdArray.from_numpy_array(X1[j:j + sub_batch_size])
                # Blockwise pairwise distances, expanded as
                # ||x0 - x1||^2 = ||x0||^2 + ||x1||^2 - 2 * <x0, x1>
                norm_x1 = F.transpose(F.sum(x1**2.0, axis=1, keepdims=True),
                                      (1, 0))
                x1_T = F.transpose(x1, (1, 0))
                x01 = F.affine(x0, x1_T)
                bpwd = (norm_x0 + norm_x1 - 2.0 * x01)**0.5
                # Fold this block's row-wise minima into the running minima.
                min_ = F.minimum2(min_, F.min(bpwd, axis=1))
            # Accumulate the Chamfer sum and Hausdorff max over this row block.
            sum_ += F.sum(min_)
            n += bpwd.shape[0]  # rows in this block (same as x0.shape[0])
            max_ = F.maximum2(max_, F.max(min_))
        ocd = sum_.data / n
        ohd = max_.data
        return ocd, ohd
Example #3
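A custom function's `forward_impl` that passes the input through unchanged and, while training, records the all-time minimum and maximum of the input into the buffers `m` and `M`, e.g. for tracking a min-max quantization range.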
    def forward_impl(self, inputs, outputs):
        x = inputs[0].data
        m = inputs[1].data
        M = inputs[2].data
        y = outputs[0].data
        y.copy_from(x)

        # Statistics are only recorded while training.
        if not self.training:
            return
        # Fold the batch min/max into the running min/max held in m and M.
        mb = F.min(x, keepdims=True)
        Mb = F.max(x, keepdims=True)
        F.minimum2(m, mb, outputs=[m])
        F.maximum2(M, Mb, outputs=[M])
Example #4
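A variant of Example #3 that tracks an exponential moving average of the batch min/max, weighted by `self.decay`, instead of the all-time extremes.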
    def forward_impl(self, inputs, outputs):
        x = inputs[0].data
        m = inputs[1].data
        M = inputs[2].data
        y = outputs[0].data
        y.copy_from(x)

        # Statistics are only updated while training.
        if not self.training:
            return
        # Exponential moving average of the batch min/max, weighted by self.decay.
        mb = F.min(x, keepdims=True)
        Mb = F.max(x, keepdims=True)
        F.identity(self.decay * m + (1 - self.decay) * mb, outputs=[m])
        F.identity(self.decay * M + (1 - self.decay) * Mb, outputs=[M])
Example #5
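Ray marching against a signed distance function: N points are sampled per ray between t0 and t1, the SDF is evaluated in `n_chunks` chunks, and the surface hit is bracketed by the first sign change along each ray; with `t_argmin=True` the method instead returns the t of the smallest SDF value, found via `F.min(..., only_index=True)`.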
    def ray_march(self, camloc, raydir, t0, t1, N, n_chunks, t_argmin=False):
        # Points computation
        BR, _ = t0.shape
        t0 = F.reshape(t0, (BR, 1, 1))
        t1 = F.reshape(t1, (BR, 1, 1))
        camloc = F.reshape(camloc, (BR, 1, 3))
        raydir = F.reshape(raydir, (BR, 1, 3))
        step = (t1 - t0) / (N - 1)
        intervals = F.reshape(F.arange(0, N), (1, N, 1))
        ts = t0 + step * intervals
        points = camloc + ts * raydir
        points = F.reshape(points, (BR * N, 3))

        # SDF computation
        sdf_points = []
        batch = (BR * N) // n_chunks
        for r in range(0, BR * N, batch):
            sdf_points.append(self.sdf(points[r:r + batch, :]))
        if n_chunks != 1:
            sdf_points = F.concatenate(*sdf_points, axis=0)
        else:
            sdf_points = sdf_points[0]
        sdf_points = F.reshape(sdf_points, (BR, N, 1))

        # Optionally return, per ray, the t whose SDF value is minimal.
        if t_argmin:
            idx_min = F.min(sdf_points, axis=1, keepdims=True, only_index=True)
            t_argmin = F.reshape(F.gather(ts, idx_min, axis=1, batch_dims=1),
                                 (BR, 1))
            return t_argmin

        # Intersection check: a hit lies between consecutive samples where the
        # SDF changes sign from non-negative to non-positive.
        points = F.reshape(points, (BR, N, 3))
        sdf_pos = F.greater_equal_scalar(sdf_points[:, :-1, :], 0)
        sdf_neg = F.less_equal_scalar(sdf_points[:, 1:, :], 0)
        mask_hit = sdf_pos * sdf_neg

        # Weight the hit mask with decreasing constants so that the argmax
        # (F.max with only_index=True) picks the FIRST sign change on each ray.
        decreasing_consts = F.reshape(F.arange(N, 1, -1), (1, N - 1, 1))
        vals = mask_hit * decreasing_consts
        idx_max = F.max(vals, axis=1, only_index=True)

        points = points[:, :-1, :]
        x_hit = F.gather(points, idx_max, axis=1, batch_dims=1)
        x_hit = F.reshape(x_hit, (BR, 3))
        mask_hit = F.greater_scalar(F.sum(mask_hit, axis=1), 0)
        mask_hit = F.reshape(mask_hit, (BR, 1))

        # The two marching points bracketing the surface: x_hit_rm0 just before
        # the sign change, x_hit_rm1 one step further along the ray.
        x_hit_rm0 = x_hit
        step = F.reshape(step, (BR, 1))
        raydir = F.reshape(raydir, (BR, 3))
        x_hit_rm1 = x_hit_rm0 + step * raydir

        return x_hit_rm0, x_hit_rm1, mask_hit
Example #6
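The quantization step of a VQ-VAE: each feature vector is assigned its nearest codebook entry via `F.min(..., only_index=True)` over squared Euclidean distances, gradients are routed around the non-differentiable lookup with a straight-through estimator, and the commitment loss and codebook perplexity are computed alongside.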
    def __call__(self, x, return_encoding_indices=False):

        # NCHW -> NHWC, then flatten to (batch * H * W, embedding_dim) rows.
        x = F.transpose(x, (0, 2, 3, 1))
        x_flat = x.reshape((-1, self.embedding_dim))

        # Squared Euclidean distance to every codebook vector, expanded as
        # ||x||^2 + ||e||^2 - 2 * <x, e> to avoid forming explicit differences.
        x_flat_squared = F.broadcast(F.sum(x_flat**2, axis=1, keepdims=True),
                                     (x_flat.shape[0], self.num_embedding))
        emb_wt_squared = F.transpose(
            F.sum(self.embedding_weight**2, axis=1, keepdims=True), (1, 0))

        distances = x_flat_squared + emb_wt_squared - 2 * \
            F.affine(x_flat, F.transpose(self.embedding_weight, (1, 0)))

        # Index of the nearest codebook entry for each flattened feature vector.
        encoding_indices = F.min(distances,
                                 only_index=True,
                                 axis=1,
                                 keepdims=True)
        encoding_indices.need_grad = False

        quantized = F.embed(
            encoding_indices.reshape(encoding_indices.shape[:-1]),
            self.embedding_weight).reshape(x.shape)

        if return_encoding_indices:
            return encoding_indices, F.transpose(quantized, (0, 3, 1, 2))

        encodings = F.one_hot(encoding_indices, (self.num_embedding, ))

        e_latent_loss = F.mean(
            F.squared_error(quantized.get_unlinked_variable(need_grad=False),
                            x))
        q_latent_loss = F.mean(
            F.squared_error(quantized,
                            x.get_unlinked_variable(need_grad=False)))
        loss = q_latent_loss + self.commitment_cost * e_latent_loss

        # Straight-through estimator: the forward pass uses the quantized
        # values, while gradients flow back to x unchanged.
        quantized = x + (quantized - x).get_unlinked_variable(need_grad=False)

        # Codebook usage perplexity: exp of the entropy of average code usage.
        avg_probs = F.mean(encodings, axis=0)
        perplexity = F.exp(-F.sum(avg_probs * F.log(avg_probs + 1.0e-10)))

        return loss, F.transpose(quantized,
                                 (0, 3, 1, 2)), perplexity, encodings
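A rough usage sketch (hypothetical: the owning class name `VectorQuantizer` and its constructor arguments are assumptions, not shown in the snippet):

# x: an NCHW feature map whose channel count equals embedding_dim
vq = VectorQuantizer(...)  # hypothetical constructor, not shown above
loss, quantized, perplexity, encodings = vq(x)
# or fetch the code indices and the quantized map only:
indices, quantized = vq(x, return_encoding_indices=True)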