Code Example #1
File: test_mv_op.py  Project: sandyhouse/Paddle
    def test_static_graph(self):
        for x_stop_gradient in [False, True]:
            for vec_stop_gradient in [False, True]:

                paddle.enable_static()

                train_program = Program()
                startup_program = Program()

                self.input_x = np.random.rand(5, 100).astype("float64")
                self.input_vec = np.random.rand(100).astype("float64")

                with program_guard(train_program, startup_program):
                    data_x = paddle.static.data("x",
                                                shape=[5, 100],
                                                dtype="float64")
                    data_vec = paddle.static.data("vec",
                                                  shape=[100],
                                                  dtype="float64")

                    data_x.stop_gradient = x_stop_gradient
                    data_vec.stop_gradient = vec_stop_gradient

                    result_vec = paddle.mv(data_x, data_vec)

                    self.place = paddle.CPUPlace()
                    exe = paddle.static.Executor(self.place)
                    res, = exe.run(
                        feed={"x": self.input_x, "vec": self.input_vec},
                        fetch_list=[result_vec])
                    z_expected = np.array(np.dot(self.input_x, self.input_vec))
                    self.assertTrue(np.allclose(res, z_expected))
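Stripped of the unittest scaffolding, the static-graph pattern exercised above reduces to the following minimal sketch (variable names are illustrative, not from the test file):

import numpy as np
import paddle

# Minimal standalone sketch of the same static-graph flow.
paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data("x", shape=[5, 100], dtype="float64")
    vec = paddle.static.data("vec", shape=[100], dtype="float64")
    out = paddle.mv(x, vec)          # (5, 100) x (100,) -> (5,)

exe = paddle.static.Executor(paddle.CPUPlace())
x_np = np.random.rand(5, 100).astype("float64")
vec_np = np.random.rand(100).astype("float64")
res, = exe.run(main_prog, feed={"x": x_np, "vec": vec_np}, fetch_list=[out])
assert np.allclose(res, x_np @ vec_np)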
Code Example #2
File: spectral_norm.py  Project: omar16100/PaddleOCR
    def compute_weight(self, module, do_power_iteration):
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        v = getattr(module, self.name + '_v')
        weight_mat = self.reshape_weight_to_matrix(weight)

        if do_power_iteration:
            with paddle.no_grad():
                for _ in range(self.n_power_iterations):
                    v.set_value(
                        F.normalize(
                            paddle.matmul(weight_mat,
                                          u,
                                          transpose_x=True,
                                          transpose_y=False),
                            axis=0,
                            epsilon=self.eps,
                        ))

                    u.set_value(
                        F.normalize(
                            paddle.matmul(weight_mat, v),
                            axis=0,
                            epsilon=self.eps,
                        ))
                if self.n_power_iterations > 0:
                    u = u.clone()
                    v = v.clone()

        sigma = paddle.dot(u, paddle.mv(weight_mat, v))
        weight = weight / sigma
        return weight
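As an aside, the sigma computed above is the power-iteration estimate u^T W v of the largest singular value of weight_mat. A minimal sketch (shapes and names are made up) of that paddle.dot/paddle.mv combination checked against NumPy:

import numpy as np
import paddle

# Hypothetical standalone check: sigma = u . (W v) should match the NumPy triple product.
W = paddle.to_tensor(np.random.rand(4, 3).astype("float64"))
u = paddle.to_tensor(np.random.rand(4).astype("float64"))
v = paddle.to_tensor(np.random.rand(3).astype("float64"))

sigma = paddle.dot(u, paddle.mv(W, v))
expected = u.numpy() @ W.numpy() @ v.numpy()
assert np.allclose(sigma.numpy(), expected)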
Code Example #3
File: test_mv_op.py  Project: sandyhouse/Paddle
        def test_shape():
            paddle.enable_static()

            self.input_x = np.random.rand(5, 100).astype("float64")
            self.input_vec = np.random.rand(100).astype("float64")

            data_x = paddle.static.data("x", shape=[5, 100], dtype="float64")
            # The vec input is deliberately 2-D here; paddle.mv only accepts a 1-D vec.
            data_vec = paddle.static.data("vec",
                                          shape=[100, 2],
                                          dtype="float64")
            result_vec = paddle.mv(data_x, data_vec)
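The 2-D vec above is intentional: paddle.mv takes a 2-D matrix and a 1-D vector, so the enclosing test (not shown in this excerpt) presumably asserts that building this graph fails. A minimal, hypothetical standalone version of the same shape check:

import unittest
import paddle

class TestMvBadVecShape(unittest.TestCase):
    def test_vec_must_be_1d(self):
        def build():
            paddle.enable_static()
            x = paddle.static.data("x", shape=[5, 100], dtype="float64")
            vec = paddle.static.data("vec", shape=[100, 2], dtype="float64")
            return paddle.mv(x, vec)

        # paddle.mv validates input ranks in static mode before adding the op.
        self.assertRaises(ValueError, build)

if __name__ == "__main__":
    unittest.main()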
Code Example #4
File: test_mv_op.py  Project: sandyhouse/Paddle
    def test_dygraph_api_out(self):
        paddle.disable_static()

        self.x_data = np.random.random((5, 100)).astype("float64")
        self.x = paddle.to_tensor(self.x_data)
        self.vec_data = np.random.random((100)).astype("float64")
        self.vec = paddle.to_tensor(self.vec_data)
        z = paddle.mv(self.x, self.vec)
        np_z = z.numpy()
        z_expected = np.array(np.dot(self.x_data, self.vec_data))
        self.assertTrue(np.allclose(np_z, z_expected))

        paddle.enable_static()
Code Example #5
File: jtnn_vae.py  Project: xueeinstein/PaddleHelix
    def dfs_assemble(self, y_tree_mess, x_mol_vecs, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node,
                     prob_decode, check_aroma):
        """DFS in subgraph assembly"""
        fa_nid = fa_node.nid if fa_node is not None else -1
        prev_nodes = [fa_node] if fa_node is not None else []

        children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
        neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
        neighbors = sorted(neighbors, key=lambda x: x.mol.GetNumAtoms(), reverse=True)
        singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
        neighbors = singletons + neighbors

        cur_amap = [(fa_nid, a2, a1) for nid, a1, a2 in fa_amap if nid == cur_node.nid]
        cands, aroma_score = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
        if len(cands) == 0 or (sum(aroma_score) < 0 and check_aroma):
            return None, cur_mol

        cand_smiles, cand_amap = zip(*cands)

        aroma_score = paddle.to_tensor(aroma_score)
        cands = [(smiles, all_nodes, cur_node) for smiles in cand_smiles]

        if len(cands) > 1:
            jtmpn_holder = JTMPN.tensorize(cands, y_tree_mess[1])
            fatoms = jtmpn_holder['fatoms']
            fbonds = jtmpn_holder['fbonds']
            agraph = jtmpn_holder['agraph']
            bgraph = jtmpn_holder['bgraph']
            scope = jtmpn_holder['scope']
            cand_vecs = self.jtmpn(fatoms, fbonds, agraph, bgraph, scope, y_tree_mess[0])
            scores = paddle.mv(cand_vecs, x_mol_vecs) + aroma_score
        else:
            scores = paddle.to_tensor([1.0])

        if prob_decode:
            probs = paddle.squeeze(F.softmax(paddle.reshape(scores, shape=[1, -1]), axis=1)) + 1e-7
            cand_idx = paddle.multinomial(probs, probs.numel())
        else:
            cand_idx = paddle.argsort(scores, descending=True)

        backup_mol = Chem.RWMol(cur_mol)
        pre_mol = cur_mol
        for i in range(cand_idx.numel()):
            cur_mol = Chem.RWMol(backup_mol)
            pred_amap = cand_amap[int(cand_idx[i].numpy())]
            new_global_amap = copy.deepcopy(global_amap)

            for nei_id, ctr_atom, nei_atom in pred_amap:
                if nei_id == fa_nid:
                    continue
                new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]

            cur_mol = attach_mols(cur_mol, children, [], new_global_amap)
            new_mol = cur_mol.GetMol()
            new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))

            if new_mol is None:
                continue

            has_error = False
            for nei_node in children:
                if nei_node.is_leaf:
                    continue
                tmp_mol, tmp_mol2 = self.dfs_assemble(y_tree_mess, x_mol_vecs, all_nodes, cur_mol, new_global_amap,
                                                      pred_amap, nei_node, cur_node, prob_decode, check_aroma)
                if tmp_mol is None:
                    has_error = True
                    if i == 0: pre_mol = tmp_mol2
                    break
                cur_mol = tmp_mol
            if not has_error: return cur_mol, cur_mol
        return None, pre_mol
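The `paddle.mv(cand_vecs, x_mol_vecs)` line above scores every candidate with one dot product against the molecule vector. A short sketch of that scoring pattern with made-up shapes (not the project's real embedding sizes):

import paddle

# Hypothetical shapes: 7 candidate embeddings of width 16 scored against one query vector.
cand_vecs = paddle.rand([7, 16])
x_mol_vecs = paddle.rand([16])
aroma_score = paddle.rand([7])

scores = paddle.mv(cand_vecs, x_mol_vecs) + aroma_score   # one score per candidate row
ranking = paddle.argsort(scores, descending=True)         # same ranking step as above
print(scores.shape, ranking.numpy())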
Code Example #6
def imresize(img, scale, antialiasing=True):
    # Now the scale should be the same for H and W
    # input: img: CHW RGB [0,1]
    # output: CHW RGB [0,1] w/o round

    in_C, in_H, in_W = img.shape
    _, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = paddle.zeros([in_C, in_H + sym_len_Hs + sym_len_He, in_W])
    img_aug[:, sym_len_Hs:sym_len_Hs + in_H, :] = img

    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = paddle.arange(sym_patch.shape[1] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 1)

    img_aug[:, :sym_len_Hs, :] = sym_patch_inv

    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = paddle.arange(sym_patch.shape[1] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 1)

    img_aug[:, sym_len_Hs + in_H:sym_len_Hs + in_H +
            sym_len_He, :] = sym_patch_inv

    out_1 = paddle.zeros([in_C, out_H, in_W])
    kernel_width = weights_H.shape[1]
    for i in range(out_H):
        idx = int(indices_H[i][0])

        out_1[0, i, :] = paddle.mv(
            img_aug[0, idx:idx + kernel_width, :].transpose([1, 0]),
            (weights_H[i]))
        out_1[1, i, :] = paddle.mv(
            img_aug[1, idx:idx + kernel_width, :].transpose([1, 0]),
            (weights_H[i]))
        out_1[2, i, :] = paddle.mv(
            img_aug[2, idx:idx + kernel_width, :].transpose([1, 0]),
            (weights_H[i]))

    # process W dimension
    # symmetric copying
    out_1_aug = paddle.zeros([in_C, out_H, in_W + sym_len_Ws + sym_len_We])
    out_1_aug[:, :, sym_len_Ws:sym_len_Ws + in_W] = out_1

    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = paddle.arange(sym_patch.shape[2] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 2)
    out_1_aug[:, :, 0:sym_len_Ws] = sym_patch_inv

    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = paddle.arange(sym_patch.shape[2] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 2)
    out_1_aug[:, :,
              sym_len_Ws + in_W:sym_len_Ws + in_W + sym_len_We] = sym_patch_inv

    out_2 = paddle.zeros([in_C, out_H, out_W])
    kernel_width = weights_W.shape[1]
    for i in range(out_W):
        idx = int(indices_W[i][0])
        out_2[0, :, i] = out_1_aug[0, :,
                                   idx:idx + kernel_width].mv(weights_W[i])
        out_2[1, :, i] = out_1_aug[1, :,
                                   idx:idx + kernel_width].mv(weights_W[i])
        out_2[2, :, i] = out_1_aug[2, :,
                                   idx:idx + kernel_width].mv(weights_W[i])

    return paddle.clip(out_2, 0, 1)
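A hedged usage sketch for the function above, assuming the module-level imports (paddle, math) and the calculate_weights_indices helper that this excerpt omits are available:

import paddle

# Hypothetical call: a random CHW image in [0, 1] downscaled by a factor of 0.5.
img = paddle.rand([3, 64, 48])
small = imresize(img, 0.5, antialiasing=True)
print(small.shape)   # expected: [3, 32, 24]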
Code Example #7
    def forward(self, inputs, _ver):
        """
        forward
        """
        x = paddle.mv(inputs, _ver)
        return x
Code Example #8
    def forward(self, x, y):
        """
        forward
        """
        x = paddle.mv(x, y)
        return x
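Both wrappers above simply forward to paddle.mv. A minimal sketch of how such a layer might be exercised (the class name SimpleMv is illustrative, not from either project):

import paddle

class SimpleMv(paddle.nn.Layer):
    """Toy layer mirroring the two forward() wrappers above."""

    def forward(self, x, y):
        return paddle.mv(x, y)

layer = SimpleMv()
x = paddle.rand([5, 100], dtype="float64")
y = paddle.rand([100], dtype="float64")
out = layer(x, y)
print(out.shape)   # [5]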