def recv(self, reduce_func, msg, recv_mode="dst"):
    """Recv message and aggregate the message by reduce_func

    The UDF reduce_func function should have the following format.

    .. code-block:: python

        def reduce_func(msg):
            '''
            Args:
                msg: A LodTensor or a dictionary of LodTensor whose batch_size
                     equals the number of unique dst nodes.

            Return:
                It should return a tensor with shape (batch_size, out_dims).
                The batch size should be the same as msg.
            '''
            pass

    Args:
        reduce_func: A callable UDF reduce function.

        msg: A tensor or a dictionary of tensor created by the send function.

    Return:
        A tensor with shape (num_nodes, out_dims). The output for nodes with
        no message will be zeros.
    """
    if not self._is_tensor:
        raise ValueError("You must call Graph.tensor()")

    if not isinstance(msg, dict):
        raise TypeError(
            "The input of msg should be a dict, but receives a %s" %
            (type(msg)))

    if not callable(reduce_func):
        raise TypeError("reduce_func should be callable")

    src, dst, eid = self.sorted_edges(sort_by=recv_mode)
    msg = op.RowReader(msg, eid)

    if recv_mode == "dst":
        uniq_ind, segment_ids = paddle.unique(dst, return_inverse=True)
    elif recv_mode == "src":
        uniq_ind, segment_ids = paddle.unique(src, return_inverse=True)

    bucketed_msg = Message(msg, segment_ids)
    output = reduce_func(bucketed_msg)
    output_dim = output.shape[-1]
    init_output = paddle.zeros(
        shape=[self._num_nodes, output_dim], dtype=output.dtype)
    final_output = scatter(init_output, uniq_ind, output)

    return final_output
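# Hedged usage sketch (not from the source): the unique/scatter pattern that
# recv() relies on can be reproduced with plain paddle ops. The toy dst/msg
# tensors and the 4-node graph below are hypothetical.
import paddle

dst = paddle.to_tensor([0, 2, 2, 3, 0])   # destination node of each edge
msg = paddle.rand([5, 8])                  # one message vector per edge

# Group edges by destination, as recv() does with paddle.unique.
uniq_dst, segment_ids = paddle.unique(dst, return_inverse=True)

# Sum messages per unique destination (overwrite=False makes paddle.scatter
# accumulate updates that share an index), i.e. a "sum" reduce_func.
summed = paddle.scatter(paddle.zeros([uniq_dst.shape[0], msg.shape[1]]),
                        segment_ids, msg, overwrite=False)

# Scatter the per-destination sums back into a dense (num_nodes, dim) output;
# nodes that received no message stay zero, matching the recv() contract.
output = paddle.scatter(paddle.zeros([4, msg.shape[1]]), uniq_dst, summed)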
def forward(self,
            input_ids,
            attention_mask=None,
            decoder_input_ids=None,
            decoder_attention_mask=None,
            encoder_output=None,
            use_cache=False,
            cache=None):
    output = self.bart(input_ids, attention_mask, decoder_input_ids,
                       decoder_attention_mask, encoder_output, use_cache,
                       cache)
    if use_cache:
        output = output[0]
    eos_mask = paddle.cast(input_ids == self.bart.config['eos_token_id'],
                           dtype='int64')
    if len(paddle.unique(paddle.sum(eos_mask, axis=1))) > 1:
        raise ValueError(
            'All examples must have the same number of <eos> tokens.')
    output_shape = paddle.shape(output)
    # TODO(gongenlei): support bool tensor index
    output = output.masked_select(
        eos_mask.unsqueeze(-1).astype('bool').tile([1, 1, output_shape[-1]]))
    sentence_representation = output.reshape(
        [output_shape[0], -1, output_shape[-1]])[:, -1, :]
    logits = self.classifier(sentence_representation)
    return logits
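# Hedged illustration (not from the source): how the paddle.unique check above
# flags batches with inconsistent <eos> counts. The token ids below, and using
# 2 as the eos id, are made up for the example.
import paddle

input_ids = paddle.to_tensor([[5, 6, 2, 0], [7, 2, 0, 0]])
eos_mask = paddle.cast(input_ids == 2, dtype='int64')

# Each row contains exactly one eos token, so the per-row sums collapse to a
# single unique value and the ValueError above would not be raised.
assert len(paddle.unique(paddle.sum(eos_mask, axis=1))) == 1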
def test_sample_result_fisher_yates_sampling(self):
    paddle.disable_static()
    if fluid.core.is_compiled_with_cuda():
        row = paddle.to_tensor(self.row)
        colptr = paddle.to_tensor(self.colptr)
        nodes = paddle.to_tensor(self.nodes)
        perm_buffer = paddle.to_tensor(self.edges_id)

        out_neighbors, out_count = paddle.incubate.graph_sample_neighbors(
            row,
            colptr,
            nodes,
            perm_buffer=perm_buffer,
            sample_size=self.sample_size,
            flag_perm_buffer=True)
        out_count_cumsum = paddle.cumsum(out_count)
        for i in range(len(out_count)):
            if i == 0:
                neighbors = out_neighbors[0:out_count_cumsum[i]]
            else:
                neighbors = out_neighbors[out_count_cumsum[i - 1]:
                                          out_count_cumsum[i]]
            # Ensure the correct sample size.
            self.assertTrue(
                out_count[i] == self.sample_size or
                out_count[i] == len(self.dst_src_dict[self.nodes[i]]))
            # Ensure no repetitive sample neighbors.
            self.assertTrue(
                neighbors.shape[0] == paddle.unique(neighbors).shape[0])
            # Ensure the correct sample neighbors.
            in_neighbors = np.isin(neighbors.numpy(),
                                   self.dst_src_dict[self.nodes[i]])
            self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
def forward(self, inputs):
    """
    forward
    """
    x = paddle.unique(inputs, axis=self.axis)
    return x
def test_x_dtype():
    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float16')
        result = paddle.unique(x)

self.assertRaises(TypeError, test_x_dtype)
def func_sample_result(self):
    paddle.disable_static()
    row = paddle.to_tensor(self.row)
    colptr = paddle.to_tensor(self.colptr)
    nodes = paddle.to_tensor(self.nodes)

    edge_src, edge_dst, sample_index, reindex_nodes = \
        paddle.incubate.graph_khop_sampler(row, colptr, nodes,
                                           self.sample_sizes,
                                           return_eids=False)
    # Reindex edge_src and edge_dst to original index.
    edge_src = edge_src.reshape([-1])
    edge_dst = edge_dst.reshape([-1])
    sample_index = sample_index.reshape([-1])

    for i in range(len(edge_src)):
        edge_src[i] = sample_index[edge_src[i]]
        edge_dst[i] = sample_index[edge_dst[i]]

    for n in self.nodes:
        edge_src_n = edge_src[edge_dst == n]
        if edge_src_n.shape[0] == 0:
            continue
        # Ensure no repetitive sample neighbors.
        self.assertTrue(
            edge_src_n.shape[0] == paddle.unique(edge_src_n).shape[0])
        # Ensure the correct sample size.
        self.assertTrue(
            edge_src_n.shape[0] == self.sample_sizes[0] or
            edge_src_n.shape[0] == len(self.dst_src_dict[n]))
        in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n])
        # Ensure the correct sample neighbors.
        self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
def test_sample_result(self):
    paddle.disable_static()
    row = paddle.to_tensor(self.row)
    colptr = paddle.to_tensor(self.colptr)
    nodes = paddle.to_tensor(self.nodes)

    out_neighbors, out_count = paddle.incubate.graph_sample_neighbors(
        row, colptr, nodes, sample_size=self.sample_size)
    out_count_cumsum = paddle.cumsum(out_count)
    for i in range(len(out_count)):
        if i == 0:
            neighbors = out_neighbors[0:out_count_cumsum[i]]
        else:
            neighbors = out_neighbors[out_count_cumsum[i - 1]:
                                      out_count_cumsum[i]]
        # Ensure the correct sample size.
        self.assertTrue(
            out_count[i] == self.sample_size or
            out_count[i] == len(self.dst_src_dict[self.nodes[i]]))
        # Ensure no repetitive sample neighbors.
        self.assertTrue(
            neighbors.shape[0] == paddle.unique(neighbors).shape[0])
        # Ensure the correct sample neighbors.
        in_neighbors = np.isin(neighbors.numpy(),
                               self.dst_src_dict[self.nodes[i]])
        self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
def test_dygraph_api_out(self):
    paddle.disable_static()
    x_data = np.random.randint(0, 10, (120))
    x = paddle.to_tensor(x_data)
    out = paddle.unique(x)
    expected_out = np.unique(x_data)
    self.assertTrue((out.numpy() == expected_out).all(), True)
    paddle.enable_static()
def nearest_neighbor_features_per_object(reference_embeddings, query_embeddings, reference_labels, k_nearest_neighbors, gt_ids=None, n_chunks=100, **cfg): """Calculates the distance to the nearest neighbor per object. For every pixel of query_embeddings calculate the distance to the nearest neighbor in the (possibly subsampled) reference_embeddings per object. Args: reference_embeddings: Tensor of shape [height, width, embedding_dim], the embedding vectors for the reference frame. query_embeddings: Tensor of shape [n_query_images, height, width, embedding_dim], the embedding vectors for the query frames. reference_labels: Tensor of shape [height, width, 1], the class labels of the reference frame. max_neighbors_per_object: Integer, the maximum number of candidates for the nearest neighbor query per object after subsampling, or 0 for no subsampling. k_nearest_neighbors: Integer, the number of nearest neighbors to use. gt_ids: Int tensor of shape [n_objs] of the sorted unique ground truth ids in the first frame. If None, it will be derived from reference_labels. n_chunks: Integer, the number of chunks to use to save memory (set to 1 for no chunking). Returns: nn_features: A float32 tensor of nearest neighbor features of shape [n_query_images, height, width, n_objects, feature_dim]. gt_ids: An int32 tensor of the unique sorted object ids present in the reference labels. """ # reference_embeddings = reference_embeddings.detach().cpu() # query_embeddings = query_embeddings.detach().cpu() # reference_labels = reference_labels.detach().cpu() assert (reference_embeddings.shape[:2] == reference_labels.shape[:2]) h, w, _ = query_embeddings.shape reference_labels_flat = reference_labels.reshape([-1]) if gt_ids is None: ref_obj_ids = paddle.unique(reference_labels_flat)[-1] ref_obj_ids = np.arange(0, ref_obj_ids + 1) gt_ids = paddle.to_tensor(ref_obj_ids) gt_ids = int_(gt_ids) else: gt_ids = int_(paddle.arange(0, gt_ids + 1)) embedding_dim = query_embeddings.shape[-1] query_embeddings_flat = query_embeddings.reshape([-1, embedding_dim]) reference_embeddings_flat = reference_embeddings.reshape( [-1, embedding_dim]) nn_features = _nearest_neighbor_features_per_object_in_chunks( reference_embeddings_flat, query_embeddings_flat, reference_labels_flat, gt_ids, k_nearest_neighbors, n_chunks, **cfg) nn_features_dim = nn_features.shape[-1] nn_features = nn_features.reshape( [1, h, w, gt_ids.shape[0], nn_features_dim]) return nn_features.cuda(), gt_ids
def forward(self, inputs):
    """
    forward
    """
    x, y, z, w = paddle.unique(inputs,
                               axis=self.axis,
                               return_index=True,
                               return_inverse=True,
                               return_counts=True)
    return x + y + z + w
def forward(self, inputs):
    """
    forward
    """
    x, y = paddle.unique(inputs,
                         axis=self.axis,
                         return_index=self.return_index,
                         return_inverse=self.return_inverse,
                         return_counts=self.return_counts)
    return x + y
def forward(self, input):
    """
    forward
    """
    x = paddle.unique(input,
                      return_index=self.config['return_index'],
                      return_inverse=self.config['return_inverse'],
                      return_counts=self.config['return_counts'],
                      axis=self.config['axis'],
                      dtype=self.config['dtype'])
    return x
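# For reference, a standalone check (not from the source) of what the
# paddle.unique flags exercised by the wrappers above return for a small 1-D
# input; the expected values follow np.unique's conventions.
import paddle

x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
out, index, inverse, counts = paddle.unique(
    x, return_index=True, return_inverse=True, return_counts=True)
# out:     [1, 2, 3, 5]        sorted unique values
# index:   [3, 0, 1, 4]        first occurrence of each unique value in x
# inverse: [1, 2, 2, 0, 3, 2]  maps each element of x back into out
# counts:  [1, 1, 3, 1]        number of occurrences of each unique value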
def sample(self, labels):
    n_sample = self.n_sample
    n_tries = 2 * n_sample
    batch_size = labels.shape[0]

    with paddle.no_grad():
        neg_samples = paddle.unique(
            paddle.multinomial(self.dist, n_tries, replacement=True))
        true_log_probs = paddle.gather(self.log_q, labels.flatten())
        true_log_probs = paddle.reshape(true_log_probs,
                                        shape=[batch_size, -1])
        samp_log_probs = paddle.gather(self.log_q, neg_samples)
        return true_log_probs, samp_log_probs, neg_samples
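# Hedged sketch (not from the source) of the multinomial + unique step used by
# sample() above: draw candidates with replacement, then deduplicate them. The
# uniform 10-way distribution and the draw count of 8 are arbitrary.
import paddle

dist = paddle.ones([10]) / 10.0
candidates = paddle.multinomial(dist, 8, replacement=True)
neg_samples = paddle.unique(candidates)   # duplicates collapse to one sample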
def test_static_graph(self):
    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        x = paddle.fluid.data(name='x', shape=[3, 2], dtype='float64')
        unique, inverse, counts = paddle.unique(
            x, return_inverse=True, return_counts=True, axis=0)
        place = paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        x_np = np.array([[1, 2], [3, 4], [1, 2]]).astype('float64')
        result = exe.run(feed={"x": x_np},
                         fetch_list=[unique, inverse, counts])
        np_unique, np_inverse, np_counts = np.unique(
            x_np, return_inverse=True, return_counts=True, axis=0)
        self.assertTrue(np.allclose(result[0], np_unique))
        self.assertTrue(np.allclose(result[1], np_inverse))
        self.assertTrue(np.allclose(result[2], np_counts))
def test_dygraph_attr_dtype(self):
    paddle.disable_static()
    x_data = np.random.randint(0, 10, (120))
    x = paddle.to_tensor(x_data)
    out, indices, inverse, counts = paddle.unique(x,
                                                  return_index=True,
                                                  return_inverse=True,
                                                  return_counts=True,
                                                  dtype="int32")
    expected_out, np_indices, np_inverse, np_counts = np.unique(
        x_data, return_index=True, return_inverse=True, return_counts=True)
    self.assertTrue((out.numpy() == expected_out).all(), True)
    self.assertTrue((indices.numpy() == np_indices).all(), True)
    self.assertTrue((inverse.numpy() == np_inverse).all(), True)
    self.assertTrue((counts.numpy() == np_counts).all(), True)
    paddle.enable_static()
def func_uva_sample_result(self): paddle.disable_static() if paddle.fluid.core.is_compiled_with_cuda(): row = None if fluid.framework.in_dygraph_mode(): row = paddle.fluid.core.eager.to_uva_tensor( self.row.astype(self.row.dtype), 0) sorted_eid = paddle.fluid.core.eager.to_uva_tensor( self.sorted_eid.astype(self.sorted_eid.dtype), 0) else: row = paddle.fluid.core.to_uva_tensor( self.row.astype(self.row.dtype)) sorted_eid = paddle.fluid.core.to_uva_tensor( self.sorted_eid.astype(self.sorted_eid.dtype)) colptr = paddle.to_tensor(self.colptr) nodes = paddle.to_tensor(self.nodes) edge_src, edge_dst, sample_index, reindex_nodes, edge_eids = \ paddle.incubate.graph_khop_sampler(row, colptr, nodes, self.sample_sizes, sorted_eids=sorted_eid, return_eids=True) edge_src = edge_src.reshape([-1]) edge_dst = edge_dst.reshape([-1]) sample_index = sample_index.reshape([-1]) for i in range(len(edge_src)): edge_src[i] = sample_index[edge_src[i]] edge_dst[i] = sample_index[edge_dst[i]] for n in self.nodes: edge_src_n = edge_src[edge_dst == n] if edge_src_n.shape[0] == 0: continue self.assertTrue( edge_src_n.shape[0] == paddle.unique(edge_src_n).shape[0]) self.assertTrue( edge_src_n.shape[0] == self.sample_sizes[0] or edge_src_n.shape[0] == len(self.dst_src_dict[n])) in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n]) self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
def test_dygraph_api_attr(self):
    paddle.disable_static()
    x_data = np.random.random((3, 5, 5)).astype("float32")
    x = paddle.to_tensor(x_data)
    out, index, inverse, counts = paddle.unique(x,
                                                return_index=True,
                                                return_inverse=True,
                                                return_counts=True,
                                                axis=0)
    np_out, np_index, np_inverse, np_counts = np.unique(x_data,
                                                        return_index=True,
                                                        return_inverse=True,
                                                        return_counts=True,
                                                        axis=0)
    self.assertTrue((out.numpy() == np_out).all(), True)
    self.assertTrue((index.numpy() == np_index).all(), True)
    self.assertTrue((inverse.numpy() == np_inverse).all(), True)
    self.assertTrue((counts.numpy() == np_counts).all(), True)
    paddle.enable_static()
def apply_single(self, pred, tagmap): if tagmap.numpy()[:, :, 3].sum() == 0: return (paddle.zeros([1]), paddle.zeros([1])) nonzero = paddle.nonzero(tagmap[:, :, 3] > 0) if nonzero.shape[0] == 0: return (paddle.zeros([1]), paddle.zeros([1])) p_inds = paddle.unique(nonzero[:, 0]) num_person = p_inds.shape[0] if num_person == 0: return (paddle.zeros([1]), paddle.zeros([1])) pull = 0 tagpull_num = 0 embs_all = [] person_unvalid = 0 for person_idx in p_inds.numpy(): valid_single = tagmap[person_idx.item()] validkpts = paddle.nonzero(valid_single[:, 3] > 0) valid_single = paddle.index_select(valid_single, validkpts) emb = paddle.gather_nd(pred, valid_single[:, :3]) if emb.shape[0] == 1: person_unvalid += 1 mean = paddle.mean(emb, axis=0) embs_all.append(mean) pull += paddle.mean(paddle.pow(emb - mean, 2), axis=0) tagpull_num += emb.shape[0] pull /= max(num_person - person_unvalid, 1) if num_person < 2: return pull, paddle.zeros([1]) embs_all = paddle.stack(embs_all) A = embs_all.expand([num_person, num_person]) B = A.transpose([1, 0]) diff = A - B diff = paddle.pow(diff, 2) push = paddle.exp(-diff) push = paddle.sum(push) - num_person push /= 2 * num_person * (num_person - 1) return pull, push
def test_tensor_patch_method(self): paddle.disable_static() x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype) y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype) z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype) x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) z = paddle.to_tensor(z_np) a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]]) b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]]) # 1. Unary operation for Tensor self.assertEqual(x.dim(), 2) self.assertEqual(x.ndimension(), 2) self.assertEqual(x.ndim, 2) self.assertEqual(x.size(), [2, 3]) self.assertTrue( np.array_equal(x.sigmoid().numpy(), fluid.layers.sigmoid(x).numpy())) self.assertTrue( np.array_equal(x.logsigmoid().numpy(), fluid.layers.logsigmoid(x).numpy())) self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy())) self.assertTrue( np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy())) self.assertTrue( np.array_equal(x.atan().numpy(), paddle.atan(x).numpy())) self.assertTrue( np.array_equal(x.tanh_shrink().numpy(), fluid.layers.tanh_shrink(x).numpy())) self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy())) m = x.abs() self.assertTrue( np.array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy())) self.assertTrue( np.array_equal(m.rsqrt().numpy(), paddle.rsqrt(m).numpy())) self.assertTrue( np.array_equal(x.ceil().numpy(), paddle.ceil(x).numpy())) self.assertTrue( np.array_equal(x.floor().numpy(), paddle.floor(x).numpy())) self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy())) self.assertTrue( np.array_equal(x.acos().numpy(), paddle.acos(x).numpy())) self.assertTrue( np.array_equal(x.asin().numpy(), paddle.asin(x).numpy())) self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy())) self.assertTrue( np.array_equal(x.sinh().numpy(), paddle.sinh(x).numpy())) self.assertTrue( np.array_equal(x.cosh().numpy(), paddle.cosh(x).numpy())) self.assertTrue( np.array_equal(x.round().numpy(), paddle.round(x).numpy())) self.assertTrue( np.array_equal(x.reciprocal().numpy(), paddle.reciprocal(x).numpy())) self.assertTrue( np.array_equal(x.square().numpy(), paddle.square(x).numpy())) self.assertTrue( np.array_equal(x.softplus().numpy(), fluid.layers.softplus(x).numpy())) self.assertTrue( np.array_equal(x.softsign().numpy(), fluid.layers.softsign(x).numpy())) self.assertTrue( np.array_equal(x.rank().numpy(), paddle.rank(x).numpy())) self.assertTrue( np.array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy())) m = paddle.to_tensor(np.random.uniform(1, 2, [3, 3]), 'float32') m = m.matmul(m.t()) self.assertTrue( np.array_equal(m.cholesky().numpy(), paddle.cholesky(m).numpy())) self.assertTrue( np.array_equal(x.is_empty().numpy(), paddle.is_empty(x).numpy())) self.assertTrue( np.array_equal(x.isfinite().numpy(), paddle.isfinite(x).numpy())) self.assertTrue( np.array_equal( x.cast('int32').numpy(), paddle.cast(x, 'int32').numpy())) self.assertTrue( np.array_equal( x.expand([3, 2, 3]).numpy(), paddle.expand(x, [3, 2, 3]).numpy())) self.assertTrue( np.array_equal( x.tile([2, 2]).numpy(), paddle.tile(x, [2, 2]).numpy())) self.assertTrue( np.array_equal(x.flatten().numpy(), paddle.flatten(x).numpy())) index = paddle.to_tensor([0, 1]) self.assertTrue( np.array_equal( x.gather(index).numpy(), paddle.gather(x, index).numpy())) index = paddle.to_tensor([[0, 1], [1, 2]]) self.assertTrue( np.array_equal( x.gather_nd(index).numpy(), paddle.gather_nd(x, index).numpy())) self.assertTrue( np.array_equal( x.reverse([0, 1]).numpy(), paddle.reverse(x, [0, 1]).numpy())) self.assertTrue( 
np.array_equal( a.reshape([3, 2]).numpy(), paddle.reshape(a, [3, 2]).numpy())) self.assertTrue( np.array_equal( x.slice([0, 1], [0, 0], [1, 2]).numpy(), paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy())) self.assertTrue( np.array_equal( x.split(2)[0].numpy(), paddle.split(x, 2)[0].numpy())) m = paddle.to_tensor( np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype)) self.assertTrue( np.array_equal( m.squeeze([]).numpy(), paddle.squeeze(m, []).numpy())) self.assertTrue( np.array_equal( m.squeeze([1, 2]).numpy(), paddle.squeeze(m, [1, 2]).numpy())) m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32') self.assertTrue( np.array_equal(m.unique()[0].numpy(), paddle.unique(m)[0].numpy())) self.assertTrue( np.array_equal(m.unique_with_counts()[2], paddle.unique_with_counts(m)[2])) self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0]))) self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0))) self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1))) self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1))) m = paddle.to_tensor(1) self.assertTrue(np.array_equal(m.increment(), paddle.increment(m))) m = x.abs() self.assertTrue(np.array_equal(m.log(), paddle.log(m))) self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2))) self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x))) # 2. Binary operation self.assertTrue( np.array_equal( x.matmul(y, True, False).numpy(), paddle.matmul(x, y, True, False).numpy())) self.assertTrue( np.array_equal( x.norm(p='fro', axis=[0, 1]).numpy(), paddle.norm(x, p='fro', axis=[0, 1]).numpy())) self.assertTrue( np.array_equal(x.dist(y).numpy(), paddle.dist(x, y).numpy())) self.assertTrue( np.array_equal(x.cross(y).numpy(), paddle.cross(x, y).numpy())) m = x.expand([2, 2, 3]) n = y.expand([2, 2, 3]).transpose([0, 2, 1]) self.assertTrue( np.array_equal(m.bmm(n).numpy(), paddle.bmm(m, n).numpy())) self.assertTrue( np.array_equal( x.histogram(5, -1, 1).numpy(), paddle.histogram(x, 5, -1, 1).numpy())) self.assertTrue( np.array_equal(x.equal(y).numpy(), paddle.equal(x, y).numpy())) self.assertTrue( np.array_equal( x.greater_equal(y).numpy(), paddle.greater_equal(x, y).numpy())) self.assertTrue( np.array_equal( x.greater_than(y).numpy(), paddle.greater_than(x, y).numpy())) self.assertTrue( np.array_equal( x.less_equal(y).numpy(), paddle.less_equal(x, y).numpy())) self.assertTrue( np.array_equal( x.less_than(y).numpy(), paddle.less_than(x, y).numpy())) self.assertTrue( np.array_equal( x.not_equal(y).numpy(), paddle.not_equal(x, y).numpy())) self.assertTrue( np.array_equal( x.equal_all(y).numpy(), paddle.equal_all(x, y).numpy())) self.assertTrue( np.array_equal( x.allclose(y).numpy(), paddle.allclose(x, y).numpy())) m = x.expand([2, 2, 3]) self.assertTrue( np.array_equal( x.expand_as(m).numpy(), paddle.expand_as(x, m).numpy())) index = paddle.to_tensor([2, 1, 0]) self.assertTrue( np.array_equal( a.scatter(index, b).numpy(), paddle.scatter(a, index, b).numpy())) # 3. 
Bool tensor operation x = paddle.to_tensor([[True, False], [True, False]]) y = paddle.to_tensor([[False, False], [False, True]]) self.assertTrue( np.array_equal(x.reduce_all().numpy(), paddle.reduce_all(x).numpy())) self.assertTrue( np.array_equal(x.reduce_any().numpy(), paddle.reduce_any(x).numpy())) self.assertTrue( np.array_equal( x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_not(y).numpy(), paddle.logical_not(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_or(y).numpy(), paddle.logical_or(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_xor(y).numpy(), paddle.logical_xor(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy()))
def test_return_inverse():
    result = paddle.unique(x, return_inverse='s')
def test_axis():
    result = paddle.unique(x, axis='12')
def forward_single(self, emb, instance, kernel, training_mask, bboxes):
    training_mask = (training_mask > 0.5).astype('int64')
    kernel = (kernel > 0.5).astype('int64')
    instance = instance * training_mask
    instance_kernel = paddle.reshape(instance * kernel, [-1])
    instance = paddle.reshape(instance, [-1])
    emb = paddle.reshape(emb, (self.feature_dim, -1))

    unique_labels, unique_ids = paddle.unique(instance_kernel,
                                              return_inverse=True)
    num_instance = unique_labels.shape[0]
    if num_instance <= 1:
        return 0

    emb_mean = paddle.zeros((self.feature_dim, num_instance), dtype='float32')
    for i, lb in enumerate(unique_labels):
        if lb == 0:
            continue
        ind_k = instance_kernel == lb
        emb_mean[:, i] = paddle.mean(emb[:, ind_k], axis=1)

    l_agg = paddle.zeros([num_instance], dtype='float32')
    for i, lb in enumerate(unique_labels):
        if lb == 0:
            continue
        ind = instance == lb
        emb_ = emb[:, ind]
        dist = (emb_ - emb_mean[:, i:i + 1]).norm(p=2, axis=0)
        dist = F.relu(dist - self.delta_v) ** 2
        l_agg[i] = paddle.mean(paddle.log(dist + 1.0))
    l_agg = paddle.mean(l_agg[1:])

    if num_instance > 2:
        emb_trans = paddle.transpose(emb_mean, perm=[1, 0])
        emb_interleave = paddle.tile(emb_trans,
                                     repeat_times=[num_instance, 1])
        emb_trans = paddle.transpose(emb_mean, perm=[1, 0])
        emb_tile = paddle.tile(emb_trans, repeat_times=[num_instance, 1])
        emb_band = paddle.reshape(emb_tile, (-1, self.feature_dim))
        # print(seg_band)

        mask = (1 - paddle.eye(num_instance, dtype=np.int8))
        mask = paddle.reshape(mask, (-1, 1))
        mask = paddle.tile(mask, repeat_times=[1, self.feature_dim])
        mask = paddle.reshape(mask, (num_instance, num_instance, -1))
        mask[0, :, :] = 0
        mask[:, 0, :] = 0
        mask = paddle.reshape(mask, (num_instance * num_instance, -1))
        # print(mask)

        dist = emb_interleave - emb_band
        # dist = dist[mask > 0].view(-1, self.feature_dim).norm(p=2, dim=1)
        dist = paddle.reshape(dist[mask > 0],
                              (-1, self.feature_dim)).norm(p=2, axis=1)
        dist = F.relu(2 * self.delta_d - dist) ** 2
        l_dis = paddle.mean(paddle.log(dist + 1.0))
    else:
        l_dis = 0

    l_agg = self.weights[0] * l_agg
    l_dis = self.weights[1] * l_dis
    l_reg = paddle.mean(paddle.log(paddle.norm(emb_mean, 2, 0) + 1.0)) * 0.001
    loss = l_agg + l_dis + l_reg
    return loss
def forward(self, points_xyz, center_xyz, features=None): """forward. Args: points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. center_xyz (Tensor): (B, npoint, 3) Centriods. features (Tensor): (B, C, N) Descriptors of the features. Return: Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. """ # if self.max_radius is None, we will perform kNN instead of ball query # idx is of shape [B, npoint, sample_num] if self.max_radius is None: idx = knn(self.sample_num, points_xyz, center_xyz, False) idx = idx.transpose((1, 2)) else: idx = ball_query(self.min_radius, self.max_radius, self.sample_num, points_xyz, center_xyz) if self.uniform_sample: unique_cnt = paddle.zeros((idx.shape[0], idx.shape[1])) for i_batch in range(idx.shape[0]): for i_region in range(idx.shape[1]): unique_ind = paddle.unique(idx[i_batch, i_region, :]) num_unique = unique_ind.shape[0] unique_cnt[i_batch, i_region] = num_unique sample_ind = paddle.randint( 0, num_unique, (self.sample_num - num_unique, ), dtype=paddle.int64) all_ind = paddle.concat( (unique_ind, unique_ind[sample_ind])) idx[i_batch, i_region, :] = all_ind xyz_trans = points_xyz.transpose((1, 2)) # (B, 3, npoint, sample_num) grouped_xyz = grouping_operation(xyz_trans, idx) grouped_xyz_diff = grouped_xyz - \ center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets if self.normalize_xyz: grouped_xyz_diff /= self.max_radius if features is not None: grouped_features = grouping_operation(features, idx) if self.use_xyz: # (B, C + 3, npoint, sample_num) new_features = paddle.concat( [grouped_xyz_diff, grouped_features], axis=1) else: new_features = grouped_features else: assert (self.use_xyz ), 'Cannot have not features and not use xyz as a feature!' new_features = grouped_xyz_diff ret = [new_features] if self.return_grouped_xyz: ret.append(grouped_xyz) if self.return_unique_cnt: ret.append(unique_cnt) if self.return_grouped_idx: ret.append(idx) if len(ret) == 1: return ret[0] else: return tuple(ret)
def test_step(self, weights, parallel=True, is_save_image=True, **cfg): # 1. Construct model. cfg['MODEL'].head.pretrained = '' cfg['MODEL'].head.test_mode = True model = build_model(cfg['MODEL']) if parallel: model = paddle.DataParallel(model) # 2. Construct data. sequence = cfg["video_path"].split('/')[-1].split('.')[0] obj_nums = 1 images, _ = load_video(cfg["video_path"], 480) print("stage1 load_video success") # [195, 389, 238, 47, 244, 374, 175, 399] # .shape: (502, 480, 600, 3) report_save_dir = cfg.get("output_dir", f"./output/{cfg['model_name']}") if not os.path.exists(report_save_dir): os.makedirs(report_save_dir) # Configuration used in the challenges max_nb_interactions = 8 # Maximum number of interactions # Interactive parameters model.eval() state_dicts_ = load(weights)['state_dict'] state_dicts = {} for k, v in state_dicts_.items(): if 'num_batches_tracked' not in k: state_dicts['head.' + k] = v if ('head.' + k) not in model.state_dict().keys(): print(f'pretrained -----{k} -------is not in model') write_dict(state_dicts, 'model_for_infer.txt', **cfg) model.set_state_dict(state_dicts) inter_file = open( os.path.join( cfg.get("output_dir", f"./output/{cfg['model_name']}"), 'inter_file.txt'), 'w') seen_seq = False with paddle.no_grad(): # Get the current iteration scribbles for scribbles, first_scribble in get_scribbles(): t_total = timeit.default_timer() f, h, w = images.shape[:3] if 'prev_label_storage' not in locals().keys(): prev_label_storage = paddle.zeros([f, h, w]) if len(annotated_frames(scribbles)) == 0: final_masks = prev_label_storage # ToDo To AP-kai: save_path传过来了 submit_masks(cfg["save_path"], final_masks.numpy(), images) continue # if no scribbles return, keep masks in previous round start_annotated_frame = annotated_frames(scribbles)[0] pred_masks = [] pred_masks_reverse = [] if first_scribble: # If in the first round, initialize memories n_interaction = 1 eval_global_map_tmp_dic = {} local_map_dics = ({}, {}) total_frame_num = f else: n_interaction += 1 inter_file.write(sequence + ' ' + 'interaction' + str(n_interaction) + ' ' + 'frame' + str(start_annotated_frame) + '\n') if first_scribble: # if in the first round, extract pixel embbedings. 
if not seen_seq: seen_seq = True inter_turn = 1 embedding_memory = [] places = paddle.set_device('cpu') for imgs in images: if cfg['PIPELINE'].get('test'): imgs = paddle.to_tensor([ build_pipeline(cfg['PIPELINE'].test)({ 'img1': imgs })['img1'] ]) else: imgs = paddle.to_tensor([imgs]) if parallel: for c in model.children(): frame_embedding = c.head.extract_feature( imgs) else: frame_embedding = model.head.extract_feature( imgs) embedding_memory.append(frame_embedding) del frame_embedding embedding_memory = paddle.concat(embedding_memory, 0) _, _, emb_h, emb_w = embedding_memory.shape ref_frame_embedding = embedding_memory[ start_annotated_frame] ref_frame_embedding = ref_frame_embedding.unsqueeze(0) else: inter_turn += 1 ref_frame_embedding = embedding_memory[ start_annotated_frame] ref_frame_embedding = ref_frame_embedding.unsqueeze(0) else: ref_frame_embedding = embedding_memory[ start_annotated_frame] ref_frame_embedding = ref_frame_embedding.unsqueeze(0) ######## scribble_masks = scribbles2mask(scribbles, (emb_h, emb_w)) scribble_label = scribble_masks[start_annotated_frame] scribble_sample = {'scribble_label': scribble_label} scribble_sample = ToTensor_manet()(scribble_sample) # print(ref_frame_embedding, ref_frame_embedding.shape) scribble_label = scribble_sample['scribble_label'] scribble_label = scribble_label.unsqueeze(0) model_name = cfg['model_name'] output_dir = cfg.get("output_dir", f"./output/{model_name}") inter_file_path = os.path.join( output_dir, sequence, 'interactive' + str(n_interaction), 'turn' + str(inter_turn)) if is_save_image: ref_scribble_to_show = scribble_label.squeeze().numpy() im_ = Image.fromarray( ref_scribble_to_show.astype('uint8')).convert('P', ) im_.putpalette(_palette) ref_img_name = str(start_annotated_frame) if not os.path.exists(inter_file_path): os.makedirs(inter_file_path) im_.save( os.path.join(inter_file_path, 'inter_' + ref_img_name + '.png')) if first_scribble: prev_label = None prev_label_storage = paddle.zeros([f, h, w]) else: prev_label = prev_label_storage[start_annotated_frame] prev_label = prev_label.unsqueeze(0).unsqueeze(0) # check if no scribbles. 
if not first_scribble and paddle.unique( scribble_label).shape[0] == 1: print( 'not first_scribble and paddle.unique(scribble_label).shape[0] == 1' ) print(paddle.unique(scribble_label)) final_masks = prev_label_storage submit_masks(cfg["save_path"], final_masks.numpy(), images) continue ###inteaction segmentation head if parallel: for c in model.children(): tmp_dic, local_map_dics = c.head.int_seghead( ref_frame_embedding=ref_frame_embedding, ref_scribble_label=scribble_label, prev_round_label=prev_label, global_map_tmp_dic=eval_global_map_tmp_dic, local_map_dics=local_map_dics, interaction_num=n_interaction, seq_names=[sequence], gt_ids=paddle.to_tensor([obj_nums]), frame_num=[start_annotated_frame], first_inter=first_scribble) else: tmp_dic, local_map_dics = model.head.int_seghead( ref_frame_embedding=ref_frame_embedding, ref_scribble_label=scribble_label, prev_round_label=prev_label, global_map_tmp_dic=eval_global_map_tmp_dic, local_map_dics=local_map_dics, interaction_num=n_interaction, seq_names=[sequence], gt_ids=paddle.to_tensor([obj_nums]), frame_num=[start_annotated_frame], first_inter=first_scribble) pred_label = tmp_dic[sequence] pred_label = nn.functional.interpolate(pred_label, size=(h, w), mode='bilinear', align_corners=True) pred_label = paddle.argmax(pred_label, axis=1) pred_masks.append(float_(pred_label)) # np.unique(pred_label) # array([0], dtype=int64) prev_label_storage[start_annotated_frame] = float_( pred_label[0]) if is_save_image: # save image pred_label_to_save = pred_label.squeeze(0).numpy() im = Image.fromarray( pred_label_to_save.astype('uint8')).convert('P', ) im.putpalette(_palette) imgname = str(start_annotated_frame) while len(imgname) < 5: imgname = '0' + imgname if not os.path.exists(inter_file_path): os.makedirs(inter_file_path) im.save(os.path.join(inter_file_path, imgname + '.png')) ####################################### if first_scribble: scribble_label = rough_ROI(scribble_label) ############################## ref_prev_label = pred_label.unsqueeze(0) prev_label = pred_label.unsqueeze(0) prev_embedding = ref_frame_embedding for ii in range(start_annotated_frame + 1, total_frame_num): current_embedding = embedding_memory[ii] current_embedding = current_embedding.unsqueeze(0) prev_label = prev_label if parallel: for c in model.children(): tmp_dic, eval_global_map_tmp_dic, local_map_dics = c.head.prop_seghead( ref_frame_embedding, prev_embedding, current_embedding, scribble_label, prev_label, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=[sequence], gt_ids=paddle.to_tensor([obj_nums]), k_nearest_neighbors=cfg['knns'], global_map_tmp_dic=eval_global_map_tmp_dic, local_map_dics=local_map_dics, interaction_num=n_interaction, start_annotated_frame=start_annotated_frame, frame_num=[ii], dynamic_seghead=c.head.dynamic_seghead) else: tmp_dic, eval_global_map_tmp_dic, local_map_dics = model.head.prop_seghead( ref_frame_embedding, prev_embedding, current_embedding, scribble_label, prev_label, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=[sequence], gt_ids=paddle.to_tensor([obj_nums]), k_nearest_neighbors=cfg['knns'], global_map_tmp_dic=eval_global_map_tmp_dic, local_map_dics=local_map_dics, interaction_num=n_interaction, start_annotated_frame=start_annotated_frame, frame_num=[ii], dynamic_seghead=model.head.dynamic_seghead) pred_label = tmp_dic[sequence] pred_label = nn.functional.interpolate(pred_label, size=(h, w), mode='bilinear', align_corners=True) pred_label = paddle.argmax(pred_label, axis=1) 
pred_masks.append(float_(pred_label)) prev_label = pred_label.unsqueeze(0) prev_embedding = current_embedding prev_label_storage[ii] = float_(pred_label[0]) if is_save_image: pred_label_to_save = pred_label.squeeze(0).numpy() im = Image.fromarray( pred_label_to_save.astype('uint8')).convert('P', ) im.putpalette(_palette) imgname = str(ii) while len(imgname) < 5: imgname = '0' + imgname if not os.path.exists(inter_file_path): os.makedirs(inter_file_path) im.save(os.path.join(inter_file_path, imgname + '.png')) ####################################### prev_label = ref_prev_label prev_embedding = ref_frame_embedding ####### # Propagation <- for ii in range(start_annotated_frame): current_frame_num = start_annotated_frame - 1 - ii current_embedding = embedding_memory[current_frame_num] current_embedding = current_embedding.unsqueeze(0) prev_label = prev_label if parallel: for c in model.children(): tmp_dic, eval_global_map_tmp_dic, local_map_dics = c.head.prop_seghead( ref_frame_embedding, prev_embedding, current_embedding, scribble_label, prev_label, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=[sequence], gt_ids=paddle.to_tensor([obj_nums]), k_nearest_neighbors=cfg['knns'], global_map_tmp_dic=eval_global_map_tmp_dic, local_map_dics=local_map_dics, interaction_num=n_interaction, start_annotated_frame=start_annotated_frame, frame_num=[current_frame_num], dynamic_seghead=c.head.dynamic_seghead) else: tmp_dic, eval_global_map_tmp_dic, local_map_dics = model.head.prop_seghead( ref_frame_embedding, prev_embedding, current_embedding, scribble_label, prev_label, normalize_nearest_neighbor_distances=True, use_local_map=True, seq_names=[sequence], gt_ids=paddle.to_tensor([obj_nums]), k_nearest_neighbors=cfg['knns'], global_map_tmp_dic=eval_global_map_tmp_dic, local_map_dics=local_map_dics, interaction_num=n_interaction, start_annotated_frame=start_annotated_frame, frame_num=[current_frame_num], dynamic_seghead=model.head.dynamic_seghead) pred_label = tmp_dic[sequence] pred_label = nn.functional.interpolate(pred_label, size=(h, w), mode='bilinear', align_corners=True) pred_label = paddle.argmax(pred_label, axis=1) pred_masks_reverse.append(float_(pred_label)) prev_label = pred_label.unsqueeze(0) prev_embedding = current_embedding #### prev_label_storage[current_frame_num] = float_( pred_label[0]) ### if is_save_image: pred_label_to_save = pred_label.squeeze(0).numpy() im = Image.fromarray( pred_label_to_save.astype('uint8')).convert('P', ) im.putpalette(_palette) imgname = str(current_frame_num) while len(imgname) < 5: imgname = '0' + imgname if not os.path.exists(inter_file_path): os.makedirs(inter_file_path) im.save(os.path.join(inter_file_path, imgname + '.png')) pred_masks_reverse.reverse() pred_masks_reverse.extend(pred_masks) final_masks = paddle.concat(pred_masks_reverse, 0) submit_masks(cfg["save_path"], final_masks.numpy(), images) t_end = timeit.default_timer() print('Total time for single interaction: ' + str(t_end - t_total)) inter_file.close() return None
def test_return_index():
    result = paddle.unique(x, return_index=0)
def forward(self,
            input_ids,
            attention_mask=None,
            decoder_input_ids=None,
            decoder_attention_mask=None,
            encoder_output=None,
            use_cache=False,
            cache=None):
    r"""
    The MBartForSequenceClassification forward method, overrides the
    __call__() special method.

    Args:
        input_ids (Tensor):
            See :class:`MBartModel`.
        attention_mask (Tensor, optional):
            See :class:`MBartModel`.
        decoder_input_ids (Tensor, `optional`):
            See :class:`MBartModel`.
        decoder_attention_mask (Tensor, optional):
            See :class:`MBartModel`.
        encoder_output (Tensor, optional):
            See :class:`MBartModel`.
        use_cache (bool, optional):
            See :class:`MBartModel`.
        cache (Tensor, optional):
            See :class:`MBartModel`.

    Returns:
        Tensor: Returns tensor `logits`, a tensor of the input text
        classification logits. Shape as `[batch_size, num_labels]` and dtype
        as float32.

    Example:
        .. code-block::

            import paddle
            from paddlenlp.transformers import MBartForSequenceClassification, MBartTokenizer

            tokenizer = MBartTokenizer.from_pretrained('bart-base')
            model = MBartForSequenceClassification.from_pretrained('bart-base')

            inputs = tokenizer("Welcome to use PaddlePaddle and PaddleNLP!")
            inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
            logits = model(**inputs)
    """
    output = self.mbart(input_ids, attention_mask, decoder_input_ids,
                        decoder_attention_mask, encoder_output, use_cache,
                        cache)
    if use_cache:
        output = output[0]
    eos_mask = paddle.cast(input_ids == self.mbart.config['eos_token_id'],
                           dtype='int64')
    if len(paddle.unique(paddle.sum(eos_mask, axis=1))) > 1:
        raise ValueError(
            'All examples must have the same number of <eos> tokens.')
    output_shape = paddle.shape(output)
    # TODO(gongenlei): support bool tensor index
    output = output.masked_select(
        eos_mask.unsqueeze(-1).astype('bool').tile([1, 1, output_shape[-1]]))
    sentence_representation = output.reshape(
        [output_shape[0], -1, output_shape[-1]])[:, -1, :]
    logits = self.classifier(sentence_representation)
    return logits
def test_return_counts():
    result = paddle.unique(x, return_counts=3)
def test_tensor_patch_method(self): paddle.disable_static() x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype) y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype) z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype) x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) z = paddle.to_tensor(z_np) a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]]) b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]]) # 1. Unary operation for Tensor self.assertEqual(x.dim(), 2) self.assertEqual(x.ndimension(), 2) self.assertEqual(x.ndim, 2) self.assertEqual(x.size, 6) self.assertEqual(x.numel(), 6) self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy())) self.assertTrue( np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy())) self.assertTrue( np.array_equal(x.atan().numpy(), paddle.atan(x).numpy())) self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy())) m = x.abs() self.assertTrue( np.array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy())) self.assertTrue( np.array_equal(m.rsqrt().numpy(), paddle.rsqrt(m).numpy())) self.assertTrue( np.array_equal(x.ceil().numpy(), paddle.ceil(x).numpy())) self.assertTrue( np.array_equal(x.floor().numpy(), paddle.floor(x).numpy())) self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy())) self.assertTrue( np.array_equal(x.acos().numpy(), paddle.acos(x).numpy())) self.assertTrue( np.array_equal(x.asin().numpy(), paddle.asin(x).numpy())) self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy())) self.assertTrue( np.array_equal(x.sinh().numpy(), paddle.sinh(x).numpy())) self.assertTrue( np.array_equal(x.cosh().numpy(), paddle.cosh(x).numpy())) self.assertTrue( np.array_equal(x.round().numpy(), paddle.round(x).numpy())) self.assertTrue( np.array_equal(x.reciprocal().numpy(), paddle.reciprocal(x).numpy())) self.assertTrue( np.array_equal(x.square().numpy(), paddle.square(x).numpy())) self.assertTrue( np.array_equal(x.rank().numpy(), paddle.rank(x).numpy())) self.assertTrue( np.array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy())) self.assertTrue( np.array_equal(x.asinh().numpy(), paddle.asinh(x).numpy())) ### acosh(x) = nan, need to change input t_np = np.random.uniform(1, 2, [2, 3]).astype(self.dtype) t = paddle.to_tensor(t_np) self.assertTrue( np.array_equal(t.acosh().numpy(), paddle.acosh(t).numpy())) self.assertTrue( np.array_equal(x.atanh().numpy(), paddle.atanh(x).numpy())) d = paddle.to_tensor([[1.2285208, 1.3491015, 1.4899898], [1.30058, 1.0688717, 1.4928783], [1.0958099, 1.3724753, 1.8926544]]) d = d.matmul(d.t()) # ROCM not support cholesky if not fluid.core.is_compiled_with_rocm(): self.assertTrue( np.array_equal(d.cholesky().numpy(), paddle.cholesky(d).numpy())) self.assertTrue( np.array_equal(x.is_empty().numpy(), paddle.is_empty(x).numpy())) self.assertTrue( np.array_equal(x.isfinite().numpy(), paddle.isfinite(x).numpy())) self.assertTrue( np.array_equal( x.cast('int32').numpy(), paddle.cast(x, 'int32').numpy())) self.assertTrue( np.array_equal( x.expand([3, 2, 3]).numpy(), paddle.expand(x, [3, 2, 3]).numpy())) self.assertTrue( np.array_equal( x.tile([2, 2]).numpy(), paddle.tile(x, [2, 2]).numpy())) self.assertTrue( np.array_equal(x.flatten().numpy(), paddle.flatten(x).numpy())) index = paddle.to_tensor([0, 1]) self.assertTrue( np.array_equal( x.gather(index).numpy(), paddle.gather(x, index).numpy())) index = paddle.to_tensor([[0, 1], [1, 2]]) self.assertTrue( np.array_equal( x.gather_nd(index).numpy(), paddle.gather_nd(x, index).numpy())) self.assertTrue( np.array_equal( x.reverse([0, 1]).numpy(), 
paddle.reverse(x, [0, 1]).numpy())) self.assertTrue( np.array_equal( a.reshape([3, 2]).numpy(), paddle.reshape(a, [3, 2]).numpy())) self.assertTrue( np.array_equal( x.slice([0, 1], [0, 0], [1, 2]).numpy(), paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy())) self.assertTrue( np.array_equal( x.split(2)[0].numpy(), paddle.split(x, 2)[0].numpy())) m = paddle.to_tensor( np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype)) self.assertTrue( np.array_equal( m.squeeze([]).numpy(), paddle.squeeze(m, []).numpy())) self.assertTrue( np.array_equal( m.squeeze([1, 2]).numpy(), paddle.squeeze(m, [1, 2]).numpy())) m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32') self.assertTrue( np.array_equal(m.unique()[0].numpy(), paddle.unique(m)[0].numpy())) self.assertTrue( np.array_equal( m.unique(return_counts=True)[1], paddle.unique(m, return_counts=True)[1])) self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0]))) self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0))) self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1))) self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1))) m = paddle.to_tensor(1) self.assertTrue(np.array_equal(m.increment(), paddle.increment(m))) m = x.abs() self.assertTrue(np.array_equal(m.log(), paddle.log(m))) self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2))) self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x))) # 2. Binary operation self.assertTrue( np.array_equal(x.divide(y).numpy(), paddle.divide(x, y).numpy())) self.assertTrue( np.array_equal( x.matmul(y, True, False).numpy(), paddle.matmul(x, y, True, False).numpy())) self.assertTrue( np.array_equal( x.norm(p='fro', axis=[0, 1]).numpy(), paddle.norm(x, p='fro', axis=[0, 1]).numpy())) self.assertTrue( np.array_equal(x.dist(y).numpy(), paddle.dist(x, y).numpy())) self.assertTrue( np.array_equal(x.cross(y).numpy(), paddle.cross(x, y).numpy())) m = x.expand([2, 2, 3]) n = y.expand([2, 2, 3]).transpose([0, 2, 1]) self.assertTrue( np.array_equal(m.bmm(n).numpy(), paddle.bmm(m, n).numpy())) self.assertTrue( np.array_equal( x.histogram(5, -1, 1).numpy(), paddle.histogram(x, 5, -1, 1).numpy())) self.assertTrue( np.array_equal(x.equal(y).numpy(), paddle.equal(x, y).numpy())) self.assertTrue( np.array_equal( x.greater_equal(y).numpy(), paddle.greater_equal(x, y).numpy())) self.assertTrue( np.array_equal( x.greater_than(y).numpy(), paddle.greater_than(x, y).numpy())) self.assertTrue( np.array_equal( x.less_equal(y).numpy(), paddle.less_equal(x, y).numpy())) self.assertTrue( np.array_equal( x.less_than(y).numpy(), paddle.less_than(x, y).numpy())) self.assertTrue( np.array_equal( x.not_equal(y).numpy(), paddle.not_equal(x, y).numpy())) self.assertTrue( np.array_equal( x.equal_all(y).numpy(), paddle.equal_all(x, y).numpy())) self.assertTrue( np.array_equal( x.allclose(y).numpy(), paddle.allclose(x, y).numpy())) m = x.expand([2, 2, 3]) self.assertTrue( np.array_equal( x.expand_as(m).numpy(), paddle.expand_as(x, m).numpy())) index = paddle.to_tensor([2, 1, 0]) self.assertTrue( np.array_equal( a.scatter(index, b).numpy(), paddle.scatter(a, index, b).numpy())) # 3. 
Bool tensor operation x = paddle.to_tensor([[True, False], [True, False]]) y = paddle.to_tensor([[False, False], [False, True]]) self.assertTrue( np.array_equal( x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_not(y).numpy(), paddle.logical_not(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_or(y).numpy(), paddle.logical_or(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_xor(y).numpy(), paddle.logical_xor(x, y).numpy())) self.assertTrue( np.array_equal( x.logical_and(y).numpy(), paddle.logical_and(x, y).numpy())) a = paddle.to_tensor([[1, 2], [3, 4]]) b = paddle.to_tensor([[4, 3], [2, 1]]) self.assertTrue( np.array_equal( x.where(a, b).numpy(), paddle.where(x, a, b).numpy())) x_np = np.random.randn(3, 6, 9, 7) x = paddle.to_tensor(x_np) x_T = x.T self.assertTrue(x_T.shape, [7, 9, 6, 3]) self.assertTrue(np.array_equal(x_T.numpy(), x_np.T)) self.assertTrue(inspect.ismethod(a.dot)) self.assertTrue(inspect.ismethod(a.logsumexp)) self.assertTrue(inspect.ismethod(a.multiplex)) self.assertTrue(inspect.ismethod(a.prod)) self.assertTrue(inspect.ismethod(a.scale)) self.assertTrue(inspect.ismethod(a.stanh)) self.assertTrue(inspect.ismethod(a.add_n)) self.assertTrue(inspect.ismethod(a.max)) self.assertTrue(inspect.ismethod(a.maximum)) self.assertTrue(inspect.ismethod(a.min)) self.assertTrue(inspect.ismethod(a.minimum)) self.assertTrue(inspect.ismethod(a.floor_divide)) self.assertTrue(inspect.ismethod(a.remainder)) self.assertTrue(inspect.ismethod(a.floor_mod)) self.assertTrue(inspect.ismethod(a.multiply)) self.assertTrue(inspect.ismethod(a.logsumexp)) self.assertTrue(inspect.ismethod(a.inverse)) self.assertTrue(inspect.ismethod(a.log1p)) self.assertTrue(inspect.ismethod(a.erf)) self.assertTrue(inspect.ismethod(a.addmm)) self.assertTrue(inspect.ismethod(a.clip)) self.assertTrue(inspect.ismethod(a.trace)) self.assertTrue(inspect.ismethod(a.kron)) self.assertTrue(inspect.ismethod(a.isinf)) self.assertTrue(inspect.ismethod(a.isnan)) self.assertTrue(inspect.ismethod(a.concat)) self.assertTrue(inspect.ismethod(a.broadcast_to)) self.assertTrue(inspect.ismethod(a.scatter_nd_add)) self.assertTrue(inspect.ismethod(a.scatter_nd)) self.assertTrue(inspect.ismethod(a.shard_index)) self.assertTrue(inspect.ismethod(a.chunk)) self.assertTrue(inspect.ismethod(a.stack)) self.assertTrue(inspect.ismethod(a.strided_slice)) self.assertTrue(inspect.ismethod(a.unsqueeze)) self.assertTrue(inspect.ismethod(a.unstack)) self.assertTrue(inspect.ismethod(a.argmax)) self.assertTrue(inspect.ismethod(a.argmin)) self.assertTrue(inspect.ismethod(a.argsort)) self.assertTrue(inspect.ismethod(a.masked_select)) self.assertTrue(inspect.ismethod(a.topk)) self.assertTrue(inspect.ismethod(a.index_select)) self.assertTrue(inspect.ismethod(a.nonzero)) self.assertTrue(inspect.ismethod(a.sort)) self.assertTrue(inspect.ismethod(a.index_sample)) self.assertTrue(inspect.ismethod(a.mean)) self.assertTrue(inspect.ismethod(a.std)) self.assertTrue(inspect.ismethod(a.numel))
def test_dtype():
    result = paddle.unique(x, dtype='float64')
def _hard_anchor_sampling(self, X, y_hat, y): """ Args: X (Tensor): reshaped feats, shape = [N, H * W, feat_channels] y_hat (Tensor): reshaped label, shape = [N, H * W] y (Tensor): reshaped predict, shape = [N, H * W] """ batch_size, feat_dim = paddle.shape(X)[0], paddle.shape(X)[-1] classes = [] total_classes = 0 for i in range(batch_size): current_y = y_hat[i] current_classes = paddle.unique(current_y) current_classes = [ x for x in current_classes if x != self.ignore_index ] current_classes = [ x for x in current_classes if (current_y == x).nonzero().shape[0] > self.max_views ] classes.append(current_classes) total_classes += len(current_classes) n_view = self.max_samples // total_classes n_view = min(n_view, self.max_views) X_ = [] y_ = paddle.zeros([total_classes], dtype='float32') X_ptr = 0 for i in range(batch_size): this_y_hat = y_hat[i] current_y = y[i] current_classes = classes[i] for cls_id in current_classes: hard_indices = paddle.logical_and( (this_y_hat == cls_id), (current_y != cls_id)).nonzero() easy_indices = paddle.logical_and( (this_y_hat == cls_id), (current_y == cls_id)).nonzero() num_hard = hard_indices.shape[0] num_easy = easy_indices.shape[0] if num_hard >= n_view / 2 and num_easy >= n_view / 2: num_hard_keep = n_view // 2 num_easy_keep = n_view - num_hard_keep elif num_hard >= n_view / 2: num_easy_keep = num_easy num_hard_keep = n_view - num_easy_keep else: num_hard_keep = num_hard num_easy_keep = n_view - num_hard_keep indices = None if num_hard > 0: perm = paddle.randperm(num_hard) hard_indices = hard_indices[perm[:num_hard_keep]].reshape( (-1, hard_indices.shape[-1])) indices = hard_indices if num_easy > 0: perm = paddle.randperm(num_easy) easy_indices = easy_indices[perm[:num_easy_keep]].reshape( (-1, easy_indices.shape[-1])) if indices is None: indices = easy_indices else: indices = paddle.concat((indices, easy_indices), axis=0) if indices is None: raise UserWarning('hard sampling indice error') X_.append(paddle.index_select(X[i, :, :], indices.squeeze(1))) y_[X_ptr] = float(cls_id) X_ptr += 1 X_ = paddle.stack(X_, axis=0) return X_, y_