def get_dist_subgraphs(self, graph, dist_inds):
    subg_edge_list = []
    if self.num_dist == 1:
        subg_eids = paddle.greater_equal(
            dist_inds, paddle.to_tensor(0.)).nonzero().squeeze()
        subg_edge_list.append(subg_eids)
    elif self.num_dist == 2:
        subg_eids = paddle.equal(
            dist_inds, paddle.to_tensor(0.)).nonzero().squeeze()
        subg_edge_list.append(subg_eids)
        subg_eids = paddle.greater_equal(
            dist_inds, paddle.to_tensor(1.)).nonzero().squeeze()
        subg_edge_list.append(subg_eids)
    else:
        for k in range(self.num_dist):
            subg_edge_list.append(
                paddle.equal(dist_inds,
                             paddle.to_tensor(float(k))).nonzero().squeeze())

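# A minimal standalone sketch of the bucket-masking pattern above: for each
# distance k, paddle.equal(...).nonzero().squeeze() yields the ids of the
# edges in that bucket. The toy dist_inds values below are assumptions made
# for illustration only.
import paddle

dist_inds = paddle.to_tensor([0., 1., 0., 2., 1.])
for k in range(3):
    eids = paddle.equal(dist_inds, paddle.to_tensor(float(k))).nonzero().squeeze()
    print(k, eids.numpy())  # k=0 -> [0 2], k=1 -> [1 4], k=2 -> 3 (0-d, single match)
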
def accuracy(class_out, label):
    # Find the positions of the neg (0) and pos (1) samples
    zeros = paddle.zeros_like(label)
    cond = paddle.greater_equal(label, zeros)
    picked, _ = np.where(cond.numpy())
    picked = paddle.to_tensor(picked, dtype='int32')
    # Compute the accuracy over the neg (0) and pos (1) samples
    valid_class_out = paddle.gather(class_out, picked)
    valid_label = paddle.gather(label, picked)
    acc = paddle.metric.accuracy(valid_class_out, valid_label)
    return acc

def topk_sampling(self, probs):
    topk_probs, _ = paddle.topk(probs, self.topk)
    ge_cond = paddle.cast(
        paddle.greater_equal(probs, paddle.unsqueeze(topk_probs[:, -1], [1])),
        "float32")
    old_probs = probs
    probs = probs * ge_cond / paddle.sum(topk_probs, axis=-1, keepdim=True)
    sampling_ids = layers.sampling_id(probs, dtype="int")
    probs = old_probs
    return probs, sampling_ids

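# The renormalization trick above, in isolation: everything below the k-th
# largest probability is masked out and the survivors are rescaled by the
# top-k mass. The 4-token distribution below is a toy assumption.
import paddle

probs = paddle.to_tensor([[0.1, 0.4, 0.3, 0.2]])
topk = 2
topk_probs, _ = paddle.topk(probs, topk)
mask = paddle.cast(
    paddle.greater_equal(probs, paddle.unsqueeze(topk_probs[:, -1], [1])),
    "float32")
renormed = probs * mask / paddle.sum(topk_probs, axis=-1, keepdim=True)
print(renormed.numpy())  # approximately [[0. 0.5714 0.4286 0.]]
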
def filter_boxes(self, boxes, im_w, im_h, im_s, min_size):
    min_size = max(min_size, 1.0)
    xmin, ymin, xmax, ymax = paddle.tensor.split(
        boxes, axis=1, num_or_sections=4)
    x_ctr = (xmax + xmin) / 2 + 0.5
    y_ctr = (ymax + ymin) / 2 + 0.5
    ws = (xmax - xmin) / im_s + 1
    hs = (ymax - ymin) / im_s + 1
    min_size = np.asarray([min_size], dtype='float32')
    min_size = paddle.assign(min_size)
    valid_flag_ws = paddle.greater_equal(ws, min_size)
    valid_flag_hs = paddle.greater_equal(hs, min_size)
    valid_flag_x = paddle.less_equal(x_ctr, im_w)
    valid_flag_y = paddle.less_equal(y_ctr, im_h)
    valid_flag = paddle.logical_and(valid_flag_ws, valid_flag_hs)
    valid_flag = paddle.logical_and(valid_flag, valid_flag_x)
    valid_flag = paddle.logical_and(valid_flag, valid_flag_y)
    valid_flag = paddle.squeeze(valid_flag, axis=1)
    valid_inds = paddle.nonzero(valid_flag)
    return valid_inds

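# A minimal sketch of how the per-box validity masks combine: each condition
# produces a boolean column, logical_and intersects them, and nonzero returns
# the surviving row indices. The widths/heights below are toy assumptions.
import paddle

ws = paddle.to_tensor([[5.0], [0.5], [8.0]])
hs = paddle.to_tensor([[4.0], [3.0], [0.2]])
min_size = paddle.to_tensor([1.0])
valid = paddle.logical_and(paddle.greater_equal(ws, min_size),
                           paddle.greater_equal(hs, min_size))
keep = paddle.nonzero(paddle.squeeze(valid, axis=1))
print(keep.numpy())  # [[0]] -- only the first box passes both size checks
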
def forward(self, inputs):
    x = self.bn1(inputs)
    x = paddle.reshape(x, [1, 3 * 16 * 16])
    x = self.ln1(x)
    x = self.fc1(x)
    x = paddle.fluid.layers.unsqueeze(input=x, axes=[2])
    x = self.relu1(x)
    y = paddle.fluid.layers.fill_constant(
        x.shape, dtype=paddle.float32, value=1)
    x = paddle.stack([x, y], axis=3)
    x = paddle.slice(x, axes=[0], starts=[0], ends=[1])
    x = paddle.exp(x)
    y += paddle.fluid.layers.uniform_random(y.shape)
    y = paddle.fluid.layers.reduce_mean(y, dim=1, keep_dim=True)
    return paddle.greater_equal(x, y)

def forward(self, class_out, label):
    # Keep the neg (0) and pos (1) samples; ignore part (-1) and landmark (-2)
    zeros = paddle.zeros_like(label)
    ignore_label = paddle.full_like(label, fill_value=-100)
    label = paddle.where(paddle.less_than(label, zeros), ignore_label, label)
    # Count the valid neg (0) and pos (1) samples
    ones = paddle.ones_like(label)
    valid_label = paddle.where(paddle.greater_equal(label, zeros), ones, zeros)
    num_valid = paddle.sum(valid_label)
    keep_num = int((num_valid * self.keep_ratio).numpy()[0])
    # Compute the cross-entropy loss
    loss = self.entropy_loss(input=class_out, label=label)
    # Average the loss over the hardest keep_ratio (70%) of the valid samples
    loss, _ = paddle.topk(paddle.squeeze(loss), k=keep_num)
    return paddle.mean(loss)

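# The hard-example mining above, reduced to its core: keep only the largest
# keep_ratio fraction of the per-sample losses and average those. The loss
# values below are toy assumptions.
import paddle

losses = paddle.to_tensor([0.1, 2.0, 0.5, 1.2, 0.05])
keep_ratio = 0.7
keep_num = int(losses.shape[0] * keep_ratio)  # 3
top_losses, _ = paddle.topk(losses, k=keep_num)
print(float(paddle.mean(top_losses)))  # mean of [2.0, 1.2, 0.5] = 1.2333...
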
def forward(self, mol_batch, x_tree_vecs):
    """Tree decoding in training

    Args:
        mol_batch(list): mol objects in a batch.
        x_tree_vecs(tensor): tree latent representation.

    Returns:
        pred_loss: label prediction loss.
        stop_loss: topological prediction loss.
        pred_acc: label prediction accuracy.
        stop_acc: topological prediction accuracy.
    """
    pred_hiddens, pred_contexts, pred_targets = [], [], []
    stop_hiddens, stop_contexts, stop_targets = [], [], []
    traces = []
    for mol_tree in mol_batch:
        s = []
        dfs(s, mol_tree.nodes[0], -1)
        traces.append(s)
        for node in mol_tree.nodes:
            node.neighbors = []

    batch_size = len(mol_batch)
    pred_hiddens.append(paddle.zeros([len(mol_batch), self.hidden_size]))
    pred_targets.extend([mol_tree.nodes[0].wid for mol_tree in mol_batch])
    pred_contexts.append(paddle.to_tensor(list(range(batch_size))))

    max_iter = max([len(tr) for tr in traces])
    padding = paddle.zeros([self.hidden_size])
    padding.stop_gradient = False
    h = {}

    for t in range(max_iter):
        prop_list = []
        batch_list = []
        for i, plist in enumerate(traces):
            if t < len(plist):
                prop_list.append(plist[t])
                batch_list.append(i)

        cur_x = []
        cur_h_nei, cur_o_nei = [], []
        for node_x, real_y, _ in prop_list:
            # Neighbors for message passing (target not included)
            cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors
                       if node_y.idx != real_y.idx]
            pad_len = MAX_NB - len(cur_nei)
            cur_h_nei.extend(cur_nei)
            cur_h_nei.extend([padding] * pad_len)
            # Neighbors for stop prediction (all neighbors)
            cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
            pad_len = MAX_NB - len(cur_nei)
            cur_o_nei.extend(cur_nei)
            cur_o_nei.extend([padding] * pad_len)
            # Current clique embedding
            cur_x.append(node_x.wid)

        cur_x = paddle.to_tensor(cur_x)
        cur_x = self.embedding(cur_x)
        cur_h_nei = paddle.reshape(paddle.stack(cur_h_nei, axis=0),
                                   shape=[-1, MAX_NB, self.hidden_size])
        new_h = GRU(cur_x, cur_h_nei, self.W_z, self.W_r, self.U_r, self.W_h)
        cur_o_nei = paddle.reshape(paddle.stack(cur_o_nei, axis=0),
                                   shape=[-1, MAX_NB, self.hidden_size])
        cur_o = paddle.sum(cur_o_nei, axis=1)

        pred_target, pred_list = [], []
        stop_target = []
        for i, m in enumerate(prop_list):
            node_x, node_y, direction = m
            x, y = node_x.idx, node_y.idx
            h[(x, y)] = new_h[i]
            node_y.neighbors.append(node_x)
            if direction == 1:
                pred_target.append(node_y.wid)
                pred_list.append(i)
            stop_target.append(direction)

        cur_batch = paddle.to_tensor(batch_list)
        stop_hidden = paddle.concat([cur_x, cur_o], axis=1)
        stop_hiddens.append(stop_hidden)
        stop_contexts.append(cur_batch)
        stop_targets.extend(stop_target)

        if len(pred_list) > 0:
            batch_list = [batch_list[i] for i in pred_list]
            cur_batch = paddle.to_tensor(batch_list)
            pred_contexts.append(cur_batch)
            cur_pred = paddle.to_tensor(pred_list)
            pred_hiddens.append(paddle.index_select(axis=0, index=cur_pred, x=new_h))
            pred_targets.extend(pred_target)

    cur_x, cur_o_nei = [], []
    for mol_tree in mol_batch:
        node_x = mol_tree.nodes[0]
        cur_x.append(node_x.wid)
        cur_nei = [h[(node_y.idx, node_x.idx)] for node_y in node_x.neighbors]
        pad_len = MAX_NB - len(cur_nei)
        cur_o_nei.extend(cur_nei)
        cur_o_nei.extend([padding] * pad_len)

    cur_x = paddle.to_tensor(cur_x)
    cur_x = self.embedding(cur_x)
    cur_o_nei = paddle.reshape(paddle.stack(cur_o_nei, axis=0),
                               shape=[-1, MAX_NB, self.hidden_size])
    cur_o = paddle.sum(cur_o_nei, axis=1)

    stop_hidden = paddle.concat([cur_x, cur_o], axis=1)
    stop_hiddens.append(stop_hidden)
    stop_contexts.append(paddle.to_tensor(list(range(batch_size))))
    stop_targets.extend([0] * len(mol_batch))

    pred_contexts = paddle.concat(pred_contexts, axis=0)
    pred_hiddens = paddle.concat(pred_hiddens, axis=0)
    pred_scores = self.aggregate(pred_hiddens, pred_contexts, x_tree_vecs, 'word')
    pred_targets = paddle.to_tensor(pred_targets)
    pred_loss = self.pred_loss(pred_scores, pred_targets) / len(mol_batch)
    preds = paddle.argmax(pred_scores, axis=1)
    pred_acc = paddle.equal(preds, pred_targets).astype('float32')
    pred_acc = paddle.sum(pred_acc) / pred_targets.size

    stop_contexts = paddle.concat(stop_contexts, axis=0)
    stop_hiddens = paddle.concat(stop_hiddens, axis=0)
    stop_hiddens = F.relu(self.U_i(stop_hiddens))
    stop_scores = self.aggregate(stop_hiddens, stop_contexts, x_tree_vecs, 'stop')
    stop_scores = stop_scores.squeeze(-1)
    stop_targets = paddle.to_tensor(stop_targets).astype('float32')
    stop_loss = self.stop_loss(stop_scores, stop_targets) / len(mol_batch)
    stops = paddle.greater_equal(stop_scores, paddle.ones(shape=[1])).astype('float32')
    stop_acc = paddle.equal(stops, stop_targets).astype('float32')
    stop_acc = paddle.sum(stop_acc) / stop_targets.size

    return {'pred_loss': pred_loss,
            'stop_loss': stop_loss,
            'pred_acc': float(pred_acc.numpy()),
            'stop_acc': float(stop_acc.numpy())}

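# The stop-accuracy thresholding above, in isolation: scores >= 1.0 are
# treated as positive topological decisions before comparing against the
# targets. The scores and targets below are toy assumptions.
import paddle

stop_scores = paddle.to_tensor([0.2, 1.5, 0.9, 3.0])
stop_targets = paddle.to_tensor([0., 1., 1., 1.])
stops = paddle.greater_equal(stop_scores, paddle.ones(shape=[1])).astype('float32')
acc = paddle.sum(paddle.equal(stops, stop_targets).astype('float32')) / stop_targets.size
print(float(acc))  # 0.75 -- the 0.9 score falls below the threshold
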
def test_tensor_patch_method(self):
    paddle.disable_static()
    x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype)
    x = paddle.to_tensor(x_np)
    y = paddle.to_tensor(y_np)
    z = paddle.to_tensor(z_np)
    a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])
    b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])

    # 1. Unary operation for Tensor
    self.assertEqual(x.dim(), 2)
    self.assertEqual(x.ndimension(), 2)
    self.assertEqual(x.ndim, 2)
    self.assertEqual(x.size, 6)
    self.assertEqual(x.numel(), 6)
    self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
    self.assertTrue(np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy()))
    self.assertTrue(np.array_equal(x.atan().numpy(), paddle.atan(x).numpy()))
    self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
    m = x.abs()
    self.assertTrue(np.array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy()))
    self.assertTrue(np.array_equal(m.rsqrt().numpy(), paddle.rsqrt(m).numpy()))
    self.assertTrue(np.array_equal(x.ceil().numpy(), paddle.ceil(x).numpy()))
    self.assertTrue(np.array_equal(x.floor().numpy(), paddle.floor(x).numpy()))
    self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy()))
    self.assertTrue(np.array_equal(x.acos().numpy(), paddle.acos(x).numpy()))
    self.assertTrue(np.array_equal(x.asin().numpy(), paddle.asin(x).numpy()))
    self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy()))
    self.assertTrue(np.array_equal(x.sinh().numpy(), paddle.sinh(x).numpy()))
    self.assertTrue(np.array_equal(x.cosh().numpy(), paddle.cosh(x).numpy()))
    self.assertTrue(np.array_equal(x.round().numpy(), paddle.round(x).numpy()))
    self.assertTrue(np.array_equal(x.reciprocal().numpy(), paddle.reciprocal(x).numpy()))
    self.assertTrue(np.array_equal(x.square().numpy(), paddle.square(x).numpy()))
    self.assertTrue(np.array_equal(x.rank().numpy(), paddle.rank(x).numpy()))
    self.assertTrue(np.array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy()))
    self.assertTrue(np.array_equal(x.asinh().numpy(), paddle.asinh(x).numpy()))
    # acosh(x) is nan for x < 1, so use a positive input
    t_np = np.random.uniform(1, 2, [2, 3]).astype(self.dtype)
    t = paddle.to_tensor(t_np)
    self.assertTrue(np.array_equal(t.acosh().numpy(), paddle.acosh(t).numpy()))
    self.assertTrue(np.array_equal(x.atanh().numpy(), paddle.atanh(x).numpy()))
    d = paddle.to_tensor([[1.2285208, 1.3491015, 1.4899898],
                          [1.30058, 1.0688717, 1.4928783],
                          [1.0958099, 1.3724753, 1.8926544]])
    d = d.matmul(d.t())
    # ROCm does not support cholesky
    if not fluid.core.is_compiled_with_rocm():
        self.assertTrue(np.array_equal(d.cholesky().numpy(), paddle.cholesky(d).numpy()))
    self.assertTrue(np.array_equal(x.is_empty().numpy(), paddle.is_empty(x).numpy()))
    self.assertTrue(np.array_equal(x.isfinite().numpy(), paddle.isfinite(x).numpy()))
    self.assertTrue(np.array_equal(x.cast('int32').numpy(), paddle.cast(x, 'int32').numpy()))
    self.assertTrue(np.array_equal(x.expand([3, 2, 3]).numpy(),
                                   paddle.expand(x, [3, 2, 3]).numpy()))
    self.assertTrue(np.array_equal(x.tile([2, 2]).numpy(), paddle.tile(x, [2, 2]).numpy()))
    self.assertTrue(np.array_equal(x.flatten().numpy(), paddle.flatten(x).numpy()))
    index = paddle.to_tensor([0, 1])
    self.assertTrue(np.array_equal(x.gather(index).numpy(), paddle.gather(x, index).numpy()))
    index = paddle.to_tensor([[0, 1], [1, 2]])
    self.assertTrue(np.array_equal(x.gather_nd(index).numpy(),
                                   paddle.gather_nd(x, index).numpy()))
    self.assertTrue(np.array_equal(x.reverse([0, 1]).numpy(),
                                   paddle.reverse(x, [0, 1]).numpy()))
    self.assertTrue(np.array_equal(a.reshape([3, 2]).numpy(),
                                   paddle.reshape(a, [3, 2]).numpy()))
    self.assertTrue(np.array_equal(x.slice([0, 1], [0, 0], [1, 2]).numpy(),
                                   paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()))
    self.assertTrue(np.array_equal(x.split(2)[0].numpy(), paddle.split(x, 2)[0].numpy()))
    m = paddle.to_tensor(np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype))
    self.assertTrue(np.array_equal(m.squeeze([]).numpy(), paddle.squeeze(m, []).numpy()))
    self.assertTrue(np.array_equal(m.squeeze([1, 2]).numpy(),
                                   paddle.squeeze(m, [1, 2]).numpy()))
    m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32')
    self.assertTrue(np.array_equal(m.unique()[0].numpy(), paddle.unique(m)[0].numpy()))
    self.assertTrue(np.array_equal(m.unique(return_counts=True)[1],
                                   paddle.unique(m, return_counts=True)[1]))
    self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0])))
    self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0)))
    self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1)))
    self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1)))
    m = paddle.to_tensor(1)
    self.assertTrue(np.array_equal(m.increment(), paddle.increment(m)))
    m = x.abs()
    self.assertTrue(np.array_equal(m.log(), paddle.log(m)))
    self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2)))
    self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))

    # 2. Binary operation
    self.assertTrue(np.array_equal(x.divide(y).numpy(), paddle.divide(x, y).numpy()))
    self.assertTrue(np.array_equal(x.matmul(y, True, False).numpy(),
                                   paddle.matmul(x, y, True, False).numpy()))
    self.assertTrue(np.array_equal(x.norm(p='fro', axis=[0, 1]).numpy(),
                                   paddle.norm(x, p='fro', axis=[0, 1]).numpy()))
    self.assertTrue(np.array_equal(x.dist(y).numpy(), paddle.dist(x, y).numpy()))
    self.assertTrue(np.array_equal(x.cross(y).numpy(), paddle.cross(x, y).numpy()))
    m = x.expand([2, 2, 3])
    n = y.expand([2, 2, 3]).transpose([0, 2, 1])
    self.assertTrue(np.array_equal(m.bmm(n).numpy(), paddle.bmm(m, n).numpy()))
    self.assertTrue(np.array_equal(x.histogram(5, -1, 1).numpy(),
                                   paddle.histogram(x, 5, -1, 1).numpy()))
    self.assertTrue(np.array_equal(x.equal(y).numpy(), paddle.equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.greater_equal(y).numpy(),
                                   paddle.greater_equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.greater_than(y).numpy(),
                                   paddle.greater_than(x, y).numpy()))
    self.assertTrue(np.array_equal(x.less_equal(y).numpy(), paddle.less_equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.less_than(y).numpy(), paddle.less_than(x, y).numpy()))
    self.assertTrue(np.array_equal(x.not_equal(y).numpy(), paddle.not_equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.equal_all(y).numpy(), paddle.equal_all(x, y).numpy()))
    self.assertTrue(np.array_equal(x.allclose(y).numpy(), paddle.allclose(x, y).numpy()))
    m = x.expand([2, 2, 3])
    self.assertTrue(np.array_equal(x.expand_as(m).numpy(), paddle.expand_as(x, m).numpy()))
    index = paddle.to_tensor([2, 1, 0])
    self.assertTrue(np.array_equal(a.scatter(index, b).numpy(),
                                   paddle.scatter(a, index, b).numpy()))

    # 3. Bool tensor operation
    x = paddle.to_tensor([[True, False], [True, False]])
    y = paddle.to_tensor([[False, False], [False, True]])
    self.assertTrue(np.array_equal(x.logical_and(y).numpy(),
                                   paddle.logical_and(x, y).numpy()))
    self.assertTrue(np.array_equal(x.logical_not(y).numpy(),
                                   paddle.logical_not(x, y).numpy()))
    self.assertTrue(np.array_equal(x.logical_or(y).numpy(),
                                   paddle.logical_or(x, y).numpy()))
    self.assertTrue(np.array_equal(x.logical_xor(y).numpy(),
                                   paddle.logical_xor(x, y).numpy()))
    a = paddle.to_tensor([[1, 2], [3, 4]])
    b = paddle.to_tensor([[4, 3], [2, 1]])
    self.assertTrue(np.array_equal(x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))

    x_np = np.random.randn(3, 6, 9, 7)
    x = paddle.to_tensor(x_np)
    x_T = x.T
    self.assertEqual(x_T.shape, [7, 9, 6, 3])
    self.assertTrue(np.array_equal(x_T.numpy(), x_np.T))

    self.assertTrue(inspect.ismethod(a.dot))
    self.assertTrue(inspect.ismethod(a.logsumexp))
    self.assertTrue(inspect.ismethod(a.multiplex))
    self.assertTrue(inspect.ismethod(a.prod))
    self.assertTrue(inspect.ismethod(a.scale))
    self.assertTrue(inspect.ismethod(a.stanh))
    self.assertTrue(inspect.ismethod(a.add_n))
    self.assertTrue(inspect.ismethod(a.max))
    self.assertTrue(inspect.ismethod(a.maximum))
    self.assertTrue(inspect.ismethod(a.min))
    self.assertTrue(inspect.ismethod(a.minimum))
    self.assertTrue(inspect.ismethod(a.floor_divide))
    self.assertTrue(inspect.ismethod(a.remainder))
    self.assertTrue(inspect.ismethod(a.floor_mod))
    self.assertTrue(inspect.ismethod(a.multiply))
    self.assertTrue(inspect.ismethod(a.inverse))
    self.assertTrue(inspect.ismethod(a.log1p))
    self.assertTrue(inspect.ismethod(a.erf))
    self.assertTrue(inspect.ismethod(a.addmm))
    self.assertTrue(inspect.ismethod(a.clip))
    self.assertTrue(inspect.ismethod(a.trace))
    self.assertTrue(inspect.ismethod(a.kron))
    self.assertTrue(inspect.ismethod(a.isinf))
    self.assertTrue(inspect.ismethod(a.isnan))
    self.assertTrue(inspect.ismethod(a.concat))
    self.assertTrue(inspect.ismethod(a.broadcast_to))
    self.assertTrue(inspect.ismethod(a.scatter_nd_add))
    self.assertTrue(inspect.ismethod(a.scatter_nd))
    self.assertTrue(inspect.ismethod(a.shard_index))
    self.assertTrue(inspect.ismethod(a.chunk))
    self.assertTrue(inspect.ismethod(a.stack))
    self.assertTrue(inspect.ismethod(a.strided_slice))
    self.assertTrue(inspect.ismethod(a.unsqueeze))
    self.assertTrue(inspect.ismethod(a.unstack))
    self.assertTrue(inspect.ismethod(a.argmax))
    self.assertTrue(inspect.ismethod(a.argmin))
    self.assertTrue(inspect.ismethod(a.argsort))
    self.assertTrue(inspect.ismethod(a.masked_select))
    self.assertTrue(inspect.ismethod(a.topk))
    self.assertTrue(inspect.ismethod(a.index_select))
    self.assertTrue(inspect.ismethod(a.nonzero))
    self.assertTrue(inspect.ismethod(a.sort))
    self.assertTrue(inspect.ismethod(a.index_sample))
    self.assertTrue(inspect.ismethod(a.mean))
    self.assertTrue(inspect.ismethod(a.std))
    self.assertTrue(inspect.ismethod(a.numel))

def test_net(cfg,
             epoch_idx=-1,
             output_dir=None,
             test_data_loader=None,
             test_writer=None,
             res_gru_net=None):
    # Load taxonomies of the dataset
    taxonomies = []
    with open(cfg.DATASETS[cfg.DATASET.TEST_DATASET.upper()].TAXONOMY_FILE_PATH,
              encoding='utf-8') as file:
        taxonomies = json.loads(file.read())
    taxonomies = {t['taxonomy_id']: t for t in taxonomies}

    # Set up data loader
    if test_data_loader is None:
        # Set up data augmentation
        IMG_SIZE = cfg.CONST.IMG_H, cfg.CONST.IMG_W
        CROP_SIZE = cfg.CONST.CROP_IMG_H, cfg.CONST.CROP_IMG_W
        test_transforms = utils.data_transforms.Compose([
            utils.data_transforms.CenterCrop(IMG_SIZE, CROP_SIZE),
            utils.data_transforms.RandomBackground(cfg.TEST.RANDOM_BG_COLOR_RANGE),
            utils.data_transforms.Normalize(mean=cfg.DATASET.MEAN,
                                            std=cfg.DATASET.STD),
            utils.data_transforms.ToTensor(),
        ])
        dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[
            cfg.DATASET.TEST_DATASET](cfg)
        test_data_loader = paddle.io.DataLoader(
            dataset=dataset_loader.get_dataset(
                utils.data_loaders.DatasetType.TEST,
                cfg.CONST.N_VIEWS_RENDERING, test_transforms),
            batch_size=1,
            # num_workers=1,
            shuffle=False)
        mode = 'test'
    else:
        mode = 'val'

    # paddle.io.Dataset does not support 'str' input
    dataset_taxonomy = None
    rendering_image_path_template = cfg.DATASETS.SHAPENET.RENDERING_PATH
    volume_path_template = cfg.DATASETS.SHAPENET.VOXEL_PATH

    # Load all taxonomies of the dataset
    with open('./datasets/ShapeNet.json', encoding='utf-8') as file:
        dataset_taxonomy = json.loads(file.read())

    all_test_taxonomy_id_and_sample_name = []
    # Load data for each category
    for taxonomy in dataset_taxonomy:
        taxonomy_folder_name = taxonomy['taxonomy_id']
        samples = taxonomy[mode]
        for sample in samples:
            all_test_taxonomy_id_and_sample_name.append(
                [taxonomy_folder_name, sample])
    print('[INFO] Collected files of %s set' % mode)

    # Set up networks
    if res_gru_net is None:
        res_gru_net = Res_Gru_Net(cfg)
        print('[INFO] %s Loading weights from %s ...' % (dt.now(), cfg.CONST.WEIGHTS))
        res_gru_net_state_dict = paddle.load(
            os.path.join(cfg.CONST.WEIGHTS, "res_gru_net.pdparams"))
        res_gru_net.set_state_dict(res_gru_net_state_dict)

    # Set up loss functions
    bce_loss = paddle.nn.BCELoss()

    # Testing loop
    n_samples = len(test_data_loader)
    test_iou = dict()
    res_gru_net_losses = utils.network_utils.AverageMeter()

    # Switch models to evaluation mode
    res_gru_net.eval()

    for sample_idx, (rendering_images,
                     ground_truth_volume) in enumerate(test_data_loader):
        taxonomy_id = all_test_taxonomy_id_and_sample_name[sample_idx][0]
        sample_name = all_test_taxonomy_id_and_sample_name[sample_idx][1]

        with paddle.no_grad():
            # Get data from data loader
            rendering_images = utils.network_utils.var_or_cuda(rendering_images)
            ground_truth_volume = utils.network_utils.var_or_cuda(ground_truth_volume)

            # Test the res_gru_net
            generated_volume = res_gru_net(rendering_images)
            res_gru_net_loss = bce_loss(generated_volume, ground_truth_volume) * 10

            # Append loss and accuracy to average metrics
            res_gru_net_losses.update(res_gru_net_loss)

            # IoU per sample
            sample_iou = []
            for th in cfg.TEST.VOXEL_THRESH:
                _volume = paddle.greater_equal(
                    generated_volume, paddle.to_tensor(th)).astype("float32")
                intersection = paddle.sum(
                    paddle.multiply(_volume, ground_truth_volume))
                union = paddle.sum(
                    paddle.greater_equal(
                        paddle.add(_volume, ground_truth_volume).astype("float32"),
                        paddle.to_tensor(1., dtype='float32')).astype("float32"))
                sample_iou.append((intersection / union))

            # IoU per taxonomy
            if taxonomy_id not in test_iou:
                test_iou[taxonomy_id] = {'n_samples': 0, 'iou': []}
            test_iou[taxonomy_id]['n_samples'] += 1
            test_iou[taxonomy_id]['iou'].append(sample_iou)

            # Append generated volumes to TensorBoard
            if output_dir and sample_idx < 1:
                img_dir = output_dir % 'images'
                # Volume visualization
                gv = generated_volume.cpu().numpy()
                rendering_views = utils.binvox_visualization.get_volume_views(
                    gv, os.path.join(img_dir, 'Reconstructed'), epoch_idx)
                test_writer.add_image(tag='Reconstructed',
                                      img=rendering_views,
                                      step=epoch_idx)
                gtv = ground_truth_volume.cpu().numpy()
                rendering_views = utils.binvox_visualization.get_volume_views(
                    gtv, os.path.join(img_dir, 'GroundTruth'), epoch_idx)
                test_writer.add_image(tag='GroundTruth',
                                      img=rendering_views,
                                      step=epoch_idx)

            # Print sample loss and IoU
            print('[INFO] %s Test[%d/%d] Taxonomy = %s Sample = %s EDLoss = %.4f IoU = %s'
                  % (dt.now(), sample_idx + 1, n_samples, taxonomy_id, sample_name,
                     res_gru_net_loss, ['%.4f' % si for si in sample_iou]))

    # Output testing results
    mean_iou = []
    for taxonomy_id in test_iou:
        test_iou[taxonomy_id]['iou'] = np.mean(test_iou[taxonomy_id]['iou'], axis=0)
        mean_iou.append(test_iou[taxonomy_id]['iou'] *
                        test_iou[taxonomy_id]['n_samples'])
    mean_iou = np.sum(mean_iou, axis=0) / n_samples

    # Print header
    print('============================ TEST RESULTS ============================')
    print('Taxonomy', end='\t')
    print('#Sample', end='\t')
    print('Baseline', end='\t')
    for th in cfg.TEST.VOXEL_THRESH:
        print('t=%.2f' % th, end='\t')
    print()

    # Print body
    for taxonomy_id in test_iou:
        print('%s' % taxonomies[taxonomy_id]['taxonomy_name'].ljust(8), end='\t')
        print('%d' % test_iou[taxonomy_id]['n_samples'], end='\t')
        if 'baseline' in taxonomies[taxonomy_id]:
            print('%.4f' % taxonomies[taxonomy_id]['baseline'][
                '%d-view' % cfg.CONST.N_VIEWS_RENDERING], end='\t\t')
        else:
            print('N/a', end='\t\t')
        for ti in test_iou[taxonomy_id]['iou']:
            print('%.4f' % ti, end='\t')
        print()

    # Print mean IoU for each threshold
    print('Overall ', end='\t\t\t\t')
    for mi in mean_iou:
        print('%.4f' % mi, end='\t')
    print('\n')

    # Add testing results to TensorBoard
    max_iou = np.max(mean_iou)
    if test_writer is not None:
        test_writer.add_scalar(tag='Res_Gru_Net/EpochLoss',
                               value=res_gru_net_losses.avg,
                               step=epoch_idx)
        test_writer.add_scalar(tag='Res_Gru_Net/IoU', value=max_iou, step=epoch_idx)

    return max_iou

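# The thresholded-IoU computation above, reduced to toy 2x2 "volumes":
# binarize the prediction at a threshold, then divide the intersection by the
# union (cells where prediction + ground truth >= 1). Values are assumptions.
import paddle

pred = paddle.to_tensor([[0.9, 0.2], [0.6, 0.1]])
gt = paddle.to_tensor([[1.0, 0.0], [1.0, 1.0]])
th = 0.5
binary = paddle.greater_equal(pred, paddle.to_tensor(th)).astype("float32")
intersection = paddle.sum(paddle.multiply(binary, gt))
union = paddle.sum(paddle.greater_equal(
    paddle.add(binary, gt), paddle.to_tensor(1.)).astype("float32"))
print(float(intersection / union))  # 2/3 = 0.666...
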
def test_tensor_patch_method(self):
    paddle.disable_static()
    x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
    z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype)
    x = paddle.to_tensor(x_np)
    y = paddle.to_tensor(y_np)
    z = paddle.to_tensor(z_np)
    a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])
    b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])

    # 1. Unary operation for Tensor
    self.assertEqual(x.dim(), 2)
    self.assertEqual(x.ndimension(), 2)
    self.assertEqual(x.ndim, 2)
    self.assertEqual(x.size(), [2, 3])
    self.assertTrue(np.array_equal(x.sigmoid().numpy(), fluid.layers.sigmoid(x).numpy()))
    self.assertTrue(np.array_equal(x.logsigmoid().numpy(),
                                   fluid.layers.logsigmoid(x).numpy()))
    self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
    self.assertTrue(np.array_equal(x.tanh().numpy(), paddle.tanh(x).numpy()))
    self.assertTrue(np.array_equal(x.atan().numpy(), paddle.atan(x).numpy()))
    self.assertTrue(np.array_equal(x.tanh_shrink().numpy(),
                                   fluid.layers.tanh_shrink(x).numpy()))
    self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
    m = x.abs()
    self.assertTrue(np.array_equal(m.sqrt().numpy(), paddle.sqrt(m).numpy()))
    self.assertTrue(np.array_equal(m.rsqrt().numpy(), paddle.rsqrt(m).numpy()))
    self.assertTrue(np.array_equal(x.ceil().numpy(), paddle.ceil(x).numpy()))
    self.assertTrue(np.array_equal(x.floor().numpy(), paddle.floor(x).numpy()))
    self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy()))
    self.assertTrue(np.array_equal(x.acos().numpy(), paddle.acos(x).numpy()))
    self.assertTrue(np.array_equal(x.asin().numpy(), paddle.asin(x).numpy()))
    self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy()))
    self.assertTrue(np.array_equal(x.sinh().numpy(), paddle.sinh(x).numpy()))
    self.assertTrue(np.array_equal(x.cosh().numpy(), paddle.cosh(x).numpy()))
    self.assertTrue(np.array_equal(x.round().numpy(), paddle.round(x).numpy()))
    self.assertTrue(np.array_equal(x.reciprocal().numpy(), paddle.reciprocal(x).numpy()))
    self.assertTrue(np.array_equal(x.square().numpy(), paddle.square(x).numpy()))
    self.assertTrue(np.array_equal(x.softplus().numpy(),
                                   fluid.layers.softplus(x).numpy()))
    self.assertTrue(np.array_equal(x.softsign().numpy(),
                                   fluid.layers.softsign(x).numpy()))
    self.assertTrue(np.array_equal(x.rank().numpy(), paddle.rank(x).numpy()))
    self.assertTrue(np.array_equal(x[0].t().numpy(), paddle.t(x[0]).numpy()))
    m = paddle.to_tensor(np.random.uniform(1, 2, [3, 3]), 'float32')
    m = m.matmul(m.t())
    self.assertTrue(np.array_equal(m.cholesky().numpy(), paddle.cholesky(m).numpy()))
    self.assertTrue(np.array_equal(x.is_empty().numpy(), paddle.is_empty(x).numpy()))
    self.assertTrue(np.array_equal(x.isfinite().numpy(), paddle.isfinite(x).numpy()))
    self.assertTrue(np.array_equal(x.cast('int32').numpy(),
                                   paddle.cast(x, 'int32').numpy()))
    self.assertTrue(np.array_equal(x.expand([3, 2, 3]).numpy(),
                                   paddle.expand(x, [3, 2, 3]).numpy()))
    self.assertTrue(np.array_equal(x.tile([2, 2]).numpy(), paddle.tile(x, [2, 2]).numpy()))
    self.assertTrue(np.array_equal(x.flatten().numpy(), paddle.flatten(x).numpy()))
    index = paddle.to_tensor([0, 1])
    self.assertTrue(np.array_equal(x.gather(index).numpy(), paddle.gather(x, index).numpy()))
    index = paddle.to_tensor([[0, 1], [1, 2]])
    self.assertTrue(np.array_equal(x.gather_nd(index).numpy(),
                                   paddle.gather_nd(x, index).numpy()))
    self.assertTrue(np.array_equal(x.reverse([0, 1]).numpy(),
                                   paddle.reverse(x, [0, 1]).numpy()))
    self.assertTrue(np.array_equal(a.reshape([3, 2]).numpy(),
                                   paddle.reshape(a, [3, 2]).numpy()))
    self.assertTrue(np.array_equal(x.slice([0, 1], [0, 0], [1, 2]).numpy(),
                                   paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()))
    self.assertTrue(np.array_equal(x.split(2)[0].numpy(), paddle.split(x, 2)[0].numpy()))
    m = paddle.to_tensor(np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype))
    self.assertTrue(np.array_equal(m.squeeze([]).numpy(), paddle.squeeze(m, []).numpy()))
    self.assertTrue(np.array_equal(m.squeeze([1, 2]).numpy(),
                                   paddle.squeeze(m, [1, 2]).numpy()))
    m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32')
    self.assertTrue(np.array_equal(m.unique()[0].numpy(), paddle.unique(m)[0].numpy()))
    self.assertTrue(np.array_equal(m.unique_with_counts()[2],
                                   paddle.unique_with_counts(m)[2]))
    self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0])))
    self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0)))
    self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1)))
    self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1)))
    m = paddle.to_tensor(1)
    self.assertTrue(np.array_equal(m.increment(), paddle.increment(m)))
    m = x.abs()
    self.assertTrue(np.array_equal(m.log(), paddle.log(m)))
    self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2)))
    self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))

    # 2. Binary operation
    self.assertTrue(np.array_equal(x.matmul(y, True, False).numpy(),
                                   paddle.matmul(x, y, True, False).numpy()))
    self.assertTrue(np.array_equal(x.norm(p='fro', axis=[0, 1]).numpy(),
                                   paddle.norm(x, p='fro', axis=[0, 1]).numpy()))
    self.assertTrue(np.array_equal(x.dist(y).numpy(), paddle.dist(x, y).numpy()))
    self.assertTrue(np.array_equal(x.cross(y).numpy(), paddle.cross(x, y).numpy()))
    m = x.expand([2, 2, 3])
    n = y.expand([2, 2, 3]).transpose([0, 2, 1])
    self.assertTrue(np.array_equal(m.bmm(n).numpy(), paddle.bmm(m, n).numpy()))
    self.assertTrue(np.array_equal(x.histogram(5, -1, 1).numpy(),
                                   paddle.histogram(x, 5, -1, 1).numpy()))
    self.assertTrue(np.array_equal(x.equal(y).numpy(), paddle.equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.greater_equal(y).numpy(),
                                   paddle.greater_equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.greater_than(y).numpy(),
                                   paddle.greater_than(x, y).numpy()))
    self.assertTrue(np.array_equal(x.less_equal(y).numpy(), paddle.less_equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.less_than(y).numpy(), paddle.less_than(x, y).numpy()))
    self.assertTrue(np.array_equal(x.not_equal(y).numpy(), paddle.not_equal(x, y).numpy()))
    self.assertTrue(np.array_equal(x.equal_all(y).numpy(), paddle.equal_all(x, y).numpy()))
    self.assertTrue(np.array_equal(x.allclose(y).numpy(), paddle.allclose(x, y).numpy()))
    m = x.expand([2, 2, 3])
    self.assertTrue(np.array_equal(x.expand_as(m).numpy(), paddle.expand_as(x, m).numpy()))
    index = paddle.to_tensor([2, 1, 0])
    self.assertTrue(np.array_equal(a.scatter(index, b).numpy(),
                                   paddle.scatter(a, index, b).numpy()))

    # 3. Bool tensor operation
    x = paddle.to_tensor([[True, False], [True, False]])
    y = paddle.to_tensor([[False, False], [False, True]])
    self.assertTrue(np.array_equal(x.reduce_all().numpy(), paddle.reduce_all(x).numpy()))
    self.assertTrue(np.array_equal(x.reduce_any().numpy(), paddle.reduce_any(x).numpy()))
    self.assertTrue(np.array_equal(x.logical_and(y).numpy(),
                                   paddle.logical_and(x, y).numpy()))
    self.assertTrue(np.array_equal(x.logical_not(y).numpy(),
                                   paddle.logical_not(x, y).numpy()))
    self.assertTrue(np.array_equal(x.logical_or(y).numpy(),
                                   paddle.logical_or(x, y).numpy()))
    self.assertTrue(np.array_equal(x.logical_xor(y).numpy(),
                                   paddle.logical_xor(x, y).numpy()))

def demo_net(cfg, imgs_path):
    encoder = Encoder(cfg)
    decoder = Decoder(cfg)
    merger = Merger(cfg)

    print('[INFO] %s Loading weights from %s ...' % (dt.now(), cfg.CONST.WEIGHTS))
    encoder_state_dict = paddle.load(
        os.path.join(cfg.CONST.WEIGHTS, "encoder.pdparams"))
    encoder.set_state_dict(encoder_state_dict)
    decoder_state_dict = paddle.load(
        os.path.join(cfg.CONST.WEIGHTS, "decoder.pdparams"))
    decoder.set_state_dict(decoder_state_dict)
    if cfg.NETWORK.USE_MERGER:
        merger_state_dict = paddle.load(
            os.path.join(cfg.CONST.WEIGHTS, "merger.pdparams"))
        merger.set_state_dict(merger_state_dict)

    # Switch models to evaluation mode
    encoder.eval()
    decoder.eval()
    merger.eval()

    rendering_images = []
    if os.path.isfile(imgs_path):
        print("demo img")
        rendering_image = cv2.imread(
            imgs_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.
        rendering_image = np.asarray(rendering_image)[np.newaxis, :, :, :]
        IMG_SIZE = cfg.CONST.IMG_H, cfg.CONST.IMG_W
        CROP_SIZE = cfg.CONST.CROP_IMG_H, cfg.CONST.CROP_IMG_W
        test_transforms = utils.data_transforms.Compose([
            utils.data_transforms.CenterCrop(IMG_SIZE, CROP_SIZE),
            utils.data_transforms.RandomBackground(cfg.TEST.RANDOM_BG_COLOR_RANGE),
            utils.data_transforms.Normalize(mean=cfg.DATASET.MEAN,
                                            std=cfg.DATASET.STD),
            utils.data_transforms.ToTensor(),
        ])
        rendering_image = test_transforms(rendering_image)
        rendering_image = paddle.reshape(rendering_image, [1, 1, 3, 224, 224])

        with paddle.no_grad():
            # Get data from data loader
            rendering_image = utils.network_utils.var_or_cuda(rendering_image)

            # Test the encoder, decoder and merger
            image_features = encoder(rendering_image)
            raw_features, generated_volume = decoder(image_features)
            if cfg.NETWORK.USE_MERGER:
                generated_volume = merger(raw_features, generated_volume)
            else:
                generated_volume = paddle.mean(generated_volume, axis=1)

            for th in cfg.TEST.DEMO_VOXEL_THRESH:
                _volume = paddle.greater_equal(
                    generated_volume, paddle.to_tensor(th)).astype("float32")
                _volume = paddle.reshape(_volume, [32, 32, 32])

                # Save the binarized volume as a mesh
                if cfg.DIR.OUT_PATH:
                    pred_file_name = os.path.join(
                        cfg.DIR.OUT_PATH,
                        imgs_path.split('/')[-1].split('.')[0] + '.obj')
                    print("save ", pred_file_name)
                    utils.voxel.voxel2obj(pred_file_name, _volume.cpu().numpy())
    elif os.path.isdir(imgs_path):
        print("demo dir")
        rendering_files_path = os.listdir(imgs_path)
        for rendering_file_path in rendering_files_path:
            if '.png' not in rendering_file_path:
                continue
            print(os.path.join(imgs_path, rendering_file_path))
            rendering_image = cv2.imread(
                os.path.join(imgs_path, rendering_file_path),
                cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.
            rendering_image = np.asarray(rendering_image)[np.newaxis, :, :, :]
            IMG_SIZE = cfg.CONST.IMG_H, cfg.CONST.IMG_W
            CROP_SIZE = cfg.CONST.CROP_IMG_H, cfg.CONST.CROP_IMG_W
            test_transforms = utils.data_transforms.Compose([
                utils.data_transforms.CenterCrop(IMG_SIZE, CROP_SIZE),
                utils.data_transforms.RandomBackground(cfg.TEST.RANDOM_BG_COLOR_RANGE),
                utils.data_transforms.Normalize(mean=cfg.DATASET.MEAN,
                                                std=cfg.DATASET.STD),
                utils.data_transforms.ToTensor(),
            ])
            rendering_image = test_transforms(rendering_image)
            rendering_image = paddle.reshape(rendering_image, [1, 1, 3, 224, 224])

            with paddle.no_grad():
                # Get data from data loader
                rendering_image = utils.network_utils.var_or_cuda(rendering_image)

                # Test the encoder, decoder and merger
                image_features = encoder(rendering_image)
                raw_features, generated_volume = decoder(image_features)
                if cfg.NETWORK.USE_MERGER:
                    generated_volume = merger(raw_features, generated_volume)
                else:
                    generated_volume = paddle.mean(generated_volume, axis=1)

                # Save the thresholded volume as a mesh
                if cfg.DIR.OUT_PATH:
                    gv = generated_volume.detach().cpu().numpy()
                    pred_file_name = os.path.join(
                        cfg.DIR.OUT_PATH, imgs_path,
                        rendering_file_path.split('.')[0] + '.obj')
                    utils.voxel.voxel2obj(pred_file_name,
                                          gv[0, 1] > cfg.TEST.DEMO_VOXEL_THRESH)
    else:
        raise Exception("error input path")

def forward(self, inputs, inputs_):
    """ forward """
    x = paddle.greater_equal(inputs, inputs_)
    return x

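# For reference, the bare call this layer wraps: paddle.greater_equal compares
# two tensors elementwise and returns a boolean tensor.
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([2.0, 2.0, 2.0])
print(paddle.greater_equal(x, y).numpy())  # [False  True  True]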