# Imports assumed by the snippets in this section.
import numpy as np
import paddle
import paddle.fluid as fluid


def _topk(self, scores):
    k = self.max_per_img
    shape_fm = paddle.shape(scores)
    shape_fm.stop_gradient = True
    cat, height, width = shape_fm[1], shape_fm[2], shape_fm[3]
    # batch size is 1
    scores_r = paddle.reshape(scores, [cat, -1])
    topk_scores, topk_inds = paddle.topk(scores_r, k)
    topk_ys = topk_inds // width
    topk_xs = topk_inds % width

    topk_score_r = paddle.reshape(topk_scores, [-1])
    topk_score, topk_ind = paddle.topk(topk_score_r, k)
    k_t = paddle.full(paddle.shape(topk_ind), k, dtype='int64')
    topk_clses = paddle.cast(paddle.floor_divide(topk_ind, k_t), 'float32')

    topk_inds = paddle.reshape(topk_inds, [-1])
    topk_ys = paddle.reshape(topk_ys, [-1, 1])
    topk_xs = paddle.reshape(topk_xs, [-1, 1])
    topk_inds = paddle.gather(topk_inds, topk_ind)
    topk_ys = paddle.gather(topk_ys, topk_ind)
    topk_xs = paddle.gather(topk_xs, topk_ind)
    return topk_score, topk_inds, topk_clses, topk_ys, topk_xs

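# Minimal usage sketch (not from the original source). Assumptions: `_topk`
# belongs to a detection head object that exposes `max_per_img`, and `scores`
# is an [N=1, C, H, W] class heatmap, as the in-code comment notes. The
# `_DemoHead` class below is hypothetical and exists only to supply `self`.
def _demo_topk():
    class _DemoHead:
        def __init__(self, max_per_img):
            self.max_per_img = max_per_img

    head = _DemoHead(max_per_img=5)
    scores = paddle.rand([1, 80, 32, 32])  # batch of 1, 80 classes, 32x32 map
    # Call the method above with an explicit `self`.
    topk_score, topk_inds, topk_clses, topk_ys, topk_xs = _topk(head, scores)
    # Each output holds `max_per_img` (= 5) entries: top scores, flattened
    # spatial indices, class ids, and the corresponding y/x coordinates.
    return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
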
def test_dygraph(self):
    with fluid.dygraph.guard():
        np_x = np.array([2, 3, 8, 7]).astype('int64')
        np_y = np.array([1, 5, 3, 3]).astype('int64')
        x = paddle.to_tensor(np_x)
        y = paddle.to_tensor(np_y)
        z = paddle.floor_divide(x, y)
        np_z = z.numpy()
        z_expected = np.array([2, 0, 2, 2])
        self.assertEqual((np_z == z_expected).all(), True)

    with fluid.dygraph.guard(fluid.CPUPlace()):
        # divide by zero
        np_x = np.array([2, 3, 4])
        np_y = np.array([0])
        x = paddle.to_tensor(np_x)
        y = paddle.to_tensor(np_y)
        try:
            z = x // y
        except Exception as e:
            print("Error: Divide by zero encounter in floor_divide\n")

        # divide by zero
        np_x = np.array([2])
        np_y = np.array([0, 0, 0])
        x = paddle.to_tensor(np_x, dtype="int32")
        y = paddle.to_tensor(np_y, dtype="int32")
        try:
            z = x // y
        except Exception as e:
            print("Error: Divide by zero encounter in floor_divide\n")

def test_name(self):
    with fluid.program_guard(fluid.Program()):
        x = fluid.data(name="x", shape=[2, 3], dtype="int64")
        y = fluid.data(name='y', shape=[2, 3], dtype='int64')

        y_1 = paddle.floor_divide(x, y, name='div_res')
        self.assertEqual(('div_res' in y_1.name), True)

def forward(self, inputs, inputs_):
    """Elementwise floor division: returns inputs // inputs_."""
    x = paddle.floor_divide(inputs, inputs_)
    return x

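# Minimal usage sketch (not from the original source). Assumption: the
# `forward` above is defined on a paddle.nn.Layer subclass; `FloorDivideNet`
# below is a hypothetical stand-in used only to show how it would be called.
def _demo_forward():
    class FloorDivideNet(paddle.nn.Layer):
        def forward(self, inputs, inputs_):
            return paddle.floor_divide(inputs, inputs_)

    net = FloorDivideNet()
    x = paddle.to_tensor([7, 8, 9], dtype="int64")
    y = paddle.to_tensor([2, 3, 4], dtype="int64")
    out = net(x, y)  # elementwise floor division -> [3, 2, 2]
    return out
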
def check_static_result(self, place):
    # rule 1: y may not be a numpy.ndarray
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = fluid.data(name="x", shape=[3], dtype="float64")
        y = np.array([1, 2, 3])
        self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)

    # rule 2: both the inputs are not Tensor
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = 2
        y = 4
        res = paddle.floor_divide(x, y)
        exe = fluid.Executor(place)
        np_z = exe.run(fluid.default_main_program(),
                       feed={},
                       fetch_list=[res])
        self.assertEqual(np_z[0] == 0., True)

    # rule 3: the dtypes of x and y must match
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = fluid.data(name="x", shape=[3], dtype="float64")
        y = fluid.data(name="y", shape=[3], dtype="float32")
        self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)

    # rule 4: x is Tensor, y is scalar
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = fluid.data(name="x", shape=[3], dtype="float64")
        y = 2
        exe = fluid.Executor(place)
        res = x // y
        np_z = exe.run(fluid.default_main_program(),
                       feed={"x": np.array([2, 3, 4]).astype('float64')},
                       fetch_list=[res])
        z_expected = np.array([1., 1., 2.])
        self.assertEqual((np_z[0] == z_expected).all(), True)

    # rule 5: y is Tensor, x is scalar
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = fluid.data(name="x", shape=[3], dtype="float64")
        y = 2
        exe = fluid.Executor(place)
        res = y // x
        np_z = exe.run(fluid.default_main_program(),
                       feed={"x": np.array([2, 8, 4]).astype('float64')},
                       fetch_list=[res])
        z_expected = np.array([1., 0., 0.])
        self.assertEqual((np_z[0] == z_expected).all(), True)

    # rule 6: both x and y are Tensor
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = fluid.data(name="x", shape=[3], dtype="float64")
        y = fluid.data(name="y", shape=[3], dtype="float64")
        exe = fluid.Executor(place)
        res = x // y
        np_z = exe.run(fluid.default_main_program(),
                       feed={
                           "x": np.array([2, 3, 4]).astype('float64'),
                           "y": np.array([1, 5, 2]).astype('float64')
                       },
                       fetch_list=[res])
        z_expected = np.array([2., 0., 2.])
        self.assertEqual((np_z[0] == z_expected).all(), True)

def test_dygraph(self):
    for place in self.places:
        with fluid.dygraph.guard(place):
            # rule 1: avoid numpy.ndarray operands
            np_x = np.array([2, 3, 4])
            np_y = np.array([1, 5, 2])
            x = paddle.to_tensor(np_x)
            self.assertRaises(TypeError, paddle.floor_divide, x=x, y=np_y)

            # rule 2: both the inputs are not Tensor
            z = paddle.floor_divide(3, 2)
            self.assertEqual(z.numpy()[0] == 1., True)

            # rule 3: both inputs are Tensor with mismatched dtypes
            np_x = np.array([2, 3, 4])
            np_y = np.array([1, 5, 2])
            x = paddle.to_tensor(np_x, dtype="float32")
            y = paddle.to_tensor(np_y, dtype="float64")
            self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)

            # rule 4: x is Tensor, y is scalar
            np_x = np.array([2, 3, 4])
            x = paddle.to_tensor(np_x, dtype="int32")
            y = 2
            z = x // y
            z_expected = np.array([1, 1, 2])
            self.assertEqual((z_expected == z.numpy()).all(), True)

            # rule 5: y is Tensor, x is scalar
            np_x = np.array([2, 1, 4])
            x = paddle.to_tensor(np_x, dtype="int32")
            y = 2
            z = y // x
            z_expected = np.array([1, 2, 0])
            self.assertEqual((z_expected == z.numpy()).all(), True)

            # rule 6: both x and y are Tensor
            np_x = np.array([2, 3, 4])
            np_y = np.array([1, 5, 2])
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = x // y
            z_expected = np.array([2., 0., 2.])
            self.assertEqual((z_expected == z.numpy()).all(), True)

    with fluid.dygraph.guard(fluid.CPUPlace()):
        # divide by zero
        np_x = np.array([2, 3, 4])
        np_y = np.array([0])
        x = paddle.to_tensor(np_x)
        y = paddle.to_tensor(np_y)
        try:
            z = x // y
        except Exception as e:
            print("Error: Divide by zero encounter in floor_divide\n")

        # divide by zero
        np_x = np.array([2])
        np_y = np.array([0, 0, 0])
        x = paddle.to_tensor(np_x, dtype="int32")
        y = paddle.to_tensor(np_y, dtype="int32")
        try:
            z = x // y
        except Exception as e:
            print("Error: Divide by zero encounter in floor_divide\n")

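# Cross-check sketch (not part of the original tests): the "rule 6" dygraph
# case above should agree with NumPy's elementwise floor division on the same
# data, which is a convenient sanity check when extending these tests.
def _demo_numpy_cross_check():
    np_x = np.array([2, 3, 4])
    np_y = np.array([1, 5, 2])
    z_paddle = (paddle.to_tensor(np_x) // paddle.to_tensor(np_y)).numpy()
    z_numpy = np_x // np_y  # NumPy floor division, also yields [2, 0, 2]
    assert (z_paddle == z_numpy).all()
    return z_paddle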