def gen_cost(outputs, gts, loss, params):
    """Build the list of training costs for the tasks requested in `loss`
    ('depth', 'normal', 'trans')."""
    cost = []
    stage = params['stage']
    if 'depth' in loss:
        cost_depth = cost_layers.ele_norm_cost(input=outputs['depth_inv'],
                                               label=gts['depth_gt'],
                                               weight=gts['weight'],
                                               height=params['size_stage'][1][0],
                                               width=params['size_stage'][1][1],
                                               num_channel=1,
                                               cost_type='l1')
        cost.append(cost_depth)

    if 'normal' in loss:
        height, width = params['size_stage'][1]
        normal = util_layers.norm(outputs['normal'], height, width, 3)
        label = paddle.layer.bilinear_interp(input=gts['normal_gt'],
                                             out_size_x=width,
                                             out_size_y=height)
        label = util_layers.norm(label, height, width, 3)
        cost_normal = cost_layers.ns_ele_l2_cost(input=normal,
                                                 label=label,
                                                 weight=gts['weight'],
                                                 height=params['size_stage'][1][0],
                                                 width=params['size_stage'][1][1],
                                                 num_channel=3)
        # cost_normal = cost_layers.inner_product_cost(input=outputs['normal'],
        #                                              label=gts['normal'],
        #                                              weight=gts['weight'],
        #                                              height=params['size_stage'][1][0],
        #                                              width=params['size_stage'][1][1],
        #                                              num_channel=3)
        cost.append(cost_normal)

    if 'trans' in loss:
        cost_rotation = cost_layers.inner_product_cost(input=outputs['rotation'],
                                                       label=gts['rotation'],
                                                       weight=None,
                                                       height=1,
                                                       width=1,
                                                       num_channel=3)
        cost_translation = cost_layers.ele_norm_cost(input=outputs['translation'],
                                                     label=gts['translation'],
                                                     weight=None,
                                                     height=1,
                                                     width=1,
                                                     num_channel=3,
                                                     cost_type='l1')
        cost.append(cost_rotation)
        cost.append(cost_translation)

    return cost
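# Illustrative usage sketch (not from the original file): how the multi-task
# cost list above might be combined into a single optimization target.
# `build_network`, `load_gts`, and the exact `params` contents are assumptions.
#
#   outputs = build_network(images)     # must provide 'depth_inv', 'normal',
#                                       # 'rotation', 'translation'
#   costs = gen_cost(outputs, load_gts(), loss=['depth', 'normal', 'trans'],
#                    params=params)
#   total_cost = paddle.layer.addto(input=costs)   # sum the per-task losses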
def gen_cost(outputs, gts, params,
             cost_name=['depth_l1', 'depth_gradient'],
             weights=[1.0, 0.4],
             is_inverse=False):
    """Build the list of depth losses named in `cost_name`, each scaled by the
    matching entry of `weights`. When `is_inverse` is set, the inverse-depth
    output and ground truth are used instead."""
    suffix = '_inv' if is_inverse else ''
    in_depth_name = 'depth' + suffix
    gt_depth_name = 'depth_gt' + suffix

    # depth losses
    cost = []
    if 'depth_l1' in cost_name:
        cost_depth = cost_layers.ele_norm_cost(input=outputs[in_depth_name],
                                               label=gts[gt_depth_name],
                                               weight=gts['weight'],
                                               height=params['size'][0],
                                               width=params['size'][1],
                                               num_channel=1,
                                               cost_type='l1')
        cost_depth = util_layers.mul(cost_depth, weights[0])
        cost.append(cost_depth)

    if 'rel_l1' in cost_name:
        cost_depth = cost_layers.relative_l1(input=outputs[in_depth_name],
                                             label=gts[gt_depth_name],
                                             weight=gts['weight'],
                                             height=params['size'][0],
                                             width=params['size'][1])
        cost_depth = util_layers.mul(cost_depth, weights[0])
        cost.append(cost_depth)

    if 'depth_gradient' in cost_name:
        cost_depth_gradient = cost_layers.gradient_cost(input=outputs[in_depth_name],
                                                        label=gts[gt_depth_name],
                                                        weight=gts['weight'],
                                                        height=params['size'][0],
                                                        width=params['size'][1],
                                                        num_channel=1,
                                                        scales=[1, 2, 4, 8])
        cost_depth_gradient = util_layers.mul(cost_depth_gradient, weights[1])
        cost.append(cost_depth_gradient)

    return cost
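# Illustrative call sketch (an assumption, not from the original file): selecting
# the relative-L1 depth loss on the inverse-depth branch with a down-weighted
# gradient term; `outputs`/`gts` must then contain 'depth_inv' and 'depth_gt_inv'.
#
#   costs = gen_cost(outputs, gts, params={'size': (height, width)},
#                    cost_name=['rel_l1', 'depth_gradient'],
#                    weights=[1.0, 0.4],
#                    is_inverse=True)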
def test_ele_norm_cost(argv):
    """Smoke test: compares a 4x4 single-channel map against a shifted copy
    using the per-element L2 cost and prints the resulting cost value."""
    flow_np = np.array([1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2, 2],
                       dtype=np.float32)
    flow_gt_np = flow_np + 2
    height = 4
    width = 4
    channel = 1
    weight_np = np.ones((height, width), dtype=np.float32).flatten()

    flow = pd.layer.data(name="flow",
                         type=pd.data_type.dense_vector(height * width),
                         height=height,
                         width=width)
    flow_gt = pd.layer.data(name="flow_gt",
                            type=pd.data_type.dense_vector(height * width),
                            height=height,
                            width=width)
    weight = pd.layer.data(name="weight",
                           type=pd.data_type.dense_vector(height * width),
                           height=height,
                           width=width)

    # cost = cost_layers.math_op(input=flow, act=pd.activation.Sqrt())
    cost = cost_layers.ele_norm_cost(input=flow,
                                     label=flow_gt,
                                     weight=weight,
                                     height=height,
                                     width=width,
                                     num_channel=channel,
                                     cost_type='l2')

    parameters, topo = pd.parameters.create(cost)
    cost_np = pd.infer(output=topo,
                       parameters=parameters,
                       input=[(flow_np, flow_gt_np, weight_np)],
                       feeding={'flow': 0, 'flow_gt': 1, 'weight': 2})
    print(cost_np)
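# A parallel sketch for the relative-L1 cost, mirroring test_ele_norm_cost above.
# It assumes cost_layers.relative_l1 takes the (input, label, weight, height,
# width) arguments used in gen_cost; treat it as illustrative, not a shipped test.
def test_relative_l1_sketch(argv):
    height, width = 4, 4
    depth_np = np.linspace(1.0, 16.0, num=height * width).astype(np.float32)
    depth_gt_np = depth_np * 1.1  # 10% relative error everywhere
    weight_np = np.ones(height * width, dtype=np.float32)

    depth = pd.layer.data(name="depth",
                          type=pd.data_type.dense_vector(height * width),
                          height=height,
                          width=width)
    depth_gt = pd.layer.data(name="depth_gt",
                             type=pd.data_type.dense_vector(height * width),
                             height=height,
                             width=width)
    weight = pd.layer.data(name="weight",
                           type=pd.data_type.dense_vector(height * width),
                           height=height,
                           width=width)

    cost = cost_layers.relative_l1(input=depth,
                                   label=depth_gt,
                                   weight=weight,
                                   height=height,
                                   width=width)

    parameters, topo = pd.parameters.create(cost)
    cost_np = pd.infer(output=topo,
                       parameters=parameters,
                       input=[(depth_np, depth_gt_np, weight_np)],
                       feeding={'depth': 0, 'depth_gt': 1, 'weight': 2})
    print(cost_np)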