# Assumed context for this partial module: `pd` is the repo's PaddlePaddle
# layer alias, `util_layers` is the sibling helper module, and
# `image_resize_func` / `sum_weighted_loss` are defined elsewhere in the
# repo; `image_resize_func` maps interpolation names to resize layers, e.g.
#   {'nearest': pd.nearest_interp, 'bilinear': pd.bilinear_interp}.


def ele_one_hot(input, class_num, height, width):
    """Element-wise one-hot: expand an integer id map of shape
    1 x height x width into a class_num x height x width tensor.
    """
    # Flatten the id map so that every pixel becomes one sample of size 1.
    input_rs = pd.resize(input=input, size=1)
    one_hot = pd.one_hot(input=input_rs, class_num=class_num)
    # Gather the per-pixel one-hot rows back into a single sample, then
    # transpose from (h x w) x class_num layout to class_num x h x w.
    one_hot = pd.resize(input=one_hot, size=height * width * class_num)
    one_hot = pd.transpose(input=one_hot,
                           trans_order=[2, 0, 1],
                           height=width,
                           width=class_num,
                           channels=height)
    return one_hot
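
# Usage sketch (hypothetical, not from the original repo): `seg_label`
# stands for a 1 x H x W integer id map produced elsewhere in the network
# config; class_num=19 and the 256 x 512 size are illustrative only.
def _example_ele_one_hot(seg_label):
    # Expand the id map into a class_num x H x W one-hot tensor, e.g. as
    # input to the IOU computation below.
    return ele_one_hot(seg_label, class_num=19, height=256, width=512)
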
def ns_ele_l2_cost(input, label, weight, height, width,
                   num_channel=None, interp='nearest'):
    """Per-pixel weighted L2 cost between input and label maps."""
    assert interp in image_resize_func
    # Make sure the input, label and weight all have the same spatial size.
    input = pd.bilinear_interp(input=input, out_size_x=width, out_size_y=height)
    label = image_resize_func[interp](input=label,
                                      out_size_x=width, out_size_y=height)
    weight = image_resize_func[interp](input=weight,
                                       out_size_x=width, out_size_y=height)

    # Reshape the original layers: input has shape c x h x w; change it to
    # h x w x c so that each pixel becomes one sample of size num_channel.
    input_ts = pd.transpose(input=input, trans_order=[1, 2, 0],
                            height=height, width=width)
    input_rs = pd.resize(input=input_ts, size=num_channel, height=1, width=1)
    label_ts = pd.transpose(input=label, trans_order=[1, 2, 0],
                            height=height, width=width)
    label_rs = pd.resize(input=label_ts, size=num_channel, height=1, width=1)
    weight_rs = pd.resize(input=weight, size=1, height=1, width=1)

    # Per-pixel L2 distance, weighted by the per-pixel weight map.
    cost_rs = pd.mse_cost(input=input_rs, label=label_rs)
    sqrt_l2_cost = util_layers.math_op(input=cost_rs, act=pd.activation.Sqrt())
    sqrt_l2_cost = pd.mixed(
        size=1,
        input=[pd.dotmul_operator(a=sqrt_l2_cost, b=weight_rs, scale=1.0)])
    sqrt_l2_cost = pd.resize(input=sqrt_l2_cost, size=height * width,
                             height=height, width=width)

    # Normalize by the total weight so the cost is a weighted mean.
    weight_fac = pd.sum_cost(input=weight)
    weight_fac = util_layers.math_op(input=weight_fac, act=pd.activation.Inv())
    sqrt_l2_cost = pd.scaling(input=sqrt_l2_cost, weight=weight_fac)
    cost = pd.sum_cost(input=sqrt_l2_cost)

    return cost
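
# Usage sketch (hypothetical): builds a weighted L2 regression cost on top
# of existing layers. `pred_depth`, `gt_depth` and `gt_weight` stand for
# layers defined elsewhere in the config; sizes are illustrative.
def _example_ns_ele_l2_cost(pred_depth, gt_depth, gt_weight):
    # 1-channel maps compared at a 64 x 64 working resolution.
    return ns_ele_l2_cost(pred_depth, gt_depth, gt_weight,
                          height=64, width=64, num_channel=1)
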
def iou_score(input, label, weight, height, width, class_num, is_cost=True):
    """class_num is the number of semantic classes plus background; this
    score can also serve as an IOU cost for training.
    """
    # input = pd.resize(input=input, size=height * width)
    # label = pd.resize(input=label, size=height * width)
    weight = pd.nearest_interp(input=weight, out_size_x=width, out_size_y=height)
    if not is_cost:
        # If not a cost, then this is evaluation: the input is a per-pixel
        # class id map, so we can one-hot it just like the label. Otherwise
        # the input is already a per-class probability map and is used as is.
        input = util_layers.math_op(input=[input, weight], op='dot')
        input_one_hot = util_layers.ele_one_hot(input, class_num, height, width)
    else:
        input_one_hot = input

    input_one_hot = pd.bilinear_interp(input=input_one_hot,
                                       out_size_x=width, out_size_y=height)
    label = pd.nearest_interp(input=label, out_size_x=width, out_size_y=height)
    # Mask out ignored pixels before one-hotting the label.
    label = util_layers.math_op(input=[label, weight], op='dot')
    label_one_hot = util_layers.ele_one_hot(label, class_num, height, width)

    # intersection = input * label; union = input + label - input * label
    inter = util_layers.math_op(input=[input_one_hot, label_one_hot], op='dot')
    union = pd.addto(input=[input_one_hot, label_one_hot],
                     act=pd.activation.Linear(), bias_attr=False)
    inter_neg = pd.slope_intercept(input=inter, slope=-1)
    union = pd.addto(input=[union, inter_neg],
                     act=pd.activation.Linear(), bias_attr=False)

    # Sum intersection and union over the spatial dimensions per class.
    inter = pd.resize(input=inter, size=height * width)
    inter = pd.sum_cost(input=inter)
    union = pd.resize(input=union, size=height * width)
    union = pd.sum_cost(input=union)

    union_inv = util_layers.math_op(input=union, act=pd.activation.Inv())
    iou = pd.mixed(size=1,
                   input=[pd.dotmul_operator(a=inter, b=union_inv, scale=1.0)])
    iou = pd.resize(input=iou, size=class_num)

    if is_cost:
        iou = pd.sum_cost(input=iou)

    return iou
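
# Usage sketch (hypothetical): evaluation-mode IOU between a predicted id
# map and the ground truth. With is_cost=True the same graph instead yields
# a differentiable IOU cost from a per-class probability map.
def _example_iou_score(pred_ids, gt_ids, gt_weight):
    return iou_score(pred_ids, gt_ids, gt_weight,
                     height=256, width=512, class_num=19, is_cost=False)
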
def reduce(input, shape, op, axis=1):
    """Reduce `input` with `op` along the `axis` dimension.
    shape: [channel, height, width]
    """
    if op == 'sum':
        if axis == 1:
            # Move the channel axis last, view each pixel as one sample of
            # size channel, and sum over it, yielding an h x w map.
            input = pd.transpose(input=input, trans_order=[1, 2, 0],
                                 height=shape[1], width=shape[2])
            input = pd.resize(input=input, size=shape[0], height=1, width=1)
            input = pd.sum_cost(input=input)
            input = pd.resize(input=input, size=shape[1] * shape[2],
                              height=shape[1], width=shape[2])
    return input
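
# Usage sketch (hypothetical): sums a c x h x w feature map over its
# channel dimension; `feat` stands for an upstream layer and the
# [64, 32, 32] shape is illustrative.
def _example_reduce(feat):
    return reduce(feat, shape=[64, 32, 32], op='sum', axis=1)
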
def inner_product_cost(input, label, weight, height, width, num_channel,
                       interp='nearest', is_angle=False):
    """Inner-product cost between two (normalized) vector maps.

    If is_angle, we cannot back-propagate through the angle, only through
    the inner product, so the loss is not consistent with the evaluation.
    """
    # Make sure the input, label and weight all have the same spatial size.
    if height > 1 and width > 1:
        input = pd.bilinear_interp(input=input,
                                   out_size_x=width, out_size_y=height)
        label = pd.bilinear_interp(input=label,
                                   out_size_x=width, out_size_y=height)
        if weight:
            weight = image_resize_func[interp](input=weight,
                                               out_size_x=width,
                                               out_size_y=height)

    size = height * width * num_channel
    # L2-normalize both vector maps channel-wise before the inner product.
    input = util_layers.norm(input, height, width, num_channel,
                             trans_back=False)
    label = util_layers.norm(label, height, width, num_channel,
                             trans_back=False)
    inner = pd.mixed(size=size,
                     input=[pd.dotmul_operator(a=input, b=label, scale=1.0)])
    inner = pd.resize(input=pd.sum_cost(input=inner),
                      size=height * width, height=height, width=width)

    if is_angle:
        inner = util_layers.math_op(input=inner, act=pd.activation.Acos())
    else:
        # cost = 1 - <input, label>
        inner = pd.slope_intercept(input=inner, slope=-1, intercept=1.0)

    if weight:
        inner_error = sum_weighted_loss(inner, weight, size=height * width)
    else:
        # Unweighted case: take the spatial mean.
        fac = 1.0 / float(height * width)
        inner = pd.slope_intercept(input=inner, slope=fac, intercept=0.0)
        inner_error = pd.sum_cost(input=inner)

    return inner_error
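
# Usage sketch (hypothetical): cosine-style loss between predicted and
# ground-truth surface normals (3-channel unit vectors). Setting
# is_angle=True would report the angular error instead, at the price of
# the gradient caveat noted in the docstring above.
def _example_inner_product_cost(pred_normal, gt_normal, gt_weight):
    return inner_product_cost(pred_normal, gt_normal, gt_weight,
                              height=64, width=64, num_channel=3,
                              is_angle=False)
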
def norm(input, height, width, channel, type='l2', trans_back=True):
    """Channel-wise normalize each pixel's feature vector."""
    size = height * width * channel
    if height > 1 or width > 1:
        # Change c x h x w to h x w x c so that each pixel becomes one
        # sample of size `channel`.
        input = pd.transpose(input=input, trans_order=[1, 2, 0],
                             height=height, width=width)
        input = pd.resize(input=input, size=channel)

    if type == 'l2':
        norm = pd.mixed(size=size,
                        input=[pd.dotmul_operator(a=input, b=input, scale=1.0)])
        norm = pd.sum_cost(input=norm)
        norm = math_op(norm, pd.activation.Sqrt())
    elif type == 'l1':
        norm = math_op(input, pd.activation.Abs())
        norm = pd.sum_cost(input=norm)

    norm_inv = math_op(norm, pd.activation.Inv())
    # Broadcast the inverse norm over the channel dimension. The original
    # hard-coded num_repeats=3, which is only valid for 3-channel inputs
    # (e.g. surface normals); repeating `channel` times generalizes it.
    norm_inv = pd.repeat(input=norm_inv, num_repeats=channel)
    input = math_op(input=[input, norm_inv], act=None, op='dot', size=size)

    if trans_back:
        # Transpose h x w x c back to the original c x h x w layout.
        input = pd.resize(input=input, size=size)
        input = pd.transpose(input=input, trans_order=[2, 0, 1],
                             height=width, width=channel, channels=height)
    return input
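
# Usage sketch (hypothetical): L2-normalizes a 3 x H x W normal map
# pixel-wise and returns it in the original c x h x w layout; `normal_map`
# stands for an upstream layer and the sizes are illustrative.
def _example_norm(normal_map):
    return norm(normal_map, height=64, width=64, channel=3, type='l2')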