def _beta(self, h, u_expr, h_expr):
    with layer.mixed(bias_attr=False) as dot_h_u_expr:
        dot_h_u_expr += layer.dotmul_operator(a=h, b=u_expr)
    with layer.mixed(bias_attr=False) as dot_h_h_expr:
        dot_h_h_expr += layer.dotmul_operator(a=h, b=h_expr)
    cat_all = layer.concat(input=[h, u_expr, dot_h_u_expr, dot_h_h_expr])
    return cat_all
def test_operator(self):
    ipt0 = layer.data(name='data', type=data_type.dense_vector(784))
    ipt1 = layer.data(name='word', type=data_type.dense_vector(128))
    fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
    fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

    dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
    dotmul0 = layer.mixed(input=dotmul_op)
    with layer.mixed() as dotmul1:
        dotmul1 += dotmul_op

    conv = layer.conv_operator(
        img=ipt0,
        filter=ipt1,
        filter_size=1,
        num_channels=1,
        num_filters=128,
        stride=1,
        padding=0)
    conv0 = layer.mixed(input=conv)
    with layer.mixed() as conv1:
        conv1 += conv

    print layer.parse_network(dotmul0)
    print layer.parse_network(dotmul1)
    print layer.parse_network(conv0)
    print layer.parse_network(conv1)
def test_operator(self):
    ipt0 = layer.data(name='data1', type=data_type.dense_vector(784))
    ipt1 = layer.data(name='word1', type=data_type.dense_vector(128))
    fc0 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())
    fc1 = layer.fc(input=ipt0, size=100, act=activation.Sigmoid())

    dotmul_op = layer.dotmul_operator(a=fc0, b=fc1)
    dotmul0 = layer.mixed(input=dotmul_op)
    with layer.mixed() as dotmul1:
        dotmul1 += dotmul_op

    conv = layer.conv_operator(
        img=ipt0,
        filter=ipt1,
        filter_size=1,
        num_channels=1,
        num_filters=128,
        stride=1,
        padding=0)
    conv0 = layer.mixed(input=conv)
    with layer.mixed() as conv1:
        conv1 += conv

    print layer.parse_network(dotmul0)
    print layer.parse_network(dotmul1)
    print layer.parse_network(conv0)
    print layer.parse_network(conv1)
def inner_product_cost(input, label, weight, height, width, num_channel,
                       interp='nearest', is_angle=False):
    """Inner-product (cosine) cost between two channel-normalized maps.

    If is_angle is True the cost is the angle (acos of the inner product);
    gradients still flow only through the inner product, not the angle, so
    the training loss is not strictly consistent with the evaluation metric.
    """
    # Make sure input, label and weight all have the same spatial size.
    if height > 1 and width > 1:
        input = pd.bilinear_interp(input=input, out_size_x=width, out_size_y=height)
        label = pd.bilinear_interp(input=label, out_size_x=width, out_size_y=height)
        if weight:
            weight = image_resize_func[interp](input=weight,
                                               out_size_x=width,
                                               out_size_y=height)

    size = height * width * num_channel
    # Channel-wise L2 normalization so the dot product becomes a cosine similarity.
    input = util_layers.norm(input, height, width, num_channel, trans_back=False)
    label = util_layers.norm(label, height, width, num_channel, trans_back=False)

    inner = pd.mixed(size=size,
                     input=[pd.dotmul_operator(a=input, b=label, scale=1.0)])
    inner = pd.resize(input=pd.sum_cost(input=inner),
                      size=height * width, height=height, width=width)
    if is_angle:
        inner = util_layers.math_op(input=inner, act=pd.activation.Acos())
    else:
        # Turn the similarity in [-1, 1] into a cost: 1 - <input, label>.
        inner = pd.slope_intercept(input=inner, slope=-1, intercept=1.0)

    if weight:
        inner_error = sum_weighted_loss(inner, weight, size=height * width)
    else:
        fac = 1.0 / float(height * width)
        inner = pd.slope_intercept(input=inner, slope=fac, intercept=0.0)
        inner_error = pd.sum_cost(input=inner)

    return inner_error
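# Illustrative only: a rough NumPy sketch of the arithmetic that the layer
# graph in inner_product_cost builds (the helper name and shapes here are
# hypothetical, not part of the repo). Both maps are L2-normalized per pixel,
# so the dot product is a cosine similarity; the cost is 1 - cos, or the angle
# acos(cos) when is_angle is set.
import numpy as np

def inner_product_cost_np(pred, gt, is_angle=False, eps=1e-12):
    # pred, gt: arrays of shape (c, h, w)
    pred = pred / (np.linalg.norm(pred, axis=0, keepdims=True) + eps)
    gt = gt / (np.linalg.norm(gt, axis=0, keepdims=True) + eps)
    cos = np.sum(pred * gt, axis=0)                     # per-pixel cosine, (h, w)
    per_pixel = np.arccos(np.clip(cos, -1.0, 1.0)) if is_angle else 1.0 - cos
    return per_pixel.mean()                             # mean over the h * w pixels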
def _step_basic(self, h_cur, u):
    expanded_h = layer.expand(input=h_cur, expand_as=u)
    hu = layer.concat(input=[expanded_h, u])
    with layer.mixed(bias_attr=False) as dot_hu:
        dot_hu += layer.dotmul_operator(a=expanded_h, b=u)
    cat_all = layer.concat(input=[hu, dot_hu])
    s = layer.fc(size=1,
                 bias_attr=False,
                 param_attr=Attr.Param(self.name + '.ws'),
                 input=cat_all)
    return s
def ele_norm_cost(input, label, weight, height=None, width=None,
                  num_channel=None, cost_type='l1'):
    if height > 1 and width > 1:
        input = pd.bilinear_interp(input=input, out_size_x=width, out_size_y=height)
        label = pd.bilinear_interp(input=label, out_size_x=width, out_size_y=height)
        if weight:
            weight = pd.nearest_interp(input=weight, out_size_x=width, out_size_y=height)

    size = height * width * num_channel
    if weight:
        # Mask out invalid pixels before computing the element-wise cost.
        input = pd.mixed(
            size=size,
            input=[pd.dotmul_operator(a=input, b=weight, scale=1.0)])
        label = pd.mixed(
            size=size,
            input=[pd.dotmul_operator(a=label, b=weight, scale=1.0)])
        cost = cost_func[cost_type](input=input, label=label)
        # Normalize by the total weight instead of the pixel count.
        fac = pd.sum_cost(input=weight)
        fac = util_layers.math_op(input=fac, act=pd.activation.Inv())
        cost = pd.scaling(input=cost, weight=fac)
        cost = pd.sum_cost(input=cost)
    else:
        cost = cost_func[cost_type](input=input, label=label)
        fac = 1.0 / float(height * width)
        cost = pd.slope_intercept(input=cost, slope=fac, intercept=0.0)
        cost = pd.sum_cost(input=cost)
    return cost
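# Illustrative only: a hypothetical NumPy sketch of what ele_norm_cost computes
# for the l1 case. Both maps are masked by the weight and the summed cost is
# divided by sum(weight); without a weight it is a plain per-pixel mean.
import numpy as np

def ele_norm_cost_np(pred, gt, weight=None, cost_type='l1'):
    # pred, gt, weight: arrays of shape (h, w)
    if weight is not None:
        pred, gt = pred * weight, gt * weight        # mask invalid pixels first
    diff = np.abs(pred - gt) if cost_type == 'l1' else (pred - gt) ** 2
    if weight is not None:
        return np.sum(diff) / np.sum(weight)
    return np.mean(diff)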
def sum_weighted_loss(loss, weight, size=1):
    """Normalized weighted loss.

    loss and weight both have shape batch_size x image_size; the output is
    sum(loss .* weight) / sum(weight).
    """
    weighted_loss = pd.mixed(
        size=size,
        input=[pd.dotmul_operator(a=loss, b=weight, scale=1.0)])
    weight_fac = pd.sum_cost(input=weight)
    weight_fac = util_layers.math_op(input=weight_fac, act=pd.activation.Inv())
    # Scale the weighted loss (not the raw loss) by 1 / sum(weight).
    weighted_loss = pd.scaling(input=weighted_loss, weight=weight_fac)
    weighted_loss = pd.sum_cost(input=weighted_loss)
    return weighted_loss
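# Illustrative only: the normalized weighted loss above roughly reduces to the
# following NumPy expression (hypothetical helper, not part of the repo).
import numpy as np

def sum_weighted_loss_np(loss, weight):
    # loss, weight: arrays of shape (batch, image_size)
    return np.sum(loss * weight) / np.sum(weight)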
def ns_ele_l2_cost(input, label, weight, height, width,
                   num_channel=None, interp='nearest'):
    assert interp in image_resize_func.keys()

    # Make sure input, label and weight all have the same spatial size.
    input = pd.bilinear_interp(input=input, out_size_x=width, out_size_y=height)
    label = image_resize_func[interp](input=label, out_size_x=width, out_size_y=height)
    weight = image_resize_func[interp](input=weight, out_size_x=width, out_size_y=height)

    # Reshape the original layers: input has shape c x h x w, change it to
    # h x w x c so that each row holds one pixel's channel vector.
    input_ts = pd.transpose(input=input, trans_order=[1, 2, 0],
                            height=height, width=width)
    input_rs = pd.resize(input=input_ts, size=num_channel, height=1, width=1)
    label_ts = pd.transpose(input=label, trans_order=[1, 2, 0],
                            height=height, width=width)
    label_rs = pd.resize(input=label_ts, size=num_channel, height=1, width=1)
    weight_rs = pd.resize(input=weight, size=1, height=1, width=1)

    # Per-pixel L2 distance, weighted and normalized by the total weight.
    cost_rs = pd.mse_cost(input=input_rs, label=label_rs)
    sqrt_l2_cost = util_layers.math_op(input=cost_rs, act=pd.activation.Sqrt())
    sqrt_l2_cost = pd.mixed(
        size=1,
        input=[pd.dotmul_operator(a=sqrt_l2_cost, b=weight_rs, scale=1.0)])
    sqrt_l2_cost = pd.resize(input=sqrt_l2_cost, size=height * width,
                             height=height, width=width)
    weight_fac = pd.sum_cost(input=weight)
    weight_fac = util_layers.math_op(input=weight_fac, act=pd.activation.Inv())
    sqrt_l2_cost = pd.scaling(input=sqrt_l2_cost, weight=weight_fac)
    cost = pd.sum_cost(input=sqrt_l2_cost)
    return cost
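# Illustrative only: a rough NumPy equivalent of ns_ele_l2_cost — the per-pixel
# L2 distance between channel vectors, weighted and normalized by sum(weight).
# The helper name and shapes are hypothetical.
import numpy as np

def ns_ele_l2_cost_np(pred, gt, weight):
    # pred, gt: arrays of shape (c, h, w); weight: array of shape (h, w)
    per_pixel = np.sqrt(np.sum((pred - gt) ** 2, axis=0))   # (h, w)
    return np.sum(per_pixel * weight) / np.sum(weight)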
def iou_score(input, label, weight, height, width, class_num, is_cost=True):
    """IoU score over semantic classes.

    class_num is the number of semantic classes plus background. When
    is_cost is True, the (soft) score can also serve as an IoU cost for
    training.
    """
    # input = pd.resize(input=input, size=height * width)
    # label = pd.resize(input=label, size=height * width)
    weight = pd.nearest_interp(input=weight, out_size_x=width, out_size_y=height)
    if not is_cost:
        # Evaluation mode: the input is a label map, so mask it with the
        # weight and one-hot encode it. Otherwise the input is already a
        # per-class probability map.
        input = util_layers.math_op(input=[input, weight], op='dot')
        input_one_hot = util_layers.ele_one_hot(input, class_num, height, width)
    else:
        input_one_hot = input
        input_one_hot = pd.bilinear_interp(input=input_one_hot,
                                           out_size_x=width,
                                           out_size_y=height)
    label = pd.nearest_interp(input=label, out_size_x=width, out_size_y=height)
    label = util_layers.math_op(input=[label, weight], op='dot')
    label_one_hot = util_layers.ele_one_hot(label, class_num, height, width)

    # intersection = input .* label, union = input + label - intersection
    inter = util_layers.math_op(input=[input_one_hot, label_one_hot], op='dot')
    union = pd.addto(input=[input_one_hot, label_one_hot],
                     act=pd.activation.Linear(), bias_attr=False)
    inter_neg = pd.slope_intercept(input=inter, slope=-1)
    union = pd.addto(input=[union, inter_neg],
                     act=pd.activation.Linear(), bias_attr=False)

    # Sum per class, then take the per-class ratio inter / union.
    inter = pd.resize(input=inter, size=height * width)
    inter = pd.sum_cost(input=inter)
    union = pd.resize(input=union, size=height * width)
    union = pd.sum_cost(input=union)
    union_inv = util_layers.math_op(input=union, act=pd.activation.Inv())
    iou = pd.mixed(size=1,
                   input=[pd.dotmul_operator(a=inter, b=union_inv, scale=1.0)])
    iou = pd.resize(input=iou, size=class_num)
    if is_cost:
        iou = pd.sum_cost(iou)
    return iou
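# Illustrative only: a hypothetical NumPy sketch of the soft IoU that the layer
# graph above assembles from the one-hot maps, summed over classes as in the
# is_cost=True branch.
import numpy as np

def soft_iou_np(pred_one_hot, label_one_hot, eps=1e-12):
    # pred_one_hot, label_one_hot: arrays of shape (class_num, h, w) in [0, 1]
    inter = np.sum(pred_one_hot * label_one_hot, axis=(1, 2))
    union = np.sum(pred_one_hot + label_one_hot, axis=(1, 2)) - inter
    return np.sum(inter / (union + eps))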
def fusion_layer(self, input1, input2):
    """Combine input1 and input2 by
    concat(input1, input2, input1 - input2, input1 .* input2).
    """
    # Fusion layer
    neg_input2 = layer.slope_intercept(input=input2, slope=-1.0, intercept=0.0)
    diff1 = layer.addto(input=[input1, neg_input2],
                        act=Act.Identity(),
                        bias_attr=False)
    diff2 = layer.mixed(bias_attr=False,
                        input=layer.dotmul_operator(a=input1, b=input2))
    fused = layer.concat(input=[input1, input2, diff1, diff2])
    return fused
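# Illustrative only: a hypothetical NumPy rendering of the fusion above — the
# two inputs, their difference, and their element-wise product, concatenated.
import numpy as np

def fusion_np(x1, x2):
    # x1, x2: arrays with the same shape; concatenate along the feature axis
    return np.concatenate([x1, x2, x1 - x2, x1 * x2], axis=-1)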
def relative_l1(input, label, weight, height, width, interp='nearest',
                is_inverse=False):
    """Relative L1 loss for depth: |input - label| / label."""
    assert interp in image_resize_func.keys()

    # Make sure input, label and weight all have the same spatial size.
    if height > 1 and width > 1:
        input = pd.bilinear_interp(input=input, out_size_x=width, out_size_y=height)
        label = pd.bilinear_interp(input=label, out_size_x=width, out_size_y=height)
        if weight:
            weight = image_resize_func[interp](input=weight,
                                               out_size_x=width,
                                               out_size_y=height)

    label_inv = util_layers.math_op(input=label, act=pd.activation.Inv())
    label_neg = pd.slope_intercept(input=label, slope=-1)
    diff = pd.addto(input=[input, label_neg],
                    act=pd.activation.Abs(),
                    bias_attr=False)
    rel_error = pd.mixed(
        size=1,
        input=[pd.dotmul_operator(a=diff, b=label_inv, scale=1.0)])

    if weight:
        rel_error = sum_weighted_loss(rel_error, weight, size=height * width)
    else:
        fac = 1.0 / float(height * width)
        rel_error = pd.slope_intercept(input=rel_error, slope=fac, intercept=0.0)
        rel_error = pd.sum_cost(input=rel_error)
    return rel_error
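# Illustrative only: a hypothetical NumPy sketch of the relative L1 depth error
# built above, |pred - gt| / gt, optionally weighted and normalized by sum(weight).
import numpy as np

def relative_l1_np(pred, gt, weight=None):
    # pred, gt, weight: arrays of shape (h, w); gt is assumed non-zero
    rel = np.abs(pred - gt) / gt
    if weight is not None:
        return np.sum(rel * weight) / np.sum(weight)
    return np.mean(rel)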
def math_op(input, act=pd.activation.Linear(), op='dot', size=0):
    if not isinstance(input, list):
        input = [input]
    if len(input) == 1:
        # Unary operation: apply the activation through an identity projection.
        result = pd.mixed(
            input=[pd.identity_projection(input=input[0])], act=act)
    elif len(input) == 2:
        # Binary operation.
        if op == 'dot':
            result = pd.mixed(
                size=size,
                input=pd.dotmul_operator(a=input[0], b=input[1], scale=1.0),
                act=act)
        else:
            raise ValueError('unsupported binary op: %s' % op)
    else:
        raise ValueError('not supporting math op with more than two inputs')
    return result
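# Hedged usage sketch for math_op (x and y are assumed to be existing layers of
# the same size; these calls just mirror how the helper is used elsewhere in
# this file):
#
#   inv_x = math_op(x, act=pd.activation.Inv())      # unary: element-wise 1 / x
#   xy = math_op([x, y], op='dot', size=some_size)   # binary: element-wise x .* y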
def norm(input, height, width, channel, type='l2', trans_back=True):
    """Channel-wise normalization: divide each pixel's feature vector by its norm."""
    size = height * width * channel
    if height > 1 or width > 1:
        # Move to h x w x c layout so each row is one pixel's channel vector.
        input = pd.transpose(input=input, trans_order=[1, 2, 0],
                             height=height, width=width)
        input = pd.resize(input=input, size=channel)

    if type == 'l2':
        norm = pd.mixed(size=size,
                        input=[pd.dotmul_operator(a=input, b=input, scale=1.0)])
        norm = pd.sum_cost(input=norm)
        norm = math_op(norm, pd.activation.Sqrt())
    if type == 'l1':
        norm = math_op(input, pd.activation.Abs())
        norm = pd.sum_cost(input=norm)

    # Broadcast the per-pixel norm across the channels (3 channels here).
    norm_inv = math_op(norm, pd.activation.Inv())
    norm_inv = pd.repeat(input=norm_inv, num_repeats=3)
    input = math_op(input=[input, norm_inv], act=None, op='dot', size=size)

    if trans_back:
        input = pd.resize(input=input, size=size)
        input = pd.transpose(input=input, trans_order=[2, 0, 1],
                             height=width, width=channel, channels=height)
    return input
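# Illustrative only: a hypothetical NumPy sketch of the channel-wise
# normalization above — each pixel's channel vector is divided by its L2
# (or L1) norm.
import numpy as np

def norm_np(feat, norm_type='l2', eps=1e-12):
    # feat: array of shape (c, h, w)
    if norm_type == 'l2':
        n = np.sqrt(np.sum(feat ** 2, axis=0, keepdims=True))
    else:
        n = np.sum(np.abs(feat), axis=0, keepdims=True)
    return feat / (n + eps)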