def gradient_x(bottom):
    dummy_data = L.DummyData(dummy_data_param=dict(
        shape=[dict(dim=[1, 1, 100, 99])]))
    crop_1 = L.Crop(bottom, dummy_data, crop_param=dict(offset=[0, 1]))
    crop_2 = L.Crop(bottom, dummy_data, crop_param=dict(offset=[0, 0]))
    diff = L.Eltwise(crop_1,
                     crop_2,
                     eltwise_param=dict(operation=P.Eltwise.SUM,
                                        coeff=[1.0, -1.0]))
    gradient_x = L.AbsVal(diff)
    return gradient_x
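
A minimal usage sketch, assuming pycaffe is available: the hard-coded 100x99 crop shape implies a 100-pixel-wide input, and a matching gradient_y helper would simply transpose the crop shape and offsets.

from caffe import layers as L, params as P
import caffe

def gradient_x_demo():
    n = caffe.NetSpec()
    # 1x1x100x100 image; gradient_x crops it to two 100x99 views before differencing
    n.img = L.Input(shape=dict(dim=[1, 1, 100, 100]))
    n.grad_x = gradient_x(n.img)
    return n.to_proto()

print(gradient_x_demo())
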
def ResNet_block(split, bottom, nout, ks, stride, projection_stride, pad):
    if projection_stride == 1:  # 1 means no 1x1 projection is needed
        scale0 = bottom
    else:  # otherwise project with a 1x1, stride-2 convolution
        scale0, relu0 = conv_BN_scale_relu(split, bottom, nout, 1, projection_stride, 0)
        
    scale1, relu1 = conv_BN_scale_relu(split, bottom, nout, ks, projection_stride, pad)
    scale2, relu2 = conv_BN_scale_relu(split, relu1, nout, ks, stride, pad)
    wise = L.Eltwise(scale2, scale0, operation=P.Eltwise.SUM)  # element-wise sum of the two branches
    wise_relu = L.ReLU(wise, in_place = True)
    return wise_relu
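
ResNet_block relies on a conv_BN_scale_relu helper that is not shown in this example. A plausible sketch of such a helper (an assumption, not the original author's code) is a Convolution -> BatchNorm -> Scale -> ReLU chain, with split selecting whether BatchNorm uses accumulated (test) or per-batch (train) statistics:

from caffe import layers as L

def conv_BN_scale_relu(split, bottom, nout, ks, stride, pad):
    # split is assumed to be 'train' or 'test'
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride, pad=pad,
                         num_output=nout, bias_term=False,
                         weight_filler=dict(type='msra'))
    bn = L.BatchNorm(conv, use_global_stats=(split == 'test'), in_place=True)
    scale = L.Scale(bn, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    return scale, relu
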
Example 3
def make_net_test(desired_level=2):
    net = caffe.NetSpec()
    net.img0_nomean_resize = L.ImageData(image_data_param=dict(source="tmp/img1.txt", batch_size=1))
    net.img1_nomean_resize = L.ImageData(image_data_param=dict(source="tmp/img2.txt", batch_size=1))
    
    predict_flow_name = 'predict_flow{}'.format(desired_level)

    net = make_pwc_net_encoder_plus(net, net.img0_nomean_resize, net.img1_nomean_resize) 
    net.blob44 = L.Eltwise(getattr(net, predict_flow_name), eltwise_param=dict(operation=1, coeff=20.0))  

    return net.to_proto()
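
The Eltwise on a single bottom is a common Caffe idiom: with operation=1 (SUM) and only one input, coeff acts as a plain scalar multiplier, here scaling the predicted flow by 20. A standalone sketch of the same trick, assuming pycaffe:

from caffe import layers as L
import caffe

n = caffe.NetSpec()
n.x = L.Input(shape=dict(dim=[1, 2, 64, 64]))
# y = 20 * x: SUM over a single bottom, weighted by its coefficient
n.y = L.Eltwise(n.x, eltwise_param=dict(operation=1, coeff=[20.0]))
print(n.to_proto())
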
Example 4
def fractal_block(bottom, base_output=64):
    conv1a, bn1a, scale1a = conv_bn_scale(bottom, num_output=base_output * 4)
    bn1b0, scale1b0, relu1b0, conv1b1, bn1b1, scale1b1, relu1b1, conv1b2, bn1b2, scale1b2, relu1b2, conv1b3 = \
        branch(bottom, num_output=base_output)
    eltwise1 = L.Eltwise(conv1a, conv1b3, eltwise_param=dict(operation=1))

    conv2a, bn2a, scale2a = conv_bn_scale(eltwise1, num_output=base_output * 4)
    bn2b0, scale2b0, relu2b0, conv2b1, bn2b1, scale2b1, relu2b1, conv2b2, bn2b2, scale2b2, relu2b2, conv2b3 = \
        branch(eltwise1, num_output=base_output)
    conv12a, bn12a, scale12a = conv_bn_scale(bottom,
                                             num_output=base_output * 4)
    eltwise2 = L.Eltwise(conv2a,
                         conv2b3,
                         conv12a,
                         eltwise_param=dict(operation=1))

    conv3a, bn3a, scale3a = conv_bn_scale(eltwise2, num_output=base_output * 4)
    bn3b0, scale3b0, relu3b0, conv3b1, bn3b1, scale3b1, relu3b1, conv3b2, bn3b2, scale3b2, relu3b2, conv3b3 = \
        branch(eltwise2, num_output=base_output)
    eltwise3 = L.Eltwise(conv3a, conv3b3, eltwise_param=dict(operation=1))

    conv4a, bn4a, scale4a = conv_bn_scale(eltwise3, num_output=base_output * 4)
    bn4b0, scale4b0, relu4b0, conv4b1, bn4b1, scale4b1, relu4b1, conv4b2, bn4b2, scale4b2, relu4b2, conv4b3 = \
        branch(eltwise3, num_output=base_output)
    conv34a, bn34a, scale34a = conv_bn_scale(eltwise2,
                                             num_output=base_output * 4)
    conv1234a, bn1234a, scale1234a = conv_bn_scale(bottom,
                                                   num_output=base_output * 4)
    eltwise4 = L.Eltwise(conv4a,
                         conv4b3,
                         conv34a,
                         conv1234a,
                         eltwise_param=dict(operation=1))

    return conv1a, bn1a, scale1a, bn1b0, scale1b0, relu1b0, conv1b1, bn1b1, scale1b1, relu1b1, conv1b2, bn1b2, \
           scale1b2, relu1b2, conv1b3, eltwise1, conv2a, bn2a, scale2a, bn2b0, scale2b0, relu2b0, conv2b1, bn2b1, \
           scale2b1, relu2b1, conv2b2, bn2b2, scale2b2, relu2b2, conv2b3, conv12a, bn12a, scale12a, eltwise2, \
           conv3a, bn3a, scale3a, bn3b0, scale3b0, relu3b0, conv3b1, bn3b1, scale3b1, relu3b1, conv3b2, bn3b2, \
           scale3b2, relu3b2, conv3b3, eltwise3, conv4a, bn4a, scale4a, bn4b0, scale4b0, relu4b0, conv4b1, bn4b1, \
           scale4b1, relu4b1, conv4b2, bn4b2, scale4b2, relu4b2, conv4b3, conv34a, bn34a, scale34a, conv1234a, \
           bn1234a, scale1234a, eltwise4
def l1_loss(bottom1, bottom2, l_weight):

    diff = L.Eltwise(bottom1,
                     bottom2,
                     eltwise_param=dict(operation=P.Eltwise.SUM,
                                        coeff=[1, -1]))
    absval = L.AbsVal(diff)
    loss = L.Reduction(absval,
                       reduction_param=dict(operation=P.Reduction.SUM),
                       loss_weight=l_weight)

    return loss
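
A minimal sketch wiring l1_loss between two input blobs, assuming pycaffe: the coefficients [1, -1] turn the SUM into a difference, AbsVal rectifies it, and the Reduction sums everything into a scalar loss.

from caffe import layers as L, params as P
import caffe

n = caffe.NetSpec()
n.pred = L.Input(shape=dict(dim=[1, 3, 32, 32]))
n.target = L.Input(shape=dict(dim=[1, 3, 32, 32]))
n.l1 = l1_loss(n.pred, n.target, l_weight=1.0)
print(n.to_proto())
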
Example 6
def ResDown(data,cout):    
    # left_=SingleConv(data,cout,kernel_size=3,stride=2,padding=1)
    right_branch1=SingleConv(data,cout,kernel_size=3,stride=2,padding=1)
    right_branch2=SingleConv(data,cout,kernel_size=5,stride=2,padding=2)
    
    right=[right_branch1,right_branch2]
    right=L.Concat(*right)
    right=conv_bn(right,cout,kernel_size=3,stride=1,padding=1)

    data = conv_bn(data,cout,kernel_size=3,stride=2,padding=1)
    # data =  L.Pooling(data, kernel_size=3,stride=2,pad=1, pool=P.Pooling.AVE)
    return L.ReLU(L.Eltwise(data,right,operation=1,engine=3))
def residual_dense_block(F_dn1, input_channel, depth, growth_rate, dropout):
    dense = F_dn1
    for i in range(depth):
        dense = add_layer(dense, channel=growth_rate, dropout=dropout)
    F_dLF = conv_relu(dense,
                      channel=input_channel,
                      kernel=1,
                      stride=1,
                      pad=0,
                      dropout=dropout)
    F_d = L.Eltwise(F_dLF, F_dn1)
    return F_d
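
The final Eltwise omits an explicit operation; EltwiseParameter defaults to SUM, so the block ends with the local residual F_d = F_dLF + F_dn1. A short sketch of chaining several such blocks (assuming the add_layer and conv_relu helpers used above):

def stack_rdb(bottom, num_blocks=3, channels=64, depth=6, growth_rate=32):
    top = bottom
    for _ in range(num_blocks):
        # each block returns its input plus the fused local features
        top = residual_dense_block(top, input_channel=channels, depth=depth,
                                   growth_rate=growth_rate, dropout=0.0)
    return top
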
Example 8
def _branch(major, net, bottom, nout, has_branch1=False, is_branch_2a=False):
    eltwise_layer = 'res{}'.format(major)
    relu_layer = 'res{}_relu'.format(major)

    stride = 1
    if has_branch1 and not is_branch_2a:
        stride = 2

    branch2_2a = _block_4in1(major, '2a', net, bottom, nout, 0, 1, stride)
    branch2_2b = _block_4in1(major, '2b', net, branch2_2a, nout, 1, 3, 1)
    branch2_2c = _block_3in1(major, '2c', net, branch2_2b, nout * 4, 0, 1, 1)

    if has_branch1:
        branch1 = _block_3in1(major, '1', net, bottom, nout * 4, 0, 1, stride)
        net[eltwise_layer] = L.Eltwise(branch1, branch2_2c)
    else:
        net[eltwise_layer] = L.Eltwise(bottom, branch2_2c)

    net[relu_layer] = L.ReLU(net[eltwise_layer], in_place=True)

    return net[relu_layer]
Example 9
def residual_standard_layers(bottom, param, weight_filler, bias_filler, num_filter):
	conv1 = conv_bn_layers(bottom, num_filter = num_filter, kernel = 3, stride = 1, pad = 1,
		param = param, weight_filler = weight_filler, bias_filler = bias_filler)
	relu1 = L.ReLU(conv1)
	conv2 = conv_bn_layers(relu1, num_filter = num_filter, kernel = 3, stride = 1, pad = 1,
		param = param, weight_filler = weight_filler, bias_filler = bias_filler)
	sum1 = L.Eltwise(conv2, bottom)
	#bn = L.BN(sum1, param = [dict(lr_mult=1), dict(lr_mult=1)], scale_filler=dict(type="constant", value=1), shift_filler=dict(type="constant", value=0))
	#bn = L.BatchNorm(sum1)
	#lrn = L.LRN(bn)
	relu2 = L.ReLU(sum1)
	return relu2
Example 10
def Res3Way(net,
            from_layer,
            deconv_layer,
            block_name,
            use_branch,
            freeze_branch=[False, False, False],
            use_bn=True,
            *branch_param):

    res_layer = []
    if use_branch[0]:
        branch1 = ResBranch(net,
                            from_layer,
                            block_name,
                            "branch1",
                            freeze_branch[0],
                            branch_param[0],
                            use_bn=use_bn)
        res_layer.append(net[branch1])
    else:
        res_layer.append(net[from_layer])

    if use_branch[1]:
        branch2 = ResBranch(net,
                            from_layer,
                            block_name,
                            "branch2",
                            freeze_branch[1],
                            branch_param[1],
                            use_bn=use_bn)
        res_layer.append(net[branch2])

    if use_branch[2]:
        branch3 = ResBranch(net,
                            deconv_layer,
                            block_name,
                            "branch3",
                            freeze_branch[2],
                            branch_param[2],
                            use_bn=use_bn)
        res_layer.append(net[branch3])

    res_name = 'res{}'.format(block_name)

    if len(res_layer) != 1:
        net[res_name] = L.Eltwise(*res_layer)
        relu_name = '{}_relu'.format(res_name)
        net[relu_name] = L.ReLU(net[res_name], in_place=True)
    else:
        relu_name = '{}_relu'.format(res_name)
        net[relu_name] = L.ReLU(res_layer[0], in_place=True)

    return relu_name
Example 11
    def _semantic_regularization(self, xSemPr, xSemLb, semReg):
        ns = self.netspec

        if self.semantics == ATTRIBUTES:
            name = 'SCoRe/semLoss'
            ns[name] = L.SigmoidCrossEntropyLoss(
                *[xSemPr, xSemLb],
                name=name,
                loss_weight=semReg / (len(self.constrains) * np.sqrt(2.)) *
                10.,
                include=dict(phase=caffe.TRAIN))
        else:
            c_keys = [key for key in self.constrains.keys()]
            losses = ['SCoRe/semLoss/%s' % key for key in c_keys]
            scores = ['SCoRe/semLoss/%s/scores' % key for key in c_keys]
            labels = ['SCoRe/semLoss/%s/labels' % key for key in c_keys]

            # Slice semantic scores
            xSemPr_name = [k for k, v in ns.tops.iteritems() if v == xSemPr][0]
            slice_scores = L.Slice(name='SCoRe/semLoss/slice_scores',
                                   bottom=[xSemPr_name],
                                   ntop=len(scores),
                                   top=scores,
                                   in_place=True,
                                   slice_point=np.cumsum(
                                       self.num_states)[:-1].tolist(),
                                   include=dict(phase=caffe.TRAIN))

            # Slice semantic labels
            xSemLb_name = [k for k, v in ns.tops.iteritems() if v == xSemLb][0]
            slice_labels = L.Slice(name='SCoRe/semLoss/slice_labels',
                                   bottom=[xSemLb_name],
                                   ntop=len(labels),
                                   top=labels,
                                   in_place=True,
                                   slice_point=range(1, len(self.constrains)),
                                   include=dict(phase=caffe.TRAIN))

            # Add supervision to each slice
            for i, xLoss in enumerate(losses):
                ns[xLoss] = L.SoftmaxWithLoss(
                    *[slice_scores[i], slice_labels[i]],
                    name=xLoss,
                    loss_weight=semReg / len(self.constrains),
                    include=dict(phase=caffe.TRAIN))

            # Summarize supervisions for display
            ns['SCoRe/semLoss'] = L.Eltwise(
                *[ns[l] for l in losses],
                name='SCoRe/semLoss',
                operation=P.Eltwise.SUM,
                coeff=[semReg / len(self.constrains)] * len(losses),
                include=dict(phase=caffe.TRAIN))
def residual_bottleneck_unit(n, nout, s, newdepth = False):
    """
    This creates the "bottleneck unit" shown on the right side of Figure 5.
    """
    
    bottom = list(n.tops.keys())[-1]  # find the last layer in the netspec
    stride = 2 if newdepth and nout > 64 else 1

    n[s + 'conv1'], n[s + 'bn1'], n[s + 'lrn1'] = conv_bn(n[bottom], ks = 1, stride = stride, nout = nout, pad = 0)
    n[s + 'relu1'] = L.ReLU(n[s + 'lrn1'], in_place=True)
    n[s + 'conv2'], n[s + 'bn2'], n[s + 'lrn2'] = conv_bn(n[s + 'relu1'], ks = 3, stride = 1, nout = nout, pad = 1)
    n[s + 'relu2'] = L.ReLU(n[s + 'lrn2'], in_place=True)
    n[s + 'conv3'], n[s + 'bn3'], n[s + 'lrn3'] = conv_bn(n[s + 'relu2'], ks = 1, stride = 1, nout = nout * 4, pad = 0)
   
    if newdepth: 
        n[s + 'conv_expand'], n[s + 'bn_expand'], n[s + 'lrn_expand'] = conv_bn(n[bottom], ks = 1, stride = stride, nout = nout * 4, pad = 0)
        n[s + 'sum'] = L.Eltwise(n[s + 'lrn3'], n[s + 'lrn_expand'])
    else:
        n[s + 'sum'] = L.Eltwise(n[s + 'lrn3'], n[bottom])

    n[s + 'relu3'] = L.ReLU(n[s + 'sum'], in_place=True)
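
A sketch of how these units are typically chained, assuming pycaffe and the conv_bn helper used above: each call looks up the previous top through the netspec's ordered tops dict and appends in place, so a stage is built by successive calls.

from caffe import layers as L
import caffe

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 224, 224]))
n.conv1, n.bn1, n.lrn1 = conv_bn(n.data, ks=7, stride=2, nout=64, pad=3)
residual_bottleneck_unit(n, nout=64, s='res2a_', newdepth=True)   # adds the 1x1 expand shortcut
residual_bottleneck_unit(n, nout=64, s='res2b_')                  # plain identity shortcut
print(n.to_proto())
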
def wrn_expansion(bottom, ks, nout, stride_first_conv=1, pad_first_conv=1):
    conv_1_1 = L.Convolution(bottom, kernel_size=ks, stride=stride_first_conv,
                                num_output=nout, pad=pad_first_conv, bias_term=False, weight_filler=dict(type='msra'))
    batch_norm = L.BatchNorm(conv_1_1, use_global_stats=False, in_place=True, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
    scale = L.Scale(batch_norm, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    conv_2_1 = L.Convolution(relu, kernel_size=ks, stride=1,
                                num_output=nout, pad=1, bias_term=False, weight_filler=dict(type='msra'))
    conv_1_2 = L.Convolution(bottom, kernel_size=1, stride=stride_first_conv,
                                num_output=nout, pad=0, bias_term=False, weight_filler=dict(type='msra'))
    addition = L.Eltwise(conv_2_1, conv_1_2, operation=P.Eltwise.SUM)
    return addition
Example 14
    def add_block_bn_c_bn(self, bottom, num_output):
        bn1 = L.BatchNorm(bottom, use_global_stats=False, in_place=False)
        bn1 = L.Scale(bn1, bias_term=True)
        conv1 = self.conv(bn1, num_output)
        bn2 = L.BatchNorm(conv1, use_global_stats=False, in_place=False)
        bn2 = L.Scale(bn2, bias_term=True)
        pr2 = L.PReLU(bn2, in_place=True)
        conv2 = self.conv(pr2, num_output)
        bn3 = L.BatchNorm(conv2, use_global_stats=False, in_place=False)
        bn3 = L.Scale(bn3, bias_term=True)
        output = L.Eltwise(bottom, bn3, eltwise_param=dict(operation=1))
        return output
Example 15
def resUnit2(bottom, kernelSize=3, numberOfOutput=16, stride=1, pad=1):
    bn1 = L.BatchNorm(bottom)
    relu1 = L.PReLU(bn1)
    conv1 = L.Convolution(relu1, convolution_param={"engine": 2, "kernel_size": kernelSize, "stride": stride, "num_output": numberOfOutput, "pad": pad, "group": 1})

    bn2 = L.BatchNorm(conv1)
    relu2 = L.PReLU(bn2)
    conv2 = L.Convolution(relu2, convolution_param={"engine": 2, "kernel_size": kernelSize, "stride": stride, "num_output": numberOfOutput, "pad": pad, "group": 1})

    add = L.Eltwise(bottom, conv2)

    return bn1, relu1, conv1, bn2, relu2, conv2, add
Example 16
def make_net_train(lmdb, preselection, batch_size=8, weights = [0, 0, 0.005, 0.01, 0.02, 0.08, 0.32]):

    net = caffe.NetSpec()

    net.img0, net.img1, net.flow_gt, net.aux = L.CustomData(
        data_param=dict(source=lmdb, preselection_file = preselection, backend=P.Data.LMDB, batch_size=batch_size, 
            preselection_label=1, rand_permute=True, rand_permute_seed=77, slice_point=[3,6,8], encoding=[1,1,2,3], 
            verbose=True),  ntop=4, include=dict(phase=0))

    net.img0_subtract = L.Eltwise(net.img0, eltwise_param=dict(operation=1,coeff=0.00392156862745))  
    net.img1_subtract = L.Eltwise(net.img1, eltwise_param=dict(operation=1,coeff=0.00392156862745))  

    net.img0_aug, net.img0_aug_params = augment_first_image(net.img0_subtract)

    aug_params      = generate_aug_params(net.img0_aug_params, net.img0_subtract, net.img0_aug)    
    net.img1_aug    = augment_second_image(net.img1_subtract, aug_params)

    net.flow_gt_aug     = L.FlowAugmentation(net.flow_gt, net.img0_aug_params, aug_params, augmentation_param=dict(crop_width=448, crop_height=320))
    net.scaled_flow_gt  = L.Eltwise(net.flow_gt_aug, eltwise_param=dict(operation=1,coeff=0.05))  

    net = make_pwc_net_encoder_plus(net, net.img0_aug, net.img1_aug) 
    
    for i in range(1, len(weights)):
        if weights[i] > 0.:
            scaled_flow_name  = 'scaled_flow_gt{}'.format(i)
            predict_flow_name = 'predict_flow{}'.format(i)
            loss_name         = 'loss{}'.format(i)
            setattr(net, scaled_flow_name, L.Downsample(net.scaled_flow_gt, getattr(net, predict_flow_name), propagate_down=[False, False]) )
            setattr(net, loss_name, L.L1Loss(getattr(net, predict_flow_name), getattr(net, scaled_flow_name), loss_weight=weights[i], l1_loss_param=dict(l2_per_location=True)))
    # loss at level 0: don't scale GT
    if weights[0] > 0.:
        net.loss0 = L.L1Loss(net.predict_flow0, net.scaled_flow_gt, loss_weight=weights[0] , l1_loss_param=dict(l2_per_location=True), propagate_down=[True, False])

    net.Silence0 = L.Silence(net.img0, ntop=0)
    net.Silence1 = L.Silence(net.img1, ntop=0)
    net.Silence2 = L.Silence(net.flow_gt, ntop=0)
    net.Silence3 = L.Silence(net.aux, ntop=0)
    # net.Silence4 = L.Silence(net.predict_flow2_scale, ntop=0)

    return net.to_proto()
Example 17
def _block(flag, net, bottom, nout, has_branch1=False, increasing_dims=True, dropout=0.3):
  eltwise_layer = 'block_{}_addition'.format(flag)

  stride = 1
  if has_branch1 and increasing_dims:
    stride = 2

  branch2a = _block_4in1(flag, '2a', net, bottom,   nout, 1, 3, stride)
  if dropout > 0:
    dropout_layer = 'block_{}_dropout'.format(flag)
    net[dropout_layer] = L.Dropout(branch2a, dropout_ratio=dropout)
    branch2b = _block_4in1(flag, '2b', net, net[dropout_layer], nout, 1, 3, 1)
  else:
    branch2b = _block_4in1(flag, '2b', net, branch2a, nout, 1, 3, 1)

  if has_branch1:
    branch1 = _block_4in1(flag, '1', net, bottom, nout, 1, 3, stride)
    net[eltwise_layer] = L.Eltwise(branch1, branch2b)
  else:
    net[eltwise_layer] = L.Eltwise(bottom, branch2b)

  return net[eltwise_layer]
Example 18
def unet_branch(bottom, insert_f, i, o, depad):
    pool = L.Convolution(bottom, kernel_size=2, stride=2, num_output=i, pad=0)
    relu1 = L.ReLU(pool, in_place=True, negative_slope=0.1)
    feat = insert_f(relu1)
    unpool = L.Deconvolution(feat,
                             convolution_param=dict(num_output=o,
                                                    kernel_size=2,
                                                    pad=0,
                                                    stride=2))
    relu2 = L.ReLU(unpool, in_place=True, negative_slope=0.1)
    crop = L.Crop(bottom, relu2, crop_param=dict(axis=2, offset=depad))
    cadd = L.Eltwise(crop, relu2, operation=P.Eltwise.SUM)
    return cadd
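
The insert_f argument is a callback that builds the inner part of the U, which is what lets these branches nest recursively. A small sketch, assuming pycaffe; inner_block is a hypothetical stand-in for the real inner network, and depad=2 compensates for the border lost to its unpadded 3x3 convolution after upsampling.

from caffe import layers as L
import caffe

def inner_block(x):
    # hypothetical inner stage: one unpadded 3x3 conv, so each spatial dim shrinks by 2
    c = L.Convolution(x, kernel_size=3, pad=0, num_output=64)
    return L.ReLU(c, in_place=True, negative_slope=0.1)

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 128, 128]))
# 128 -> 64 (stride-2 conv) -> 62 (inner) -> 124 (deconv), so the crop offset is 2
n.out = unet_branch(n.data, inner_block, i=64, o=3, depad=2)
print(n.to_proto())
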
Example 19
def make_cunet():
    netoffset = 28
    ch = 3
    input_size = 256 + netoffset * 2
    assert (input_size % 4 == 0)

    data = L.Input(name="input",
                   shape=dict(dim=[1, ch, input_size, input_size]))
    u1 = unet1(data, ch=ch, deconv=False)
    u2 = unet2(u1, ch=ch, deconv=False)
    crop = L.Crop(u1, u2, crop_param=dict(axis=2, offset=20))
    cadd = L.Eltwise(crop, u2, operation=P.Eltwise.SUM)
    return to_proto(cadd)
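
To materialize the generated definition, the returned NetParameter can be written to disk and loaded back, assuming a Caffe build that provides every layer used above:

import caffe

with open('cunet.prototxt', 'w') as f:
    f.write(str(make_cunet()))
net = caffe.Net('cunet.prototxt', caffe.TEST)
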
Example 20
def normalize(bottom, dim):

    bottom_relu = L.ReLU(bottom)
    sum = L.Convolution(bottom_relu,
                        convolution_param = dict(num_output = 1, kernel_size = 1, stride = 1,
                                                 weight_filler = dict(type = 'constant', value = 1),
                                                 bias_filler = dict(type = 'constant', value = 0)),
                        param=[{'lr_mult':0, 'decay_mult':0}, {'lr_mult':0, 'decay_mult':0}])

    denom = L.Power(sum, power=(-1.0), shift=1e-12)
    denom = L.Tile(denom, axis=1, tiles=dim)

    return L.Eltwise(bottom_relu, denom, operation=P.Eltwise.PROD)
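
Numerically this is an L1 normalization across channels: the frozen constant 1x1 convolution sums the rectified channels, Power with power=-1 and shift=1e-12 inverts that sum, Tile broadcasts it back to dim channels, and the element-wise product performs the division. A NumPy reference of the same computation (for intuition only):

import numpy as np

x = np.random.randn(1, 4, 2, 2).astype(np.float32)
r = np.maximum(x, 0)                              # ReLU
y = r / (r.sum(axis=1, keepdims=True) + 1e-12)    # per-pixel channel-wise L1 normalization
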
Example 21
def createEncoder(n, name, bottom, nout, stride=1, pad=1, freeze=False):
    prefix = name + "a_branch1"
    n[prefix], n[prefix.replace("res","bn")], n[prefix.replace("res","scale")] = conv_BN_scale(bottom, nout, 1, stride=stride, pad=0, freeze=freeze)

    prefix_a = name + "a_branch2a"
    n[prefix_a], n[prefix_a.replace("res","bn")], n[prefix_a.replace("res","scale")], n[prefix_a+"_relu"] = conv_BN_scale_relu(bottom, nout, stride=stride)
    prefix_b = name + "a_branch2b"
    n[prefix_b], n[prefix_b.replace("res","bn")], n[prefix_b.replace("res","scale")] = conv_BN_scale(n[prefix_a+"_relu"], nout)

    n[name+"a"] = L.Eltwise(n[prefix.replace("res","scale")], n[prefix_b.replace("res","scale")], operation=P.Eltwise.SUM)
    n[name+"a_relu"] = L.ReLU(n[name+"a"], in_place=True)

    prefix2_a = name + "b_branch2a"
    n[prefix2_a], n[prefix2_a.replace("res","bn")], n[prefix2_a.replace("res","scale")], n[prefix2_a+"_relu"] = conv_BN_scale_relu(n[name+"a_relu"], nout)
    prefix2_b = name + "b_branch2b"
    n[prefix2_b], n[prefix2_b.replace("res","bn")], n[prefix2_b.replace("res","scale")] = conv_BN_scale(n[prefix2_a+"_relu"], nout)
    n[name+"b"] = L.Eltwise(n[name+"a_relu"], n[prefix2_b.replace("res","scale")], operation=P.Eltwise.SUM)
    n[name+"b_relu"] = L.ReLU(n[name+"b"], in_place=True)
    return {
        "bottom": bottom,
        "top": n[name+"b_relu"]
    }
Example 22
    def eltwise_distance(self, vec1, vec2):
        mult = L.Eltwise(vec1, vec2, operation=0)  # 0 = P.Eltwise.PROD
        norm_mult = self.normalize(mult,
                                   numtiles=self.visual_embedding_dim[-1])

        score = L.InnerProduct(
            norm_mult,
            num_output=1,
            weight_filler=self.uniform_weight_filler(-0.08, .08),
            param=self.learning_params([[1, 1], [2, 0]],
                                       ['eltwise_dist', 'eltwise_dist_b']))

        return score
Example 23
def incept_2(bottom, num_output):
    # branch 1
    conv_b1_a, bn_b1_a, scale_b1_a = conv_bn(bottom,
                                             num_output,
                                             pad=1,
                                             kernel_size=3,
                                             stride=1)
    # add
    eltw = L.Eltwise(bottom, conv_b1_a, eltwise_param=dict(operation=1))
    rule = L.ReLU(eltw, in_place=True, engine=engine)

    return conv_b1_a, bn_b1_a, scale_b1_a, \
           eltw, rule
Example 24
def ResBlock(data, cout, transform=False, z=False):
    # left_=SingleConv(data,cout,kernel_size=3,stride=2,padding=1)

    z_kernel_size, z_padding = (1, 0) if z else (3, 1)
    right = SingleConv(data,
                       cout,
                       kernel_size=[z_kernel_size, 3, 3],
                       padding=[z_padding, 1, 1])
    right = conv_bn(right, cout, kernel_size=3, stride=1, padding=1)

    if transform:
        data = SingleConv(data, cout)
    return L.ReLU(L.Eltwise(data, right, operation=1, engine=3))
def exp_proto(mode, batchsize, T, exp_T, question_vocab_size, exp_vocab_size):
    n = caffe.NetSpec()
    mode_str = json.dumps({'mode': mode, 'batchsize': batchsize})
    n.exp_att_feature, n.exp, n.exp_out, n.exp_cont_1, n.exp_cont_2 = \
        L.Python(module='exp_data_provider_layer', layer='ExpDataProviderLayer', param_str=mode_str, ntop=5)

    n.exp_embed_ba = L.Embed(n.exp, input_dim=exp_vocab_size, num_output=300, \
        weight_filler=dict(type='uniform', min=-0.08, max=0.08))
    n.exp_embed = L.TanH(n.exp_embed_ba)

    # LSTM1 for Explanation
    n.exp_lstm1 = L.LSTM(\
                   n.exp_embed, n.exp_cont_1,\
                   recurrent_param=dict(\
                       num_output=2048,\
                       weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
                       bias_filler=dict(type='constant',value=0)))

    n.exp_lstm1_dropped = L.Dropout(n.exp_lstm1,
                                    dropout_param={'dropout_ratio': 0.3})

    # Merge with LSTM1 for explanation
    n.exp_att_resh = L.Reshape(
        n.exp_att_feature, reshape_param=dict(shape=dict(dim=[1, -1, 2048])))
    n.exp_att_tiled = L.Tile(n.exp_att_resh, axis=0, tiles=exp_T)
    n.exp_eltwise_all = L.Eltwise(n.exp_lstm1_dropped,
                                  n.exp_att_tiled,
                                  eltwise_param={'operation': P.Eltwise.PROD})
    n.exp_eltwise_all_sqrt = L.SignedSqrt(n.exp_eltwise_all)
    n.exp_eltwise_all_l2 = L.L2Normalize(n.exp_eltwise_all_sqrt)
    n.exp_eltwise_all_drop = L.Dropout(n.exp_eltwise_all_l2,
                                       dropout_param={'dropout_ratio': 0.3})

    # LSTM2 for Explanation
    n.exp_lstm2 = L.LSTM(\
                   n.exp_eltwise_all_drop, n.exp_cont_2,\
                   recurrent_param=dict(\
                       num_output=1024,\
                       weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
                       bias_filler=dict(type='constant',value=0)))
    n.exp_lstm2_dropped = L.Dropout(n.exp_lstm2,
                                    dropout_param={'dropout_ratio': 0.3})

    n.exp_prediction = L.InnerProduct(n.exp_lstm2_dropped,
                                      num_output=exp_vocab_size,
                                      weight_filler=dict(type='xavier'),
                                      axis=2)

    n.silence_exp_prediction = L.Silence(n.exp_prediction, ntop=0)

    return n.to_proto()
Example 26
def expand_dim_n_bottleneck_B(split, bottom, dim_in, features, stride):
    dim_branch_out = int(math.floor(features * (opt.baseWidth / 64)))
    dim_out = dim_in * 4
    branch_container = list()
    for i in range(opt.cardinalty):
        scale1, relu1 = conv_BN_scale_relu(split, bottom, dim_branch_out, 1, 1,
                                           0)
        scale2, relu2 = conv_BN_scale_relu(split, relu1, dim_branch_out, 3,
                                           stride, 1)
        branch_container.append(relu2)
    comb = L.Concat(*branch_container, in_place=True)
    scale_comb, relu_comb = conv_BN_scale_relu(split, comb, dim_out, 1, 1, 0)
    scale_i, relu_i = conv_BN_scale_relu(split, bottom, dim_out, 1, stride, 0)
    return L.Eltwise(relu_comb, relu_i, operation=P.Eltwise.SUM)
Example 27
    def ranking_loss(self, p, n, t, lw=1):

        #For ranking used in paper
        distance_p = self.distance_function(p, t)
        distance_n = self.distance_function(n, t)
        negate_distance_n = L.Power(distance_n, scale=-1)
        max_sum = L.Eltwise(distance_p, negate_distance_n, operation=1)
        max_sum_margin = L.Power(max_sum, shift=self.margin)
        max_sum_margin_relu = L.ReLU(max_sum_margin, in_place=False)
        ranking_loss = L.Reduction(max_sum_margin_relu,
                                   operation=4,
                                   loss_weight=[lw])

        return ranking_loss
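
Here Eltwise operation=1 is SUM and Reduction operation=4 is MEAN, so the stack computes a standard margin ranking loss, mean(max(0, d(p,t) - d(n,t) + margin)). A NumPy reference of the same quantity (for intuition only):

import numpy as np

def ranking_loss_ref(d_p, d_n, margin):
    # mean(max(0, d_p - d_n + margin)) over all elements
    return np.maximum(0.0, d_p - d_n + margin).mean()
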
def res_block(input,stride=2,num_output=32,pad1=1,pad2=1,MAX_POOL=False):
    block1 = net_block(input=input,kernel_size=3,num_output=num_output,stride=stride,pad=pad1)
    block2 = net_block(input=block1,kernel_size=3,num_output=num_output,stride=1,pad=pad2)
    #block3 = net_block(input=block2,kernel_size=3,num_output=num_output,stride=1,pad=pad2)
    #block4 = eltwise_relu(block1,block2)
    residual_eltwise = L.Eltwise(block1, block2, eltwise_param=dict(operation=1))
    if MAX_POOL:
        maxpool1=L.Pooling(residual_eltwise, pool=P.Pooling.MAX,stride=2,kernel_size=3)
        bn = L.BatchNorm(maxpool1, use_global_stats=False, in_place=True)
        relu=L.ReLU(bn, in_place=True)
    else:
        bn = L.BatchNorm(residual_eltwise, use_global_stats=False, in_place=True)
        relu=L.ReLU(bn, in_place=True)        
    return relu
Example 29
def Inception_ResNet_C(bottom,
                       bottom_size=2048,
                       num1x1=192,
                       num1x3=224,
                       num3x1=256):
    conv1x1 = conv_factory_relu(bottom, 1, num1x1, 1, 0)
    conv1x3_1x1 = conv_factory_relu(bottom, 1, num1x1, 1, 0)
    conv3x1_1x3 = conv_factory_relu_h_w(conv1x3_1x1, 1, 3, num1x3, 1, 0, 1)
    conv3x1 = conv_factory_relu_h_w(conv3x1_1x3, 3, 1, num3x1, 1, 1, 0)

    concat = L.Concat(conv1x1, conv3x1)
    proj = conv_factory(concat, 1, bottom_size)
    residual = L.Eltwise(bottom, proj, operation=P.Eltwise.SUM)
    return residual
Example 30
def ResBody(net, from_layer, block_name, out2a, out2b, out2c, stride, use_branch1):
  # ResBody(net, 'pool1', '2a', 64, 64, 256, 1, True)

  conv_prefix = 'res{}_'.format(block_name)
  conv_postfix = ''
  bn_prefix = 'bn{}_'.format(block_name)
  bn_postfix = ''
  scale_prefix = 'scale{}_'.format(block_name)
  scale_postfix = ''
  use_scale = True

  if use_branch1:
    branch_name = 'branch1'
    ConvBNLayer(net, from_layer, branch_name, use_bn=True, use_relu=False,
        num_output=out2c, kernel_size=1, pad=0, stride=stride, use_scale=use_scale,
        conv_prefix=conv_prefix, conv_postfix=conv_postfix,
        bn_prefix=bn_prefix, bn_postfix=bn_postfix,
        scale_prefix=scale_prefix, scale_postfix=scale_postfix)
    branch1 = '{}{}'.format(conv_prefix, branch_name)
  else:
    branch1 = from_layer

  branch_name = 'branch2a'
  ConvBNLayer(net, from_layer, branch_name, use_bn=True, use_relu=True,
      num_output=out2a, kernel_size=1, pad=0, stride=stride, use_scale=use_scale,
      conv_prefix=conv_prefix, conv_postfix=conv_postfix,
      bn_prefix=bn_prefix, bn_postfix=bn_postfix,
      scale_prefix=scale_prefix, scale_postfix=scale_postfix)
  out_name = '{}{}'.format(conv_prefix, branch_name)

  branch_name = 'branch2b'
  ConvBNLayer(net, out_name, branch_name, use_bn=True, use_relu=True,
      num_output=out2b, kernel_size=3, pad=1, stride=1, use_scale=use_scale,
      conv_prefix=conv_prefix, conv_postfix=conv_postfix,
      bn_prefix=bn_prefix, bn_postfix=bn_postfix,
      scale_prefix=scale_prefix, scale_postfix=scale_postfix)
  out_name = '{}{}'.format(conv_prefix, branch_name)

  branch_name = 'branch2c'
  ConvBNLayer(net, out_name, branch_name, use_bn=True, use_relu=False,
      num_output=out2c, kernel_size=1, pad=0, stride=1, use_scale=use_scale,
      conv_prefix=conv_prefix, conv_postfix=conv_postfix,
      bn_prefix=bn_prefix, bn_postfix=bn_postfix,
      scale_prefix=scale_prefix, scale_postfix=scale_postfix)
  branch2 = '{}{}'.format(conv_prefix, branch_name)

  res_name = 'res{}'.format(block_name)
  net[res_name] = L.Eltwise(net[branch1], net[branch2])
  relu_name = '{}_relu'.format(res_name)
  net[relu_name] = L.ReLU(net[res_name], in_place=True)
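
A sketch of chaining ResBody to build one ResNet stage, following the call shown in the comment above; it assumes pycaffe plus the ConvBNLayer helper from the same SSD-style model library, which is not shown here.

from caffe import layers as L, params as P
import caffe

net = caffe.NetSpec()
net.data = L.Input(shape=dict(dim=[1, 3, 224, 224]))
net.conv1 = L.Convolution(net.data, num_output=64, kernel_size=7, pad=3, stride=2)
net.pool1 = L.Pooling(net.conv1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
ResBody(net, 'pool1', '2a', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=True)
ResBody(net, 'res2a_relu', '2b', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False)
ResBody(net, 'res2b_relu', '2c', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False)
print(net.to_proto())
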