Example #1
def _mul(input, *args):
    x = raw__mul__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input), log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 0  # 0 = PROD (element-wise product)
    log.cnet.add_layer(layer)
    return x
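For reference, Caffe's EltwiseParameter enum defines PROD = 0, SUM = 1 and MAX = 2, so the hard-coded 0 above selects the element-wise product. A minimal sketch of a more explicit spelling, assuming the generated protobuf module used elsewhere in these examples (caffe_net.pb) exposes that enum:

# Hypothetical alternative to the magic number; assumes caffe_net.pb exposes
# Caffe's standard EltwiseParameter enum (PROD = 0, SUM = 1, MAX = 2).
layer.param.eltwise_param.operation = caffe_net.pb.EltwiseParameter.PROD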
def _pool(type, raw, input, x, kernel_size, stride, padding, ceil_mode):
    # TODO dilation,ceil_mode,return indices
    layer_name = log.add_layer(name='{}_pool'.format(type))
    top_blobs = log.add_blobs([x], name='{}_pool_blob'.format(type))
    layer = caffe_net.Layer_param(name=layer_name, type='Pooling',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # TODO w,h different kernel, stride and padding
    # processing ceil mode
    layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,
                     pad=padding, type=type.upper())  # , ceil_mode = ceil_mode)
    log.cnet.add_layer(layer)
Example #3
def _relu6(raw, input, inplace=False):
    # FIXME: the DPU does not support ReLU6, so fall back to a plain ReLU
    x = raw(input, False)
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name,
                                  type='ReLU',
                                  bottom=[log.blobs(input)],
                                  top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #4
def _leaky_relu(raw, input, negative_slope=0.01, inplace=False):
    x = raw(input, negative_slope)
    name = log.add_layer(name='leaky_relu')
    log.add_blobs([x], name='leaky_relu_blob')
    layer = caffe_net.Layer_param(name=name,
                                  type='ReLU',
                                  bottom=[log.blobs(input)],
                                  top=[log.blobs(x)])
    layer.param.relu_param.negative_slope = negative_slope
    log.cnet.add_layer(layer)
    return x
Example #5
def _tanh(raw, input):
    # for tanh activation
    x = raw(input)
    name = log.add_layer(name='tanh')
    log.add_blobs([x], name='tanh_blob')
    layer = caffe_net.Layer_param(name=name,
                                  type='TanH',
                                  bottom=[log.blobs(input)],
                                  top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #6
def _dropout(raw, input, p=0.5, training=False, inplace=False):
    x = raw(input, p, training, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='dropout')
    top_blobs = log.add_blobs([x], name=bottom_blobs[0], with_num=False)
    layer = caffe_net.Layer_param(name=layer_name, type='Dropout',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.dropout_param.dropout_ratio = p
    layer.param.include.extend([caffe_net.pb.NetStateRule(phase=0)])  # phase 0 = TRAIN, 1 = TEST
    log.cnet.add_layer(layer)
    return x
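The NetStateRule above restricts the Dropout layer to the training phase: in Caffe's Phase enum, TRAIN = 0 and TEST = 1, which is what phase=0 encodes. A minimal sketch with the named value, assuming caffe_net.pb exposes that enum:

# Hypothetical: spell out the phase instead of the raw 0; assumes caffe_net.pb
# exposes Caffe's Phase enum (TRAIN = 0, TEST = 1).
layer.param.include.extend([caffe_net.pb.NetStateRule(phase=caffe_net.pb.TRAIN)])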
Example #7
def _relu(raw, input, inplace=False):
    # for relu activation
    x = raw(input, False)
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name,
                                  type='ReLU',
                                  bottom=[log.blobs(input)],
                                  top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #8
def _isub(input, *args):
    x = raw__isub__(input, *args)
    x=x.clone()
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1 # sum is 1
    layer.param.eltwise_param.coeff.extend([1., -1.])
    log.cnet.add_layer(layer)
    return x
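Caffe's Eltwise SUM with coefficients computes a weighted sum of its bottom blobs, so the coeff [1., -1.] above turns the sum into an element-wise subtraction. A quick NumPy sanity check of that mapping (illustrative only, not part of the converter):

import numpy as np

# Eltwise SUM with coefficients computes coeff[0] * a + coeff[1] * b,
# so [1., -1.] reproduces element-wise a - b.
a = np.random.randn(2, 3).astype(np.float32)
b = np.random.randn(2, 3).astype(np.float32)
coeff = [1.0, -1.0]
assert np.allclose(coeff[0] * a + coeff[1] * b, a - b)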
Example #9
def _sigmoid(raw, input):
    # for sigmoid activation
    #pdb.set_trace()
    x = raw(input)
    name = log.add_layer(name='sigmoid')
    log.add_blobs([x], name='sigmoid_blob')
    layer = caffe_net.Layer_param(name=name,
                                  type='Sigmoid',
                                  bottom=[log.get_blobs(input)],
                                  top=[log.get_blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #10
def _tanh(raw, input):
    # Applies the element-wise function (torch.nn.Tanh):
    #
    #     Tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
    x = raw(input)
    name = log.add_layer(name='tanh')
    log.add_blobs([x], name='tanh_blob')
    layer = caffe_net.Layer_param(name=name, type='TanH',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #11
def _hardtanh(raw, input, min_val, max_val, inplace):
    # Applies the element-wise function:
    #
    # torch.nn.ReLU6
    print('relu6: ', log.blobs(input))
    x = raw(input, min_val, max_val)
    name = log.add_layer(name='relu6')
    log.add_blobs([x], name='relu6_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU6',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #12
def _split(raw, tensor, split_size, dim=0):
    # split in pytorch is slice in caffe
    x = raw(tensor, split_size, dim)
    layer_name = log.add_layer('split')
    top_blobs = log.add_blobs(x, name='split_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Slice',
                                  bottom=[log.blobs(tensor)], top=top_blobs)
    slice_num = int(np.floor(tensor.size()[dim] / split_size))
    slice_param = caffe_net.pb.SliceParameter(axis=dim, slice_point=[split_size * i for i in range(1, slice_num)])
    layer.param.slice_param.CopyFrom(slice_param)
    log.cnet.add_layer(layer)
    return x
Example #13
def _threshold(raw,input, threshold, value, inplace=False):
    # for threshold or relu
    if threshold == 0 and value == 0:
        x = raw(input, threshold, value, False)
        name = log.add_layer(name='relu')
        log.add_blobs([x], name='relu_blob')
        layer = caffe_net.Layer_param(name=name, type='ReLU',
                                      bottom=[log.get_blobs(input)], top=[log.get_blobs(x)])
        log.cnet.add_layer(layer)
        return x
    if value != 0:
        raise NotImplementedError("value != 0 is not implemented in caffe")
    x = raw(input, threshold, value, False)
    bottom_blobs = [log.get_blobs(input)]
    layer_name = log.add_layer(name='threshold')
    top_blobs = log.add_blobs([x], name='threshold_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Threshold',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.threshold_param.threshold = threshold
    log.cnet.add_layer(layer)
    return x
Example #14
def ___add__(input, *args):
    x = raw_tensor_op['__add__'](input, *args)
    if not NET_INITED:
        return x
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    if not isinstance(args[0], torch.Tensor):
        layer = caffe_net.Layer_param(name=layer_name,
                                      type='Power',
                                      bottom=[log.blobs(input)],
                                      top=top_blobs)
        layer.param.power_param.shift = args[0]
    else:
        layer = caffe_net.Layer_param(
            name=layer_name,
            type='Eltwise',
            bottom=[log.blobs(input), log.blobs(args[0])],
            top=top_blobs)
        layer.param.eltwise_param.operation = 1  # sum is 1
    log.cnet.add_layer(layer)
    return x
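The scalar branch above relies on Caffe's Power layer, which computes (shift + scale * x) ** power with scale = 1 and power = 1 by default, so setting only shift adds the constant. A quick NumPy check of that formula (illustrative only):

import numpy as np

# Power layer: y = (shift + scale * x) ** power; with the default
# scale = 1 and power = 1, shift = c gives simply x + c.
x = np.random.randn(4).astype(np.float32)
shift, scale, power = 2.5, 1.0, 1.0
assert np.allclose((shift + scale * x) ** power, x + 2.5)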
Example #15
def _view(input, *args):
    x = raw_view(input, *args)
    layer_name = log.add_layer(name='view')
    top_blobs = log.add_blobs([x], name='view_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Reshape',
                                  bottom=[log.blobs(input)], top=top_blobs)
    # TODO: add reshape to the nn_tools layers
    dims = list(args)
    dims[0] = 0  # the first dim should be batch_size (0 tells Caffe's Reshape to copy it from the bottom blob)
    layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims))
    log.cnet.add_layer(layer)
    return x
Example #16
def _sigmoid(raw, input):
    # Applies the element-wise function:
    #
    #     Sigmoid(x) = 1 / (1 + exp(-x))
    x = raw(input)
    name = log.add_layer(name='sigmoid')
    log.add_blobs([x], name='sigmoid_blob')
    layer = caffe_net.Layer_param(name=name, type='Sigmoid',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #17
def _cat(raw, inputs, dimension=0):
    x = raw(inputs, dimension)
    bottom_blobs = []
    for input in inputs:
        bottom_blobs.append(log.blobs(input))
    layer_name = log.add_layer(name='cat')
    top_blobs = log.add_blobs([x], name='cat_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Concat',
                                  bottom=bottom_blobs, top=top_blobs)
    layer.param.concat_param.axis = dimension
    log.cnet.add_layer(layer)
    return x
Example #18
def _iadd(input, *args):
    x = raw__iadd__(input, *args)
    if not NET_INITTED:
        return x
    x=x.clone()
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.blobs(input),log.blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1 # sum is 1
    log.cnet.add_layer(layer)
    return x
Example #19
def ___sub__(input, *args):
    x = raw_tensor_magic_op['__sub__'](input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Eltwise',
                                  bottom=[log.get_blobs(input), log.get_blobs(args[0])], top=top_blobs)
    layer.param.eltwise_param.operation = 1 # sum is 1
    layer.param.eltwise_param.coeff.extend([1.,-1.])
    log.cnet.add_layer(layer)
    return x
Example #20
def _softmax(raw, input, dim=None, _stacklevel=3):
    # for F.softmax
    x=raw(input, dim=dim)
    if dim is None:
        dim=F._get_softmax_dim('softmax', input.dim(), _stacklevel)
    name = log.add_layer(name='softmax')
    log.add_blobs([x], name='softmax_blob')
    layer = caffe_net.Layer_param(name=name, type='Softmax',
                                  bottom=[log.get_blobs(input)], top=[log.get_blobs(x)])
    layer.param.softmax_param.axis=dim
    log.cnet.add_layer(layer)
    return x
Example #21
def _batch_norm(raw,
                input,
                running_mean,
                running_var,
                weight=None,
                bias=None,
                training=False,
                momentum=0.1,
                eps=1e-5):
    # because the running_mean and running_var will be changed by the _batch_norm operation, we first save the parameters

    x = raw(input, running_mean, running_var, weight, bias, training, momentum,
            eps)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='batch_norm')
    top_blobs = log.add_blobs([x], name='batch_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1,
                                   type='BatchNorm',
                                   bottom=bottom_blobs,
                                   top=top_blobs)
    if running_mean is None or running_var is None:
        # not use global_stats, normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
        running_mean_clone = running_mean.clone()
        running_var_clone = running_var.clone()
        layer1.add_data(running_mean_clone.cpu().numpy(),
                        running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2,
                                       type='Scale',
                                       bottom=top_blobs,
                                       top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x
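Caffe's BatchNorm layer only normalizes with the stored statistics (its third blob is the moving-average scale factor, set to 1.0 above), while the affine weight and bias go into the separate Scale layer. A NumPy sketch of what the BatchNorm + Scale pair computes at inference time (illustrative only):

import numpy as np

# Inference-time batch norm: normalize with the stored running statistics,
# then apply the per-channel weight (gamma) and bias (beta) via Scale.
x = np.random.randn(2, 3, 4, 4).astype(np.float32)
running_mean = np.zeros(3, dtype=np.float32)
running_var = np.ones(3, dtype=np.float32)
gamma = np.random.randn(3).astype(np.float32)
beta = np.random.randn(3).astype(np.float32)
eps = 1e-5

normalized = (x - running_mean[None, :, None, None]) / np.sqrt(running_var[None, :, None, None] + eps)
out = gamma[None, :, None, None] * normalized + beta[None, :, None, None]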
Example #22
def _contiguous(input, *args):
    x = raw__contiguous__(input, *args)
    name = log.add_layer(name='contiguous')

    if log.blobs(x) is None:
        log.add_blobs([x], name='contiguous_blob')

    layer = caffe_net.Layer_param(name=name,
                                  type='NeedRemove',
                                  bottom=[log.blobs(input)],
                                  top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
Example #23
def _mul(input, *args):
    x = raw__mul__(input, *args)
    if not NET_INITTED:
        return x
    # element wise mul using scale layer
    assert args[0].shape[0] == input.shape[0] and args[0].shape[
        1] == input.shape[1]
    if not (args[0].shape[2] == input.shape[2]
            and args[0].shape[3] == input.shape[3]):
        print(
            "WARNING: DPU cannot handle this implicitly-broadcast elementwise multiplication efficiently! {} with {}"
            .format(args[0].shape, input.shape))
        # Handle implicit broadcasting in pytorch: reshape -> scale;
        # actually this is not supported by the DPU (2019.10.16)
        # add reshape layer
        assert args[0].shape[2] == 1 and args[0].shape[3] == 1
        layer_name = log.add_layer(name="reshape")
        y = args[0].view(args[0].shape[0], -1)
        layer_name = log.add_layer(name='mul')
        top_blobs = log.add_blobs([x], name='mul_blob')
        layer = caffe_net.Layer_param(name=layer_name,
                                      type='Scale',
                                      bottom=[log.blobs(input),
                                              log.blobs(y)],
                                      top=top_blobs)
        layer.param.scale_param.bias_term = False
        layer.param.scale_param.axis = 0
    else:
        # actually, the DPU only supports plain element-wise multiplication
        layer_name = log.add_layer(name='mul')
        top_blobs = log.add_blobs([x], name='mul_blob')
        layer = caffe_net.Layer_param(
            name=layer_name,
            type='Eltwise',
            bottom=[log.blobs(input), log.blobs(args[0])],
            top=top_blobs)
        layer.param.eltwise_param.operation = 0  # 0 = PROD (element-wise product)
    log.cnet.add_layer(layer)
    return x
Example #24
def _linear(raw, input, weight, bias=None):
    x = raw(input, weight, bias)
    layer_name = log.add_layer(name='fc')
    top_blobs = log.add_blobs([x], name='fc_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='InnerProduct',
                                  bottom=[log.blobs(input)], top=top_blobs)
    layer.fc_param(x.size()[1], has_bias=bias is not None)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
Example #25
def _interpolate(raw, input,size=None, scale_factor=None, mode='nearest', align_corners=None):
    if mode != "nearest" or align_corners != None:
        raise NotImplementedError("not implement F.interpolate totoaly")
    x = raw(input,size , scale_factor ,mode)

    layer_name = log.add_layer(name='upsample')
    top_blobs = log.add_blobs([x], name='upsample_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Upsample',
                                  bottom=[log.blobs(input)], top=top_blobs)

    layer.upsample_param(size =(input.size(2),input.size(3)), scale_factor= scale_factor)
    log.cnet.add_layer(layer)
    return x
Example #26
def _sqrt(input, *args):
    x = raw_tensor_magic_op['sqrt'](input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='sqrt')
    top_blobs = log.add_blobs([x], name='sqrt_blob')
    layer = caffe_net.Layer_param(name=layer_name,
                                  type='Power',
                                  bottom=[log.get_blobs(input)],
                                  top=top_blobs)
    layer.param.power_param.power = 0.5
    log.cnet.add_layer(layer)
    return x
Example #27
def ___pow__(input, *args):
    x = raw_tensor_magic_op['__pow__'](input, *args)
    if not NET_INITTED:
        return x
    if not isinstance(args[0],int):
        raise NotImplementedError('power only supports int exponents for now in nn_tools')
    layer_name = log.add_layer(name='power')
    top_blobs = log.add_blobs([x], name='power_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Power',
                                  bottom=[log.get_blobs(input)], top=top_blobs)
    layer.param.power_param.power = args[0]  # y = x ** args[0]
    log.cnet.add_layer(layer)
    return x
Example #28
def torch_sqrt(raw,*args):
    x = raw(*args)
    if not NET_INITTED:
        return x
    if not isinstance(args[0], int):
        raise NotImplementedError('sqrt only support int now in nn_tools')
    layer_name = log.add_layer(name='sqrt')
    top_blobs = log.add_blobs([x], name='sqrt_blob')
    layer = caffe_net.Layer_param(name=layer_name, type='Power',
                                  bottom=[log.get_blobs(args[0])], top=top_blobs)
    layer.param.power_param.power = 0.5
    log.cnet.add_layer(layer)
    return x
Example #29
def _proposal(raw, cls_prob, bbox_delta):
    """
    How to support a new layer type:
    layer_name=log.add_layer(layer_type_name)
    top_blobs=log.add_blobs(<output of that layer>)
    layer=caffe_net.Layer_param(xxx)
    <set layer parameters>
    [<layer.add_data(*datas)>]
    log.cnet.add_layer(layer)
    
    Please mute (disable) the in-place operations, otherwise their output blobs cannot be found in the graph.

    Note: only functions from torch.nn.functional can be converted into Caffe layers.
    """
    """ example 1 
    # The parameters are: scale, the output/input size ratio, e.g. 2; scale_h and
    # scale_w, the same ratio for the h and w directions separately; pad_out_h and
    # pad_out_w, only useful when scale is 2, the extra padding added to the output
    # in the h and w directions; upsample_h and upsample_w, the output image size.
    # In the Upsample-related code it is recommended to define the output size of
    # the Upsample layer only via upsample_h and upsample_w; none of the other
    # parameters are recommended any more.
    # for nearest _interpolate
    if mode != "nearest" or align_corners != None:
        raise NotImplementedError("not implement F.interpolate totoaly")
    x = raw(input, size, scale_factor,mode)

    layer_name = log.add_layer(name='upsample')
    top_blobs = log.add_blobs([x], name='upsample_blob'.format(type))
    layer = caffe_net.Layer_param(name=layer_name, type='Upsample',
                                  bottom=[log.blobs(input)], top=top_blobs)

    layer.upsample_param(size =(input.size(2),input.size(3)), scale_factor= scale_factor)
    log.cnet.add_layer(layer)
    return x """
    rois, actual_rois_num = raw(cls_prob, bbox_delta)

    bottom_blobs = []
    bottom_blobs.append(log.blobs(cls_prob))
    bottom_blobs.append(log.blobs(bbox_delta))

    top_blobs = log.add_blobs([rois, actual_rois_num], name='proposal_blob')

    layer_name = log.add_layer(name='Proposal')

    layer = caffe_net.Layer_param(name=layer_name,
                                  type='Proposal',
                                  bottom=bottom_blobs,
                                  top=top_blobs)
    layer.proposal_param()
    log.cnet.add_layer(layer)
    return rois, actual_rois_num
    """ example 2
Example #30
def _pool(type, raw, input, x, kernel_size, stride, padding, ceil_mode):
    # TODO dilation,ceil_mode,return indices
    layer_name = log.add_layer(name='{}_pool'.format(type))
    top_blobs = log.add_blobs([x], name='{}_pool_blob'.format(type))
    layer = caffe_net.Layer_param(name=layer_name, type='Pooling',
                                  bottom=[log.get_blobs(input)], top=top_blobs)
    # TODO w,h different kernel, stride and padding
    # processing ceil mode
    layer.pool_param(kernel_size=kernel_size, stride=kernel_size if stride is None else stride,
                     pad=padding, type=type.upper())
    log.cnet.add_layer(layer)
    if ceil_mode == False and stride is not None:
        oheight = (input.size()[2] - _pair(kernel_size)[0] + 2 * _pair(padding)[0]) % (_pair(stride)[0])
        owidth = (input.size()[3] - _pair(kernel_size)[1] + 2 * _pair(padding)[1]) % (_pair(stride)[1])
        if oheight != 0 or owidth != 0:
            caffe_out = raw(input, kernel_size, stride, padding, ceil_mode=True)
            warn = "WARN: the output shapes do not match at {}: " \
                   "input {} output---Pytorch:{}---Caffe:{}\n" \
                   "This is caused by Caffe using ceil mode while PyTorch uses floor mode for the pooling output shape" \
                   ".\n".format(layer_name, input.size(), x.size(), caffe_out.size()) + \
                   "WARN: Adding the slice layers `{}` `{}` to the caffe prototxt to work around the shape mismatch in caffe. " \
                   "You can remove them manually if you don't need them.\n".format(layer_name + '_slice1', layer_name + '_slice2')
            print(warn)
            global WARNING_STRINGS
            WARNING_STRINGS += warn
            top_name = top_blobs[0]
            tmp1_name = top_name + '_tmp1'
            drop1_name = top_name + '_drop1'
            tmp2_name = top_name + '_tmp2'
            drop2_name = top_name + '_drop2'
            log.cnet.net.layer[-1].top[0] = tmp1_name

            slice1_layer = caffe_net.Layer_param(name=layer_name + '_slice1', type='Slice',
                                                 bottom=[tmp1_name], top=[tmp2_name, drop1_name])
            slice1_layer.slice_param(-1, [x.size()[-1]])
            log.cnet.add_layer(slice1_layer)
            slice2_layer = caffe_net.Layer_param(name=layer_name + '_slice2', type='Slice', bottom=[tmp2_name], top=top_blobs+[drop2_name])
            slice2_layer.slice_param(-2, [x.size()[-2]])
            log.cnet.add_layer(slice2_layer)