Example #1
def _interpolate(
    raw,
    input,
    size=None,
    scale_factor=None,
    mode='nearest',
    align_corners=None,
):
    # The parameters here include: scale, the ratio of output size to input size
    # (e.g. 2); scale_h and scale_w, the same as scale but for the h and w
    # directions separately; pad_out_h and pad_out_w, extra padding added to the
    # output in the h and w directions, only meaningful when scale is 2; and
    # upsample_h and upsample_w, the output image size. In the Upsample-related
    # code it is recommended to define the output size of the Upsample layer
    # only through upsample_h and upsample_w; none of the other parameters are
    # recommended any longer.
    # for nearest _interpolate
    if mode != "nearest" or align_corners is not None:
        raise NotImplementedError("not implement F.interpolate totoaly")
    x = raw(input, size, scale_factor, mode)

    layer_name = log.add_layer(name='upsample')
    top_blobs = log.add_blobs([x], name='upsample_blob')
    layer = Layer_param(
        name=layer_name,
        type='Upsample',
        bottom=[log.blobs(input)],
        top=top_blobs,
    )

    layer.upsample_param(
        size=(input.size(2), input.size(3)),
        scale_factor=scale_factor,
    )
    log.cnet.add_layer(layer)
    return x
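
Every example in this listing follows the same convention: the first argument raw is the original PyTorch function, and the wrapper records an equivalent Caffe layer through log before returning the real output. Below is a minimal sketch of how such a wrapper might be installed; the Rp helper and the choice of F.interpolate as the patch target are illustrative assumptions, not part of the listing.

import torch.nn.functional as F

class Rp(object):
    # Hypothetical helper: routes every call to the original torch function
    # through a converter such as _interpolate(raw, *args, **kwargs).
    def __init__(self, raw, replace):
        self.raw = raw
        self.replace = replace

    def __call__(self, *args, **kwargs):
        # The converter receives the untouched original function as `raw`.
        return self.replace(self.raw, *args, **kwargs)

# Assumed usage: monkey-patch F.interpolate so that tracing a forward pass
# also emits the corresponding Caffe 'Upsample' layer.
F.interpolate = Rp(F.interpolate, _interpolate)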
Example #2
def _threshold(raw, input, threshold, value, inplace=False):
    # for threshold or relu
    if value != 0:
        raise NotImplementedError("value != 0 not implemented in caffe")
    elif value == 0 and threshold == 0:
        x = raw(input, threshold, value, inplace)
        bottom_blobs = [log.blobs(input)]
        layer_name = log.add_layer(name='relu')
        log.add_blobs([x], name='relu_blob')
        layer = Layer_param(
            name=layer_name,
            type='ReLU',
            bottom=bottom_blobs,
            top=[log.blobs(x)],
        )
        log.cnet.add_layer(layer)
        return x
    else:
        x = raw(input, threshold, value, inplace)
        bottom_blobs = [log.blobs(input)]
        layer_name = log.add_layer(name='threshold')
        top_blobs = log.add_blobs([x], name='threshold_blob')
        layer = Layer_param(
            name=layer_name,
            type='Threshold',
            bottom=bottom_blobs,
            top=top_blobs,
        )
        layer.param.threshold_param.threshold = threshold
        log.cnet.add_layer(layer)
        return x
Example #3
def _conv2d(raw,
            input,
            weight,
            bias=None,
            stride=1,
            padding=0,
            dilation=1,
            groups=1):
    x = raw(input, weight, bias, stride, padding, dilation, groups)
    layer_name = log.add_layer(name='conv')
    log.add_blobs([x], name='conv_blob')
    layer = Layer_param(
        name=layer_name,
        type='Convolution',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    layer.conv_param(
        x.size()[1],
        weight.size()[2:],
        stride=_pair(stride),
        pad=_pair(padding),
        dilation=_pair(dilation),
        bias_term=bias is not None,
        groups=groups,
    )
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.param.convolution_param.bias_term = False
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
Example #4
def _permute(input, *args):
    x = raw_permute(input, *args)
    layer_name = log.add_layer(name='permute')
    log.add_blobs([x], name='permute_blob')
    layer = Layer_param(
        name=layer_name,
        type='Permute',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    order1 = args[0]
    order2 = args[1]
    order3 = args[2]
    order4 = args[3]

    layer.permute_param(order1, order2, order3, order4)
    log.cnet.add_layer(layer)
    return x
Example #5
def _contiguous(input, *args):
    x = raw_contiguous(input, *args)
    layer_name = log.add_layer(name='contiguous')
    log.add_blobs([x], name='contiguous_blob')
    layer = Layer_param(
        name=layer_name,
        type='NeedRemove',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    log.cnet.add_layer(layer)
    return x
Example #6
def _sigmoid(raw, input):
    # Applies the element-wise function:
    # Sigmoid(x)= 1/(1+exp(−x))
    x = raw(input)
    layer_name = log.add_layer(name='sigmoid')
    log.add_blobs([x], name='sigmoid_blob')
    layer = Layer_param(
        name=layer_name,
        type='Sigmoid',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    log.cnet.add_layer(layer)
    return x
Example #7
def _leaky_relu(raw, input, negative_slope=0.01, inplace=False):
    x = raw(input, negative_slope)
    layer_name = log.add_layer(name='leaky_relu')
    log.add_blobs([x], name='leaky_relu_blob')
    layer = Layer_param(
        name=layer_name,
        type='ReLU',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    layer.param.relu_param.negative_slope = negative_slope
    log.cnet.add_layer(layer)
    return x
Example #8
def _tanh(raw, input):
    # for tanh activation
    x = raw(input)
    layer_name = log.add_layer(name='tanh')
    log.add_blobs([x], name='tanh_blob')
    layer = Layer_param(
        name=layer_name,
        type='TanH',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    log.cnet.add_layer(layer)
    return x
Example #9
def _relu(raw, input, inplace=False):
    # for relu
    x = raw(input, False)
    layer_name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = Layer_param(
        name=layer_name,
        type='ReLU',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    log.cnet.add_layer(layer)
    return x
Example #10
def _pool(type, raw, input, x, kernel_size, stride, padding, ceil_mode):
    # TODO dilation, ceil_mode, return indices
    layer_name = log.add_layer(name='{}_pool'.format(type))
    top_blobs = log.add_blobs([x], name='{}_pool_blob'.format(type))
    layer = Layer_param(
        name=layer_name,
        type='Pooling',
        bottom=[log.blobs(input)],
        top=top_blobs,
    )
    # TODO: support different kernel, stride and padding for h and w
    # handle ceil mode
    layer.pool_param(
        kernel_size=kernel_size,
        stride=kernel_size if stride is None else stride,
        pad=padding,
        type=type.upper(),
        ceil_mode=ceil_mode,
    )
    log.cnet.add_layer(layer)
    if ceil_mode is False and stride is not None:
        oheight = (input.size()[2] - _pair(kernel_size)[0] +
                   2 * _pair(padding)[0]) % (_pair(stride)[0])
        owidth = (input.size()[3] - _pair(kernel_size)[1] +
                  2 * _pair(padding)[1]) % (_pair(stride)[1])
        if oheight != 0 or owidth != 0:
            caffe_out = raw(input,
                            kernel_size,
                            stride,
                            padding,
                            ceil_mode=True)
            print("WARNING: the output shape miss match at {}: "
                  "input {} output---Pytorch:{}---Caffe:{}\n"
                  "This is caused by the different implementation "
                  "that ceil mode in caffe and the floor mode in pytorch.\n"
                  "You can add the clip layer in caffe prototxt manually "
                  "if shape mismatch error is caused in caffe. ".format(
                      layer_name, input.size(), x.size(), caffe_out.size()))
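
The check above flags inputs for which PyTorch's floor-mode pooling and Caffe's ceil-mode pooling disagree on the output size. A short worked example for a single spatial dimension, with assumed values for input size, kernel, stride and padding:

import math

# Assumed example: 112-pixel input, 3x3 kernel, stride 2, no padding.
i, k, s, p = 112, 3, 2, 0

floor_out = math.floor((i - k + 2 * p) / s) + 1   # PyTorch (ceil_mode=False): 55
ceil_out = math.ceil((i - k + 2 * p) / s) + 1     # Caffe (ceil mode): 56

# (i - k + 2 * p) % s is the quantity tested above; here it is 1, so the two
# frameworks produce different output sizes (55 vs 56) and the warning fires.
print(floor_out, ceil_out)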
Example #11
def _hardtanh(raw, input, min_val, max_val, inplace):
    # Applies the element-wise function:
    # torch.nn.ReLU6
    x = raw(input, min_val, max_val)
    layer_name = log.add_layer(name='relu6')
    log.add_blobs([x], name='relu6_blob')
    layer = Layer_param(
        name=layer_name,
        type='ReLU6',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    log.cnet.add_layer(layer)
    return x
Example #12
def _mul(input, *args):
    x = raw__mul__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mul')
    top_blobs = log.add_blobs([x], name='mul_blob')
    layer = Layer_param(
        name=layer_name,
        type='Eltwise',
        bottom=[log.blobs(input), log.blobs(args[0])],
        top=top_blobs,
    )
    layer.param.eltwise_param.operation = 0  # PROD is 0 in Caffe's EltwiseParameter
    log.cnet.add_layer(layer)
    return x
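
The numeric operation codes used here and in the _isub, _add and _max examples come from Caffe's EltwiseParameter enum (PROD = 0, SUM = 1, MAX = 2). A small sanity check against the generated protobuf module, assuming it is importable as caffe_pb2 the way the other examples use it:

from caffe.proto import caffe_pb2  # assumed import path for the generated protobuf

# EltwiseParameter.EltwiseOp values in caffe.proto
assert caffe_pb2.EltwiseParameter.PROD == 0   # used by _mul
assert caffe_pb2.EltwiseParameter.SUM == 1    # used by _isub and _add
assert caffe_pb2.EltwiseParameter.MAX == 2    # used by _max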
Example #13
def _linear(raw, input, weight, bias=None):
    x = raw(input, weight, bias)
    layer_name = log.add_layer(name='fc')
    top_blobs = log.add_blobs([x], name='fc_blob')
    layer = Layer_param(
        name=layer_name,
        type='InnerProduct',
        bottom=[log.blobs(input)],
        top=top_blobs,
    )
    layer.fc_param(x.size()[1], has_bias=bias is not None)
    if bias is not None:
        layer.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
Example #14
def _cat(raw, inputs, dim=0):
    x = raw(inputs, dim)
    bottom_blobs = []
    for input in inputs:
        bottom_blobs.append(log.blobs(input))
    layer_name = log.add_layer(name='cat')
    top_blobs = log.add_blobs([x], name='cat_blob')
    layer = Layer_param(
        name=layer_name,
        type='Concat',
        bottom=bottom_blobs,
        top=top_blobs,
    )
    layer.param.concat_param.axis = dim
    log.cnet.add_layer(layer)
    return x
Example #15
def _dropout(raw, input, p=0.5, training=False, inplace=False):
    x = raw(input, p, training, inplace)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='dropout')
    top_blobs = log.add_blobs([x], name=bottom_blobs[0], with_num=False)
    layer = Layer_param(
        name=layer_name,
        type='Dropout',
        bottom=bottom_blobs,
        top=top_blobs,
    )
    layer.param.dropout_param.dropout_ratio = p
    # phase=0 restricts this layer to the TRAIN phase (0 = TRAIN, 1 = TEST)
    layer.param.include.extend([caffe_pb2.NetStateRule(phase=0)])
    log.cnet.add_layer(layer)
    return x
Example #16
def _isub(input, *args):
    x = raw__isub__(input, *args)
    if not NET_INITTED:
        return x
    x = x.clone()
    layer_name = log.add_layer(name='sub')
    top_blobs = log.add_blobs([x], name='sub_blob')
    layer = Layer_param(
        name=layer_name,
        type='Eltwise',
        bottom=[log.blobs(input), log.blobs(args[0])],
        top=top_blobs,
    )
    layer.param.eltwise_param.operation = 1  # sum is 1
    log.cnet.add_layer(layer)
    return x
Example #17
def _softmax(raw, input, dim=None, _stacklevel=3):
    # for F.softmax
    x = raw(input, dim=dim)
    if dim is None:
        dim = F._get_softmax_dim('softmax', input.dim(), _stacklevel)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='softmax')
    log.add_blobs([x], name='softmax_blob')
    layer = Layer_param(
        name=layer_name,
        type='Softmax',
        bottom=bottom_blobs,
        top=[log.blobs(x)],
    )
    layer.param.softmax_param.axis = dim
    log.cnet.add_layer(layer)
    return x
Example #18
def _add(input, *args):
    x = raw__add__(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='add')
    top_blobs = log.add_blobs([x], name='add_blob')
    if log.blobs(args[0]) is None:
        log.add_blobs([args[0]], name='extra_blob')
    else:
        layer = Layer_param(
            name=layer_name,
            type='Eltwise',
            bottom=[log.blobs(input), log.blobs(args[0])],
            top=top_blobs,
        )
        layer.param.eltwise_param.operation = 1  # sum is 1
        log.cnet.add_layer(layer)
    return x
Example #19
def _reshape(input, *args):
    x = raw_reshape(input, *args)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='reshape')
    top_blobs = log.add_blobs([x], name='reshape_blob')
    layer = Layer_param(
        name=layer_name,
        type='Reshape',
        bottom=[log.blobs(input)],
        top=top_blobs,
    )
    # TODO: reshape added to nn_tools layer
    dims = list(args)
    dims[0] = 0  # 0 keeps the corresponding bottom dim (the batch size) in Caffe's Reshape
    layer.param.reshape_param.shape.CopyFrom(caffe_pb2.BlobShape(dim=dims))
    log.cnet.add_layer(layer)
    return x
Example #20
def _split(raw, input, split_size, dim=0):
    # split in pytorch is slice in caffe
    x = raw(input, split_size, dim)
    layer_name = log.add_layer('split')
    top_blobs = log.add_blobs(x, name='split_blob')
    layer = Layer_param(
        name=layer_name,
        type='Slice',
        bottom=[log.blobs(input)],
        top=top_blobs,
    )
    slice_num = int(np.floor(input.size()[dim] / split_size))
    slice_param = caffe_pb2.SliceParameter(
        axis=dim,
        slice_point=[split_size * i for i in range(1, slice_num)],
    )
    layer.param.slice_param.CopyFrom(slice_param)
    log.cnet.add_layer(layer)
    return x
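
For an evenly divisible split, the slice_point arithmetic above matches torch.split. A small sketch with assumed shapes makes the mapping concrete:

import numpy as np

# Assumed example: a 12-channel tensor split into chunks of 4 along dim=1.
channels, split_size = 12, 4
slice_num = int(np.floor(channels / split_size))               # 3 chunks
slice_points = [split_size * i for i in range(1, slice_num)]   # [4, 8]

# A Caffe Slice layer with axis=1 and slice_point=[4, 8] produces three
# 4-channel tops, matching torch.split(tensor, 4, dim=1) in this case.
print(slice_num, slice_points)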
Example #21
def _max(raw, *args):
    x = raw(*args)
    if len(args) == 1:
        # TODO: max over a single tensor
        raise NotImplementedError("max over a single tensor is not implemented")
    else:
        bottom_blobs = []
        for arg in args:
            bottom_blobs.append(log.blobs(arg))
        layer_name = log.add_layer(name='max')
        top_blobs = log.add_blobs([x], name='max_blob')
        layer = Layer_param(
            name=layer_name,
            type='Eltwise',
            bottom=bottom_blobs,
            top=top_blobs,
        )
        layer.param.eltwise_param.operation = 2
        log.cnet.add_layer(layer)
    return x
Example #22
def _mean(input, *args, **kwargs):
    x = raw_mean(input, *args, **kwargs)
    if not NET_INITTED:
        return x
    layer_name = log.add_layer(name='mean')
    top_blobs = log.add_blobs([x], name='mean_blob')
    layer = Layer_param(
        name=layer_name,
        type='Reduction',
        bottom=[log.blobs(input)],
        top=top_blobs,
    )
    if len(args) == 1:
        dim = args[0]
    elif 'dim' in kwargs:
        dim = kwargs['dim']
    else:
        raise NotImplementedError('mean operation must specify a dim')
    layer.param.reduction_param.operation = 4  # 4 = MEAN in Caffe's ReductionParameter
    layer.param.reduction_param.axis = dim
    log.cnet.add_layer(layer)
    return x
Example #23
def _l2Norm(raw, input, weight, eps):
    # L2 normalization layer as used in vgg_ssd
    x = raw(input, weight, eps)
    layer_name = log.add_layer(name='normalize')
    log.add_blobs([x], name='normalize_blob')
    layer = Layer_param(
        name=layer_name,
        type='Normalize',
        bottom=[log.blobs(input)],
        top=[log.blobs(x)],
    )
    layer.norm_param(eps)

    layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
Example #24
def _prelu(raw, input, weight):
    # for prelu
    x = raw(input, weight)
    bottom_blobs = [log.blobs(input)]
    layer_name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = Layer_param(
        name=layer_name,
        type='PReLU',
        bottom=bottom_blobs,
        top=[log.blobs(x)],
    )
    if weight.size()[0] == 1:
        layer.param.prelu_param.channel_shared = True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
Example #25
def _batch_norm(
    raw,
    input,
    running_mean,
    running_var,
    weight=None,
    bias=None,
    training=False,
    momentum=0.1,
    eps=1e-5,
):
    # because running_mean and running_var will be changed by the _batch_norm
    # operation, we first save the parameters

    x = raw(input, running_mean, running_var, weight, bias, training, momentum,
            eps)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='batch_norm')
    top_blobs = log.add_blobs([x], name='batch_norm_blob')
    layer1 = Layer_param(
        name=layer_name1,
        type='BatchNorm',
        bottom=bottom_blobs,
        top=top_blobs,
    )
    if running_mean is None or running_var is None:
        # do not use global stats; normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
        running_mean_clone = running_mean.clone()
        running_var_clone = running_var.clone()
        layer1.add_data(
            running_mean_clone.cpu().numpy(),
            running_var_clone.cpu().numpy(),
            # Caffe's BatchNorm stores a moving-average scale factor as its
            # third blob; 1.0 means the stored mean/var are used as-is.
            np.array([1.0]),
        )
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = Layer_param(
            name=layer_name2,
            type='Scale',
            bottom=top_blobs,
            top=top_blobs,
        )
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x
Example #26
def _instance_norm(
    raw,
    input,
    running_mean=None,
    running_var=None,
    weight=None,
    bias=None,
    use_input_stats=True,
    momentum=0.1,
    eps=1e-5,
):
    # TODO: the batch size!=1 view operations
    print("WARNING: instance normalization is converted to a Caffe BatchNorm "
          "layer, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: the affine=True or track_running_stats=True case
        raise NotImplementedError(
            "InstanceNorm with affine=True or track_running_stats=True "
            "is not implemented")
    x = torch.batch_norm(
        input,
        weight,
        bias,
        running_mean,
        running_var,
        use_input_stats,
        momentum,
        eps,
        torch.backends.cudnn.enabled,
    )
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = Layer_param(
        name=layer_name1,
        type='BatchNorm',
        bottom=bottom_blobs,
        top=top_blobs,
    )
    if running_mean is None or running_var is None:
        # do not use global stats; normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
        running_mean = torch.zeros(input.size()[1])
        running_var = torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(
        running_mean_clone.cpu().numpy(),
        running_var_clone.cpu().numpy(),
        np.array([1.0]),
    )
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = Layer_param(
            name=layer_name2,
            type='Scale',
            bottom=top_blobs,
            top=top_blobs,
        )
        layer2.param.scale_param.bias_term = True
        layer2.add_data(
            weight.cpu().data.numpy(),
            bias.cpu().data.numpy(),
        )
        log.cnet.add_layer(layer2)
    return x