コード例 #1
0
ファイル: conv.py プロジェクト: tony32769/BlueWhale
def conv_explicit_param_names(
    model, blob_in, blob_out, weight_init=None, bias_init=None, **kwargs
):
    """
    Add a convolution operator between `blob_in` and `blob_out` to the
    model's net, using explicitly named weight and bias parameters.
    Assumes NCHW order.

    :param model: ModelHelper whose net receives the Conv operator
    :param blob_in: input blob of the convolution layer
    :param blob_out: output blob of the convolution layer
    :param weight_init: optional tuple (caffe2 operator name, kwargs dict)
        describing how the weight param is created; defaults to XavierFill
    :param bias_init: optional tuple (caffe2 operator name, kwargs dict)
        describing how the bias param is created; defaults to ConstantFill

    Required kwargs:
        dim_in: number of input channels
        dim_out: number of output channels
        kernel_h: image height kernel size
        kernel_w: image width kernel size
        weight_name: blob name of the weight parameter
        bias_name: blob name of the bias parameter
    """
    for required in (
        'dim_in', 'dim_out', 'weight_name', 'bias_name', 'kernel_h', 'kernel_w'
    ):
        assert required in kwargs, "Please supply kwarg {}".format(required)

    dim_in = kwargs['dim_in']
    dim_out = kwargs['dim_out']
    kernel_h = kwargs['kernel_h']
    kernel_w = kwargs['kernel_w']

    weight_initializer = initializers.update_initializer(
        None, weight_init, ("XavierFill", {})
    )
    bias_initializer = initializers.update_initializer(
        None, bias_init, ("ConstantFill", {})
    )

    # NCHW weight layout: (out_channels, in_channels, kernel_h, kernel_w).
    weight = model.create_param(
        param_name=kwargs['weight_name'],
        shape=[dim_out, dim_in, kernel_h, kernel_w],
        initializer=weight_initializer,
        tags=ParameterTags.WEIGHT,
    )
    bias = model.create_param(
        param_name=kwargs['bias_name'],
        shape=[dim_out],
        initializer=bias_initializer,
        tags=ParameterTags.BIAS,
    )

    return model.net.Conv(
        [blob_in, weight, bias],
        blob_out,
        kernel_h=kernel_h,
        kernel_w=kernel_w,
    )
コード例 #2
0
ファイル: fc.py プロジェクト: awthomp/pytorch-dev
def fc_decomp(
    model, blob_in, blob_out, dim_in, dim_out,
    rank_approx=5, weight_init=None, bias_init=None,
    WeightInitializer=None, BiasInitializer=None, **kwargs
):
    """Low-rank (decomposed) fully connected layer.

    Factors the FC weight into `u` (dim_out x rank_approx) and
    `v` (dim_in x rank_approx) plus a bias, and emits an FC_Decomp op.
    Here we assume that the rank of the original input is bigger than 5.
    """
    WeightInitializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {}))
    BiasInitializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {}))

    if not blob_out:
        blob_out = model.net.NextName()

    def _make_param(suffix, shape, initializer):
        # All three params use the output blob's name as a prefix.
        return model.create_param(
            param_name=blob_out + suffix,
            shape=shape,
            initializer=initializer,
        )

    u = _make_param('_u', [dim_out, rank_approx], WeightInitializer)
    v = _make_param('_v', [dim_in, rank_approx], WeightInitializer)
    bias = _make_param('_b', [dim_out], BiasInitializer)
    return model.net.FC_Decomp([blob_in, u, v, bias], blob_out, **kwargs)
コード例 #3
0
ファイル: fc.py プロジェクト: darthsuogles/caphi
def fc_decomp(
    model, blob_in, blob_out, dim_in, dim_out,
    rank_approx=5, weight_init=None, bias_init=None,
    WeightInitializer=None, BiasInitializer=None, **kwargs
):
    """FC layer whose weight matrix is decomposed into two low-rank factors.

    Creates factor params `u` (dim_out x rank_approx), `v`
    (dim_in x rank_approx) and a bias, then emits an FC_Decomp op.
    Here we assume that the rank of the original input is bigger than 5.
    """
    weight_initializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {}))
    bias_initializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {}))

    blob_out = blob_out or model.net.NextName()

    # Create the three params in a fixed order: u, v, then bias.
    params = [
        model.create_param(
            param_name=blob_out + suffix,
            shape=shape,
            initializer=initializer,
        )
        for suffix, shape, initializer in (
            ('_u', [dim_out, rank_approx], weight_initializer),
            ('_v', [dim_in, rank_approx], weight_initializer),
            ('_b', [dim_out], bias_initializer),
        )
    ]
    return model.net.FC_Decomp([blob_in] + params, blob_out, **kwargs)
コード例 #4
0
ファイル: fc.py プロジェクト: darthsuogles/caphi
def _FC_or_packed_FC(
    model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
        bias_init=None, WeightInitializer=None, BiasInitializer=None,
        **kwargs
):
    """Shared body for FC-style helpers: create weight/bias params, then
    emit the operator given by `op_call`."""
    WeightInitializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {}))
    BiasInitializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {}))
    # When the model does not own param initialization, the fills above are
    # discarded and the params are expected to come from an external source.
    if not model.init_params:
        WeightInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()

    if not blob_out:
        blob_out = model.net.NextName()

    # 'freeze_bias' additionally tags the bias as a computed param
    # (presumably to keep it out of gradient-based updates — see
    # ParameterTags for the exact semantics).
    bias_tags = [ParameterTags.BIAS]
    if 'freeze_bias' in kwargs:
        bias_tags.append(ParameterTags.COMPUTED_PARAM)

    weight = model.create_param(
        param_name=blob_out + '_w',
        shape=[dim_out, dim_in],
        initializer=WeightInitializer,
        tags=ParameterTags.WEIGHT,
    )
    bias = model.create_param(
        param_name=blob_out + '_b',
        shape=[dim_out],
        initializer=BiasInitializer,
        tags=bias_tags,
    )
    return op_call([blob_in, weight, bias], blob_out, **kwargs)
コード例 #5
0
ファイル: normalization.py プロジェクト: blackxer/AICamera
def spatial_gn(model, blob_in, blob_out, dim_in,
               init_scale=1., init_bias=0.,
               ScaleInitializer=None, BiasInitializer=None,
               RunningMeanInitializer=None, RunningVarianceInitializer=None,
               order="NCHW", **kwargs):
    '''
    Group normalizes the input, cf. https://arxiv.org/abs/1803.08494.

    Creates per-channel scale and bias params (shape [dim_in]) and emits a
    GroupNorm op whose outputs are [output, group mean, group inv-std];
    only the normalized output blob is returned.
    [recommendation: set init_scale = 0. in the last layer for each res block]
    RunningMeanInitializer / RunningVarianceInitializer are accepted for
    signature compatibility but are not used here.
    '''
    if not blob_out:
        blob_out = model.net.NextName()

    if model.init_params:
        # scale is filled with init_scale (default 1.),
        # bias with init_bias (default 0.).
        ScaleInitializer = initializers.update_initializer(
            ScaleInitializer,
            ("ConstantFill", {'value': init_scale}),
            ("ConstantFill", {}),
        )
        BiasInitializer = initializers.update_initializer(
            BiasInitializer,
            ("ConstantFill", {'value': init_bias}),
            ("ConstantFill", {}),
        )
    else:
        # Params are expected to be loaded externally.
        ScaleInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()

    scale = model.create_param(
        param_name=blob_out + '_s',
        shape=[dim_in],
        initializer=ScaleInitializer,
        tags=ParameterTags.WEIGHT,
    )
    bias = model.create_param(
        param_name=blob_out + '_b',
        shape=[dim_in],
        initializer=BiasInitializer,
        tags=ParameterTags.BIAS,
    )

    outputs = model.net.GroupNorm(
        [blob_in, scale, bias],
        [blob_out, blob_out + "_mean", blob_out + "_std"],
        **kwargs)
    # Only the normalized output, not the per-group statistics.
    return outputs[0]
コード例 #6
0
ファイル: normalization.py プロジェクト: RichieMay/pytorch
def spatial_gn(model, blob_in, blob_out, dim_in,
               init_scale=1., init_bias=0.,
               ScaleInitializer=None, BiasInitializer=None,
               RunningMeanInitializer=None, RunningVarianceInitializer=None,
               order="NCHW", **kwargs):
    '''
    Group normalizes the input, cf. https://arxiv.org/abs/1803.08494.

    Builds per-channel scale/bias params and a GroupNorm op with outputs
    [normalized output, group mean, group inv-std]; returns the normalized
    output blob only.
    [recommendation: set init_scale = 0. in the last layer for each res block]
    '''
    blob_out = blob_out or model.net.NextName()

    if model.init_params:
        # scale: constant init_scale (default 1.); bias: constant init_bias
        # (default 0.).
        scale_init = ("ConstantFill", {'value': init_scale})
        bias_init = ("ConstantFill", {'value': init_bias})
        ScaleInitializer = initializers.update_initializer(
            ScaleInitializer, scale_init, ("ConstantFill", {}))
        BiasInitializer = initializers.update_initializer(
            BiasInitializer, bias_init, ("ConstantFill", {}))
    else:
        # Parameters are loaded externally rather than filled here.
        ScaleInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()

    # Create scale then bias, both of shape [dim_in].
    scale, bias = (
        model.create_param(
            param_name=blob_out + suffix,
            shape=[dim_in],
            initializer=initializer,
            tags=tag,
        )
        for suffix, initializer, tag in (
            ('_s', ScaleInitializer, ParameterTags.WEIGHT),
            ('_b', BiasInitializer, ParameterTags.BIAS),
        )
    )

    group_norm_outputs = model.net.GroupNorm(
        [blob_in, scale, bias],
        [blob_out, blob_out + "_mean", blob_out + "_std"],
        **kwargs)
    # Return only the normalized output.
    return group_norm_outputs[0]
コード例 #7
0
def fc_explicit_param_names(model,
                            blob_in,
                            blob_out,
                            weight_init=None,
                            bias_init=None,
                            **kwargs):
    """
    Add a fully connected layer between `blob_in` and `blob_out` whose
    weight and bias parameters carry explicitly supplied names.

    :param model: ModelHelper whose net receives the FC operator
    :param blob_in: input blob of the fully connected layer
    :param blob_out: output blob of the fully connected layer
    :param weight_init: optional tuple (caffe2 operator name, kwargs dict)
        used to create the weight param; defaults to ("XavierFill", {})
    :param bias_init: optional tuple (caffe2 operator name, kwargs dict)
        used to create the bias param; defaults to ("ConstantFill", {})

    Required kwargs:
        dim_in: number of nodes in the input layer
        dim_out: number of nodes in the output layer
        weight_name: blob name of the weight parameter
        bias_name: blob name of the bias parameter
    """
    for required in ("dim_in", "dim_out", "weight_name", "bias_name"):
        assert required in kwargs, "Please supply kwarg {}".format(required)
    dim_in = kwargs["dim_in"]
    dim_out = kwargs["dim_out"]

    weight_initializer = initializers.update_initializer(
        None, weight_init, ("XavierFill", {}))
    bias_initializer = initializers.update_initializer(
        None, bias_init, ("ConstantFill", {}))

    weight = model.create_param(
        param_name=kwargs["weight_name"],
        shape=[dim_out, dim_in],
        initializer=weight_initializer,
        tags=ParameterTags.WEIGHT,
    )
    bias = model.create_param(
        param_name=kwargs["bias_name"],
        shape=[dim_out],
        initializer=bias_initializer,
        tags=[ParameterTags.BIAS],
    )

    return model.net.FC([blob_in, weight, bias], blob_out)
コード例 #8
0
ファイル: normalization.py プロジェクト: awthomp/pytorch-dev
def moments_with_running_stats(model,
                               blob_in,
                               blob_out,
                               dim_in,
                               RunningMeanInitializer=None,
                               RunningVarianceInitializer=None,
                               order="NCHW",
                               **kwargs):
    """Compute spatial mean/variance of `blob_in` and register running-stat
    parameters on the model.

    :param model: ModelHelper receiving the Moments op and the params
    :param blob_in: input blob (assumed 4D with layout given by `order` —
        the reduction axes below rely on that)
    :param blob_out: name prefix for the op outputs and the param blobs
    :param dim_in: channel count; shape of the running-stat params
    :param RunningMeanInitializer: optional initializer for the running mean
        param (defaults to constant 0.0 when model.init_params is set)
    :param RunningVarianceInitializer: optional initializer for the running
        variance param (defaults to constant 1.0 when model.init_params is set)
    :param order: "NCHW" or "NHWC"; selects the reduction axes
    :returns: the Moments op outputs [blob_out + "_sm", blob_out + "_sv"]
    :raises ValueError: if `order` is neither "NCHW" nor "NHWC".
        (Previously an unknown order fell through both branches and raised a
        confusing NameError on the unbound result variable.)
    """
    # Reduce over batch and spatial axes, keeping only the channel axis.
    if order == 'NCHW':
        moment_axes = [0, 2, 3]
    elif order == 'NHWC':
        moment_axes = [0, 1, 2]
    else:
        raise ValueError("Unsupported order: {}".format(order))

    if model.init_params:
        rm_init = ("ConstantFill", {'value': 0.0})
        riv_init = ("ConstantFill", {'value': 1.0})

        RunningMeanInitializer = initializers.update_initializer(
            RunningMeanInitializer, rm_init, ("ConstantFill", {}))
        RunningVarianceInitializer = initializers.update_initializer(
            RunningVarianceInitializer, riv_init, ("ConstantFill", {}))
    else:
        # Params are expected to be loaded from an external source.
        RunningMeanInitializer = initializers.ExternalInitializer()
        RunningVarianceInitializer = initializers.ExternalInitializer()

    # The running statistics are registered as computed params; they are not
    # inputs of the Moments op itself, only side-registered on the model.
    model.create_param(param_name=blob_out + '_rm',
                       shape=[dim_in],
                       initializer=RunningMeanInitializer,
                       tags=ParameterTags.COMPUTED_PARAM)

    # this is just running variance
    model.create_param(param_name=blob_out + '_riv',
                       shape=[dim_in],
                       initializer=RunningVarianceInitializer,
                       tags=ParameterTags.COMPUTED_PARAM)

    return model.net.Moments([blob_in],
                             [blob_out + "_sm", blob_out + "_sv"],
                             axes=moment_axes,
                             order=order,
                             keepdims=False,
                             **kwargs)
コード例 #9
0
ファイル: fc.py プロジェクト: sra4077/Horizon
def fc_explicit_param_names(
    model, blob_in, blob_out, weight_init=None, bias_init=None, **kwargs
):
    """
    Add a fully connected layer between `blob_in` and `blob_out`, creating
    the weight and bias params under explicitly supplied names.

    :param model: ModelHelper whose net the FC operator is added to
    :param blob_in: input blob of the fully connected layer
    :param blob_out: output blob of the fully connected layer
    :param weight_init: optional tuple (caffe2 operator name, kwargs dict)
        for the weight param; defaults to ("XavierFill", {})
    :param bias_init: optional tuple (caffe2 operator name, kwargs dict)
        for the bias param; defaults to ("ConstantFill", {})

    Required kwargs:
        dim_in: number of nodes in the input layer
        dim_out: number of nodes in the output layer
        weight_name: blob name of the weight parameter
        bias_name: blob name of the bias parameter
    """
    for needed in ("dim_in", "dim_out", "weight_name", "bias_name"):
        assert needed in kwargs, "Please supply kwarg {}".format(needed)

    dim_in = kwargs["dim_in"]
    dim_out = kwargs["dim_out"]

    fc_weight_init = initializers.update_initializer(
        None, weight_init, ("XavierFill", {})
    )
    fc_bias_init = initializers.update_initializer(
        None, bias_init, ("ConstantFill", {})
    )

    weight = model.create_param(
        param_name=kwargs["weight_name"],
        shape=[dim_out, dim_in],
        initializer=fc_weight_init,
        tags=ParameterTags.WEIGHT,
    )
    bias = model.create_param(
        param_name=kwargs["bias_name"],
        shape=[dim_out],
        initializer=fc_bias_init,
        tags=[ParameterTags.BIAS],
    )

    return model.net.FC([blob_in, weight, bias], blob_out)
コード例 #10
0
def conv_dw(model, inputs, inp, oup, stride, name):
    """Depthwise-separable convolution block.

    3x3 depthwise conv (group == inp, one filter per input channel)
    followed by BN + ReLU, then a 1x1 pointwise conv (inp -> oup, no bias)
    followed by BN + ReLU. Both BN ops run in test mode.
    """
    # Depthwise 3x3 filter: shape (inp, 1, 3, 3).
    dw_weight = model.create_param(
        param_name=name + '_dw_w',
        shape=[inp, 1, 3, 3],
        initializer=initializers.update_initializer(
            None, None, ("XavierFill", {})),
        tags=ParameterTags.WEIGHT,
    )
    out = inputs.Conv(
        [dw_weight], [name + '_dw'],
        kernel_h=3,
        kernel_w=3,
        stride_h=stride,
        stride_w=stride,
        pad_b=1,
        pad_l=1,
        pad_r=1,
        pad_t=1,
        order='NCHW',
        group=inp,
    )
    out = brew.spatial_bn(model, out, name + '_dw_bn', inp, is_test=True)
    out = brew.relu(model, out, out)

    # Pointwise 1x1 conv mixes channels: inp -> oup.
    out = brew.conv(
        model, out, name + '_sep',
        dim_in=inp,
        dim_out=oup,
        kernel=1,
        pad=0,
        stride=1,
        no_bias=True,
    )
    out = brew.spatial_bn(model, out, name + '_sep_bn', oup, is_test=True)
    out = brew.relu(model, out, out)
    return out
コード例 #11
0
def _FC_or_packed_FC(model,
                     op_call,
                     blob_in,
                     blob_out,
                     dim_in,
                     dim_out,
                     weight_init=None,
                     bias_init=None,
                     WeightInitializer=None,
                     BiasInitializer=None,
                     enable_tensor_core=False,
                     float16_compute=False,
                     **kwargs):
    """Shared body for FC and packed FC: create the weight/bias params and
    emit the operator given by `op_call`, optionally selecting the
    TensorCore engine and the float16 compute kernel."""
    weight_initializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {}))
    bias_initializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {}))
    # Externally-initialized models ignore the fill-based initializers.
    if not model.init_params:
        weight_initializer = initializers.ExternalInitializer()
        bias_initializer = initializers.ExternalInitializer()

    if not blob_out:
        blob_out = model.net.NextName()

    # 'freeze_bias' additionally tags the bias as a computed param.
    bias_tags = [ParameterTags.BIAS]
    if 'freeze_bias' in kwargs:
        bias_tags.append(ParameterTags.COMPUTED_PARAM)

    weight = model.create_param(
        param_name=blob_out + '_w',
        shape=[dim_out, dim_in],
        initializer=weight_initializer,
        tags=ParameterTags.WEIGHT,
    )
    bias = model.create_param(
        param_name=blob_out + '_b',
        shape=[dim_out],
        initializer=bias_initializer,
        tags=bias_tags,
    )

    # enable TensorCore by setting appropriate engine
    if enable_tensor_core:
        kwargs['engine'] = 'TENSORCORE'

    # Enable float 16 compute kernel (relevant for CUDA)
    if float16_compute:
        kwargs['float16_compute'] = True

    return op_call([blob_in, weight, bias], blob_out, **kwargs)
コード例 #12
0
def _FC_or_packed_FC(model,
                     op_call,
                     blob_in,
                     blob_out,
                     dim_in,
                     dim_out,
                     weight_init=None,
                     bias_init=None,
                     weight_initializer=None,
                     bias_initializer=None,
                     **kwargs):
    """Shared body for FC-style helpers (legacy param registration).

    Creates (or references) weight/bias blobs, registers them on the
    model's params/weights/biases lists, and emits `op_call`.
    """
    weight_initializer = initializers.update_initializer(
        weight_initializer, weight_init, ("XavierFill", {}))
    bias_initializer = initializers.update_initializer(
        bias_initializer, bias_init, ("ConstantFill", {}))

    if not blob_out:
        blob_out = model.net.NextName()

    if model.init_params:
        weight = model.create_param(
            param_name=blob_out + '_w',
            shape=[dim_out, dim_in],
            initializer=weight_initializer,
        )
        bias = model.create_param(
            param_name=blob_out + '_b',
            shape=[dim_out],
            initializer=bias_initializer,
        )
    else:
        # Params are created elsewhere; only reference them by name.
        weight = core.ScopedBlobReference(
            blob_out + '_w', model.param_init_net)
        bias = core.ScopedBlobReference(
            blob_out + '_b', model.param_init_net)

    # With 'freeze_bias' only the weight is registered as a trainable param;
    # the bias is still tracked in model.biases below.
    if 'freeze_bias' in kwargs:
        model.params.extend([weight])
    else:
        model.params.extend([weight, bias])

    model.weights.append(weight)
    model.biases.append(bias)
    return op_call([blob_in, weight, bias], blob_out, **kwargs)
コード例 #13
0
def spatial_bn(model, blob_in, blob_out, dim_in,
               init_scale=1., init_bias=0.,
               ScaleInitializer=None, BiasInitializer=None,
               RunningMeanInitializer=None, RunningVarianceInitializer=None,
               order="NCHW", **kwargs):
    """Add a SpatialBN operator together with its four parameters.

    Creates scale, bias, running-mean and running-inverse-variance params
    (each of shape [dim_in]) and wires them into a SpatialBN op.

    Returns: in test mode (kwargs['is_test'] truthy) the SpatialBN op is
    created with a single output and that op result is returned as-is; in
    training mode the op has five outputs and only the first (the
    normalized blob) is returned.
    """
    blob_out = blob_out or model.net.NextName()
    # Input: input, scale, bias, est_mean, est_inv_var
    # Output: output, running_mean, running_inv_var, saved_mean,
    #         saved_inv_var
    # scale: initialize with init_scale (default 1.)
    # bias: initialize with init_bias (default 0.)
    # est mean: zero
    # est var: ones

    if model.init_params:
        scale_init = ("ConstantFill", {'value': init_scale})
        bias_init = ("ConstantFill", {'value': init_bias})
        rm_init = ("ConstantFill", {'value': 0.0})
        riv_init = ("ConstantFill", {'value': 1.0})

        ScaleInitializer = initializers.update_initializer(
            ScaleInitializer, scale_init, ("ConstantFill", {})
        )
        BiasInitializer = initializers.update_initializer(
            BiasInitializer, bias_init, ("ConstantFill", {})
        )
        RunningMeanInitializer = initializers.update_initializer(
            RunningMeanInitializer, rm_init, ("ConstantFill", {})
        )
        RunningVarianceInitializer = initializers.update_initializer(
            RunningVarianceInitializer, riv_init, ("ConstantFill", {})
        )
    else:
        # Params are expected to come from an external source.
        ScaleInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()
        RunningMeanInitializer = initializers.ExternalInitializer()
        RunningVarianceInitializer = initializers.ExternalInitializer()

    scale = model.create_param(
        param_name=blob_out + '_s',
        shape=[dim_in],
        initializer=ScaleInitializer,
        tags=ParameterTags.WEIGHT
    )

    bias = model.create_param(
        param_name=blob_out + '_b',
        shape=[dim_in],
        initializer=BiasInitializer,
        tags=ParameterTags.BIAS
    )

    # Running stats are computed params: updated by the op, not trained.
    running_mean = model.create_param(
        param_name=blob_out + '_rm',
        shape=[dim_in],
        initializer=RunningMeanInitializer,
        tags=ParameterTags.COMPUTED_PARAM
    )

    running_inv_var = model.create_param(
        param_name=blob_out + '_riv',
        shape=[dim_in],
        initializer=RunningVarianceInitializer,
        tags=ParameterTags.COMPUTED_PARAM
    )

    # blob_outs[1]/[2] are the running-stat param blobs; in training mode
    # they are also op outputs (updated in place).
    blob_outs = [blob_out, running_mean, running_inv_var,
                 blob_out + "_sm", blob_out + "_siv"]
    if 'is_test' in kwargs and kwargs['is_test']:
        # Test mode: the op only consumes the running stats and produces the
        # single normalized output. NOTE(review): this branch returns the op
        # result directly (not indexed [0]) — the two branches differ.
        blob_outputs = model.net.SpatialBN(
            [blob_in, scale, bias, blob_outs[1], blob_outs[2]], [blob_out],
            order=order, **kwargs)
        return blob_outputs
    else:
        blob_outputs = model.net.SpatialBN(
            [blob_in, scale, bias, blob_outs[1], blob_outs[2]], blob_outs,
            order=order, **kwargs)
        # Return the output
        return blob_outputs[0]
コード例 #14
0
ファイル: conv.py プロジェクト: bhuWenDongchao/caffe2
def _ConvBase(
    model,
    is_nd,
    blob_in,
    blob_out,
    dim_in,
    dim_out,
    kernel,
    weight_init=None,
    bias_init=None,
    WeightInitializer=None,
    BiasInitializer=None,
    group=1,
    transform_inputs=None,
    use_cudnn=False,
    order="NCHW",
    cudnn_exhaustive_search=False,
    ws_nbytes_limit=None,
    **kwargs
):
    """Common implementation for conv helpers (2D and N-D).

    Normalizes `kernel` into a list, validates the engine choice against
    `use_cudnn`, creates weight (and optionally bias) params with shapes
    derived from `order`/`group`, and emits a Conv op. Returns the Conv
    op's output.

    :raises ValueError: when an explicit `engine` kwarg conflicts with
        `use_cudnn`.
    """
    # Normalize kernel into a list: N-D keeps whatever list is given,
    # 2D expands a scalar into [k, k] and requires len 2 otherwise.
    kernels = []
    if is_nd:
        if not isinstance(kernel, list):
            kernels = [kernel]
        else:
            kernels = kernel
    else:
        if isinstance(kernel, list):
            assert len(kernel) == 2, "Conv support only a 2D kernel."
            kernels = kernel
        else:
            kernels = [kernel] * 2

    # An explicitly requested engine must agree with use_cudnn.
    requested_engine = kwargs.get('engine')
    if requested_engine is not None:
        if use_cudnn and requested_engine != 'CUDNN':
            raise ValueError(
                'When use_cudnn=True, the only engine you can specify is '
                '"CUDNN"')
        elif not use_cudnn and requested_engine == 'CUDNN':
            raise ValueError(
                'When use_cudnn=False, the only engine you can specify is '
                '""')

    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
        kwargs['exhaustive_search'] = cudnn_exhaustive_search
        if ws_nbytes_limit:
            kwargs['ws_nbytes_limit'] = ws_nbytes_limit

    use_bias =\
            False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
    blob_out = blob_out or model.net.NextName()
    # Weight shape: NCHW -> (out, in/group, *kernels);
    # NHWC -> (out, *kernels, in/group).
    weight_shape = [dim_out]
    if order == "NCHW":
        weight_shape.append(int(dim_in / group))
        weight_shape.extend(kernels)
    else:
        weight_shape.extend(kernels)
        weight_shape.append(int(dim_in / group))

    WeightInitializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {})
    )
    BiasInitializer = initializers.update_initializer(
        BiasInitializer, bias_init, ("ConstantFill", {})
    )
    if not model.init_params:
        # Params are loaded externally; discard the fill-based initializers.
        WeightInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()

    weight = model.create_param(
        param_name=blob_out + '_w',
        shape=weight_shape,
        initializer=WeightInitializer,
        tags=ParameterTags.WEIGHT
    )
    if use_bias:
        bias = model.create_param(
            param_name=blob_out + '_b',
            shape=[dim_out, ],
            initializer=BiasInitializer,
            tags=ParameterTags.BIAS
        )

    if use_bias:
        inputs = [blob_in, weight, bias]
    else:
        inputs = [blob_in, weight]

    # Optional hook to rewrite the op inputs (e.g. for quantization).
    if transform_inputs is not None:
        transform_inputs(model, blob_out, inputs)

    # For the operator, we no longer need to provide the no_bias field
    # because it can automatically figure this out from the number of
    # inputs.
    if 'no_bias' in kwargs:
        del kwargs['no_bias']
    if group != 1:
        kwargs['group'] = group
    if is_nd:
        return model.net.Conv(
            inputs,
            blob_out,
            kernels=kernels,
            order=order,
            **kwargs)
    else:
        # 2D convs use kernel_h/kernel_w (list) or a single kernel (scalar).
        if isinstance(kernel, list):
            return model.net.Conv(
                inputs,
                blob_out,
                kernel_h=kernel[0],
                kernel_w=kernel[1],
                order=order,
                **kwargs)
        else:
            return model.net.Conv(
                inputs,
                blob_out,
                kernel=kernel,
                order=order,
                **kwargs)
コード例 #15
0
ファイル: conv.py プロジェクト: wolfviking0/caffe2_SSD
def _ConvBase(model,
              is_nd,
              blob_in,
              blob_out,
              dim_in,
              dim_out,
              kernel,
              weight_init=None,
              bias_init=None,
              WeightInitializer=None,
              BiasInitializer=None,
              group=1,
              transform_inputs=None,
              use_cudnn=False,
              order="NCHW",
              cudnn_exhaustive_search=False,
              ws_nbytes_limit=None,
              **kwargs):
    """Common implementation for conv helpers (2D and N-D).

    Normalizes `kernel` into a list, validates the engine choice against
    `use_cudnn`, creates weight (and optionally bias) params with shapes
    derived from `order`/`group`, and emits a Conv op. Returns the Conv
    op's output.

    :raises ValueError: when an explicit `engine` kwarg conflicts with
        `use_cudnn`.
    """
    # Normalize kernel into a list: N-D keeps the given list, 2D expands a
    # scalar into [k, k] and requires a length-2 list otherwise.
    kernels = []
    if is_nd:
        if not isinstance(kernel, list):
            kernels = [kernel]
        else:
            kernels = kernel
    else:
        if isinstance(kernel, list):
            assert len(kernel) == 2, "Conv support only a 2D kernel."
            kernels = kernel
        else:
            kernels = [kernel] * 2

    # An explicitly requested engine must agree with use_cudnn.
    requested_engine = kwargs.get('engine')
    if requested_engine is not None:
        if use_cudnn and requested_engine != 'CUDNN':
            raise ValueError(
                'When use_cudnn=True, the only engine you can specify is '
                '"CUDNN"')
        elif not use_cudnn and requested_engine == 'CUDNN':
            raise ValueError(
                'When use_cudnn=False, the only engine you can specify is '
                '""')

    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
        kwargs['exhaustive_search'] = cudnn_exhaustive_search
        if ws_nbytes_limit:
            kwargs['ws_nbytes_limit'] = ws_nbytes_limit

    use_bias =\
            False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
    blob_out = blob_out or model.net.NextName()
    # Weight shape: NCHW -> (out, in/group, *kernels);
    # NHWC -> (out, *kernels, in/group).
    weight_shape = [dim_out]
    if order == "NCHW":
        weight_shape.append(int(dim_in / group))
        weight_shape.extend(kernels)
    else:
        weight_shape.extend(kernels)
        weight_shape.append(int(dim_in / group))

    WeightInitializer = initializers.update_initializer(
        WeightInitializer, weight_init, ("XavierFill", {}))
    BiasInitializer = initializers.update_initializer(BiasInitializer,
                                                      bias_init,
                                                      ("ConstantFill", {}))
    if not model.init_params:
        # Params are loaded externally; discard the fill-based initializers.
        WeightInitializer = initializers.ExternalInitializer()
        BiasInitializer = initializers.ExternalInitializer()

    weight = model.create_param(param_name=blob_out + '_w',
                                shape=weight_shape,
                                initializer=WeightInitializer,
                                tags=ParameterTags.WEIGHT)
    if use_bias:
        bias = model.create_param(param_name=blob_out + '_b',
                                  shape=[
                                      dim_out,
                                  ],
                                  initializer=BiasInitializer,
                                  tags=ParameterTags.BIAS)

    if use_bias:
        inputs = [blob_in, weight, bias]
    else:
        inputs = [blob_in, weight]

    # Optional hook to rewrite the op inputs (e.g. for quantization).
    if transform_inputs is not None:
        transform_inputs(model, blob_out, inputs)

    # For the operator, we no longer need to provide the no_bias field
    # because it can automatically figure this out from the number of
    # inputs.
    if 'no_bias' in kwargs:
        del kwargs['no_bias']
    if group != 1:
        kwargs['group'] = group
    if is_nd:
        return model.net.Conv(inputs,
                              blob_out,
                              kernels=kernels,
                              order=order,
                              **kwargs)
    else:
        # 2D convs use kernel_h/kernel_w (list) or a single kernel (scalar).
        if isinstance(kernel, list):
            return model.net.Conv(inputs,
                                  blob_out,
                                  kernel_h=kernel[0],
                                  kernel_w=kernel[1],
                                  order=order,
                                  **kwargs)
        else:
            return model.net.Conv(inputs,
                                  blob_out,
                                  kernel=kernel,
                                  order=order,
                                  **kwargs)
コード例 #16
0
    def DeformableConv(self,
                       blob_in,
                       offset,
                       prefix,
                       dim_in,
                       dim_out,
                       kernel,
                       stride=1,
                       pad=1,
                       weight_init=None,
                       bias_init=None,
                       WeightInitializer=None,
                       BiasInitializer=None,
                       dilation=1,
                       no_bias=1,
                       group=1,
                       deformable_group=4,
                       order="NCHW",
                       cudnn_exhaustive_search=False,
                       **kwargs):
        """Add a DeformConv op with `blob_in` and `offset` as inputs.

        Creates the weight (and, when no_bias is falsy, the bias) param
        under `prefix` and emits a DeformConv op writing to `prefix`.

        NOTE(review): **kwargs and cudnn_exhaustive_search are accepted but
        never forwarded to the DeformConv op — confirm this is intentional.
        """
        # Normalize kernel: a scalar becomes [k, k]; a list must have len 2.
        kernels = []
        if isinstance(kernel, list):
            assert len(kernel) == 2, "Conv support only a 2D kernel."
            kernels = kernel
        else:
            kernels = [kernel] * 2

        blob_out = prefix
        # Weight shape: NCHW -> (out, in/group, *kernels);
        # NHWC -> (out, *kernels, in/group).
        weight_shape = [dim_out]
        if order == "NCHW":
            weight_shape.append(int(dim_in / group))
            weight_shape.extend(kernels)
        else:
            weight_shape.extend(kernels)
            weight_shape.append(int(dim_in / group))
        WeightInitializer = initializers.update_initializer(
            WeightInitializer, weight_init, ("XavierFill", {}))
        BiasInitializer = initializers.update_initializer(
            BiasInitializer, bias_init, ("ConstantFill", {}))
        if not self.init_params:
            # Params are loaded externally; ignore the fill initializers.
            WeightInitializer = initializers.ExternalInitializer()
            BiasInitializer = initializers.ExternalInitializer()

        weight = self.create_param(param_name=blob_out + '_w',
                                   shape=weight_shape,
                                   initializer=WeightInitializer,
                                   tags=ParameterTags.WEIGHT)
        if not no_bias:
            bias = self.create_param(param_name=blob_out + '_b',
                                     shape=[
                                         dim_out,
                                     ],
                                     initializer=BiasInitializer,
                                     tags=ParameterTags.BIAS)

        # DeformConv takes the offset blob as its second input.
        if no_bias:
            inputs = [blob_in, offset, weight]
        else:
            inputs = [blob_in, offset, weight, bias]
        if isinstance(kernel, list):
            return self.net.DeformConv(inputs,
                                       blob_out,
                                       kernel_h=kernel[0],
                                       kernel_w=kernel[1],
                                       pad=pad,
                                       stride=stride,
                                       dilation=dilation,
                                       order=order,
                                       deformable_group=deformable_group,
                                       use_cudnn=self.use_cudnn)
        else:
            return self.net.DeformConv(inputs,
                                       blob_out,
                                       kernel=kernel,
                                       pad=pad,
                                       stride=stride,
                                       dilation=dilation,
                                       order=order,
                                       deformable_group=deformable_group,
                                       use_cudnn=self.use_cudnn)