Example #1
from cntk.ops import convolution, parameter, relu  # CNTK ops used below


def conv_layer(input, num_filters, filter_size, init, strides=(1,1), nonlinearity=relu):
    if nonlinearity is None:
        nonlinearity = lambda x: x  # identity: no activation

    channel_count = input.shape[0]  # channels-first input layout: (C, H, W)

    b_param = parameter(shape=(num_filters, 1, 1))  # per-filter bias, broadcast over H and W
    w_param = parameter(shape=(num_filters, channel_count, filter_size[0], filter_size[1]), init=init)
    r       = convolution(w_param, input, (channel_count, strides[0], strides[1])) + b_param
    r       = nonlinearity(r)

    return r
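
A minimal usage sketch (not part of the original snippet), assuming the CNTK 2.x API; the initializer and input shape below are illustrative choices:

import cntk as C
from cntk.initializer import glorot_uniform

# Hypothetical usage: a 3-channel 32x32 input run through one conv layer.
x = C.input_variable((3, 32, 32))
y = conv_layer(x, num_filters=16, filter_size=(3, 3), init=glorot_uniform())
print(y.shape)  # (16, H', W'); spatial size depends on convolution() padding defaults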
Example #2
def Convolution(filter_shape,        # e.g. (3,3)
                num_filters=None,    # e.g. 64 or None (which means 1 channel and don't add a dimension)
                activation=activation_default_or_None,
                init=init_default_or_glorot_uniform,
                pad=pad_default_or_False,
                strides=1,
                sharing=True,     # (must be True currently)
                bias=bias_default_or_True,
                init_bias=init_bias_default_or_0,
                reduction_rank=1, # (must be 1 currently)
                transpose=False,  # (must be False currently)
                max_temp_mem_size_in_samples=0):
    #UntestedBranchError("Convolution")
    activation = _resolve_activation(activation)
    pad  = pad  if _is_given(pad ) else _current_default_options.pad
    bias = bias if _is_given(bias) else _current_default_options.bias
    # TODO: there must be a Python trick to do this as a function call on locals or so
    if reduction_rank != 1:
        raise NotImplementedError("Convolution: reduction_rank other than 1 currently not supported")
    if transpose:
        raise NotImplementedError("Convolution: transpose option currently not supported")
    if not sharing:
        raise NotImplementedError("Convolution: sharing option currently must be True")
    output_channels_shape = _as_tuple(num_filters)
    output_rank = len(output_channels_shape)
    filter_rank = len(filter_shape)
    kernel_shape = _INFERRED * reduction_rank + filter_shape # kernel := filter plus reductionDims

    # parameters bound to this Function
    #init_kernel = glorot_uniform(filter_rank=-filter_rank, output_rank=1)
    init_kernel = _initializer_for(init, Record(filter_rank=filter_rank, output_rank=-1))
    # BUGBUG: It is very confusing that output_rank is negative, esp. since that means count from the start. Solution: add a flag
    W = Parameter(output_channels_shape + kernel_shape,             init=init_kernel, name='W')                   # (K, C, H, W) aka [ W x H x C x K ]
    b = Parameter(output_channels_shape + (1,) * len(filter_shape), init=init_bias,   name='b') if bias else None # (K,    1, 1) aka [ 1 x 1 x     K ]

    # expression
    x = Placeholder(name='convolution_arg')
    # TODO: update the parameter order of convolution() to match the optional ones as in here? (options order matches Keras)
    apply_x = convolution(W, x,
                          strides=_as_tuple(strides),
                          sharing=_as_tuple(sharing),
                          auto_padding=_as_tuple(pad),
                          # TODO: can we rename auto_padding to pad?
                          transpose=transpose,
                          max_temp_mem_size_in_samples=max_temp_mem_size_in_samples)
    if bias:
        apply_x = apply_x + b
    apply_x = apply_x >> activation
    return Block(apply_x, 'Convolution', Record(W=W, b=b))
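
A hypothetical usage sketch for the factory above: the returned Block is itself callable, and applying it to a real input substitutes the 'convolution_arg' Placeholder. The input shape and layer arguments below are illustrative, not from the source.

from cntk import input_variable

features = input_variable((3, 224, 224))                        # (C, H, W)
conv = Convolution((5, 5), num_filters=32, pad=True, strides=2)
z = conv(features)  # binds the Placeholder; (32, 112, 112) given pad=True and stride 2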
Example #3
import numpy as np
import cntk
from cntk import ops


def convolution(input, name, **kwargs):
    dim = __weights_dict[name]['weights'].ndim

    # Reorder the stored channels-last kernel (..., C_in, C_out) into CNTK's
    # channels-first layout (C_out, C_in, ...).
    weight = np.transpose(__weights_dict[name]['weights'],
                          [dim - 1, dim - 2] + list(range(0, dim - 2)))
    w = cntk.Parameter(init=weight, name=name + '_weight')

    # Move the input's channel axis from last to first to match CNTK.
    input = cntk.transpose(input, [dim - 2] + list(range(0, dim - 2)))

    layer = ops.convolution(w, input, **kwargs)
    if 'bias' in __weights_dict[name]:
        # Reshape the bias to (C_out, 1, ..., 1) so it broadcasts over the spatial axes.
        bias = np.reshape(__weights_dict[name]['bias'], [-1] + [1] * (dim - 2))
        b = cntk.Parameter(init=bias, name=name + '_bias')
        layer = layer + b
    # Transpose the result back to the caller's channels-last layout.
    layer = cntk.transpose(layer, list(range(1, dim - 1)) + [0])
    return layer
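
For context, a hypothetical __weights_dict the helper above could consume (the name, keys, and layout are assumptions): 2D kernels stored channels-last, (H, W, C_in, C_out), as a TensorFlow/Keras export would produce.

import numpy as np

__weights_dict = {
    'conv1': {
        'weights': np.random.randn(3, 3, 3, 16).astype(np.float32),  # (H, W, C_in, C_out)
        'bias':    np.zeros(16, dtype=np.float32),
    },
}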
Example #4
def _conv_ops(weights, data):
    # `stride` and `pad` are free variables captured from the enclosing scope.
    return ops.convolution(weights, data,
                           strides=(cntk.InferredDimension,) + ops.sanitize_shape(stride),
                           auto_padding=[False, pad, pad])
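
Since the closure depends on those free variables, a hypothetical enclosing function (names are assumptions, not from the source; the sanitize_shape call follows the snippet's own API usage) could look like:

import cntk
from cntk import ops

def conv2d(data, weights, stride=(1, 1), pad=True):
    # `_conv_ops` captures `stride` and `pad` from this scope.
    def _conv_ops(weights, data):
        return ops.convolution(weights, data,
                               strides=(cntk.InferredDimension,) + ops.sanitize_shape(stride),
                               auto_padding=[False, pad, pad])
    return _conv_ops(weights, data)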