def BinaryConvolution(operand,
                      filter_shape,
                      num_filters=1,
                      channels=1,
                      init=C.glorot_uniform(),
                      pad=False,
                      strides=1,
                      bias=True,
                      init_bias=0,
                      op_name='BinaryConvolution', name=''):
    """ arguments:
            operand: tensor to convolve
            filter_shape: tuple indicating filter size
            num_filters: number of filters to use 
            channels: number of incoming channels
            init: type of initialization to use for weights
    """
    kernel_shape = (num_filters, channels) + filter_shape
    W = C.parameter(shape=kernel_shape, init=init, name="filter")

    binary_convolve_operand_p = C.placeholder(operand.shape, operand.dynamic_axes, name="operand")
    # CustomMultibit (assumed to be in scope; it is the quantization user op
    # from CNTK's binary convolution example) binarizes both the weights and
    # the activations when called with a bit count of 1.
    binary_convolve = C.convolution(CustomMultibit(W, 1),
                                    CustomMultibit(binary_convolve_operand_p, 1),
                                    auto_padding=[False, pad, pad],
                                    strides=[strides])
    r = C.as_block(binary_convolve, [(binary_convolve_operand_p, operand)], op_name, name)

    if bias:
        bias_shape = (num_filters, 1, 1)
        b = C.parameter(shape=bias_shape, init=init_bias, name="bias")
        r = r + b

    # apply learnable param relu
    P = C.parameter(shape=r.shape, init=init, name="prelu")
    r = C.param_relu(P, r)
    return r
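A minimal usage sketch (not from the original source): the import and input dimensions are illustrative, and CustomMultibit is assumed to be available as noted above.

import cntk as C

# Hypothetical usage: a 3-channel 32x32 input convolved with 64
# binarized 3x3 filters; CustomMultibit must be in scope.
x = C.input_variable((3, 32, 32))
z = BinaryConvolution(x, filter_shape=(3, 3), num_filters=64,
                      channels=3, pad=True)
print(z.shape)  # expected: (64, 32, 32)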
Example No. 2
def resblock_basic(inp, num_filters):
    # Basic residual block: two 3x3 convolutions with batch normalization,
    # a learnable PReLU in between, and an identity skip connection.
    c1 = C.layers.Convolution(
        (3, 3), num_filters, init=C.he_normal(), pad=True, bias=False)(inp)
    c1 = C.layers.BatchNormalization(map_rank=1)(c1)
    c1 = C.param_relu(C.Parameter(c1.shape, init=C.he_normal()), c1)

    c2 = C.layers.Convolution(
        (3, 3), num_filters, init=C.he_normal(), pad=True, bias=False)(c1)
    c2 = C.layers.BatchNormalization(map_rank=1)(c2)
    return inp + c2
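The SRResNet generator in Example No. 4 below calls resblock_basic_stack, which these snippets do not show. A plausible sketch, assuming it simply chains resblock_basic blocks:

def resblock_basic_stack(inp, num_stack, num_filters):
    # Assumed behavior: chain num_stack residual blocks end to end,
    # matching the call resblock_basic_stack(h1, 16, 64) in Example No. 4.
    r = inp
    for _ in range(num_stack):
        r = resblock_basic(r, num_filters)
    return r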
Example No. 3
    def test_prelu_activation_layer(self):
        """Test a model with a single CNTK PReLU activation layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of alpha parameters to use for both CNTK and ELL
        # layers
        # Input order for CNTK is channels, rows, columns
        alphaValues = np.linspace(
            1, 2, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)

        # create an ELL Tensor from the alpha parameters, which re-orders and
        # produces an appropriately dimensioned tensor
        alphaTensor = cntk_converters.get_tensor_from_cntk_convolutional_weight_value_shape(
            alphaValues, alphaValues.shape)

        inputValues = np.linspace(
            -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)

        # Evaluate a PReLU CNTK layer
        x = input((16, 10, 10))
        p = parameter(shape=x.shape, init=alphaValues, name="prelu")
        cntkModel = param_relu(p, x)

        # Create the equivalent ELL predictor
        layerParameters = ell.neural.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.math.TensorShape(10, 10, 16),
            ell.neural.NoPadding(),
            ell.math.TensorShape(10, 10, 16),
            ell.neural.NoPadding(),
            ell.nodes.PortType.smallReal)
        layer = ell.neural.PReLUActivationLayer(layerParameters, alphaTensor)
        predictor = ell.neural.NeuralNetworkPredictor([layer])

        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_vector_from_cntk_array(
            cntkResults)
        orderedInputValues = cntk_converters.get_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for PReLU Activation layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults,
            "prelu_activation", "test")
Example No. 4
def SRResNet(h0):
    print('Generator input shape:', h0.shape)
    with C.layers.default_options(init=C.he_normal(), bias=False):

        h1 = C.layers.Convolution((9, 9), 64, pad=True)(h0)
        h1 = C.param_relu(C.Parameter(h1.shape, init=C.he_normal()), h1)

        h2 = resblock_basic_stack(h1, 16, 64)

        h3 = C.layers.Convolution((3, 3), 64, activation=None, pad=True)(h2)
        h3 = C.layers.BatchNormalization(map_rank=1)(h3)

        h4 = h1 + h3

        h5 = C.layers.ConvolutionTranspose2D(
            (3, 3), 64, pad=True, strides=(2, 2), output_shape=(224, 224))(h4)
        h5 = C.param_relu(C.Parameter(h5.shape, init=C.he_normal()), h5)

        h6 = C.layers.Convolution((3, 3), 3, pad=True)(h5)

        return h6
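A usage sketch, assuming the definitions above are in scope. The input spatial size is an assumption chosen so the 2x transposed convolution lands on the hard-coded 224x224 output_shape:

import cntk as C

h0 = C.input_variable((3, 112, 112))
sr = SRResNet(h0)
print(sr.shape)  # expected: (3, 224, 224)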
Example No. 5
    # Nested helper: W, b, bias, activation, init_activation, pad, strides,
    # and name are read from the enclosing factory's scope (see the sketch
    # after this snippet).
    def convolve(x):
        r = C.convolution(W,
                          x,
                          auto_padding=[False, pad, pad],
                          strides=[strides])
        r.name = name

        if bias:
            r = r + b
        if activation:
            # apply learnable param relu
            P = C.parameter(shape=r.shape, init=init_activation, name="prelu")
            r = C.param_relu(P, r)
        return r
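The closure above references W, b, bias, activation, init_activation, pad, strides, and name from an enclosing scope that the snippet omits. A hypothetical factory that would supply them; the names and defaults are assumptions, modeled on the BinaryConvolution example above:

import cntk as C

def Convolution2D(filter_shape, num_filters=1, channels=1,
                  init=C.glorot_uniform(), init_activation=C.glorot_uniform(),
                  pad=False, strides=1, bias=True, init_bias=0,
                  activation=True, name=''):
    # Hypothetical enclosing factory: creates the parameters that the
    # nested convolve closure reads from its surrounding scope.
    kernel_shape = (num_filters, channels) + filter_shape
    W = C.parameter(shape=kernel_shape, init=init, name="filter")
    b = C.parameter(shape=(num_filters, 1, 1), init=init_bias, name="bias")

    def convolve(x):
        r = C.convolution(W, x, auto_padding=[False, pad, pad],
                          strides=[strides])
        r.name = name
        if bias:
            r = r + b
        if activation:
            P = C.parameter(shape=r.shape, init=init_activation, name="prelu")
            r = C.param_relu(P, r)
        return r

    return convolve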
Example No. 6
    def prelu(x):
        # alpha is a parameter defined in the enclosing scope
        return param_relu(alpha, x)
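As in the previous example, alpha comes from an enclosing scope that the snippet omits; a hypothetical wrapper, where the parameter name and initial value are assumptions:

import cntk as C

def PReLU(shape, init_alpha=0.25):
    # Hypothetical factory: creates the alpha parameter read by the
    # nested prelu closure.
    alpha = C.parameter(shape=shape, init=init_alpha, name="prelu_alpha")

    def prelu(x):
        return C.param_relu(alpha, x)

    return prelu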
Example No. 7
def test_PRelu(tmpdir):
    data = np.asarray([[-1, -0.5, 0, 1, 2]])
    alpha = C.constant(value=[[0.5, 0.5, 0.5, 0.5, 0.5]])
    model = C.param_relu(alpha, data)
    # verify_no_input is a helper from the surrounding test suite
    # (presumably it round-trips the model and checks the outputs).
    verify_no_input(model, tmpdir, 'PRelu_0')
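Hand-checking the expected output: with alpha fixed at 0.5, PReLU maps [-1, -0.5, 0, 1, 2] to [-0.5, -0.25, 0, 1, 2]:

import numpy as np

data = np.asarray([[-1, -0.5, 0, 1, 2]])
expected = np.maximum(0, data) + 0.5 * np.minimum(0, data)
# expected == [[-0.5, -0.25, 0., 1., 2.]]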
Example No. 8
def lrelu(input, leak=0.2, name=""):
    # Leaky ReLU expressed as a PReLU whose slope is a fixed constant
    # rather than a learnable parameter
    return C.param_relu(
        C.constant((np.ones(input.shape) * leak).astype(np.float32)),
        input, name=name)
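A usage sketch for lrelu, assuming the definition above is in scope; the input shape is illustrative. Because the slope tensor is a constant, the leak stays fixed during training:

import numpy as np
import cntk as C

x = C.input_variable((3, 32, 32))
y = lrelu(x, leak=0.2, name="lrelu")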