Example 1
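All examples on this page assume barrista's layer classes are in scope. A plausible import block is sketched below; this is an assumption, since barrista generates its layer classes dynamically from the installed caffe proto, so the exact set available depends on your build.

from barrista import design
from barrista.design import (ConvolutionLayer, DeconvolutionLayer, ReLULayer,
                             PReLULayer, SigmoidLayer, BatchNormLayer,
                             ConcatLayer, SliceLayer, PythonLayer,
                             PoolingLayer, DropoutLayer, InnerProductLayer)
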
def create_simpleConvolutionsRelu(args, num_output_final):
    """Create network in barrista."""
    print("Using simple convolutions network with ReLU units, num_output is",
          "16, 32 *", args.numLayers, ", 16, 1 and a final Sigmoid.",
          "kernel_pad is", args.kernel_pad)
    layers = []

    filler = design.PROTODETAIL.FillerParameter()
    filler.type = args.filler_type
    # filler.value = 1

    kernel = 2 * args.kernel_pad + 1
    pad = args.kernel_pad

    layers.append(
        ConvolutionLayer(Convolution_kernel_size=kernel,
                         Convolution_num_output=16,
                         Convolution_stride=1,
                         Convolution_pad=pad,
                         Convolution_weight_filler=filler,
                         bottoms=['images']))
    layers.append(ReLULayer())

    for i in range(args.numLayers):
        layers.append(
            ConvolutionLayer(Convolution_kernel_size=kernel,
                             Convolution_num_output=32,
                             Convolution_stride=1,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
        layers.append(ReLULayer())

    layers.append(
        ConvolutionLayer(Convolution_kernel_size=kernel,
                         Convolution_num_output=16,
                         Convolution_stride=1,
                         Convolution_pad=pad,
                         Convolution_weight_filler=filler))
    layers.append(ReLULayer())

    layers.append(
        ConvolutionLayer(Convolution_kernel_size=kernel,
                         Convolution_num_output=num_output_final,
                         Convolution_pad=pad,
                         Convolution_weight_filler=filler,
                         tops=['RS_est']))

    return layers
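
A minimal usage sketch for this builder, assuming a hypothetical args namespace carrying the three fields the function reads and an input blob named 'images'; the NetSpecification wiring follows the same pattern as Example 7 below.

from types import SimpleNamespace

from barrista import design

# Hypothetical argument values; only these three fields are read above.
args = SimpleNamespace(numLayers=2, kernel_pad=1, filler_type='xavier')
layers = create_simpleConvolutionsRelu(args, num_output_final=6)

# Attach the layer list to a specification and compile it to a caffe net
# (requires a working caffe installation).
netspec = design.NetSpecification([[10, 3, 51, 51]], inputs=['images'])
netspec.layers.extend(layers)
net = netspec.instantiate()
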
Example 2
def recover_reflectance_shading(args, num_output_final):
    """Recover reflectance and shading from estimation."""
    layers = []

    RS_est_mode = args.RS_est_mode.split('-')[0]
    if RS_est_mode in ['RS']:
        # If R and S are estimated jointly, nothing needs to be recovered:
        # just slice the estimate directly into R and S.
        layers.append(
            SliceLayer(
                name='slice_RS',
                Slice_axis=1,
                Slice_slice_point=[3],
                bottoms=['RS_est'],
                tops=['reflectance', 'shading'],
            ))
        return layers  # return early; nothing else to recover
    elif RS_est_mode in ['rDirectly']:
        layers.append(
            ReLULayer(
                name='pass_on_r_to_reflectance',
                bottoms=['RS_est'],
                tops=['reflectance'],
            ))
        layers.append(
            ReLULayer(
                name='pass_on_r_to_shading_dummy',
                bottoms=['RS_est'],
                tops=['shading'],
            ))
        return layers  # return early; nothing else to recover

    estimation = 'RS_est'

    # recover reflectance and shading:
    layers.append(
        PythonLayer(Python_module='recover_reflectance_shading_layer',
                    Python_layer='RecoverReflectanceShadingLayer',
                    Python_param_str=args.RS_est_mode,
                    name='recover_reflectance_shading',
                    bottoms=[estimation, 'images'],
                    tops=['reflectance', 'shading']))
    return layers
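
A small sketch of the dispatch behaviour with a hypothetical args namespace (the mode strings below are illustrative): in 'RS' mode the function returns a single SliceLayer that splits the six-channel 'RS_est' blob at channel 3; any mode other than 'RS' and 'rDirectly' falls through to the python recovery layer.

from types import SimpleNamespace

args = SimpleNamespace(RS_est_mode='RS')
layers = recover_reflectance_shading(args, num_output_final=6)
assert len(layers) == 1  # one SliceLayer -> 'reflectance' and 'shading'

args = SimpleNamespace(RS_est_mode='S-log')  # hypothetical mode string
layers = recover_reflectance_shading(args, num_output_final=3)
assert len(layers) == 1  # one PythonLayer recovering both blobs
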
Example 3
def create_cascadeSkipLayers(args, num_output_final):
    """Create network in barrista."""
    layers = []

    # To use a PReLU instead of a ReLU below, append e.g.:
    # layers.append(PReLULayer())
    # (SigmoidLayer works analogously.)

    filler = design.PROTODETAIL.FillerParameter()
    filler.type = args.filler_type
    # filler.value = 1

    kernel = 2 * args.kernel_pad + 1
    pad = args.kernel_pad + (args.dilation - 1)
    num_output = 2**args.num_filters_log
    print("This net has skip layers and a sigmoid in the end!",
          "Using a convolutional network with ReLU units.", "It has",
          args.numLayers, "layers with", num_output,
          "filters each, then a conv layer with num_output",
          "filters (dependent on the RS_estimation mode)",
          "The kernels are of size", kernel, "with a padding of", pad)

    if args.numLayers >= 1:
        i = 0
        layers.append(
            ConvolutionLayer(
                name='conv{}_level0'.format(i),
                bottoms=['images'],
                tops=['conv{}_level0'.format(i)],
                Convolution_num_output=num_output,
                Convolution_kernel_size=kernel,
                Convolution_pad=pad,
                Convolution_dilation=args.dilation,
                Convolution_weight_filler=filler,
            ))
        if args.use_batch_normalization:
            layers.append(
                BatchNormLayer(
                    name='bn{}_level0'.format(i),
                    bottoms=['conv{}_level0'.format(i)],
                    tops=['conv{}_level0'.format(i)],
                ))
        layers.append(
            ReLULayer(
                name='relu{}_level0'.format(i),
                bottoms=['conv{}_level0'.format(i)],
                tops=['conv{}_level0'.format(i)],
            ))

        for i in range(1, args.numLayers):
            layers.append(
                ConvolutionLayer(
                    name='conv{}_level0'.format(i),
                    bottoms=['conv{}_level0'.format(i - 1)],
                    tops=['conv{}_level0'.format(i)],
                    Convolution_num_output=num_output,
                    Convolution_kernel_size=kernel,
                    Convolution_pad=pad,
                    Convolution_dilation=args.dilation,
                    Convolution_weight_filler=filler,
                ))
            if args.use_batch_normalization:
                layers.append(
                    BatchNormLayer(
                        name='bn{}_level0'.format(i),
                        bottoms=['conv{}_level0'.format(i)],
                        tops=['conv{}_level0'.format(i)],
                    ))
            layers.append(
                ReLULayer(
                    name='relu{}_level0'.format(i),
                    bottoms=['conv{}_level0'.format(i)],
                    tops=['conv{}_level0'.format(i)],
                ))

        layers.append(
            ConcatLayer(
                name='concat_skip_layers_level0',
                bottoms=[
                    'conv{}_level0'.format(i) for i in range(args.numLayers)
                ],
                tops=['concat_skip_layers_level0'],
            ))
        layers.append(
            ConvolutionLayer(
                name='fuse_skip_layers_level0',
                bottoms=['concat_skip_layers_level0'],
                tops=['RS_est_before_sigmoid_level0'],
                Convolution_num_output=num_output_final,
                Convolution_kernel_size=1,
                Convolution_pad=0,
                Convolution_weight_filler=filler,
            ))
        layers.append(
            SigmoidLayer(
                name='sigmoid_after_fusing_level0',
                bottoms=['RS_est_before_sigmoid_level0'],
                tops=['RS_est_level0'],
            ))
    else:
        # handle the degenerate case numLayers == 0
        layers.append(
            ConvolutionLayer(name='conv0_level0',
                             Convolution_num_output=num_output_final,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_dilation=args.dilation,
                             Convolution_weight_filler=filler,
                             bottoms=['images'],
                             tops=['RS_est_before_sigmoid_level0']))
        layers.append(
            SigmoidLayer(
                name='sigmoid_after_fusing_level0',
                bottoms=['RS_est_before_sigmoid_level0'],
                tops=['RS_est_level0'],
            ))

    # recover reflectance and shading:
    layers.append(
        PythonLayer(Python_module='recover_reflectance_shading_layer',
                    Python_layer='RecoverReflectanceShadingLayer',
                    Python_param_str=args.RS_est_mode,
                    name='recover_reflectance_shading_inner_level0',
                    bottoms=['RS_est_level0', 'images'],
                    tops=['reflectance_level0', 'shading_level0']))
    # add loss layers for inner levels
    # compute whdr hinge loss
    bottoms = ['reflectance_level0', args.comparisonsType]
    if args.dataset == 'sintel':
        bottoms.append('albedos')
    layers.append(
        PythonLayer(Python_module='whdr_hinge_loss_layer',
                    Python_layer='WhdrHingeLossLayer',
                    Python_param_str=(args.whdr_delta_margin_ratio_dense),
                    name='loss_whdr_hinge_level0',
                    bottoms=bottoms,
                    tops=['loss_whdr_hinge_level0'],
                    loss_weights=[args.loss_scale_whdr],
                    include_stages=['fit']))
    # compute 'real' WHDR as 'accuracy' for evaluation
    bottoms = ['reflectance_level0', 'comparisons']
    if args.dataset == 'sintel':
        bottoms.append('albedos')
    layers.append(
        PythonLayer(
            Python_module='whdr_layer',
            Python_layer='WhdrLayer',
            Python_param_str="0.1",
            name='whdr_original_level0',
            bottoms=bottoms,
            tops=['whdr_original_level0'],
            # do not count as a loss layer:
            loss_weights=[0],
            include_stages=['fit']))

    # define what to pass on to the next level
    level1_input = 'reflectance_level0'

    # add concatenation to input into next level
    # layers.append(
    #     ConcatLayer(
    #         name='concat_output_level0_to_input_level1',
    #         bottoms=['images', 'reflectance_level0', 'shading_level0'],
    #         tops=['concat_output_level0_to_input_level1'],
    #     )
    # )
    # level1_input = 'concat_output_level0_to_input_level1'

    if args.numLayers >= 1:
        i = 0
        layers.append(
            ConvolutionLayer(
                name='conv{}_level1'.format(i),
                bottoms=[level1_input],
                tops=['conv{}_level1'.format(i)],
                Convolution_num_output=num_output,
                Convolution_kernel_size=kernel,
                Convolution_pad=pad,
                Convolution_dilation=args.dilation,
                Convolution_weight_filler=filler,
            ))
        if args.use_batch_normalization:
            layers.append(
                BatchNormLayer(
                    name='bn{}_level1'.format(i),
                    bottoms=['conv{}_level1'.format(i)],
                    tops=['conv{}_level1'.format(i)],
                ))
        layers.append(
            ReLULayer(
                name='relu{}_level1'.format(i),
                bottoms=['conv{}_level1'.format(i)],
                tops=['conv{}_level1'.format(i)],
            ))

        for i in range(1, args.numLayers):
            layers.append(
                ConvolutionLayer(
                    name='conv{}_level1'.format(i),
                    bottoms=['conv{}_level1'.format(i - 1)],
                    tops=['conv{}_level1'.format(i)],
                    Convolution_num_output=num_output,
                    Convolution_kernel_size=kernel,
                    Convolution_pad=pad,
                    Convolution_dilation=args.dilation,
                    Convolution_weight_filler=filler,
                ))
            if args.use_batch_normalization:
                layers.append(
                    BatchNormLayer(
                        name='bn{}_level1'.format(i),
                        bottoms=['conv{}_level1'.format(i)],
                        tops=['conv{}_level1'.format(i)],
                    ))
            layers.append(
                ReLULayer(
                    name='relu{}_level1'.format(i),
                    bottoms=['conv{}_level1'.format(i)],
                    tops=['conv{}_level1'.format(i)],
                ))

        layers.append(
            ConcatLayer(
                name='concat_skip_layers_level1',
                bottoms=[
                    'conv{}_level1'.format(i) for i in range(args.numLayers)
                ],
                tops=['concat_skip_layers_level1'],
            ))
        layers.append(
            ConvolutionLayer(
                name='fuse_skip_layers_level1',
                bottoms=['concat_skip_layers_level1'],
                tops=['RS_est_before_sigmoid_level1'],
                Convolution_num_output=num_output_final,
                Convolution_kernel_size=1,
                Convolution_pad=0,
                Convolution_weight_filler=filler,
            ))
        layers.append(
            SigmoidLayer(
                name='sigmoid_after_fusing_level1',
                bottoms=['RS_est_before_sigmoid_level1'],
                tops=['RS_est'],
            ))
    else:
        # handle the degenerate case numLayers == 0
        layers.append(
            ConvolutionLayer(name='conv0_level1',
                             Convolution_num_output=num_output_final,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_dilation=args.dilation,
                             Convolution_weight_filler=filler,
                             bottoms=[level1_input],
                             tops=['RS_est_before_sigmoid_level1']))
        layers.append(
            SigmoidLayer(
                name='sigmoid_after_fusing_level1',
                bottoms=['RS_est_before_sigmoid_level1'],
                tops=['RS_est'],
            ))
        # the last output must be named 'RS_est' (no level suffix)
    return layers
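
A worked check of the padding arithmetic used above: for a stride-1 dilated convolution in caffe, the output height is H + 2*pad - dilation*(kernel - 1). The choice pad = kernel_pad + (dilation - 1) preserves the spatial size exactly when kernel_pad == 1 or dilation == 1, as this sketch illustrates.

def out_size(H, kernel, pad, dilation):
    # Output height of a stride-1 convolution.
    return H + 2 * pad - dilation * (kernel - 1)

for kernel_pad, dilation in [(1, 1), (1, 2), (2, 2)]:
    kernel = 2 * kernel_pad + 1
    pad = kernel_pad + (dilation - 1)
    print(kernel_pad, dilation, out_size(64, kernel, pad, dilation))
# (1, 1) and (1, 2) keep 64; (2, 2) yields 62, a slight shrink per layer.
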
Example 4
def create_convStaticSkipLayers(args, num_output_final):
    """Create network in barrista."""
    layers = []

    # To use a PReLU instead of a ReLU below, append e.g.:
    # layers.append(PReLULayer())
    # (SigmoidLayer works analogously.)

    filler = design.PROTODETAIL.FillerParameter()
    filler.type = args.filler_type
    # filler.value = 1

    kernel = 2 * args.kernel_pad + 1
    pad = args.kernel_pad + (args.dilation - 1)
    num_output = 2**args.num_filters_log
    print("This net has skip layers and a sigmoid in the end!",
          "Using a convolutional network with ReLU units.", "It has",
          args.numLayers, "layers with", num_output,
          "filters each, then a conv layer with num_output",
          "filters (dependent on the RS_estimation mode)",
          "The kernels are of size", kernel, "with a padding of", pad)

    if args.numLayers >= 1:
        i = 0
        layers.append(
            ConvolutionLayer(
                name='conv{}'.format(i),
                bottoms=['images'],
                tops=['conv{}'.format(i)],
                Convolution_num_output=num_output,
                Convolution_kernel_size=kernel,
                Convolution_pad=pad,
                Convolution_dilation=args.dilation,
                Convolution_weight_filler=filler,
            ))
        if args.use_batch_normalization:
            layers.append(
                BatchNormLayer(
                    name='bn{}'.format(i),
                    bottoms=['conv{}'.format(i)],
                    tops=['conv{}'.format(i)],
                ))
        layers.append(
            ReLULayer(
                name='relu{}'.format(i),
                bottoms=['conv{}'.format(i)],
                tops=['conv{}'.format(i)],
            ))

        for i in range(1, args.numLayers):
            layers.append(
                ConvolutionLayer(
                    name='conv{}'.format(i),
                    bottoms=['conv{}'.format(i - 1)],
                    tops=['conv{}'.format(i)],
                    Convolution_num_output=num_output,
                    Convolution_kernel_size=kernel,
                    Convolution_pad=pad,
                    Convolution_dilation=args.dilation,
                    Convolution_weight_filler=filler,
                ))
            if args.use_batch_normalization:
                layers.append(
                    BatchNormLayer(
                        name='bn{}'.format(i),
                        bottoms=['conv{}'.format(i)],
                        tops=['conv{}'.format(i)],
                    ))
            layers.append(
                ReLULayer(
                    name='relu{}'.format(i),
                    bottoms=['conv{}'.format(i)],
                    tops=['conv{}'.format(i)],
                ))

        layers.append(
            ConcatLayer(
                name='concat_skip_layers',
                bottoms=['conv{}'.format(i) for i in range(args.numLayers)],
                tops=['concat_skip_layers'],
            ))
        layers.append(
            ConvolutionLayer(
                name='fuse_skip_layers',
                bottoms=['concat_skip_layers'],
                tops=['RS_est_before_sigmoid'],
                Convolution_num_output=num_output_final,
                Convolution_kernel_size=1,
                Convolution_pad=0,
                Convolution_weight_filler=filler,
            ))
        layers.append(
            SigmoidLayer(
                name='sigmoid_after_fusing',
                bottoms=['RS_est_before_sigmoid'],
                tops=['RS_est'],
            ))
    else:
        # handle the degenerate case numLayers == 0
        layers.append(
            ConvolutionLayer(name='conv0',
                             Convolution_num_output=num_output_final,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_dilation=args.dilation,
                             Convolution_weight_filler=filler,
                             bottoms=['images'],
                             tops=['RS_est_before_sigmoid']))
        layers.append(
            SigmoidLayer(
                name='sigmoid_after_fusing',
                bottoms=['RS_est_before_sigmoid'],
                tops=['RS_est'],
            ))

    return layers
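
Sketch of how the static variant composes, with hypothetical argument values: for numLayers = 3 with batch normalization the net is conv0..conv2 (each followed by BatchNorm and ReLU), a concat of all three skip outputs, a 1x1 fusing convolution, and a final sigmoid.

from types import SimpleNamespace

args = SimpleNamespace(numLayers=3, kernel_pad=1, dilation=1,
                       num_filters_log=5, filler_type='xavier',
                       use_batch_normalization=True)
layers = create_convStaticSkipLayers(args, num_output_final=6)
# 3 x (conv + bn + relu) + concat + 1x1 fuse + sigmoid = 12 layers
assert len(layers) == 12
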
Example 5
def create_uNet(args, num_output_final):
    """Create network in barrista.

    Create a network in barrista similar to a combination of U-Net and
    the one in the "Learning Data-Driven Reflectance Priors" paper.
    """
    print("Using u-net")
    layers = []

    filler = design.PROTODETAIL.FillerParameter()
    filler.type = args.filler_type

    kernel = 2 * args.kernel_pad + 1
    pad = args.kernel_pad

    # going down with local features in the U
    layers.append(
        ConvolutionLayer(Convolution_num_output=16,
                         Convolution_kernel_size=3,
                         Convolution_stride=2,
                         Convolution_pad=1,
                         Convolution_weight_filler=filler,
                         name='Conv1',
                         bottoms=['images']))
    for i in range(args.numLayers):
        layers.append(ReLULayer())
        layers.append(
            ConvolutionLayer(Convolution_num_output=16,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))

    layers.append(ReLULayer(tops=['L1']))
    layers.append(
        ConvolutionLayer(Convolution_num_output=32,
                         Convolution_kernel_size=3,
                         Convolution_stride=2,
                         Convolution_pad=1,
                         Convolution_weight_filler=filler,
                         name='Conv2'))
    for i in range(args.numLayers):
        layers.append(ReLULayer())
        layers.append(
            ConvolutionLayer(Convolution_num_output=32,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
    layers.append(ReLULayer(tops=['L2']))
    layers.append(
        ConvolutionLayer(Convolution_num_output=64,
                         Convolution_kernel_size=3,
                         Convolution_stride=2,
                         Convolution_pad=1,
                         Convolution_weight_filler=filler,
                         name='Conv3'))
    for i in range(args.numLayers):
        layers.append(ReLULayer())
        layers.append(
            ConvolutionLayer(Convolution_num_output=64,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
    layers.append(ReLULayer(tops=['L3']))
    layers.append(
        ConvolutionLayer(Convolution_num_output=64,
                         Convolution_kernel_size=7,
                         Convolution_stride=1,
                         Convolution_pad=3,
                         Convolution_weight_filler=filler,
                         name='Conv4'))
    for i in range(args.numLayers):
        layers.append(ReLULayer())
        layers.append(
            ConvolutionLayer(Convolution_num_output=64,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
    layers.append(ReLULayer(tops=['local']))

    # lower path with the image global features
    layers.append(
        PythonLayer(Python_module='resize_layer',
                    Python_layer='ResizeLayer',
                    name='resize',
                    bottoms=['images'],
                    tops=['resized']))

    layers.append(
        ConvolutionLayer(Convolution_num_output=32,
                         Convolution_kernel_size=5,
                         Convolution_stride=4,
                         Convolution_pad=2,
                         Convolution_weight_filler=filler,
                         name='Conv5',
                         bottoms=['resized']))
    # for i in range(args.numLayers):
    #     layers.append(ReLULayer())
    #     layers.append(ConvolutionLayer(Convolution_num_output=32,
    #                                    Convolution_kernel_size=5,
    #                                    Convolution_pad=2,
    #                                    Convolution_weight_filler=filler))
    layers.append(ReLULayer())
    layers.append(
        ConvolutionLayer(Convolution_num_output=32,
                         Convolution_kernel_size=5,
                         Convolution_stride=4,
                         Convolution_pad=2,
                         Convolution_weight_filler=filler,
                         name='Conv6'))
    # for i in range(args.numLayers):
    #     layers.append(ReLULayer())
    #     layers.append(ConvolutionLayer(Convolution_num_output=32,
    #                                    Convolution_kernel_size=5,
    #                                    Convolution_pad=2,
    #                                    Convolution_weight_filler=filler))
    layers.append(ReLULayer())
    layers.append(
        ConvolutionLayer(Convolution_num_output=32,
                         Convolution_kernel_size=5,
                         Convolution_stride=4,
                         Convolution_pad=2,
                         Convolution_weight_filler=filler,
                         name='Conv7'))
    # for i in range(args.numLayers):
    #     layers.append(ReLULayer())
    #     layers.append(ConvolutionLayer(Convolution_num_output=32,
    #                                    Convolution_kernel_size=5,
    #                                    Convolution_pad=2,
    #                                    Convolution_weight_filler=filler))
    layers.append(ReLULayer())
    layers.append(
        ConvolutionLayer(Convolution_num_output=64,
                         Convolution_kernel_size=3,
                         Convolution_stride=1,
                         Convolution_pad=0,
                         Convolution_weight_filler=filler,
                         name='Conv8'))
    layers.append(ReLULayer(tops=['global_1']))

    # combine the local and global features
    layers.append(
        PythonLayer(Python_module='broadcast_layer',
                    Python_layer='BroadcastLayer',
                    name='broadcast',
                    bottoms=['global_1', 'local'],
                    tops=['global']))
    layers.append(ConcatLayer(bottoms=['local', 'global'], name='Concatenate'))
    for i in range(args.numLayers):
        layers.append(
            ConvolutionLayer(Convolution_num_output=64,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
        layers.append(ReLULayer())
    layers.append(
        ConvolutionLayer(Convolution_num_output=64,
                         Convolution_kernel_size=3,
                         Convolution_pad=1,
                         Convolution_weight_filler=filler))
    layers.append(ReLULayer(tops=['R3']))

    # going up again
    layers.append(
        DeconvolutionLayer(Convolution_num_output=64,
                           Convolution_kernel_size=2,
                           Convolution_stride=2,
                           Convolution_weight_filler=filler,
                           tops=['R3d']))
    layers.append(ConcatLayer(bottoms=['L2', 'R3d']))
    for i in range(args.numLayers):
        layers.append(
            ConvolutionLayer(Convolution_num_output=32,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
        layers.append(ReLULayer())
    layers.append(
        ConvolutionLayer(Convolution_num_output=32,
                         Convolution_kernel_size=3,
                         Convolution_pad=1,
                         Convolution_weight_filler=filler))
    layers.append(ReLULayer(tops=['R2']))

    layers.append(
        DeconvolutionLayer(Convolution_num_output=16,
                           Convolution_kernel_size=2,
                           Convolution_stride=2,
                           Convolution_weight_filler=filler,
                           tops=['R2d']))
    layers.append(ConcatLayer(bottoms=['L1', 'R2d']))
    for i in range(args.numLayers):
        layers.append(
            ConvolutionLayer(Convolution_num_output=16,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
        layers.append(ReLULayer())
    layers.append(
        ConvolutionLayer(Convolution_num_output=16,
                         Convolution_kernel_size=3,
                         Convolution_pad=1,
                         Convolution_weight_filler=filler))
    layers.append(ReLULayer(tops=['R1']))

    layers.append(
        DeconvolutionLayer(Convolution_num_output=3,
                           Convolution_kernel_size=2,
                           Convolution_stride=2,
                           Convolution_weight_filler=filler,
                           tops=['R1d']))
    layers.append(ConcatLayer(bottoms=['images', 'R1d']))

    for i in range(args.numLayers):
        layers.append(
            ConvolutionLayer(Convolution_num_output=3,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler))
        layers.append(ReLULayer())

    layers.append(
        ConvolutionLayer(Convolution_num_output=num_output_final,
                         Convolution_kernel_size=3,
                         Convolution_pad=1,
                         Convolution_weight_filler=filler,
                         tops=['RS_est']))

    return layers
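
Shape bookkeeping for the U path, a sketch under the assumption that the input height and width are divisible by 8: the three stride-2 convolutions Conv1-Conv3 reduce the resolution to H/8, each 2x2 stride-2 deconvolution doubles it again, and every ConcatLayer requires the skip blob ('L2', 'L1', 'images') to match the upsampled blob exactly.

for H in (48, 64, 80):
    down = H // 2 // 2 // 2  # after Conv1, Conv2, Conv3 (stride 2 each)
    up = down * 2 * 2 * 2    # after the three 2x2/stride-2 deconvolutions
    assert up == H           # holds because H is divisible by 8
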
Example 6
def create_convIncreasing(args, num_output_final):
    """Create network in barrista."""
    layers = []

    do_batch_normalization = False

    filler = design.PROTODETAIL.FillerParameter()
    filler.type = args.filler_type
    # filler.value = 1

    kernel = 2 * args.kernel_pad + 1
    pad = args.kernel_pad

    if args.numLayers >= 1:
        num_output = 2**args.num_filters_log
        num_outputs = [num_output]

        layers.append(
            ConvolutionLayer(Convolution_num_output=num_output,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler,
                             bottoms=['images']))
        if do_batch_normalization:
            layers.append(BatchNormLayer())
        layers.append(ReLULayer())

        for i in range(1, args.numLayers):
            num_output *= 2
            num_outputs.append(num_output)
            layers.append(
                ConvolutionLayer(Convolution_num_output=num_output,
                                 Convolution_kernel_size=kernel,
                                 Convolution_pad=pad,
                                 Convolution_weight_filler=filler))
            if do_batch_normalization:
                layers.append(BatchNormLayer())
            layers.append(ReLULayer())

        num_outputs.append(num_output_final)
        layers.append(
            ConvolutionLayer(Convolution_num_output=num_output_final,
                             Convolution_kernel_size=1,
                             Convolution_pad=0,
                             Convolution_weight_filler=filler,
                             tops=['RS_est']))
    else:
        num_outputs = [num_output_final]
        layers.append(
            ConvolutionLayer(Convolution_num_output=num_output_final,
                             Convolution_kernel_size=kernel,
                             Convolution_pad=pad,
                             Convolution_weight_filler=filler,
                             bottoms=['images'],
                             tops=['RS_est']))

    print("Using a convolutional network with ReLU units where num_output.",
          "increases. It has", args.numLayers, "layers with", num_outputs,
          "filters, then a conv layer with num_output filters",
          "dependent on RS_est mode", "The kernels are of size", kernel,
          "with a padding of", pad)

    return layers
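
A quick sketch of the filter-width progression (hypothetical values): widths start at 2**num_filters_log and double with every additional layer, before the 1x1 convolution maps down to num_output_final.

num_filters_log, numLayers = 4, 3
widths = [2 ** num_filters_log * 2 ** i for i in range(numLayers)]
print(widths)  # [16, 32, 64], then num_output_final via the 1x1 conv
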
Example 7
netspec = design.NetSpecification([[10, 3, 51, 51], [10]],
                                  inputs=['data', 'annotations'],
                                  predict_inputs=['data'],
                                  predict_input_shapes=[[10, 3, 51, 51]])

# This is a VGG-like convolutional network. It could even be created
# procedurally!
layers = []
conv_params = {
    'Convolution_kernel_size': 3,
    'Convolution_num_output': 32,
    'Convolution_pad': 1
}

layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(PoolingLayer(Pooling_kernel_size=2))
layers.append(DropoutLayer(Dropout_dropout_ratio=0.25))

conv_params['Convolution_num_output'] = 64
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(ConvolutionLayer(**conv_params))
layers.append(ReLULayer())
layers.append(PoolingLayer(Pooling_kernel_size=2))
layers.append(DropoutLayer(Dropout_dropout_ratio=0.25))

layers.append(InnerProductLayer(InnerProduct_num_output=256))
layers.append(ReLULayer())
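
The snippet ends here. A typical barrista follow-up (a sketch, not part of the original example) would attach the layer list to the specification and compile it:

netspec.layers.extend(layers)
net = netspec.instantiate()  # returns a runnable barrista/caffe Net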