Example #1
    def __call__(self, input_var=None, use_from=None, use_up_to='classifier', training=False, force_global_pooling=False, check_global_pooling=True, returns_net=False, verbose=0):

        assert use_from is None, 'This should not be set because it is for forward compatibility.'
        input_var = self.get_input_var(input_var)

        callback = NnpNetworkPass(verbose)
        callback.remove_and_rewire('ImageAugmentationX')
        callback.set_variable('InputX', input_var)
        self.configure_global_average_pooling(
            callback, force_global_pooling, check_global_pooling, 'AveragePooling')
        callback.set_batch_normalization_batch_stat_all(training)
        index = 0 if self.num_layers == 18 else 1
        self.use_up_to(use_up_to, callback, index=index)
        if not training:
            callback.fix_parameters()
        batch_size = input_var.shape[0]
        net = self.nnp.get_network(
            'Training', batch_size=batch_size, callback=callback)
        if returns_net:
            return net
        return list(net.outputs.values())[0]
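
The `num_layers == 18` check and the 'InputX'/'AveragePooling' names suggest a ResNet-family model from nnabla's model zoo. A minimal usage sketch, assuming the `ResNet18` class from `nnabla.models.imagenet` (the class name is an assumption based on that check):

import nnabla as nn
from nnabla.models.imagenet import ResNet18  # assumed model-zoo class

x = nn.Variable((1, 3, 224, 224))  # batch of one 224x224 RGB image
model = ResNet18()
y = model(x, training=False)       # single output variable of the inference graph
print(y.shape)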
Example #2
    def __call__(self,
                 input_var=None,
                 use_from=None,
                 use_up_to='classifier',
                 training=False,
                 force_global_pooling=False,
                 check_global_pooling=True,
                 returns_net=False,
                 verbose=0):
        input_var = self.get_input_var(input_var)

        callback = NnpNetworkPass(verbose)
        callback.remove_and_rewire('ImageAugmentationX')
        callback.set_variable('TrainingInput', input_var)
        self.configure_global_average_pooling(callback,
                                              force_global_pooling,
                                              check_global_pooling,
                                              'NIN/AveragePooling',
                                              by_type=False)
        callback.set_batch_normalization_batch_stat_all(training)
        self.use_up_to(use_up_to, callback)
        if not training:
            callback.remove_and_rewire('NIN/Dropout')
            callback.fix_parameters()
        batch_size = input_var.shape[0]
        net = self.nnp.get_network('Training',
                                   batch_size=batch_size,
                                   callback=callback)
        if returns_net:
            return net
        else:
            return list(net.outputs.values())[0]
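
The 'NIN/AveragePooling' and 'NIN/Dropout' function names identify this as the Network-in-Network model. A sketch assuming the `NIN` class from `nnabla.models.imagenet`; in nnabla's model zoo, `use_up_to` typically also accepts values such as 'pool' or 'lastconv' to expose intermediate outputs:

import nnabla as nn
from nnabla.models.imagenet import NIN  # assumed model-zoo class

x = nn.Variable((1, 3, 224, 224))
y = NIN()(x, use_up_to='classifier', training=False)  # dropout removed, params fixed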
Example #3
    def __call__(self,
                 input_var=None,
                 use_from=None,
                 use_up_to='detection',
                 training=False,
                 returns_net=False,
                 verbose=0):

        assert use_from is None, 'This should not be set because it is for forward compatibility.'
        input_var = self.get_input_var(input_var)
        nnp_input_size = self.get_nnp_input_size()
        callback = NnpNetworkPass(verbose)
        callback.set_variable('x', input_var)
        callback.set_batch_normalization_batch_stat_all(training)
        self.use_up_to(use_up_to, callback)
        if use_up_to != 'detection':
            self.use_up_to('Arange', callback)
            self.use_up_to('Arange2', callback)
            funcs_to_drop = ('Reshape_3', 'Arange', 'Arange_2')
            callback.drop_function(*funcs_to_drop)
            if use_up_to == 'lastconv':
                callback.drop_function('Convolution_23')

        # The output dimensions of the Reshape, Arange, Slice, etc. functions are
        # taken from the .nnp file, and they depend on the input image size with
        # which the .nnp file was created. When the model is given a different
        # input image size, these dimensions change, so the output shapes of these
        # functions need to be generalized whenever they are generated.
        # The callbacks below do exactly that.

        # Reshape operation for simulating darknet reorg bug
        @callback.on_generate_function_by_name('Reshape')
        def reshape_for_darknet_reorg_bug(f):
            s = f.inputs[0].variable.shape
            stride = 2
            r = f.proto.reshape_param
            r.shape.dim[:] = [
                s[0],
                int(s[1] / stride / stride), s[2], stride, s[3], stride
            ]
            return f

        # Second reshape for simulating the darknet reorg bug
        @callback.on_generate_function_by_name('Reshape_2')
        def reshape_for_darknet_reorg_bug_2(f):
            s = f.inputs[0].variable.shape
            r = f.proto.reshape_param
            r.shape.dim[:] = [
                s[0], s[1] * s[2] * s[3] * s[1] * s[2], s[4] // s[1],
                s[5] // s[2]
            ]
            return f

        # Reshape operation for output variable of yolov2 function in yolov2_activate.
        @callback.on_generate_function_by_name('Reshape_3')
        def reshape_yolov2_activate(f):
            s = f.inputs[0].variable.shape
            anchors = 5
            r = f.proto.reshape_param
            num_class = r.shape.dim[2] - 5
            s_add = (s[0], anchors, num_class + 5) + (s[2:])
            r.shape.dim[:] = s_add
            return f

        # Slicing the variable y in yolov2_activate to get t_xy
        @callback.on_generate_function_by_name('Slice')
        def slicing_t_xy(f):
            s = list(f.inputs[0].variable.shape)
            s[2] = 2
            r = f.proto.slice_param
            r.stop[:] = [s[0], s[1], s[2], s[3], s[4]]
            return f

        # Arange from zero to the grid width of the input variable (width // 32)
        @callback.on_generate_function_by_name('Arange')
        def arange_yolov2_image_coordinate_xs(f):
            s = input_var.shape
            r = f.proto.arange_param
            r.stop = s[3] // 32
            return f

        # Arange from zero to the grid height of the input variable (height // 32)
        @callback.on_generate_function_by_name('Arange_2')
        def arange_yolov2_image_coordinate_ys(f):
            s = input_var.shape
            r = f.proto.arange_param
            r.stop = s[2] // 32
            return f

        # Slicing the variable y in yolov2_activate to get t_wh
        @callback.on_generate_function_by_name('Slice_2')
        def slicing_t_wh(f):
            s = list(f.inputs[0].variable.shape)
            s[2] = 4
            r = f.proto.slice_param
            r.stop[:] = [s[0], s[1], s[2], s[3], s[4]]
            return f

        # Slicing the variable y in yolov2_activate to get t_o
        @callback.on_generate_function_by_name('Slice_3')
        def slicing_t_o(f):
            s = list(f.inputs[0].variable.shape)
            s[2] = 5
            r = f.proto.slice_param
            r.stop[:] = [s[0], s[1], s[2], s[3], s[4]]
            return f

        # Slicing the variable y in yolov2_activate to get t_p
        @callback.on_generate_function_by_name('Slice_4')
        def slicing_t_p(f):
            s = list(f.inputs[0].variable.shape)
            r = f.proto.slice_param
            r.stop[:] = [s[0], s[1], s[2], s[3], s[4]]
            return f

        # Reshape the output of Arange to get xs
        @callback.on_generate_function_by_name('Reshape_4')
        def reshape_yolov2_image_coordinate_xs(f):
            s = f.inputs[0].variable.shape
            r = f.proto.reshape_param
            r.shape.dim[3] = s[0]
            return f

        # Reshape operation to get t_x
        @callback.on_generate_function_by_name('Reshape_5')
        def reshape_yolov2_image_coordinate_t_x(f):
            s = f.inputs[0].variable.shape
            r = f.proto.reshape_param
            r.shape.dim[:] = [s[0], s[1], 1, s[2], s[3]]
            return f

        # Reshape the output of Arange_2 to get ys
        @callback.on_generate_function_by_name('Reshape_6')
        def reshape_yolov2_image_coordinate_ys(f):
            s = f.inputs[0].variable.shape
            r = f.proto.reshape_param
            r.shape.dim[2] = s[0]
            return f

        # Reshape the output of Arange_2 to get t_y
        @callback.on_generate_function_by_name('Reshape_7')
        def reshape_yolov2_image_coordinate_t_y(f):
            s = f.inputs[0].variable.shape
            r = f.proto.reshape_param
            r.shape.dim[:] = [s[0], s[1], 1, s[2], s[3]]
            return f

        # Reshape the final variable y
        @callback.on_generate_function_by_name('Reshape_8')
        def reshape_output_variable_y(f):
            s = f.inputs[0].variable.shape
            r = f.proto.reshape_param
            r.shape.dim[:] = [s[0], s[1] * s[2] * s[3], s[4]]
            return f

        # Rescale the scalar multiplier on t_x (applied after Reshape_4 in
        # yolov2_image_coordinate) by the width ratio between the .nnp input
        # size and the current input size.
        @callback.on_generate_function_by_name('MulScalar_2')
        def mul_yolov2_image_coordinate_t_x(f):
            r = f.proto.mul_scalar_param
            r.val = r.val * (nnp_input_size[1] / input_var.shape[3])
            return f

        # Rescale the scalar multiplier on t_y (applied after Reshape_6 in
        # yolov2_image_coordinate) by the height ratio between the .nnp input
        # size and the current input size.
        @callback.on_generate_function_by_name('MulScalar_3')
        def mul_yolov2_image_coordinate_t_y(f):
            r = f.proto.mul_scalar_param
            r.val = r.val * (nnp_input_size[0] / input_var.shape[2])
            return f

        # Reshape the anchor biases and rescale them from the .nnp grid size to
        # the current grid size before they multiply t_wh.
        @callback.on_function_pass_by_name('Mul2')
        def reshape_biases(f, variables, param_scope):
            bias_param_name = f.inputs[1].proto.name
            with nn.parameter_scope('', param_scope):
                biases = nn.parameter.get_parameter(bias_param_name)
                s = list(input_var.shape)
                k = np.array([nnp_input_size[1] // 32,
                              nnp_input_size[0] // 32]).reshape(1, 1, 2, 1, 1)
                m = np.array([s[3] // 32, s[2] // 32]).reshape(1, 1, 2, 1, 1)
                biases = biases.d * k / m
                biases = nn.Variable.from_numpy_array(biases)
                nn.parameter.set_parameter('biases', biases)

        if not training:
            callback.fix_parameters()
        batch_size = input_var.shape[0]
        net = self.nnp.get_network('runtime',
                                   batch_size=batch_size,
                                   callback=callback)
        if returns_net:
            return net
        return list(net.outputs.values())[0]
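
The callbacks above generalize the Reshape/Slice/Arange shapes baked into the .nnp file, so the detector accepts input sizes other than the one it was exported with. A sketch assuming the `YoloV2` class from `nnabla.models.object_detection` (the constructor argument is an assumption); width and height should stay multiples of 32, since the grid is `input // 32`:

import nnabla as nn
from nnabla.models.object_detection import YoloV2  # assumed import path

x = nn.Variable((1, 3, 416, 416))  # 416 = 13 * 32 grid cells per side
model = YoloV2('voc')              # dataset argument is an assumption
y = model(x, use_up_to='detection', training=False)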
Example #4
    def __call__(self, input_var=None, use_from=None, use_up_to='classifier', training=False, returns_net=False, verbose=0):

        assert use_from is None, 'This should not be set because it is for forward compatibility.'
        input_var = self.get_input_var(input_var)

        callback = NnpNetworkPass(verbose)
        callback.remove_and_rewire('ImageAugmentationX')
        callback.set_variable('TrainingInput', input_var)
        callback.set_batch_normalization_batch_stat_all(training)
        self.use_up_to(use_up_to, callback)
        if not training:
            callback.remove_and_rewire(
                'VGG{}/Dropout_1'.format(self.num_layers))
            callback.remove_and_rewire(
                'VGG{}/Dropout_2'.format(self.num_layers))
            callback.fix_parameters()
        batch_size = input_var.shape[0]
        net = self.nnp.get_network(
            'Training', batch_size=batch_size, callback=callback)
        if returns_net:
            return net
        return list(net.outputs.values())[0]
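
The `'VGG{}/Dropout_1'.format(self.num_layers)` pattern points to the VGG family (VGG11/13/16). A sketch assuming the `VGG16` class from `nnabla.models.imagenet`:

import nnabla as nn
from nnabla.models.imagenet import VGG16  # assumed model-zoo class

x = nn.Variable((1, 3, 224, 224))
y = VGG16()(x, training=False)  # dropout layers removed and parameters fixed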
Example #5
    def __call__(self,
                 input_var=None,
                 use_from=None,
                 use_up_to='segmentation',
                 training=False,
                 returns_net=False,
                 verbose=0):

        assert use_from is None, 'This should not be set because it is for forward compatibility.'
        input_var = self.get_input_var(input_var)
        callback = NnpNetworkPass(verbose)
        callback.set_variable('x', input_var)
        '''
        The output shapes of the Interpolate and AveragePooling functions depend on
        the input image size. If these dimensions were taken from the .nnp file as-is,
        a shape mismatch would occur for other input sizes, so the callbacks below set
        them according to the actual input image shape.
        '''

        # Change the kernel and stride dimensions of AveragePooling to match the input.
        @callback.on_generate_function_by_name('AveragePooling')
        def average_pooling_shape(f):
            s = f.inputs[0].variable.shape
            p = f.proto.average_pooling_param
            p.kernel.dim[:] = [s[2], s[3]]
            p.stride.dim[:] = [s[2], s[3]]
            return f

        # Change the output size of every Interpolate function via the callbacks below.
        @callback.on_generate_function_by_name('Interpolate')
        def interpolate_output_shape(f):
            s = input_var.shape
            h, w = s[2], s[3]
            # Halve (with ceiling) four times, matching four stride-2 downsamplings.
            for i in range(4):
                h = (h - 1) // 2 + 1
                w = (w - 1) // 2 + 1
            op_shape = f.proto.interpolate_param
            op_shape.output_size[:] = [h, w]
            return f

        @callback.on_generate_function_by_name('Interpolate_2')
        def interpolate_output_shape_2(f):
            s = input_var.shape
            h, w = s[2], s[3]
            # Halve (with ceiling) twice, matching two stride-2 downsamplings.
            for i in range(2):
                h = (h - 1) // 2 + 1
                w = (w - 1) // 2 + 1
            op_shape = f.proto.interpolate_param
            op_shape.output_size[:] = [h, w]
            return f

        @callback.on_generate_function_by_name('Interpolate_3')
        def interpolate_output_shape_3(f):
            s = input_var.shape
            op_shape = f.proto.interpolate_param
            op_shape.output_size[:] = [s[2], s[3]]
            return f

        callback.set_batch_normalization_batch_stat_all(training)
        self.use_up_to(use_up_to, callback)
        if not training:
            callback.fix_parameters()
        batch_size = input_var.shape[0]
        net = self.nnp.get_network('runtime',
                                   batch_size=batch_size,
                                   callback=callback)
        if returns_net:
            return net
        return list(net.outputs.values())[0]
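
The 'segmentation' endpoint and the Interpolate rewrites (which restore the input resolution at the end) fit an encoder-decoder segmentation model. A sketch assuming `DeepLabV3plus` from `nnabla.models.semantic_segmentation`; both the class and the constructor argument are assumptions:

import nnabla as nn
from nnabla.models.semantic_segmentation import DeepLabV3plus  # assumed

x = nn.Variable((1, 3, 513, 513))
model = DeepLabV3plus('voc-coco')  # dataset argument is an assumption
y = model(x, use_up_to='segmentation', training=False)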
Example #6
    def __call__(self,
                 input_var=None,
                 use_from=None,
                 use_up_to='classifier',
                 training=False,
                 force_global_pooling=False,
                 check_global_pooling=True,
                 returns_net=False,
                 verbose=0,
                 with_aux_tower=False):
        if not training:
            assert not with_aux_tower, "The aux tower must be disabled during inference."

        input_var = self.get_input_var(input_var)

        callback = NnpNetworkPass(verbose)
        callback.remove_and_rewire('ImageAugmentationX')
        callback.set_variable('InputX', input_var)
        self.configure_global_average_pooling(callback, force_global_pooling,
                                              check_global_pooling,
                                              'AveragePooling_3')
        callback.set_batch_normalization_batch_stat_all(training)
        if with_aux_tower:
            self.use_up_to('_aux_classifier_1', callback)
            funcs_to_drop1 = ("Affine_2", "SoftmaxCrossEntropy",
                              "MulScalarLoss1")

            self.use_up_to('_aux_classifier_2', callback)
            funcs_to_drop2 = ("Affine_4", "SoftmaxCrossEntropy_2",
                              "MulScalarLoss2")
        else:
            self.use_up_to('_branching_point_1', callback)
            funcs_to_drop1 = ("AveragePooling", "Convolution_22", "ReLU_22",
                              "Affine", "ReLU_23", "Dropout", "Affine_2",
                              "SoftmaxCrossEntropy", "MulScalarLoss1")

            self.use_up_to('_branching_point_2', callback)
            funcs_to_drop2 = ("AveragePooling_2", "Convolution_41", "ReLU_42",
                              "Affine_3", "ReLU_43", "Dropout_2", "Affine_4",
                              "SoftmaxCrossEntropy_2", "MulScalarLoss2")
        callback.drop_function(*funcs_to_drop1)
        callback.drop_function(*funcs_to_drop2)
        if not training:
            callback.remove_and_rewire('Dropout_3')
            callback.fix_parameters()
        self.use_up_to(use_up_to, callback)
        batch_size = input_var.shape[0]
        net = self.nnp.get_network('Train',
                                   batch_size=batch_size,
                                   callback=callback)
        if returns_net:
            return net
        elif with_aux_tower:
            return list(net.outputs.values())
        else:
            return list(net.outputs.values())[0]
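
The auxiliary classifier branches ('_aux_classifier_1/2') and the 'AveragePooling_3' global pool point to GoogLeNet. A minimal sketch, assuming the `GoogLeNet` class from `nnabla.models.imagenet`; with `with_aux_tower=True` (training only) the call returns a list of output variables instead of a single one:

import nnabla as nn
from nnabla.models.imagenet import GoogLeNet  # assumed model-zoo class

x = nn.Variable((8, 3, 224, 224))
outputs = GoogLeNet()(x, training=True, with_aux_tower=True)
# A list of outputs (main head plus auxiliary heads); the exact number and
# ordering depend on the network definition stored in the .nnp file.
print(len(outputs))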
Example #7
    def __call__(self,
                 input_var=None,
                 use_from=None,
                 use_up_to='classifier',
                 training=False,
                 force_global_pooling=False,
                 check_global_pooling=True,
                 returns_net=False,
                 verbose=0,
                 with_aux_tower=False):

        input_var = self.get_input_var(input_var)

        callback = NnpNetworkPass(verbose)
        callback.remove_and_rewire('ImageAugmentationX')
        callback.set_variable('TrainingInput', input_var)
        self.use_up_to(use_up_to, callback)
        if not training:
            callback.remove_and_rewire('TrainNet/Dropout')
            callback.remove_and_rewire('TrainNet/Dropout_2')
            callback.fix_parameters()
        batch_size = input_var.shape[0]
        net = self.nnp.get_network('Training',
                                   batch_size=batch_size,
                                   callback=callback)
        if returns_net:
            return net
        else:
            return list(net.outputs.values())[0]
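
This variant ('TrainingInput', 'TrainNet/Dropout') does not name its model, but all of these `__call__` methods share the `returns_net` switch: passing `returns_net=True` returns the whole rewritten network instead of its first output. A sketch reusing the assumed `VGG16` class from Example #4:

import nnabla as nn
from nnabla.models.imagenet import VGG16  # assumed model-zoo class

x = nn.Variable((1, 3, 224, 224))
net = VGG16()(x, returns_net=True)
print(list(net.outputs.keys()))  # inspect the named outputs of the network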
Example #8
def DLAUp(x, test, residual_root=False, channel_last=False):
    r, hidden = dla_imagenet(x,
                             num_classes=1000,
                             num_layers=34,
                             test=test,
                             channel_last=channel_last)
    callback = NnpNetworkPass(True)
    callback.remove_and_rewire('fc')
    ochannels = [256, 128, 64, 32]
    with nn.parameter_scope("up16"):
        x = upsample(hidden['level5'],
                     ochannels[0],
                     test,
                     kernel_size=4,
                     channel_last=channel_last)
        hidden['up16'] = x
    with nn.parameter_scope("up8"):
        x = root(x, [hidden['level4']],
                 ochannels[0],
                 test,
                 kernel_size=3,
                 channel_last=channel_last)
        x = upsample(x,
                     ochannels[1],
                     test,
                     kernel_size=4,
                     channel_last=channel_last)
        hidden['up8'] = x
    with nn.parameter_scope("up4"):
        with nn.parameter_scope("residual_level3"):
            level4up = upsample(hidden['level4'],
                                ochannels[1],
                                test,
                                kernel_size=4,
                                channel_last=channel_last)
            with nn.parameter_scope("level3up_root"):
                level3up = root(level4up, [hidden['level3']],
                                ochannels[1],
                                test,
                                kernel_size=3,
                                channel_last=channel_last)
            with nn.parameter_scope("x_root"):
                x = root(x, [level3up],
                         ochannels[1],
                         test,
                         kernel_size=1,
                         channel_last=channel_last)
        x = upsample(x,
                     ochannels[2],
                     test,
                     kernel_size=4,
                     channel_last=channel_last)
        hidden['up4'] = x
    with nn.parameter_scope("up2_b"):
        level3up_b = upsample(level3up,
                              ochannels[2],
                              test,
                              kernel_size=4,
                              channel_last=channel_last)
    with nn.parameter_scope("up2_c"):
        level3up_c = upsample(hidden['level3'],
                              ochannels[2],
                              test,
                              kernel_size=4,
                              channel_last=channel_last)
        with nn.parameter_scope("level3up_c_root"):
            level3up_c = root(hidden['level2'], [level3up_c],
                              ochannels[2],
                              test,
                              kernel_size=3,
                              channel_last=channel_last)
        with nn.parameter_scope("level2up_root"):
            level2up = root(level3up_b, [level3up_c],
                            ochannels[2],
                            test,
                            kernel_size=3,
                            channel_last=channel_last)
        with nn.parameter_scope("x_root"):
            x = root(x, [level2up],
                     ochannels[2],
                     test,
                     kernel_size=3,
                     channel_last=channel_last)
    return x
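
A brief usage sketch for `DLAUp`; it assumes the surrounding module (this looks like a CenterNet-style backbone built on nnabla) provides `dla_imagenet`, `upsample`, and `root`, and that the input spatial size is divisible by 32 so the level2..level5 feature maps align during upsampling:

import nnabla as nn

x = nn.Variable((1, 3, 512, 512))  # 512x512 input size is an assumption
features = DLAUp(x, test=True)     # aggregated high-resolution feature map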