Code example #1
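A parameterized smoke test: it builds a LowRankLocallyConnected2D layer with different kernel sizes, dilation rates, weight-normalization modes, data formats, and the input-dependent option, then runs a forward pass on random images.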
    def test_different_filters(self, normalize_weights, input_dependent,
                               data_format, kernel_size, dilations):
        spatial_rank = 2
        filters = 16
        if data_format == 'channels_last':
            input_shape = (1, 32, 32, 3)
        elif data_format == 'channels_first':
            input_shape = (1, 3, 32, 32)

        images = tf.constant(np.random.randn(*input_shape), dtype=tf.float32)

        layer = layers.LowRankLocallyConnected2D(
            filters=filters,
            kernel_size=kernel_size,
            dilations=dilations,
            strides=(1, 1),
            padding='same',
            spatial_rank=spatial_rank,
            normalize_weights=normalize_weights,
            share_row_combining_weights=False,
            share_col_combining_weights=False,
            data_format=data_format,
            input_dependent=input_dependent)

        output = layer(images)

        self.evaluate(tf.global_variables_initializer())
        self.evaluate(output)
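Since padding='same' and the strides are (1, 1), the output is expected to keep the 32x32 spatial size with 16 channels; the test itself only verifies that the layer builds and that a forward pass evaluates without error.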
Code example #2
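Constructor of a SimpleNetwork Keras model. Depending on the configuration it stacks Conv2D, parameter-matched wide Conv2D, LocallyConnected2D, or LowRankLocallyConnected2D layers, each optionally followed by batch normalization and always by a ReLU, and it finishes with global average pooling (or flattening) and a dense logits layer.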
    def __init__(self, config, variable_scope='simple_network'):
        super(SimpleNetwork, self).__init__()
        self.variable_scope = variable_scope
        self.config = copy.deepcopy(config)
        filters_list = self.config.num_filters_list
        depth = len(filters_list)
        self.pass_is_training_list = []
        self.layers_list = []

        if self.config.num_channels < 1:
            raise ValueError('num_channels should be > 0')

        input_channels = self.config.num_channels
        if self.config.coord_conv:
            # Add two coordinate conv channels.
            input_channels = input_channels + 2

        if len(self.config.layer_types) < depth:
            self.config.layer_types.extend(
                ['conv2d'] * (depth - len(self.config.layer_types)))
        chin = input_channels
        for i, (kernel_size, num_filters, strides, layer_type) in enumerate(
                zip(self.config.kernel_size_list, filters_list,
                    self.config.strides_list, self.config.layer_types)):
            padding = 'valid'
            if layer_type == 'conv2d':
                chout = num_filters
                layer = tf.keras.layers.Conv2D(
                    filters=chout,
                    kernel_size=kernel_size,
                    strides=(strides, strides),
                    padding=padding,
                    activation=None,
                    use_bias=not self.config.batch_norm,
                    kernel_initializer=self.config.kernel_initializer,
                    name=os.path.join(self.variable_scope, 'layer%d' % i,
                                      layer_type))
            elif layer_type == 'wide_conv2d':
                # Conv layer with a parameter count matched to the low-rank locally connected layer.
                if self.config.rank < 1:
                    raise ValueError('rank should be > 0 for %s layer.' %
                                     layer_type)
                chout = int((self.config.rank * chin + num_filters) /
                            float(chin + num_filters) * num_filters)
                layer = tf.keras.layers.Conv2D(
                    filters=(chout if i < (depth - 1)
                             else int(num_filters * self.config.rank)),
                    kernel_size=kernel_size,
                    strides=(strides, strides),
                    padding=padding,
                    activation=None,
                    use_bias=not self.config.batch_norm,
                    kernel_initializer=self.config.kernel_initializer,
                    name=os.path.join(self.variable_scope, 'layer%d' % i,
                                      layer_type))

            elif layer_type == 'locally_connected2d':
                # Full locally connected layer.
                chout = num_filters
                layer = tf.keras.layers.LocallyConnected2D(
                    filters=chout,
                    kernel_size=(kernel_size, kernel_size),
                    strides=(strides, strides),
                    padding=padding,
                    activation=None,
                    use_bias=True,  # Bias always enabled here, unlike the conv branches.
                    name=os.path.join(self.variable_scope, 'layer%d' % i,
                                      layer_type),
                    kernel_initializer=self.config.kernel_initializer)
            elif layer_type == 'low_rank_locally_connected2d':
                if self.config.rank < 1:
                    raise ValueError('rank should be > 0 for %s layer.' %
                                     layer_type)
                chout = num_filters
                layer = layers.LowRankLocallyConnected2D(
                    filters=chout,
                    kernel_size=(kernel_size, kernel_size),
                    strides=(strides, strides),
                    padding=padding,
                    activation=None,
                    use_bias=not self.config.batch_norm,
                    name=os.path.join(self.variable_scope, 'layer%d' % i,
                                      layer_type),
                    kernel_initializer=self.config.kernel_initializer,
                    combining_weights_initializer=(
                        self.config.combining_weights_initializer),
                    spatial_rank=self.config.rank,
                    normalize_weights=self.config.normalize_weights,
                    input_dependent=self.config.input_dependent,
                    share_row_combining_weights=(
                        self.config.share_row_combining_weights),
                    share_col_combining_weights=(
                        self.config.share_col_combining_weights))
            else:
                raise ValueError('Unrecognized layer type: %s.' % layer_type)

            chin = chout

            self.layers_list.append(layer)
            self.pass_is_training_list.append(False)
            if self.config.batch_norm:
                layer = tf.keras.layers.BatchNormalization(trainable=True,
                                                           momentum=MOMENTUM,
                                                           epsilon=EPS)

                self.layers_list.append(layer)
                self.pass_is_training_list.append(True)

            layer = tf.keras.layers.ReLU()
            self.layers_list.append(layer)
            self.pass_is_training_list.append(False)

        if self.config.global_avg_pooling:
            self.layers_list.append(tf.keras.layers.GlobalAveragePooling2D())
        else:
            self.layers_list.append(tf.keras.layers.Flatten())

        self.pass_is_training_list.append(False)

        self.layers_list.append(
            tf.keras.layers.Dense(units=self.config.num_classes,
                                  activation=None,
                                  use_bias=True,
                                  name='logits'))
        self.pass_is_training_list.append(False)
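For orientation, here is a minimal sketch of a configuration such a constructor could consume. The field names are taken from the code above; the ml_collections.ConfigDict container and every concrete value are assumptions for illustration only, and building the model additionally requires the surrounding module (the layers import and the MOMENTUM and EPS constants).

import ml_collections

# Hypothetical two-layer configuration; field names mirror the constructor
# above, values are placeholders.
config = ml_collections.ConfigDict()
config.num_classes = 10
config.num_channels = 3                 # RGB input.
config.coord_conv = False               # No extra coordinate channels.
config.num_filters_list = [32, 64]      # One entry per layer.
config.kernel_size_list = [3, 3]
config.strides_list = [1, 2]
config.layer_types = ['conv2d', 'low_rank_locally_connected2d']
config.rank = 2                         # Spatial rank of the combining weights.
config.batch_norm = True
config.global_avg_pooling = True
config.kernel_initializer = 'glorot_uniform'
config.combining_weights_initializer = None  # Assumed to select the layer default.
config.normalize_weights = 'softmax'
config.input_dependent = False
config.share_row_combining_weights = True
config.share_col_combining_weights = True

model = SimpleNetwork(config)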
Code example #3
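A test of the variables created by LowRankLocallyConnected2D: all variables must be tracked in layer.weights, the number of variables must match the weight-sharing setting, the output shape must be correct, and the combining weights must satisfy the requested normalization. When the combining weights are shared along exactly one dimension, one spatial dimension of the input is left unknown to exercise variable-size inputs.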
    def test_variable_shapes(self, normalize_weights, share_combining_weights,
                             data_format):
        spatial_rank = 2
        kernel_size = 3
        filters = 16
        if data_format == 'channels_last':
            input_shape = [2, 32, 32, 3]
        elif data_format == 'channels_first':
            input_shape = [2, 3, 32, 32]

        images = tf.constant(np.random.randn(*tuple(input_shape)),
                             dtype=tf.float32)

        # Test handling of a variable input size when weights are shared along one dimension.
        if share_combining_weights == (True, False):
            input_shape_ = input_shape[:]
            if data_format == 'channels_last':
                input_shape_[2] = None
            elif data_format == 'channels_first':
                input_shape_[3] = None
            images = tf.placeholder_with_default(images,
                                                 shape=tuple(input_shape_))

        elif share_combining_weights == (False, True):
            input_shape_ = input_shape[:]
            if data_format == 'channels_last':
                input_shape_[1] = None
            elif data_format == 'channels_first':
                input_shape_[2] = None
            images = tf.placeholder_with_default(images,
                                                 shape=tuple(input_shape_))

        layer = layers.LowRankLocallyConnected2D(
            filters=filters,
            kernel_size=(kernel_size, kernel_size),
            strides=(1, 1),
            spatial_rank=spatial_rank,
            normalize_weights=normalize_weights,
            share_row_combining_weights=share_combining_weights[0],
            share_col_combining_weights=share_combining_weights[1],
            data_format=data_format,
            input_dependent=False)
        output = layer(images)

        var_dict = {v.op.name: v for v in tf.global_variables()}

        # Make sure all generated weights are tracked in layer.weights.
        self.assertLen(var_dict, len(layer.weights))

        # Make sure the number of weights generated is correct.
        if share_combining_weights[0] and share_combining_weights[1]:
            # weights rows, weights cols, bias (rows, cols, channels), kernel bases
            self.assertLen(var_dict, 6)
        else:
            self.assertLen(var_dict, 4)

        self.evaluate(tf.global_variables_initializer())
        combining_weights = self.evaluate(layer.combining_weights)
        if data_format == 'channels_last':
            self.assertEqual(
                self.evaluate(output).shape,
                (input_shape[0], input_shape[1] - kernel_size + 1,
                 input_shape[2] - kernel_size + 1, filters))
        elif data_format == 'channels_first':
            self.assertEqual(
                self.evaluate(output).shape, (
                    input_shape[0],
                    filters,
                    input_shape[2] - kernel_size + 1,
                    input_shape[3] - kernel_size + 1,
                ))
        if normalize_weights == 'softmax':
            self.assertNDArrayNear(
                np.sum(combining_weights, axis=-1),
                np.ones(combining_weights.shape[:-1], dtype=np.float32),
                err=1e-5)
        elif normalize_weights == 'norm':
            self.assertNDArrayNear(
                np.sqrt(np.sum(combining_weights**2, axis=-1)),
                np.ones(combining_weights.shape[:-1], dtype=np.float32),
                err=1e-5)
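The final assertions encode the two normalization modes as constraints along the last axis of the combining weights: with 'softmax' the weights sum to one, with 'norm' they have unit L2 norm. A small NumPy sketch of the same constraints, using a placeholder [rows, cols, spatial_rank] shape that is only an assumption for illustration:

import numpy as np

w = np.random.randn(30, 30, 2)  # Placeholder [rows, cols, spatial_rank] weights.

# 'softmax': entries sum to 1 along the last axis.
softmaxed = np.exp(w) / np.sum(np.exp(w), axis=-1, keepdims=True)
assert np.allclose(np.sum(softmaxed, axis=-1), 1.0)

# 'norm': unit L2 norm along the last axis.
normed = w / np.linalg.norm(w, axis=-1, keepdims=True)
assert np.allclose(np.linalg.norm(normed, axis=-1), 1.0)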
Code example #4
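A correctness test: the combining weights and kernel bases of LowRankLocallyConnected2D are contracted into a full locally connected kernel, copied into a tf.keras.layers.LocallyConnected2D layer, and the outputs of the two layers are compared.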
    def test_correct_output(self, normalize_weights, input_dependent,
                            data_format, combining_weights_initializer):
        spatial_rank = 2
        kernel_size = 3
        filters = 16
        input_chs = 3
        if data_format == 'channels_last':
            input_shape = (1, 32, 32, input_chs)
        elif data_format == 'channels_first':
            input_shape = (1, input_chs, 32, 32)

        images = tf.constant(np.random.randn(*input_shape), dtype=tf.float32)
        layer1 = tf.keras.layers.LocallyConnected2D(
            filters=filters,
            kernel_size=(kernel_size, kernel_size),
            strides=(1, 1),
            padding='valid',
            data_format=data_format)

        layer2 = layers.LowRankLocallyConnected2D(
            filters=filters,
            kernel_size=(kernel_size, kernel_size),
            strides=(1, 1),
            padding='valid',
            spatial_rank=spatial_rank,
            normalize_weights=normalize_weights,
            combining_weights_initializer=combining_weights_initializer,
            share_row_combining_weights=False,
            share_col_combining_weights=False,
            data_format=data_format,
            input_dependent=input_dependent)

        output1 = layer1(images)
        output2 = layer2(images)

        assign_ops = []

        # Kernel from the locally connected layer.
        kernel1 = layer1.kernel

        combining_weights = layer2.combining_weights
        if input_dependent:
            combining_weights = tf.reduce_mean(combining_weights, axis=0)
        # Kernel from the low-rank locally connected layer.
        kernel2 = tf.tensordot(
            combining_weights,
            tf.reshape(layer2.kernel_bases,
                       (layer2.kernel_size[0], layer2.kernel_size[1],
                        input_chs, layer2.spatial_rank, layer2.filters)),
            [[-1], [-2]],
            name='kernel')
        kernel2 = kernel_low_rank_lc_to_lc(kernel2, data_format)

        assign_ops.append(tf.assign(kernel1, kernel2))

        # Check that the result matches the Keras LocallyConnected2D layer.
        self.evaluate(tf.global_variables_initializer())
        for op in assign_ops:
            self.evaluate(op)

        max_error = np.max(np.abs(self.evaluate(output1 - output2)))
        self.assertLess(max_error, 1e-5)
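The key step above collapses the low-rank factorization back into a full locally connected kernel: the combining weights are contracted with the reshaped kernel bases over the spatial_rank axis, giving one kernel per output position, and kernel_low_rank_lc_to_lc (presumably a helper defined alongside these tests) reorders the result into the layout tf.keras.layers.LocallyConnected2D expects. A NumPy sketch of just the contraction, with placeholder shapes:

import numpy as np

out_rows, out_cols, rank = 30, 30, 2     # e.g. 32x32 input, 3x3 kernel, 'valid'.
kh, kw, in_ch, filters = 3, 3, 3, 16

combining = np.random.randn(out_rows, out_cols, rank)
bases = np.random.randn(kh, kw, in_ch, rank, filters)

# Contract the rank axes: axis 2 of combining with axis 3 of bases.
kernel = np.tensordot(combining, bases, axes=[[2], [3]])
assert kernel.shape == (out_rows, out_cols, kh, kw, in_ch, filters)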
Code example #5
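A consistency test between the two parameterizations of the combining weights: one layer keeps a full grid of combining weights, the other factorizes them into per-row and per-column components. After the factorized weights are expanded into a full grid and assigned to the first layer, both layers must produce the same output.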
    def test_implementations(self, normalize_weights, data_format):
        spatial_rank = 2
        kernel_size = 3
        filters = 16
        if data_format == 'channels_last':
            input_shape = (2, 32, 32, 3)
        elif data_format == 'channels_first':
            input_shape = (2, 3, 32, 32)

        images = tf.constant(np.random.randn(*input_shape), dtype=tf.float32)
        layer1 = layers.LowRankLocallyConnected2D(
            filters=filters,
            kernel_size=(kernel_size, kernel_size),
            strides=(1, 1),
            padding='valid',
            spatial_rank=spatial_rank,
            normalize_weights=normalize_weights,
            share_row_combining_weights=False,
            share_col_combining_weights=False,
            data_format=data_format,
            input_dependent=False)

        layer2 = layers.LowRankLocallyConnected2D(
            filters=filters,
            kernel_size=(kernel_size, kernel_size),
            strides=(1, 1),
            padding='valid',
            spatial_rank=spatial_rank,
            normalize_weights=normalize_weights,
            share_row_combining_weights=True,
            share_col_combining_weights=True,
            data_format=data_format,
            input_dependent=False)

        output1 = layer1(images)
        weights1 = tf.global_variables()

        output2 = layer2(images)
        weights2 = list(set(tf.global_variables()) - set(weights1))

        # Weights from layer1 (a full grid of combining weights, no sharing).
        kernel_weights1 = [
            v for v in weights1 if 'combining_weights' in v.op.name
        ][0]
        spatial_bias1 = [v for v in weights1 if 'spatial_bias' in v.op.name][0]
        bias_channels1 = [v for v in weights1
                          if 'bias_channels' in v.op.name][0]
        kernel_bases1 = [v for v in weights1 if 'kernel_bases' in v.op.name][0]

        # Weights from layer2 (per-row and per-column combining weights).
        kernel_weights_col2 = [
            v for v in weights2 if 'combining_weights_col' in v.op.name
        ][0]
        kernel_weights_row2 = [
            v for v in weights2 if 'combining_weights_row' in v.op.name
        ][0]
        bias_row2 = [v for v in weights2 if 'bias_row' in v.op.name][0]
        bias_col2 = [v for v in weights2 if 'bias_col' in v.op.name][0]
        bias_channels2 = [v for v in weights2
                          if 'bias_channels' in v.op.name][0]
        kernel_bases2 = [v for v in weights2 if 'kernel_bases' in v.op.name][0]

        # Expand layer2's row/col weights into full weights and assign them to layer1.
        assign_ops = []
        assign_ops.append(tf.assign(kernel_bases1, kernel_bases2))
        assign_ops.append(tf.assign(spatial_bias1, bias_col2 + bias_row2))
        assign_ops.append(tf.assign(bias_channels1, bias_channels2))
        assign_ops.append(
            tf.assign(
                kernel_weights1, kernel_weights_col2[tf.newaxis] +
                kernel_weights_row2[:, tf.newaxis]))
        assign_ops = tf.group(assign_ops)

        # Test different implementations give same result.
        self.evaluate(tf.global_variables_initializer())
        self.evaluate(assign_ops)
        max_error = np.max(np.abs(self.evaluate(output1 - output2)))
        self.assertLess(max_error, 1e-5)
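The assignment above treats the factorized implementation's row and column weights as additive components of the full combining weights: broadcasting a [1, cols, ...] tensor against a [rows, 1, ...] tensor and summing yields the full [rows, cols, ...] grid that layer1 stores directly. A minimal NumPy sketch of that broadcast, with placeholder shapes:

import numpy as np

rows, cols, rank = 30, 30, 2                 # Illustrative sizes.
row_w = np.random.randn(rows, rank)
col_w = np.random.randn(cols, rank)

# Mirrors kernel_weights_col2[tf.newaxis] + kernel_weights_row2[:, tf.newaxis].
full_w = col_w[np.newaxis] + row_w[:, np.newaxis]
assert full_w.shape == (rows, cols, rank)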