Code Example #1
from keras.backend import resize_images

def resize(img):
    return resize_images(img, 66, 200, 'channels_first')
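
Worth noting when reading this snippet: keras.backend.resize_images treats its second and third arguments as integer height and width scale factors, not a target size, so 66 and 200 here act as multiplicative factors on the spatial dimensions. A minimal sketch of the factor semantics (the input shape and the factors below are chosen purely for illustration):

import numpy as np
from keras import backend as K

x = K.constant(np.ones((1, 3, 4, 4)))            # channels_first: (batch, channels, rows, cols)
y = K.resize_images(x, 2, 3, 'channels_first')   # scale rows by 2, cols by 3
print(K.eval(y).shape)                           # (1, 3, 8, 12)
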
Code Example #2
    def call(self, inputs, **kwargs):
        return K.resize_images(inputs,
                               self.height,
                               self.width,
                               data_format="channels_last",
                               interpolation=self.interpolation)
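
The class surrounding this call method is not shown above. As a point of reference, here is a minimal, self-contained sketch of a layer wrapping K.resize_images in this way; the class name, constructor arguments, default factors, and compute_output_shape are assumptions for illustration, not the original code:

from keras import backend as K
from keras.layers import Layer

class ResizeImages(Layer):  # hypothetical wrapper, not the original class
    def __init__(self, height=2, width=2, interpolation='nearest', **kwargs):
        super(ResizeImages, self).__init__(**kwargs)
        self.height = height              # integer height scale factor
        self.width = width                # integer width scale factor
        self.interpolation = interpolation

    def compute_output_shape(self, input_shape):
        # channels_last: (batch, rows, cols, channels)
        rows = input_shape[1] * self.height if input_shape[1] is not None else None
        cols = input_shape[2] * self.width if input_shape[2] is not None else None
        return (input_shape[0], rows, cols, input_shape[3])

    def call(self, inputs, **kwargs):
        return K.resize_images(inputs,
                               self.height,
                               self.width,
                               data_format="channels_last",
                               interpolation=self.interpolation)
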
Code Example #3
    def build_explanation_model(self,
                                input_dim,
                                output_dim,
                                loss,
                                downsample_factors=(1, )):
        num_indices, num_channels, steps, downsampling_factor =\
            MaskingUtil.get_input_constants(input_dim, downsample_factors)

        if downsampling_factor != 1 and num_indices is None:
            raise ValueError(
                "Attribution downsampling is not supported for variable length inputs. "
                "Please pad your data samples to the same size to use downsampling."
            )

        input_shape = (input_dim, ) if not isinstance(
            input_dim, collections.abc.Sequence) else input_dim
        input_layer = Input(shape=input_shape)
        last_layer = self.build(input_layer)

        if num_indices is None:
            last_layer = Dense(1, activation="linear")(last_layer)
            last_layer = Flatten()(last_layer)  # None * None outputs
            last_layer = Lambda(
                K.softmax, output_shape=K.int_shape(last_layer))(last_layer)
        else:
            last_layer = Flatten()(last_layer)
            last_layer = Dense(num_indices, activation="softmax")(last_layer)

        # Prepare extra inputs for causal loss.
        all_auxiliary_outputs = Input(shape=(output_dim, ), name="all")
        all_but_one_auxiliary_outputs_input = Input(shape=(num_indices,
                                                           output_dim),
                                                    name="all_but_one")

        if num_indices is not None:
            all_but_one_auxiliary_outputs = Lambda(lambda x: tf.unstack(
                x, axis=1))(all_but_one_auxiliary_outputs_input)
            if K.int_shape(all_but_one_auxiliary_outputs_input)[1] == 1:
                all_but_one_auxiliary_outputs = [all_but_one_auxiliary_outputs]
        else:
            all_but_one_auxiliary_outputs = all_but_one_auxiliary_outputs_input

        all_but_one_auxiliary_outputs = Concatenate()(
            all_but_one_auxiliary_outputs)

        causal_loss_fun = CausalLoss(num_indices=num_indices,
                                     loss_function=loss)

        if downsampling_factor != 1:
            last_layer = Reshape(tuple(steps) + (1, ))(last_layer)

            if len(steps) == 1:
                # Add a dummy dimension to enable usage of resize_images.
                last_layer = Reshape(tuple(steps) + (1, 1))(last_layer)
                last_layer = Lambda(lambda x: resize_images(
                    x,
                    height_factor=downsample_factors[0],
                    width_factor=1,
                    data_format="channels_last"))(last_layer)
            elif len(steps) == 2:
                last_layer = Lambda(lambda x: resize_images(
                    x,
                    height_factor=downsample_factors[0],
                    width_factor=downsample_factors[1],
                    data_format="channels_last"))(last_layer)
            elif len(steps) == 3:
                last_layer = Lambda(lambda x: resize_volumes(
                    x,
                    depth_factor=downsample_factors[0],
                    height_factor=downsample_factors[1],
                    width_factor=downsample_factors[2],
                    data_format="channels_last"))(last_layer)
            else:
                raise ValueError(
                    "Attribution maps of larger dimensionality than 3D data are not currently supported. "
                    "Requested output dim was: {}.".format(len(steps)))

            attribution_shape = Validation.get_attribution_shape_from_input_shape(
                num_samples=1, input_dim=input_dim)[1:]
            collapsed_attribution_shape = (int(np.prod(attribution_shape)), )
            last_layer = Reshape(collapsed_attribution_shape)(last_layer)

            # Re-normalise to sum = 1 after resizing (sum = downsampling_factor after resizing).
            last_layer = Lambda(lambda x: x / float(downsampling_factor))(
                last_layer)

        final_layer = Concatenate()(
            [last_layer, all_but_one_auxiliary_outputs, all_auxiliary_outputs])

        model = Model(inputs=[
            input_layer, all_auxiliary_outputs,
            all_but_one_auxiliary_outputs_input
        ],
                      outputs=final_layer)

        model = self.compile_model(model,
                                   main_losses=causal_loss_fun,
                                   learning_rate=self.learning_rate,
                                   optimizer=self.optimizer)

        prediction_model = Model(input_layer, last_layer)
        return model, prediction_model
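
One detail of this model worth calling out: nearest-neighbour resizing replicates every attribution value, so the resized softmax map no longer sums to one, which is why the code divides by downsampling_factor (presumably the product of the per-axis factors) at the end. A small numpy check of that effect, with an arbitrary array and factor:

import numpy as np

attribution = np.array([0.1, 0.2, 0.3, 0.4])    # sums to ~1.0
factor = 3
upsampled = np.repeat(attribution, factor)      # nearest-neighbour upsampling in 1D
print(upsampled.sum())                          # ~3.0: total mass grows by the factor
print((upsampled / factor).sum())               # ~1.0 after renormalisation
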
Code Example #4
    def call(self, input_tensor, training=None):
        # Move the input-capsule axis to the front and fold it into the batch
        # axis, so one shared 2D (de)convolution runs over every capsule type.
        input_transposed = tf.transpose(input_tensor, [3, 0, 1, 2, 4])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_transposed, [
            input_shape[1] * input_shape[0], self.input_height,
            self.input_width, self.input_num_atoms
        ])
        input_tensor_reshaped.set_shape(
            (None, self.input_height, self.input_width, self.input_num_atoms))

        if self.upsamp_type == 'resize':
            upsamp = K.resize_images(input_tensor_reshaped, self.scaling,
                                     self.scaling, 'channels_last')
            outputs = K.conv2d(upsamp,
                               kernel=self.W,
                               strides=(1, 1),
                               padding=self.padding,
                               data_format='channels_last')
        elif self.upsamp_type == 'subpix':
            conv = K.conv2d(input_tensor_reshaped,
                            kernel=self.W,
                            strides=(1, 1),
                            padding='same',
                            data_format='channels_last')
            outputs = tf.depth_to_space(conv, self.scaling)
        else:
            batch_size = input_shape[1] * input_shape[0]

            # Infer the dynamic output shape:
            out_height = deconv_output_length(input_length=self.input_height,
                                              stride=self.scaling,
                                              filter_size=self.kernel_size,
                                              padding=self.padding)
            out_width = deconv_output_length(input_length=self.input_width,
                                             stride=self.scaling,
                                             filter_size=self.kernel_size,
                                             padding=self.padding)
            output_shape = (batch_size, out_height, out_width,
                            self.num_capsule * self.num_atoms)

            outputs = K.conv2d_transpose(input_tensor_reshaped,
                                         self.W,
                                         output_shape,
                                         (self.scaling, self.scaling),
                                         padding=self.padding,
                                         data_format='channels_last')

        votes_shape = K.shape(outputs)
        _, conv_height, conv_width, _ = outputs.get_shape()

        votes = K.reshape(outputs, [
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule, self.num_atoms
        ])
        votes.set_shape((None, self.input_num_capsule, conv_height.value,
                         conv_width.value, self.num_capsule, self.num_atoms))

        logit_shape = K.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
            self.num_capsule
        ])
        biases_replicated = K.tile(self.b,
                                   [votes_shape[1], votes_shape[2], 1, 1])

        activations = update_routing(votes=votes,
                                     biases=biases_replicated,
                                     logit_shape=logit_shape,
                                     num_dims=6,
                                     input_dim=self.input_num_capsule,
                                     output_dim=self.num_capsule,
                                     num_routing=self.routings)

        return activations
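
The three branches in this call method correspond to different learned upsampling schemes: 'resize' applies a nearest-neighbour K.resize_images (the default interpolation) followed by a stride-1 convolution, 'subpix' applies a convolution followed by a depth-to-space (sub-pixel) rearrangement, and the default branch uses a transposed convolution via K.conv2d_transpose. A minimal shape check of the sub-pixel rearrangement, with arbitrary sizes chosen only for illustration:

import tensorflow as tf

x = tf.zeros((1, 8, 8, 16))                   # 16 = 4 output channels * scaling**2, with scaling = 2
y = tf.nn.depth_to_space(x, block_size=2)     # rearrange channel blocks into 2x2 spatial blocks
print(y.shape)                                # (1, 16, 16, 4)
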
Code Example #5
    # index+1..index+4 are the batch-normalisation arrays that follow each
    # bias-free convolution below; keep only the entries of the retained filters.
    for i in range(1, 5):
        cloned_weights[index + i] = cloned_weights[index + i][active_weights_index]
    if r != (len(conv_weights_index) - 1):
        # The next layer is another convolution: prune its input channels to match.
        cloned_weights[index + 5] = cloned_weights[index + 5][:, :, active_weights_index, :]
    else:
        # The next layer is dense: keep only the kernel rows belonging to the
        # retained channels (each channel contributes a 6*6 block of flattened inputs).
        active_index = []
        for j in active_weights_index:
            active_index += list(range(j * 6 * 6, (j + 1) * 6 * 6))
        cloned_weights[index + 5] = cloned_weights[index + 5][active_index, :]
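
The slicing above appears to implement channel pruning by fancy indexing of the cloned weight arrays: drop filters of one convolution, then drop the matching input channels (or dense rows) of the layer that consumes them. A minimal illustration of the indexing pattern; all shapes and indices below are dummy values, not taken from the snippet:

import numpy as np

kernel = np.zeros((3, 3, 16, 32))         # (kh, kw, in_channels, out_channels)
keep = [0, 3, 7]                          # indices of filters to keep
pruned_out = kernel[:, :, :, keep]        # prune this layer's output channels
next_kernel = np.zeros((3, 3, 32, 64))
pruned_in = next_kernel[:, :, keep, :]    # prune the matching input channels downstream
print(pruned_out.shape, pruned_in.shape)  # (3, 3, 16, 3) (3, 3, 3, 64)
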


input_shape = (32, 32, 3)
num_classes = 10
sparse_model = keras.models.Sequential([
    # Upsample the 32x32 inputs by a factor of 7 (to 224x224) for the AlexNet-style stack below.
    layers.Lambda(lambda img: K.resize_images(img, 7, 7, data_format='channels_last'), input_shape=input_shape),
    layers.ZeroPadding2D(padding=(2, 2)),
    layers.Conv2D(filters=cloned_weights[conv_weights_index[0]].shape[3], kernel_size=(11,11), strides=(4,4), activation='relu', use_bias=False),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    layers.Conv2D(filters=cloned_weights[conv_weights_index[1]].shape[3], kernel_size=(5,5), strides=(1,1), activation='relu', padding="same", use_bias=False),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    layers.Conv2D(filters=cloned_weights[conv_weights_index[2]].shape[3], kernel_size=(3,3), strides=(1,1), activation='relu', padding="same", use_bias=False),
    layers.BatchNormalization(),
    layers.Conv2D(filters=cloned_weights[conv_weights_index[3]].shape[3], kernel_size=(3,3), strides=(1,1), activation='relu', padding="same", use_bias=False),
    layers.BatchNormalization(),
    layers.Conv2D(filters=cloned_weights[conv_weights_index[4]].shape[3], kernel_size=(3,3), strides=(1,1), activation='relu', padding="same", use_bias=False),
    layers.BatchNormalization(),
    layers.MaxPool2D(pool_size=(3,3), strides=(2,2)),
    layers.Flatten(),