Code Example #1
import torch.nn as nn

# SampleSoftmax is assumed to be defined elsewhere in the codebase; it
# normalizes the final single-channel conv map into an attention
# distribution over spatial positions.


class AttentionModelColonCancer(nn.Module):
    def __init__(self, squeeze_channels=False, softmax_smoothing=0.0):
        super(AttentionModelColonCancer, self).__init__()

        conv1 = nn.Conv2d(in_channels=3,
                          out_channels=8,
                          kernel_size=3,
                          padding_mode='zeros',
                          padding=1)
        relu1 = nn.ReLU()

        conv2 = nn.Conv2d(in_channels=8,
                          out_channels=8,
                          kernel_size=3,
                          padding_mode='zeros',
                          padding=1)
        relu2 = nn.ReLU()

        conv3 = nn.Conv2d(in_channels=8,
                          out_channels=1,
                          kernel_size=3,
                          padding_mode='zeros',
                          padding=1)

        sample_softmax = SampleSoftmax(squeeze_channels, softmax_smoothing)

        # Full attention head: conv stack -> single-channel map ->
        # SampleSoftmax distribution over the spatial positions.
        self.forward_pass = nn.Sequential(conv1, relu1, conv2, relu2, conv3,
                                          sample_softmax)

    def forward(self, x):
        # Presumably the module just applies the stored Sequential.
        return self.forward_pass(x)
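
A minimal smoke test for the module above. The input size is made up, and we assume SampleSoftmax keeps the spatial layout of the map it normalizes:

import torch

model = AttentionModelColonCancer(squeeze_channels=True)
x = torch.randn(4, 3, 100, 100)   # dummy batch: (N, C, H, W)
att = model(x)                    # attention distribution over the grid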
Code Example #2
import torch.nn as nn

# As in Example #1, SampleSoftmax is assumed to be defined elsewhere.


class AttentionModelTrafficSigns(nn.Module):
    def __init__(self, squeeze_channels=False, softmax_smoothing=0.0):
        super(AttentionModelTrafficSigns, self).__init__()

        # A 'valid' convolution means no padding. In PyTorch that is
        # padding=0 (the default) or, since 1.9, padding='valid';
        # padding_mode='valid' is not a legal value and raises an error.
        conv1 = nn.Conv2d(in_channels=3,
                          out_channels=8,
                          kernel_size=3,
                          padding='valid')
        relu1 = nn.ReLU()

        conv2 = nn.Conv2d(in_channels=8,
                          out_channels=16,
                          kernel_size=3,
                          padding='valid')
        relu2 = nn.ReLU()

        conv3 = nn.Conv2d(in_channels=16,
                          out_channels=32,
                          kernel_size=3,
                          padding='valid')
        relu3 = nn.ReLU()

        conv4 = nn.Conv2d(in_channels=32,
                          out_channels=1,
                          kernel_size=3,
                          padding='valid')

        pool = nn.MaxPool2d(kernel_size=8)
        sample_softmax = SampleSoftmax(squeeze_channels, softmax_smoothing)

        # part1 computes intermediate features; part2 reduces them to a
        # pooled single-channel attention map.
        self.part1 = nn.Sequential(conv1, relu1, conv2, relu2, conv3, relu3)
        self.part2 = nn.Sequential(conv4, pool, sample_softmax)

    def forward(self, x):
        # Presumably the two parts are applied back to back.
        return self.part2(self.part1(x))
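
A quick shape check with a made-up 96x96 input (each 'valid' 3x3 conv trims one pixel per side; the final pool divides the sides by 8):

import torch

model = AttentionModelTrafficSigns()
x = torch.randn(1, 3, 96, 96)    # dummy low-resolution view
h = model.part1(x)               # three valid convs: 96 -> 94 -> 92 -> 90
print(h.shape)                   # torch.Size([1, 32, 90, 90])
# part2 then maps 90 -> 88 (conv4) -> 11 (MaxPool2d(8)) before SampleSoftmax.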
Code Example #3
import torch.nn as nn

# As above, SampleSoftmax is assumed to be defined elsewhere.


class AttentionModelMNIST(nn.Module):
    def __init__(self, squeeze_channels=False, softmax_smoothing=0.0):
        super(AttentionModelMNIST, self).__init__()

        self.squeeze_channels = squeeze_channels
        self.softmax_smoothing = softmax_smoothing

        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=8,
                               kernel_size=3,
                               padding=1,
                               padding_mode='reflect')
        self.tanh1 = nn.Tanh()

        self.conv2 = nn.Conv2d(in_channels=8,
                               out_channels=8,
                               kernel_size=3,
                               padding=1,
                               padding_mode='reflect')
        self.tanh2 = nn.Tanh()

        self.conv3 = nn.Conv2d(in_channels=8,
                               out_channels=1,
                               kernel_size=3,
                               padding=1,
                               padding_mode='reflect')

        self.sample_softmax = SampleSoftmax(squeeze_channels,
                                            softmax_smoothing)
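
Only __init__ is shown here; since the layers are stored individually, the forward pass presumably chains them in order. A hypothetical sketch, not taken from the source:

    def forward(self, x):
        x = self.tanh1(self.conv1(x))
        x = self.tanh2(self.conv2(x))
        x = self.conv3(x)
        return self.sample_softmax(x)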
Code Example #4
# Method excerpt: Conv2D, MaxPool2D and Sequential come from
# tensorflow.keras, SampleSoftmax from the attention-sampling library;
# self.SHAPE_SMALL and self._conv_args are attributes of the enclosing
# class (not shown).
    def _get_attention(self):
        return Sequential([
            Conv2D(8, input_shape=self.SHAPE_SMALL, **self._conv_args),
            MaxPool2D(),
            Conv2D(8, **self._conv_args),
            Conv2D(1, kernel_size=3, padding="same"),
            SampleSoftmax(squeeze_channels=True)
        ])
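
For reference, a plausible and entirely hypothetical definition of the class attributes this excerpt relies on, chosen to be consistent with the other examples:

# Hypothetical values; the real class defines these elsewhere.
SHAPE_SMALL = (32, 32, 3)            # low-resolution input shape
_conv_args = dict(kernel_size=3,     # settings shared by the first convs
                  activation="relu",
                  padding="same")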
Code Example #5
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.regularizers import l2
# Import path as in the attention-sampling library's examples:
from ats.utils.layers import SampleSoftmax


def attention(x):
    # Settings shared by the first three convolutions.
    params = dict(
        activation="relu",
        padding="valid",
        kernel_regularizer=l2(1e-5)
    )
    x = Conv2D(8, kernel_size=3, **params)(x)
    x = Conv2D(16, kernel_size=3, **params)(x)
    x = Conv2D(32, kernel_size=3, **params)(x)
    x = Conv2D(1, kernel_size=3)(x)
    x = MaxPooling2D(pool_size=8)(x)
    x = SampleSoftmax(squeeze_channels=True, smooth=1e-4)(x)

    return x
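
A hypothetical way to wrap the function into a standalone Keras model (the input size is an assumption):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

x_low = Input(shape=(96, 96, 3))     # assumed low-resolution input
model = Model(inputs=x_low, outputs=attention(x_low))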
Code Example #6
from tensorflow.keras.layers import Conv2D, Dense, GlobalMaxPooling2D, Input
from tensorflow.keras.models import Model, Sequential

# The ats import paths below follow the attention-sampling library's
# published examples; adjust them if your installed version differs.
from ats.core import attention_sampling
from ats.utils.layers import L2Normalize, SampleSoftmax
from ats.utils.regularizers import multinomial_entropy


def get_model(outputs, width, height, scale, n_patches, patch_size, reg):
    # Define the shapes: a full-resolution view and the downscaled view
    # on which the attention map is computed.
    shape_high = (height, width, 1)
    shape_low = (int(height * scale), int(width * scale), 1)

    # Make the attention and feature models
    attention = Sequential([
        Conv2D(8,
               kernel_size=3,
               activation="tanh",
               padding="same",
               input_shape=shape_low),
        Conv2D(8, kernel_size=3, activation="tanh", padding="same"),
        Conv2D(1, kernel_size=3, padding="same"),
        SampleSoftmax(squeeze_channels=True, smooth=1e-5)
    ])
    feature = Sequential([
        Conv2D(32, kernel_size=7, activation="relu", input_shape=shape_high),
        Conv2D(32, kernel_size=3, activation="relu"),
        Conv2D(32, kernel_size=3, activation="relu"),
        Conv2D(32, kernel_size=3, activation="relu"),
        GlobalMaxPooling2D(),
        L2Normalize()
    ])

    # Let's build the attention sampling network
    x_low = Input(shape=shape_low)
    x_high = Input(shape=shape_high)
    # `attention` (the Sequential) is passed in and the name is then rebound
    # to the returned attention-map tensor; the shadowing is benign because
    # the argument is evaluated before the assignment.
    features, attention, patches = attention_sampling(
        attention,
        feature,
        patch_size,
        n_patches,
        replace=False,
        attention_regularizer=multinomial_entropy(reg))([x_low, x_high])
    y = Dense(outputs, activation="softmax")(features)

    return (Model(inputs=[x_low, x_high], outputs=[y]),
            Model(inputs=[x_low, x_high], outputs=[attention, patches]))
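
A hypothetical call with made-up hyperparameters, showing how the two returned models split the work (training versus inspection):

model, inspect = get_model(outputs=10, width=1500, height=1125, scale=0.15,
                           n_patches=10, patch_size=(50, 50), reg=1e-4)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# `model` trains on [x_low, x_high] input pairs; `inspect` returns the
# attention map and the sampled patches for visualization.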