Example #1
    def build(height, width, depth, classes, batch_size):
        # construct the input shape tuple
        input_shape = (batch_size, height, width, depth)

        # instantiate the model
        model = Model()

        # input layer
        model.add(Input(input_shape=input_shape, name="input"))

        # first CONV => RELU => POOL block
        model.add(Conv2D(20, (5, 5), padding_type="same", name="conv_1"))
        model.add(ReLU(name="relu_1"))
        model.add(
            MaxPooling2D(kernel_size=(2, 2), stride=(2, 2), name="pool_1"))

        # second CONV => RELU => POOL block
        model.add(Conv2D(50, (5, 5), padding_type="same", name="conv_2"))
        model.add(ReLU(name="relu_2"))
        model.add(
            MaxPooling2D(kernel_size=(2, 2), stride=(2, 2), name="pool_2"))

        # first and only set of FC => RELU layers
        model.add(Flatten(name="flatten"))
        model.add(Dense(500, name="fc_1"))
        model.add(ReLU(name="relu_3"))

        # softmax classifier
        model.add(Dense(classes, name="fc_2"))
        model.add(Softmax(name="softmax"))

        # return the constructed network architecture
        return model
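The Model/Input/Conv2D classes above appear to come from a custom, Keras-like framework (note `padding_type` and `stride` rather than Keras's `padding` and `strides`). For reference, a minimal sketch of the same LeNet-style stack in standard tf.keras is shown below; this is an assumption for illustration, not the original framework, the batch dimension is left implicit, and each ReLU is folded into its layer's `activation` argument:

    import tensorflow as tf

    def build_lenet_like(height, width, depth, classes):
        # hypothetical tf.keras equivalent of the build() above
        model = tf.keras.Sequential([
            tf.keras.Input(shape=(height, width, depth)),
            tf.keras.layers.Conv2D(20, (5, 5), padding="same", activation="relu"),
            tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
            tf.keras.layers.Conv2D(50, (5, 5), padding="same", activation="relu"),
            tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
            tf.keras.layers.Flatten(),                       # conv feature maps -> 1-D vector
            tf.keras.layers.Dense(500, activation="relu"),
            tf.keras.layers.Dense(classes, activation="softmax"),
        ])
        return model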
Example #2
    def __init__(self,
                 in_channels: int,
                 num_features: int,
                 dropout: float = 0.1,
                 temperature: float = None):
        super(AttentionProjector, self).__init__(in_channels, num_features)

        self.in_channels = in_channels
        self.num_features = num_features
        self.temperature = temperature if isinstance(
            temperature, float) else math.sqrt(num_features)

        self.nonlinear = nn.Sequential(
            collections.OrderedDict([('gap', nn.AdaptiveAvgPool2d(1)),
                                     ('flatten', Flatten()),
                                     ('linear',
                                      nn.Linear(in_channels, num_features)),
                                     ('relu', nn.ReLU(inplace=True))]))

        self.q_linear = nn.Linear(num_features, num_features, bias=False)
        self.k_linear = nn.Linear(num_features, num_features, bias=False)
        self.v_linear = nn.Linear(num_features, num_features, bias=True)

        self.softmax = nn.Softmax(dim=1)
        self.dropout = nn.Dropout(dropout) if isinstance(dropout,
                                                         float) else None

        self.initialize_weights()
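This constructor wires up a GAP => Flatten => Linear => ReLU projector plus query/key/value projections, with the temperature defaulting to sqrt(num_features). The forward pass of AttentionProjector is not part of this snippet, so the following is only a hedged sketch of the scaled dot-product attention such layers typically feed, using torch.nn.Flatten in place of the custom Flatten and hypothetical sizes:

    import math
    import torch
    import torch.nn as nn

    B, C, F = 8, 256, 128                       # batch, in_channels, num_features (hypothetical)
    feats = torch.randn(B, C, 7, 7)             # conv feature maps

    nonlinear = nn.Sequential(
        nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        nn.Linear(C, F), nn.ReLU(inplace=True))
    q_linear = nn.Linear(F, F, bias=False)
    k_linear = nn.Linear(F, F, bias=False)
    v_linear = nn.Linear(F, F, bias=True)

    h = nonlinear(feats)                        # (B, F) pooled embeddings
    q, k, v = q_linear(h), k_linear(h), v_linear(h)
    attn = torch.softmax(q @ k.t() / math.sqrt(F), dim=1)   # (B, B) attention weights
    out = attn @ v                              # (B, F) attended features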
Example #3
    def make_layers(num_features: int, num_classes: int, dropout: float = 0.0):
        layers = nn.Sequential(
            collections.OrderedDict([('flatten', Flatten()),
                                     ('dropout', nn.Dropout(p=dropout)),
                                     ('linear',
                                      nn.Linear(num_features, num_classes))]))

        return layers
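A quick shape check of this dropout classifier head, assuming the custom Flatten behaves like torch.nn.Flatten (hypothetical num_features=2048, num_classes=10, dropout=0.5):

    import torch
    import torch.nn as nn

    head = nn.Sequential(
        nn.Flatten(),
        nn.Dropout(p=0.5),
        nn.Linear(2048, 10))

    x = torch.randn(4, 2048, 1, 1)              # pooled feature maps
    print(head(x).shape)                        # torch.Size([4, 10])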
Example #4
    def make_layers(in_channels: int, num_features: int, dropout: float = 0.0):
        layers = nn.Sequential(
            collections.OrderedDict([('gap', nn.AdaptiveAvgPool2d(1)),
                                     ('flatten', Flatten()),
                                     ('dropout', nn.Dropout(p=dropout)),
                                     ('linear',
                                      nn.Linear(in_channels, num_features))]))

        return layers
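Unlike Example #3, the leading AdaptiveAvgPool2d(1) makes this head independent of the spatial size of the incoming feature maps. A small sketch with hypothetical sizes (in_channels=512, num_features=128):

    import torch
    import torch.nn as nn

    head = nn.Sequential(
        nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        nn.Dropout(p=0.1), nn.Linear(512, 128))

    print(head(torch.randn(2, 512, 7, 7)).shape)    # torch.Size([2, 128])
    print(head(torch.randn(2, 512, 14, 14)).shape)  # torch.Size([2, 128])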
Example #5
    def make_layers(in_channels: int, num_features: int):
        layers = nn.Sequential(
            collections.OrderedDict([
                ('gap', nn.AdaptiveAvgPool2d(1)), ('flatten', Flatten()),
                ('linear1', nn.Linear(in_channels, in_channels)),
                ('relu1', nn.ReLU(inplace=True)),
                ('linear2', nn.Linear(in_channels, num_features))
            ]))

        return layers
Example #6
    def make_layers(in_channels: int, hidden_size: int, output_size: int):
        layers = nn.Sequential(
            collections.OrderedDict([
                ('gap', nn.AdaptiveAvgPool2d(1)), ('flatten', Flatten()),
                ('linear1', nn.Linear(in_channels, hidden_size, bias=False)),
                ('bnorm1', nn.BatchNorm1d(hidden_size)),
                ('relu1', nn.ReLU(inplace=True)),
                ('linear2', nn.Linear(hidden_size, output_size, bias=True))
            ]))

        return layers
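A plausible reading of the bias=False choice on linear1 is that the following BatchNorm1d learns its own shift, which would make a separate bias redundant; note also that BatchNorm1d in training mode needs more than one sample per batch. A small sketch with hypothetical sizes:

    import torch
    import torch.nn as nn

    head = nn.Sequential(
        nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        nn.Linear(512, 256, bias=False), nn.BatchNorm1d(256),
        nn.ReLU(inplace=True), nn.Linear(256, 128))

    head.eval()                                  # eval mode uses running stats, so a batch of 1 works
    print(head(torch.randn(1, 512, 4, 4)).shape) # torch.Size([1, 128])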
Example #7
    def make_layers(num_patches: int, in_channels: int, num_classes: int,
                    dropout: float):
        layers = nn.Sequential(
            collections.OrderedDict([
                ('gap', nn.AdaptiveAvgPool2d(1)), ('flatten', Flatten()),
                ('dropout1', nn.Dropout(p=dropout)),
                ('linear1', nn.Linear(num_patches * in_channels, in_channels)),
                ('relu1', nn.ReLU(inplace=True)),
                ('dropout2', nn.Dropout(p=dropout)),
                ('linear2', nn.Linear(in_channels, num_classes))
            ]))

        return layers
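Because AdaptiveAvgPool2d(1) plus Flatten reduces the input to (batch, channels), linear1 only lines up if the incoming tensor carries num_patches * in_channels channels, e.g. per-patch embeddings stacked along the channel axis. A sketch under that assumption, with hypothetical sizes:

    import torch
    import torch.nn as nn

    num_patches, in_channels, num_classes = 4, 256, 10
    head = nn.Sequential(
        nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        nn.Dropout(p=0.1),
        nn.Linear(num_patches * in_channels, in_channels),
        nn.ReLU(inplace=True),
        nn.Dropout(p=0.1),
        nn.Linear(in_channels, num_classes))

    x = torch.randn(2, num_patches * in_channels, 3, 3)   # patch embeddings along the channel axis
    print(head(x).shape)                                  # torch.Size([2, 10])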
Example #8
    def build(height, width, depth, classes, batch_size):
        # initialize the input shape
        input_shape = (batch_size, height, width, depth)

        # initialize the model
        model = Model()

        # input layer
        model.add(Input(input_shape, name="input"))

        # first CONV => RELU => POOL block
        model.add(Conv2D(96, (11, 11), padding_type="same", name="conv_1-1"))
        model.add(ReLU(name="relu_1-1"))
        model.add(MaxPooling2D((3, 3), stride=(2, 2), name="pool_1"))

        # second CONV => RELU => POOL block
        model.add(Conv2D(256, (5, 5), padding_type="same", name="conv_2-1"))
        model.add(ReLU(name="relu_2-1"))
        model.add(MaxPooling2D((3, 3), stride=(2, 2), name="pool_2"))

        # first (and only) ( CONV => RELU ) * 3 => POOL block
        model.add(Conv2D(384, (3, 3), padding_type="same", name="conv-3_1"))
        model.add(ReLU(name="relu_3-1"))
        model.add(Conv2D(384, (3, 3), padding_type="same", name="conv_3-2"))
        model.add(ReLU(name="relu_3-2"))
        model.add(Conv2D(256, (3, 3), padding_type="same", name="conv_3-3"))
        model.add(ReLU(name="relu_3-3"))
        model.add(MaxPooling2D((3, 3), stride=(2, 2), name="pool_3"))

        # flatten layer
        model.add(Flatten(name="flatten"))

        # softmax classifier
        model.add(Dense(classes, name="dense_1"))
        model.add(Softmax(name="softmax"))

        # return the constructed network architecture
        return model
Example #9
    def build(height, width, depth, classes, batch_size):
        # construct the input shape tuple
        input_shape = (batch_size, height, width, depth)

        # instantiate the model
        model = Model()

        # input layer
        model.add(Input(input_shape=input_shape, name="input"))

        # first CONV => RELU => CONV => RELU => POOL block
        model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_1-1"))
        model.add(ReLU(name="relu_1-1"))
        model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_1-2"))
        model.add(ReLU(name="relu_1-2"))
        model.add(
            MaxPooling2D(kernel_size=(2, 2), stride=(2, 2), name="pool_1"))

        # second CONV => RELU => CONV => RELU => POOL block
        model.add(
            Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_2-1"))
        model.add(ReLU(name="relu_2-1"))
        model.add(
            Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_2-2"))
        model.add(ReLU(name="relu_2-2"))
        model.add(
            MaxPooling2D(kernel_size=(2, 2), stride=(2, 2), name="pool_2"))

        # third CONV => RELU => CONV => RELU => POOL block
        model.add(
            Conv2D(filters=256,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_3-1"))
        model.add(ReLU(name="relu_3-1"))
        model.add(
            Conv2D(filters=256,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_3-2"))
        model.add(ReLU(name="relu_3-2"))
        model.add(
            MaxPooling2D(kernel_size=(2, 2), stride=(2, 2), name="pool_3"))

        # first CONV => RELU => CONV => RELU => CONV => RELU => POOL
        model.add(
            Conv2D(filters=512,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_4-1"))
        model.add(ReLU(name="relu_4-1"))
        model.add(
            Conv2D(filters=512,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_4-2"))
        model.add(ReLU(name="relu_4-2"))
        model.add(
            Conv2D(filters=512,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_4-2"))
        model.add(ReLU(name="relu_4-2"))
        model.add(
            MaxPooling2D(kernel_size=(2, 2), stride=(2, 2), name="pool_4"))

        # second CONV => RELU => CONV => RELU => CONV => RELU => POOL
        model.add(
            Conv2D(filters=512,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_5-1"))
        model.add(ReLU(name="relu_5-1"))
        model.add(
            Conv2D(filters=512,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_5-2"))
        model.add(ReLU(name="relu_5-2"))
        model.add(
            Conv2D(filters=512,
                   kernel_size=(3, 3),
                   padding_type="same",
                   name="conv_5-2"))
        model.add(ReLU(name="relu_5-2"))
        model.add(
            MaxPooling2D(kernel_size=(2, 2), stride=(2, 2), name="pool_5"))

        # flatten layer
        model.add(Flatten(name="flatten"))

        # softmax classifier
        model.add(Dense(units=classes, name="dense_1"))
        model.add(Softmax(name="softmax_1"))

        # return the constructed model architecture
        return model