Example #1
    def __init__(self, activation=SILU(), pretrained=True):
        super(MobilePredictor, self).__init__()
        self.activation = activation
        base_model = mobile_net_v2(dimension=DIMENSION, channels=CHANNELS, input_size=IMAGE_SIZE, width_mult=1., pretrained=pretrained)
        base_model = nn.Sequential(*list(base_model.children())[:-1])
        # padding=1 matches a 3x3 MobileNetV2 stem; the original padding=3 looks copied from a 7x7 ResNet stem
        conv = nn.Conv2d(CHANNELS, 32, kernel_size=3, stride=2, padding=1, bias=False)
        weight = torch.FloatTensor(32, CHANNELS, 3, 3)
        parameters = list(base_model.parameters())
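        # Adapt the pretrained stem weights to CHANNELS inputs: for a
        # single-channel model, average the pretrained RGB planes per filter;
        # otherwise copy them unchanged.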
        for i in range(32):
            if CHANNELS == 1:
                weight[i, :, :, :] = parameters[0].data[i].mean(0)
            else:
                weight[i, :, :, :] = parameters[0].data[i]
        conv.weight.data.copy_(weight)

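        # Swap the blocks' default activations (positions 2 and 5 of each
        # InvertedResidual conv sequence) for the chosen one.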
        for m in base_model.modules():
            if isinstance(m, InvertedResidual):
                m.conv[2] = activation
                m.conv[5] = activation

        self.features = base_model
        self.features[0][0] = conv

        self.predictor = nn.Sequential(
            Perceptron(1280, 1280),
            nn.Dropout(p=0),
            activation,
            Perceptron(1280, DIMENSION),
        )
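A minimal smoke test for this constructor could look like the sketch below. It assumes the enclosing module defines MobilePredictor, SILU, CHANNELS, IMAGE_SIZE and DIMENSION (none are shown here), and that forward() globally pools and flattens the backbone output before the predictor, as the 1280-wide Perceptron suggests.

import torch

# Hypothetical usage; MobilePredictor, SILU, CHANNELS, IMAGE_SIZE and
# DIMENSION are assumed to come from the enclosing module.
model = MobilePredictor(activation=SILU(), pretrained=True)
model.eval()
x = torch.randn(1, CHANNELS, IMAGE_SIZE, IMAGE_SIZE)  # dummy batch of one
with torch.no_grad():
    h = model.features(x)          # backbone feature map
    h = h.mean(dim=(2, 3))         # assumed global average pool -> (1, 1280)
    out = model.predictor(h)       # -> (1, DIMENSION)
print(out.shape)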
Example #2
    def __init__(self,
                 channels=3,
                 dimension=35,
                 activation=nn.ReLU(),
                 pretrained=True):
        super(SqueezeResidualPredictor, self).__init__(channels, dimension,
                                                       activation, pretrained)
        final_norm_layer = nn.BatchNorm2d(LATENT_DIM)

        self.features = nn.Sequential(
            nn.Conv2d(LATENT_DIM, LATENT_DIM, kernel_size=1),
            final_norm_layer,
            activation,
            nn.AvgPool2d(kernel_size=12, stride=1),
        )

        reduce_number = int((LATENT_DIM + dimension) / 2.0)
        sub_dimension = reduce_number if reduce_number > dimension else (
            reduce_number + dimension)

        self.predictor = nn.Sequential(
            Perceptron(LATENT_DIM, sub_dimension),
            nn.Dropout(p=0),
            activation,
            Perceptron(sub_dimension, dimension),
        )
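The head can be exercised on its own, as in the sketch below. It assumes LATENT_DIM, Perceptron and the class itself from the enclosing module, and that the backbone feeding self.features emits a 12x12 map, which AvgPool2d(kernel_size=12, stride=1) collapses to 1x1.

import torch

# Hypothetical check of the head alone; LATENT_DIM and the class come from
# the enclosing module. AvgPool2d(12) presumes a 12x12 input feature map.
head = SqueezeResidualPredictor(channels=3, dimension=35, pretrained=False)
fmap = torch.randn(2, LATENT_DIM, 12, 12)   # stand-in backbone output
pooled = head.features(fmap)                # -> (2, LATENT_DIM, 1, 1)
out = head.predictor(pooled.flatten(1))     # -> (2, 35)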
Example #3
    def __init__(self,
                 channels=3,
                 dimension=1,
                 activation=SILU(),
                 pretrained=True):
        super(MnasPredictor, self).__init__()
        self.activation = activation
        input_size = IMAGE_SIZE
        assert input_size % 32 == 0
        input_channel = int(32 * 1.1)
        self.last_channel = 1280
        self.interverted_residual_setting = [
            # t: expansion factor, c: output channels, n: repeats, s: stride, k: kernel size
            [3, 24, 3, 2, 3],
            [3, 40, 3, 2, 5],
            [6, 80, 3, 2, 5],
            [6, 96, 2, 1, 3],
            [6, 192, 4, 2, 5],
            [6, 320, 1, 1, 3],
        ]
        # building the first two layers
        self.features = [
            conv_3x3(channels, input_channel, 2, activation),
            SepConv_3x3(input_channel, 16, activation)
        ]
        input_channel = 16

        # building inverted residual blocks (MBConv)
        for t, c, n, s, k in self.interverted_residual_setting:
            output_channel = int(c * 1.1)
            for i in range(n):
                if i == 0:
                    self.features.append(
                        InvertedResidual(input_channel, output_channel, s, t,
                                         k, activation))
                else:
                    self.features.append(
                        InvertedResidual(input_channel, output_channel, 1, t,
                                         k, activation))
                input_channel = output_channel

        # building the final layers
        self.features.append(conv_1x1(input_channel, self.last_channel))
        self.features.append(nn.AdaptiveAvgPool2d(1))

        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)

        self._initialize_weights()

        self.predictor = nn.Sequential(
            activation,
            Perceptron(1280, 1280),
            activation,
            Perceptron(1280, dimension),
        )
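Since the feature stack ends in conv_1x1(..., 1280) followed by AdaptiveAvgPool2d(1), a forward pass can be sketched as below, assuming MnasPredictor, SILU and IMAGE_SIZE from the enclosing module.

import torch

# Hypothetical forward sketch; MnasPredictor, SILU and IMAGE_SIZE are
# assumed module-level names. features() ends in a 1x1 average pool,
# so its output is (batch, 1280, 1, 1).
model = MnasPredictor(channels=3, dimension=1, activation=SILU(), pretrained=False)
x = torch.randn(1, 3, IMAGE_SIZE, IMAGE_SIZE)
feats = model.features(x).flatten(1)   # -> (1, 1280)
pred = model.predictor(feats)          # -> (1, 1)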
Example #4
    def __init__(self, activation=SILU(), pretrained=True):
        super(ResidualRecognitron, self).__init__()
        self.activation = activation
        self.inplanes = FEATURES
        layers = [2,2,2,2]
        block = BasicBlock
        base_model = models.resnet18(pretrained=pretrained)
        conv = nn.Conv2d(CHANNELS, FEATURES, kernel_size=7, stride=2, padding=3, bias=False)
        weight = torch.FloatTensor(FEATURES, CHANNELS, 7, 7)
        parameters = list(base_model.parameters())

        for i in range(FEATURES):
            if CHANNELS == 1:
                weight[i, :, :, :] = parameters[0].data[i].mean(0)
            else:
                weight[i, :, :, :] = parameters[0].data[i]

        conv.weight.data.copy_(weight)

        self.conv1 = conv
        self.bn1 = nn.BatchNorm2d(FEATURES)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1, activation=activation)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, activation=activation)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, activation=activation)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, activation=activation)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        if pretrained:
            self.bn1.weight.data.copy_(base_model.bn1.weight)
            self.bn1.bias.data.copy_(base_model.bn1.bias)
            self.bn1.affine = base_model.bn1.affine
            # copy pretrained block parameters stage by stage, indexing each
            # stage by its own depth rather than reusing layers[0] for all four
            for i in range(layers[0]):
                self.layer1[i].configure(base_model.layer1[i])
            for i in range(layers[1]):
                self.layer2[i].configure(base_model.layer2[i])
            for i in range(layers[2]):
                self.layer3[i].configure(base_model.layer3[i])
            for i in range(layers[3]):
                self.layer4[i].configure(base_model.layer4[i])

        self.recognitron = nn.Sequential(
            Perceptron(LATENT, LATENT),
            nn.Dropout(p=0.0),
            activation,
            Perceptron(LATENT, DIMENSION),
        )
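The stages mirror torchvision's ResNet-18, so a forward pass can be reconstructed as in the sketch below; CHANNELS, FEATURES, LATENT, DIMENSION, SILU and the class itself are assumed from the enclosing module (with BasicBlock, LATENT is presumably 512).

import torch

# Hypothetical forward sketch; all capitalised names and the class are
# assumed to come from the enclosing module.
model = ResidualRecognitron(activation=SILU(), pretrained=True)
x = torch.randn(1, CHANNELS, 224, 224)
h = model.maxpool(model.activation(model.bn1(model.conv1(x))))
h = model.layer4(model.layer3(model.layer2(model.layer1(h))))
h = model.avgpool(h).flatten(1)     # -> (1, 512) for BasicBlock stacks
out = model.recognitron(h)          # -> (1, DIMENSION)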
Example #5
    def __init__(self, activation=nn.ReLU(), pretrained=True):
        super(SqueezeResidualPredictor, self).__init__(activation, pretrained)
        final_norm_layer = nn.BatchNorm2d(LATENT_DIM)

        self.features = nn.Sequential(
            nn.Conv2d(LATENT_DIM, LATENT_DIM, kernel_size=1),
            final_norm_layer,
            activation,
            nn.AdaptiveAvgPool2d(1),
        )

        reduce_number = int((LATENT_DIM + CHANNELS) / 2.0)
        sub_dimension = reduce_number if reduce_number > CHANNELS else (
            reduce_number + CHANNELS)

        self.predictor = nn.Sequential(
            Perceptron(LATENT_DIM, sub_dimension),
            nn.Dropout(p=0),
            activation,
            Perceptron(sub_dimension, CHANNELS),
        )
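Compared with Example #2, this variant pools with AdaptiveAvgPool2d(1), so the head tolerates any spatial size, and it reads CHANNELS and LATENT_DIM from module-level constants instead of constructor arguments; a sketch:

import torch

# Hypothetical usage; LATENT_DIM and CHANNELS are assumed module constants.
# AdaptiveAvgPool2d(1) makes the head spatial-size agnostic.
head = SqueezeResidualPredictor(pretrained=False)
fmap = torch.randn(2, LATENT_DIM, 7, 7)               # arbitrary spatial size
out = head.predictor(head.features(fmap).flatten(1))  # -> (2, CHANNELS)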