def __init__(self, num_classes=1, num_filters=32, pretrained=False):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network is used
            True  - encoder is pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = models.vgg11(pretrained=pretrained).features

        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]

        self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8)
        self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)

        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
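The encoder slots referenced above (convolutions at 0, 3, 6, 8, 11, 13, 16, 18 and the shared ReLU at 1) come from torchvision's VGG11 `features` layout. A quick, self-contained check of that mapping:

import torch.nn as nn
from torchvision import models

features = models.vgg11().features  # untrained weights are enough to inspect the layout
conv_idx = [i for i, m in enumerate(features) if isinstance(m, nn.Conv2d)]
print(conv_idx)  # [0, 3, 6, 8, 11, 13, 16, 18]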
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A")
    """
    model = models.vgg11(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg11'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
Example n. 3
 def __init__(self) -> None:
     super().__init__()
     _vgg = vgg11()
     self._features = Sequential(
         _vgg.features,
         _vgg.avgpool,
     )
     self._projection = nn.Linear(512 * 7 * 7, 256)
     self._prediction = nn.Linear(256, 256)
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A")
    """
    model = models.vgg11(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg11'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model
def default_model():
    """
    Build default model.
    """
    network = models.vgg11(pretrained=True)
    input_size = get_input_size(network)
    default_classifier = Classifier(input_size, 102, [12544, 4096], [0.1, 0.1])
    network.classifier = default_classifier
    return network
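`get_input_size` is not shown in this example; a minimal sketch of what it could do, assuming it simply reads the input width of the first classifier layer (the helper body below is an assumption, not the original):

def get_input_size(network):
    # Assumed implementation: in_features of the first Linear layer in the
    # classifier (25088 for VGG11, i.e. 512 * 7 * 7).
    return network.classifier[0].in_features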
    def __init__(self,
                 arch: NetworkArchitectures = NetworkArchitectures.VGG16,
                 learning_rate: float = 0.0001,
                 dropout_rate: float = 0.2,
                 input_size: int = 25088,
                 hidden_units: Tuple = (12544, ),
                 output_size: int = 102,
                 model_state_dict: Dict = None,
                 epochs: int = 0,
                 class_to_idx: Dict = None,
                 criterion=nn.NLLLoss()):
        """
        Constructor for a network.
        :param arch: the network architecture
        :param learning_rate: the learning rate used when training the network
        :param dropout_rate: the dropout rate used when training the network
        :param input_size: the input size of the classifier
        :param hidden_units: the number of nodes in the classifier hidden layer
        :param output_size: the output size of the classifier (should equal the number of categories)
        :param model_state_dict: the state_dict of the model; used for saving & loading training progress
        :param epochs: the number of epochs this network has been trained
        :param class_to_idx: a dict of classes to indices (usually taken from a training image dataset)
        :param criterion: function to calculate loss
        """
        self.arch = arch
        self.learning_rate = learning_rate
        self.dropout_rate = dropout_rate
        self.input_size = input_size
        self.hidden_units = hidden_units
        self.output_size = output_size
        self.epochs = epochs
        self.class_to_idx = class_to_idx
        self.criterion = criterion

        # Build the model using transfer learning, basing it off of the specified input architecture
        if arch == NetworkArchitectures.VGG11:
            self.model = models.vgg11(pretrained=True)
        elif arch == NetworkArchitectures.VGG13:
            self.model = models.vgg13(pretrained=True)
        elif arch == NetworkArchitectures.VGG16:
            self.model = models.vgg16(pretrained=True)
        elif arch == NetworkArchitectures.VGG19:
            self.model = models.vgg19(pretrained=True)
        else:
            raise ValueError('Invalid Network Architecture: {}'.format(arch))

        # Freeze pre-trained parameters so we don't backpropagate through them
        for param in self.model.parameters():
            param.requires_grad = False
        self.model.classifier = self.create_classifier()

        self.optimizer = optim.Adam(self.model.classifier.parameters(),
                                    lr=learning_rate)

        if model_state_dict:
            self.model_state_dict = model_state_dict
            self.model.load_state_dict(model_state_dict)
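`create_classifier` is referenced above but not shown; a minimal sketch of a builder consistent with the constructor's `input_size`, `hidden_units`, `dropout_rate` and `output_size` fields (this body is an assumption, not the original method):

import torch.nn as nn

def create_classifier(self):
    # Stack Linear/ReLU/Dropout blocks for each hidden size, ending in
    # LogSoftmax so that the NLLLoss criterion above applies directly.
    layers = []
    in_size = self.input_size
    for hidden in self.hidden_units:
        layers += [nn.Linear(in_size, hidden), nn.ReLU(), nn.Dropout(self.dropout_rate)]
        in_size = hidden
    layers += [nn.Linear(in_size, self.output_size), nn.LogSoftmax(dim=1)]
    return nn.Sequential(*layers)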
def load_checkpoints(path):
    checkpoint = torch.load(path)

    if checkpoint['arch'] == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif checkpoint['arch'] == 'vgg19':
        model = models.vgg19(pretrained=True)
    elif checkpoint['arch'] == 'vgg11':
        model = models.vgg11(pretrained=True)
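    # The example stops after picking the backbone. A sketch of how it might
    # continue, assuming the checkpoint also stores 'classifier', 'state_dict'
    # and 'class_to_idx' entries (these key names are assumptions):
    model.classifier = checkpoint['classifier']
    model.load_state_dict(checkpoint['state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    return model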
Example n. 8
def get_vgg11(class_num):
    model = models.vgg11(pretrained=True)
    set_parameter_requires_grad(model)
    model.name = 'vgg11'

    n_inputs = model.classifier[6].in_features
    model.classifier[6] = nn.Linear(n_inputs, class_num)

    return model, 224
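The `224` returned above is the expected input resolution, and replacing `classifier[6]` works because torchvision's VGG11 head ends in `Linear(4096, 1000)`. A self-contained shape check:

import torch
import torch.nn as nn
from torchvision import models

model = models.vgg11()  # untrained weights are enough for a shape check
model.classifier[6] = nn.Linear(model.classifier[6].in_features, 5)
print(model(torch.zeros(1, 3, 224, 224)).shape)  # torch.Size([1, 5])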
Example n. 9
def get_torchvision_model(arch):
    if arch == 'vgg11':
        return models.vgg11(pretrained=True)
    elif arch == 'vgg16':
        return models.vgg16(pretrained=True)
    elif arch == 'alexnet':
        return models.alexnet(pretrained=True)
    else:
        raise Exception('{} is not supported architecture'.format(arch))
Example n. 10
def model_create(arch, hidden_units):

    #if arch entered is vgg13
    if arch == 'vgg13':

        model = models.vgg13(pretrained=True)

        for param in model.features.parameters():
            param.requires_grad = False

        #if hidden units are provided
        if hidden_units:

            classifier = nn.Sequential(
                nn.Linear(25088, 512), nn.ReLU(), nn.Dropout(p=0.2),
                nn.Linear(512, hidden_units), nn.ReLU(), nn.Dropout(p=0.2),
                nn.Linear(hidden_units, len(cat_to_name)),
                nn.LogSoftmax(dim=1))
        #if hidden_units are not provided, default it to 256
        else:
            classifier = nn.Sequential(nn.Linear(25088, 512), nn.ReLU(),
                                       nn.Dropout(p=0.2), nn.Linear(512, 256),
                                       nn.ReLU(), nn.Dropout(p=0.2),
                                       nn.Linear(256, len(cat_to_name)),
                                       nn.LogSoftmax(dim=1))

    #Else Loading the pre-trained model from PyTorch vgg11
    else:
        arch = 'vgg11'
        model = models.vgg11(pretrained=True)

        #Freeze training for all "features" layers

        for param in model.features.parameters():

            param.requires_grad = False

        #if hidden units are provided
        if hidden_units:
            classifier = nn.Sequential(
                nn.Linear(25088, 512), nn.ReLU(), nn.Dropout(p=0.2),
                nn.Linear(512, hidden_units), nn.ReLU(), nn.Dropout(p=0.2),
                nn.Linear(hidden_units, len(cat_to_name)),
                nn.LogSoftmax(dim=1))
        #if hidden_units are not provided, default it to 256
        else:
            classifier = nn.Sequential(nn.Linear(25088, 512), nn.ReLU(),
                                       nn.Dropout(p=0.2), nn.Linear(512, 256),
                                       nn.ReLU(), nn.Dropout(p=0.2),
                                       nn.Linear(256, len(cat_to_name)),
                                       nn.LogSoftmax(dim=1))

    #Replacing the classifier in the pre-trained classifier with our classifier
    model.classifier = classifier

    return model, arch
Example n. 11
    def __init__(self, model_name, code_length, pretrained=True):
        super(CNNNet, self).__init__()
        if model_name == "alexnet":
            original_model = models.alexnet(pretrained)
            self.features = original_model.features
            cl1 = nn.Linear(256 * 6 * 6, 4096)
            cl2 = nn.Linear(4096, 4096)
            if pretrained:
                cl1.weight = original_model.classifier[1].weight
                cl1.bias = original_model.classifier[1].bias
                cl2.weight = original_model.classifier[4].weight
                cl2.bias = original_model.classifier[4].bias

            self.classifier = nn.Sequential(
                nn.Dropout(),
                cl1,
                nn.ReLU(inplace=True),
                nn.Dropout(),
                cl2,
                nn.ReLU(inplace=True),
                nn.Linear(4096, code_length),
                nn.Tanh()
            )
            self.model_name = 'alexnet'

        if model_name == "vgg11":
            original_model = models.vgg11(pretrained)
            self.features = original_model.features
            cl1 = nn.Linear(25088, 4096)

            cl2 = nn.Linear(4096, 4096)
            if pretrained:
                cl1.weight = original_model.classifier[0].weight
                cl1.bias = original_model.classifier[0].bias
                cl2.weight = original_model.classifier[3].weight
                cl2.bias = original_model.classifier[3].bias

            self.classifier = nn.Sequential(
                cl1,
                nn.ReLU(inplace=True),
                nn.Dropout(),
                cl2,
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, code_length),
                nn.Tanh()
            )
            self.model_name = 'vgg11'
        if model_name == 'resnet50':
            original_model = models.resnet50(pretrained)
            self.features = nn.Sequential(*list(original_model.children())[:-1])
            self.classifier = nn.Sequential(
                nn.Linear(2048, code_length),
                nn.Tanh()
            )
            self.model_name = 'resnet50'
Example n. 12
    def __init__(self, name, nclasses=40, pretraining=True, cnn_name='vgg11'):
        super(SVCNN, self).__init__(name)

        self.classnames = [
            'alexandrium', 'dinophysis', 'emiliania', 'gambier', 'larvezoe',
            'ornithocercus', 'pluteuslarvae'
        ]

        self.nclasses = nclasses
        self.pretraining = pretraining
        self.cnn_name = cnn_name
        self.use_resnet = cnn_name.startswith('resnet')
        self.mean = Variable(torch.FloatTensor([0.485, 0.456, 0.406]),
                             requires_grad=False).cuda()
        self.std = Variable(torch.FloatTensor([0.229, 0.224, 0.225]),
                            requires_grad=False).cuda()

        if self.use_resnet:
            if self.cnn_name == 'resnet18':
                self.net = models.resnet18(pretrained=self.pretraining)
                self.net.fc = nn.Linear(512, self.nclasses)
            elif self.cnn_name == 'resnet34':
                self.net = models.resnet34(pretrained=self.pretraining)
                self.net.fc = nn.Linear(512, self.nclasses)
            elif self.cnn_name == 'resnet50':
                self.net = models.resnet50(pretrained=self.pretraining)
                self.net.fc = nn.Linear(2048, self.nclasses)
        else:
            if self.cnn_name == 'alexnet':
                self.net_1 = models.alexnet(
                    pretrained=self.pretraining).features
                self.net_2 = models.alexnet(
                    pretrained=self.pretraining).classifier
            elif self.cnn_name == 'vgg11':
                self.net_1 = models.vgg11(pretrained=self.pretraining).features
                self.net_2 = models.vgg11(
                    pretrained=self.pretraining).classifier
            elif self.cnn_name == 'vgg16':
                self.net_1 = models.vgg16(pretrained=self.pretraining).features
                self.net_2 = models.vgg16(
                    pretrained=self.pretraining).classifier

            self.net_2._modules['6'] = nn.Linear(4096, self.nclasses)
def vgg11(num_classes=1000, pretrained='imagenet'):
    """VGG 11-layer model (configuration "A")
    """
    model = models.vgg11(pretrained=False)
    settings = pretrained_settings['vgg11']['imagenet']
    if pretrained is not None:
        model = load_pretrained(model, 1000, settings)
    model = VGG(model, num_classes=num_classes, settings=settings)

    return model
    def __init__(self, nc=3):
        super().__init__()
        self.nc = nc
        self.input = nn.Sequential(
            Normalization(mean=torch.Tensor([0.485, 0.456, 0.406]),
                          std=torch.Tensor([0.229, 0.224, 0.225])),
            nn.AdaptiveAvgPool2d(224))

        self.vgg = models.vgg11(pretrained=True)
        self.vgg.classifier = self.vgg.classifier[:-1]
Example n. 15
def vgg_11(pretrain=True, input_channel=3, num_classes=1):
    net = vgg11(pretrain)
    if input_channel != 3:
        net.features[0] = nn.Conv2d(in_channels=input_channel,
                                    out_channels=64,
                                    kernel_size=(3, 3),
                                    stride=(1, 1),
                                    padding=(1, 1))
    net.classifier[6] = nn.Linear(4096, num_classes)
    return net
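A short usage sketch of the single-channel case, built directly on torchvision (the local `vgg11` wrapper above is assumed to return a standard torchvision VGG11):

import torch
import torch.nn as nn
from torchvision import models

net = models.vgg11()
net.features[0] = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1)  # 1-band input
net.classifier[6] = nn.Linear(4096, 1)                                  # single output
print(net(torch.zeros(2, 1, 224, 224)).shape)  # torch.Size([2, 1])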
Example n. 16
    def __init__(self, *args, **kwargs):
        super(DilatedDecodingNet, self).__init__(*args, **kwargs)

        self.features = models.vgg11(pretrained=True)
        self.features.eval()
        self.classifier = nn.Linear(512**2, TARGET_SIZE * 2)
        self.gram = GramMatrix()

        if USE_CUDA:
            self.cuda()
def vgg11():
    vgg = models.vgg11(pretrained=True)
    vgg.eval()
    for batch in 1, 2, 4, 8, 16, 32, 64:
        filename = 'vgg11i' + str(batch) + '.onnx'
        print(filename)
        torch.onnx.export(vgg,
                          torch.randn(batch, 3, 224, 224),
                          filename,
                          keep_initializers_as_inputs=True)
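A small follow-up sketch that sanity-checks one of the exported files with the `onnx` package (assumes `onnx` is installed and `vgg11i1.onnx` was produced by the loop above):

import onnx

model_proto = onnx.load('vgg11i1.onnx')
onnx.checker.check_model(model_proto)      # raises if the exported graph is malformed
print(len(model_proto.graph.node), 'nodes')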
Example n. 18
 def __init__(self, classes=10, pretrained=False):
     super().__init__()
     vgg = models.vgg11(pretrained=pretrained)
     self.out_features = 4096
     self.features = vgg.features
     self.avgpool = vgg.avgpool
     self.classifier = nn.Sequential(
         *list(vgg.classifier._modules.values())[:-1],
         nn.Linear(self.out_features, classes),
     )
Example n. 19
def CreateModel(model_name, bit, use_gpu):
    if model_name == 'vgg11':
        vgg11 = models.vgg11(pretrained=True)
        cnn_model = CNN_model.cnn_model(vgg11, model_name, bit)
    if model_name == 'alexnet':
        alexnet = models.alexnet(pretrained=True)
        cnn_model = CNN_model.cnn_model(alexnet, model_name, bit)
    if use_gpu:
        cnn_model = cnn_model.cuda()
    return cnn_model
Example n. 20
 def __init__(self, requires_grad: bool = False):
     super().__init__()
     vgg = models.vgg11(pretrained=True)
     self.vgg_pretrained_features = vgg.features
     self.avgpool = vgg.avgpool
     self.classifier = vgg.classifier
     if not requires_grad:
         for parameter in self.parameters():
             parameter.requires_grad = False
     self.out_features = np.prod(self(torch.zeros(1, 3, 224, 224)).shape)
Example n. 21
def CreateModel(model_name, bit, use_gpu):
    if model_name == 'vgg11':
        vgg11 = models.vgg11(pretrained=True)
        hashnet = IDHashGAN_models.Hash_model(vgg11, model_name, bit)
    if model_name == 'alexnet':
        alexnet = models.alexnet(pretrained=True)
        hashnet = IDHashGAN_models.Hash_model(alexnet, model_name, bit)
    if use_gpu:
        hashnet = hashnet.cuda()
    return hashnet
Example n. 22
def main():
    # embedded = Generator()
    # embedded.load_state_dict(torch.load("embedded.pt"))
    res18_model = models.vgg11(pretrained=True)
    embedded = ResNet50Bottom(res18_model)
    for para in res18_model.parameters():
        para.requires_grad = False
    policy = MarioPolicyNetwork()
    policy.load_state_dict(torch.load("policy.pt"))
    render(policy, embedded, 'cpu')
Example n. 23
    def __init__(self):
        super(ModifiedVGG16Model, self).__init__()

        model = models.vgg11(pretrained=True)
        self.features = model.features

        for param in self.features.parameters():
            param.requires_grad = False

        self.classifier = model.classifier
Example n. 24
    def __init__(self, arch='vgg11', hidden_units=2048, device='cpu'):
        """Initialize Network

        Args:
            hidden_units: int (>0). classifier hidden units.
            device: string ('cpu' or 'gpu').
        """
        if arch not in ['vgg11', 'vgg13', 'vgg16', 'vgg19', 'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn']:
            raise ValueError("an architecture " + str(arch) + " is unsupported")
        else:
            if arch == 'vgg11':
                self.__model = models.vgg11(pretrained=True)
            elif arch == 'vgg13':
                self.__model = models.vgg13(pretrained=True)
            elif arch == 'vgg16':
                self.__model = models.vgg16(pretrained=True)
            elif arch == 'vgg19':
                self.__model = models.vgg19(pretrained=True)
            elif arch == 'vgg11_bn':
                self.__model = models.vgg11_bn(pretrained=True)
            elif arch == 'vgg13_bn':
                self.__model = models.vgg13_bn(pretrained=True)
            elif arch == 'vgg16_bn':
                self.__model = models.vgg16_bn(pretrained=True)
            elif arch == 'vgg19_bn':
                self.__model = models.vgg19_bn(pretrained=True)

        # Freeze parameters
        for parameter in self.__model.parameters():
            parameter.requires_grad = False

        # Define classifier
        classifier = torch.nn.Sequential(OrderedDict([
            ('fc1', torch.nn.Linear(25088, hidden_units)),
            ('relu1', torch.nn.ReLU(inplace=True)),
            ('drop1', torch.nn.Dropout(0.5)),
            ('fc2', torch.nn.Linear(hidden_units, 102)),
            ('output', torch.nn.LogSoftmax(dim=1))
        ]))

        # Replace classifier
        self.__model.classifier = classifier

        # Set device
        self.__device = device
        self.__model.to(self.__device)

        # Set criterion
        self.__criterion = torch.nn.NLLLoss()

        # Set optimizer
        self.__optimizer = torch.optim.Adam(self.__model.classifier.parameters())

        # Set start epoch for continuous training
        self.__current_epoch = 0
def get_classifier(classifier,
                   pretrained=True,
                   resnet34_8x_file=None,
                   num_classes=10):
    if classifier == "none":
        return NullTeacher(num_classes=num_classes)
    else:
        raise ValueError("Only Null Teacher should be used")
    if classifier == 'vgg11_bn':
        return vgg11_bn(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'vgg13_bn':
        return vgg13_bn(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'vgg16_bn':
        return vgg16_bn(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'vgg19_bn':
        return vgg19_bn(pretrained=pretrained, num_classes=num_classes)
    if classifier == 'vgg11':
        return models.vgg11(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'vgg13':
        return models.vgg13(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'vgg16':
        return models.vgg16(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'vgg19':
        return models.vgg19(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'resnet18':
        return resnet18(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'resnet34':
        return resnet34(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'resnet50':
        return resnet50(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'densenet121':
        return densenet121(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'densenet161':
        return densenet161(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'densenet169':
        return densenet169(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'mobilenet_v2':
        return mobilenet_v2(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'googlenet':
        return googlenet(pretrained=pretrained, num_classes=num_classes)
    elif classifier == 'inception_v3':
        return inception_v3(pretrained=pretrained, num_classes=num_classes)
    elif classifier == "resnet34_8x":
        net = network.resnet_8x.ResNet34_8x(num_classes=num_classes)
        if pretrained:
            if resnet34_8x_file is not None:
                net.load_state_dict(torch.load(resnet34_8x_file))
            else:
                raise ValueError(
                    "Cannot load pretrained resnet34_8x from here")

        return net

    else:
        raise NameError(f'Please enter a valid classifier {classifier}')
Example n. 26
    def __init__(self, num_filters=32, pretrained=False):
        """
        :param num_filters:
        :param pretrained:
            False - no pre-trained network used
            vgg - encoder pre-trained with VGG11
            carvana - all weights pre trained on
                Kaggle: Carvana dataset https://www.kaggle.com/c/carvana-image-masking-challenge

        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        if pretrained == 'vgg':
            self.encoder = models.vgg11(pretrained=True).features
        else:
            self.encoder = models.vgg11(pretrained=False).features

        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]

        self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2,
                                   num_filters * 8)
        self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2,
                                 num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2,
                                 num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2,
                                 num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2,
                                 num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)

        self.final = nn.Conv2d(num_filters, 1, kernel_size=1)
Example n. 27
def build_model(arch, inputs, hidden_units, output, rate):
    """
    This function builds the model to be used in training or predition using
    the parameters passed
    Params:
        arch - Model architecture/type to be used
        hidden_units - Number of hidden units to be used in classifier
    Returns:
        model - Model with replace classifier and freezed gradients
    """
    fc1_input = inputs if inputs else 25088
    fc2_output = output if output else 102
    dp_rate = rate if rate else 0.2

    #Check if arch is valid and load the correct model
    model_options = ["vgg11", "vgg13", "vgg19"]
    if arch in model_options:
        print("Building model ---> arch: {}".format(arch))
        if arch == "vgg19":
            model = models.vgg19(pretrained=True)
        elif arch == "vgg13":
            model = models.vgg13(pretrained=True)
        else:
            model = models.vgg11(pretrained=True)
    else:
        print("Invalid model: {} --> Using arch: vgg11".format(arch))
        model = models.vgg11(pretrained=True)

    #Freezes gradients
    for param in model.parameters():
        param.requires_grad = False

    #Build classifier and replace it in model
    classifier = nn.Sequential(
        OrderedDict([("fc1", nn.Linear(fc1_input, hidden_units)),
                     ("relu", nn.ReLU()), ("drop", nn.Dropout(p=dp_rate)),
                     ("fc2", nn.Linear(hidden_units, fc2_output)),
                     ("output", nn.LogSoftmax(dim=1))]))

    model.classifier = classifier

    return model
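A usage sketch for `build_model`, assuming `models`, `nn` and `OrderedDict` are imported as in the other examples (the argument values below are illustrative only):

# 25088 classifier inputs, 512 hidden units, 102 output classes, dropout 0.2
model = build_model("vgg13", 25088, 512, 102, 0.2)
print(model.classifier)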
Example n. 28
 def __init__(self):
     super().__init__()
     self.features = models.vgg11(pretrained=True).features
     self.regressor = torch.nn.Sequential(
         torch.nn.Linear(512 * 7 * 7, units1),
         torch.nn.Sigmoid(),
         torch.nn.Linear(units1, units2),
         torch.nn.Sigmoid(),
         torch.nn.Linear(units2, 1),
         torch.nn.Sigmoid()
     )
Example n. 29
    def __init__(self, num_classes=1, num_filters=32, pretrained=False):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network used
            True - encoder pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        self.num_classes = num_classes

        self.encoder = models.vgg11(pretrained=pretrained).features

        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Sequential(self.encoder[0], self.relu)

        self.conv2 = nn.Sequential(self.encoder[3], self.relu)

        self.conv3 = nn.Sequential(
            self.encoder[6],
            self.relu,
            self.encoder[8],
            self.relu,
        )
        self.conv4 = nn.Sequential(
            self.encoder[11],
            self.relu,
            self.encoder[13],
            self.relu,
        )

        self.conv5 = nn.Sequential(
            self.encoder[16],
            self.relu,
            self.encoder[18],
            self.relu,
        )

        self.center = DecoderBlockUpsample(256 + num_filters * 8,
                                           num_filters * 8 * 2,
                                           num_filters * 8)
        self.dec5 = DecoderBlockUpsample(512 + num_filters * 8,
                                         num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlockUpsample(512 + num_filters * 8,
                                         num_filters * 8 * 2, num_filters * 4)
        self.dec3 = DecoderBlockUpsample(256 + num_filters * 4,
                                         num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlockUpsample(128 + num_filters * 2,
                                         num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(64 + num_filters, num_filters)

        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
Example n. 30
 def adjusted_VGG(self):
     model = models.vgg11(pretrained=True)
     model.classifier[0] = torch.nn.Linear(in_features=25088, out_features=1024, bias=True)
     model.classifier[2] = torch.nn.Dropout(0.2)
     model.classifier[3] = torch.nn.Linear(in_features=1024, out_features=256, bias=True)
     model.classifier[5] = torch.nn.Dropout(0.2)
     model.classifier[6] = torch.nn.Linear(in_features=256, out_features=2, bias=True)
     for param in model.features.parameters():
         param.requires_grad = False
     #print(model)
     return model
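The index edits above rely on torchvision's VGG classifier layout (Linear at 0, 3, 6; ReLU at 1, 4; Dropout at 2, 5). A self-contained check that the rebuilt head produces two logits (the Dropout swaps are omitted since they do not affect shapes):

import torch
from torchvision import models

model = models.vgg11()
model.classifier[0] = torch.nn.Linear(25088, 1024)
model.classifier[3] = torch.nn.Linear(1024, 256)
model.classifier[6] = torch.nn.Linear(256, 2)
print(model(torch.zeros(1, 3, 224, 224)).shape)  # torch.Size([1, 2])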
Example n. 31
def vgg11_FlowerModel(nhu):
    model = models.vgg11(pretrained=True)
    model.name = "vgg"
    for param in model.parameters():
        param.requires_grad = False
    classifier = nn.Sequential(
        OrderedDict([('fc1', nn.Linear(25088, nhu)), ('relu', nn.ReLU()),
                     ('fc2', nn.Linear(nhu, 102)),
                     ('output', nn.LogSoftmax(dim=1))]))
    model.classifier = classifier
    return model
Example n. 32
    def __init__(self, num_filters=32, num_bands=8, pretrained=False):
        """
        :param num_filters:
        :param num_bands: number of input bands (channels)
        :param pretrained:
            False - no pre-trained network is used
            True  - encoder is pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = models.vgg11(pretrained=pretrained).features
        self.relu = self.encoder[1]

        # change the encoder[0] to support many bands
        self.encoder[0] = nn.Sequential(
            nn.Conv2d(num_bands,
                      64,
                      kernel_size=(3, 3),
                      stride=(1, 1),
                      padding=(1, 1)))

        # conv1 picks up the replaced multi-band layer in encoder[0];
        # the remaining blocks reuse the (optionally pre-trained) VGG11 layers.
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]
        self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2,
                                   num_filters * 8)
        self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2,
                                 num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2,
                                 num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2,
                                 num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2,
                                 num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)

        self.final = nn.Conv2d(num_filters, 1, kernel_size=1)
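A self-contained sketch of the multi-band trick in isolation: swap `features[0]` for a wider first convolution and push an 8-band tensor through the VGG11 encoder (the decoder from the example above is not needed for this check):

import torch
import torch.nn as nn
from torchvision import models

encoder = models.vgg11().features
encoder[0] = nn.Conv2d(8, 64, kernel_size=3, stride=1, padding=1)  # 8 input bands
print(encoder(torch.zeros(1, 8, 256, 256)).shape)  # torch.Size([1, 512, 8, 8])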