Esempio n. 1
0
    def __init__(self, args):
        """Set up loss criteria, per-model bookkeeping dicts and the optimizer.

        Args:
            args: namespace with model_name, lr, epochs and model_number.
        """
        # Loss criteria ("criterion" = training objective).
        self.kl_criterion = nn.KLDivLoss().cuda()      # KL-divergence loss
        self.criterion = nn.CrossEntropyLoss().cuda()  # cross-entropy loss

        # Per-model bookkeeping, keyed by the model index as a string.
        self.total_model_dic = {}
        self.total_loss_dic = {}
        self.kl_loss_dic = {}
        self.cross_loss_dic = {}
        self.model_output_dic = {}

        self.model_name = args.model_name
        self.lr = args.lr
        self.epochs = args.epochs
        self.scope = args.model_number

        if self.model_name == 'mobilenet':
            for idx in range(self.scope):
                key = str(idx)
                self.total_model_dic[key] = models.MobileNetV2()
                self.cross_loss_dic[key] = 0
                self.model_output_dic[key] = 0
                self.kl_loss_dic[key] = 0
                self.total_loss_dic[key] = 0

        print(self.total_model_dic.keys())
        # NOTE(review): only model "0"'s parameters are handed to Adam —
        # presumably intentional, but verify against the training loop.
        self.optimizer = torch.optim.Adam(
            self.total_model_dic[str(0)].parameters(),
            lr=self.lr,
            betas=(0.5, 0.999))
Esempio n. 2
0
 def __init__(self, output_dim="2048"):
     """Feature extractor wrapping a torchvision backbone.

     Args:
         output_dim: kept for interface compatibility; currently unused
             because the backbone was switched from ResNet-152 (see the
             original commented-out code) to MobileNetV2.
     """
     super(ImageNetResNetFeature, self).__init__()
     resnet = models.MobileNetV2()
     n_layers_to_rm = 0
     # BUG FIX: `list(...)[:-0]` evaluates to [] — with n_layers_to_rm == 0
     # the original built an *empty* nn.Sequential, silently discarding the
     # entire backbone. Only slice when there are layers to remove.
     children = list(resnet.children())
     if n_layers_to_rm > 0:
         children = children[:-n_layers_to_rm]
     self.feature = nn.Sequential(*children)
Esempio n. 3
0
    def test_shape(self):
        """Custom MobileNetV2 must match torchvision's output shapes."""
        custom = mobileNetV2Class(num_classes=2, pretrain=True)
        reference = models.MobileNetV2(num_classes=2)

        batch = torch.randn((2, 3, 224, 224), dtype=torch.float32)

        # Run the reference model first, then the custom one, on the
        # same input batch.
        ref_logits = reference(batch)
        ref_features = reference.features(batch)

        logits = custom(batch)
        features = custom.features(batch)

        self.assertEqual(list(logits.shape), list(ref_logits.shape))
        self.assertEqual(list(features.shape), list(ref_features.shape))
Esempio n. 4
0
    def __init__(self):
        """MobileNetV2 with feature block 18 replaced by an upsampling head
        (three stride-2 transposed convs, then a 1x1 projection to 14
        channels); the classification head is removed."""
        super(MobileNet, self).__init__()
        self.model = models.MobileNetV2()

        head = [
            nn.ConvTranspose2d(320, 150, (3, 3), (2, 2),
                               padding=(1, 1), output_padding=(1, 1)),
            nn.BatchNorm2d(150),
            nn.ReLU(),
            nn.ConvTranspose2d(150, 150, kernel_size=(3, 3), stride=(2, 2),
                               padding=(1, 1), output_padding=(1, 1)),
            nn.BatchNorm2d(150),
            nn.ReLU(),
            nn.ConvTranspose2d(150, 150, kernel_size=(3, 3), stride=(2, 2),
                               padding=(1, 1), output_padding=(1, 1)),
            nn.BatchNorm2d(150),
            nn.ReLU(),
            nn.Conv2d(150, 14, kernel_size=(1, 1), stride=(1, 1)),
        ]
        self.model._modules['features'][18] = nn.Sequential(*head)
        # Drop the classifier so forward() yields backbone features only.
        self.model.classifier = nn.Sequential()
Esempio n. 5
0
def mobilenet_v2(pretrained=False, progress=True, map_location=None, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        map_location: forwarded to the checkpoint loader (e.g. a device)
    """
    model = models.MobileNetV2(**kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(
        model_urls['mobilenet_v2'],
        progress=progress,
        map_location=map_location)
    model.load_state_dict(state_dict)
    return model
Esempio n. 6
0
 def build_model(self, pretrained=True):
     """Instantiate the MobileNetV2 backbone and register it for pruning.

     Args:
         pretrained: load ImageNet weights.
     """
     self.pretrained = pretrained
     # BUG FIX: torchvision's MobileNetV2 *class* accepts no `pretrained`
     # argument (passing it raises TypeError); the factory function
     # mobilenet_v2() is the API that takes it.
     model = models.mobilenet_v2(pretrained=pretrained)
     self.set_model(model, self.param_layer_ids, self.default_prune_factors)
def initialize_model(model_name,
                     embedding_dim,
                     feature_extracting,
                     use_pretrained=True):
    """Build a backbone network and replace its final classification layer
    with a fresh ``nn.Linear(num_features, embedding_dim)`` embedding head.

    Args:
        model_name: architecture key, e.g. "densenet161", "resnet34",
            "inceptionv3", "mobilenet2", "seresnext", ...
        embedding_dim: output dimension of the new final linear layer.
        feature_extracting: forwarded to set_parameter_requires_grad() to
            optionally freeze the backbone weights.
        use_pretrained: load pretrained ImageNet weights where supported.

    Returns:
        The modified model.

    Raises:
        ValueError: if model_name is not recognized.
    """
    # BUG FIX: the original mixed plain `if` statements into what should
    # have been one if/elif chain, so "densenet161" and "densenet121"
    # built their model and then still fell into the trailing `else`,
    # raising ValueError. The "densenet169" check was also nested
    # (unreachably) inside the "densenet161" branch and constructed
    # densenet161 anyway. All branches now form a single chain.
    if model_name == "densenet161":
        model_ft = models.densenet161(pretrained=use_pretrained,
                                      memory_efficient=True)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_features, embedding_dim)
    elif model_name == "densenet169":
        # Use the matching constructor (original dead code built densenet161).
        model_ft = models.densenet169(pretrained=use_pretrained,
                                      memory_efficient=True)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_features, embedding_dim)
    elif model_name == "densenet121":
        model_ft = models.densenet121(pretrained=use_pretrained,
                                      memory_efficient=True)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_features, embedding_dim)
    elif model_name == "densenet201":
        model_ft = models.densenet201(pretrained=use_pretrained,
                                      memory_efficient=True)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_features, embedding_dim)
    elif model_name == "resnet101":
        model_ft = models.resnet101(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
    elif model_name == "resnet34":
        model_ft = models.resnet34(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
    elif model_name == "adl_resnet50":
        model_ft = adl_resnet50(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
    elif model_name == "inceptionv3":
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
    elif model_name == "seresnext":
        model_ft = se_resnext101_32x4d(num_classes=1000)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.last_linear.in_features
        model_ft.last_linear = nn.Linear(num_features, embedding_dim)
    elif model_name == "seresnext50":
        model_ft = se_resnext50_32x4d(num_classes=1000)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.last_linear.in_features
        model_ft.last_linear = nn.Linear(num_features, embedding_dim)
    elif model_name == "googlenet":
        model_ft = models.googlenet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
    elif model_name == "mobilenet2":
        # BUG FIX: the MobileNetV2 class takes no `pretrained` kwarg; the
        # mobilenet_v2() factory is the API that accepts it.
        model_ft = models.mobilenet_v2(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.classifier[1].in_features
        model_ft.classifier[1] = nn.Linear(num_features, embedding_dim)
    elif model_name == "mnasnet":
        model_ft = mnasnet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.classifier[1].in_features
        model_ft.classifier[1] = nn.Linear(num_features, embedding_dim)
    elif model_name == "adl_googlenet":
        model_ft = GoogLeNet()
        set_parameter_requires_grad(model_ft, feature_extracting)
        num_features = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_features, embedding_dim)
    else:
        raise ValueError

    return model_ft
Esempio n. 8
0
# Inference-setup script: build a 2-class MobileNetV2 and print its
# architecture in eval mode.
# NOTE(review): `torch` itself is referenced below but only submodules are
# imported here — presumably `import torch` appears earlier in the original
# file; verify.
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import sys
import torchvision.models as models
import torchsummary

# Select GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Label mapping and train-time transforms (RandomSizedCrop/flip/normalize)
# existed as commented-out code in the original; omitted here as dead code.
net = models.MobileNetV2(num_classes=2)
print(net.eval())
Esempio n. 9
0
# Recorded output from previous runs (EfficientNet parameter counts).
#28344882 28344882
#Model parameters:  28344882
#
#
#EfficientNet b6  has input image size:  528
#40740314 40740314
#Model parameters:  40740314
#
#
#EfficientNet b7  has input image size:  600
#63792082 63792082
#Model parameters:  63792082

from torchvision import models

# Parameter-count survey across torchvision architectures.
# NOTE(review): count_parameters() is defined elsewhere in the original
# script — presumably it prints/returns the total parameter count; verify.
net = models.MobileNetV2()
count_parameters(net)

net = models.densenet201()
count_parameters(net)

net = models.inception_v3()
count_parameters(net)
#[27161264, 27161264]

net = models.googlenet()
count_parameters(net)
#[13004888, 13004888]

net = models.vgg19_bn()
count_parameters(net)
    transform=transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        normalize,
    ]))
# Validation loader: no shuffling; pin_memory speeds host-to-GPU copies.
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=test_batch,
                                         shuffle=False,
                                         num_workers=num_workers['val'],
                                         pin_memory=True)

dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}

dataloaders = {'train': train_loader, 'val': val_loader}

# Fresh (randomly initialized) MobileNetV2, moved to GPU when available.
model_ft = models.MobileNetV2()

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ft = model_ft.to(device)
# Report the number of trainable parameters.
print(sum(p.numel() for p in model_ft.parameters() if p.requires_grad))

#Loss Function
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
Init_lr = 0.1
optimizer_ft = optim.SGD(model_ft.parameters(), lr=Init_lr, momentum=0.9)

def adjust_learning_rate(optimizer, epoch, Init_lr):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs.

    Args:
        optimizer: optimizer whose param_groups are updated in place.
        epoch: current (0-based) epoch index.
        Init_lr: learning rate at epoch 0.

    Returns:
        The learning rate that was applied.
    """
    lr = Init_lr * (0.1**(epoch // 30))
    # BUG FIX: the original computed `lr` but never wrote it back to the
    # optimizer, so the schedule had no effect on training.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
Esempio n. 11
0
# Smoke-test script for the model_loads helper: load MobileNetV2 weights
# from .pth and .pth.tar checkpoints, with and without GPU visibility.
import model_loads as lo
import torchvision.models as models

# Case 1: torchvision MobileNetV2 + standalone .pth state dict, on GPU.
model = models.MobileNetV2()
model_path = "../examples/models/pth/mobilenet_v2-b0353104.pth"
lo.load_models(model_path, model, use_gpu=True)
print(model)
print(type(model))

# Case 2: locally defined MobileNetV2 + a .pth.tar training checkpoint.
from models.tar.mobilenet_v2 import MobileNetV2

model = MobileNetV2()
model_path = "models/tar/checkpoint.pth.tar"
# load_multi_gpu_models2_single_gpu(model_path, model)
lo.load_models(model_path, model)
print(model)

import os

# Case 3: hide all GPUs so the load below exercises the CPU path.
# NOTE(review): use_gpu=True is still passed — presumably load_models
# falls back gracefully when no device is visible; verify.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

model = models.MobileNetV2()
model_path = "models/pth/mobilenet_v2-b0353104.pth"
lo.load_models(model_path, model, use_gpu=True)
print(model)
print(type(model))