Example #1
    def __init__(self,
                 subtype='squeezenet1_1',
                 out_stages=[1, 2, 3],
                 backbone_path=None):
        super(SqueezeNet, self).__init__()
        self.out_stages = out_stages
        self.backbone_path = backbone_path

        if subtype == 'squeezenet1_1':
            features = squeezenet1_1(
                pretrained=not self.backbone_path).features
            # Stage output widths for squeezenet1_1: conv1, layer1, layer2, layer3.
            self.out_channels = [64, 128, 256, 512]
        else:
            raise NotImplementedError

        self.out_channels = self.out_channels[self.out_stages[0]:self.out_stages[-1] + 1]

        self.conv1 = nn.Sequential(*list(features.children())[0:2])
        self.layer1 = nn.Sequential(*list(features.children())[2:5])
        self.layer2 = nn.Sequential(*list(features.children())[5:8])
        self.layer3 = nn.Sequential(*list(features.children())[8:13])

        if self.backbone_path:
            # conv1/layer1-3 reuse the modules of `features`, so loading the
            # checkpoint into `features` also updates the split layers.
            features.load_state_dict(torch.load(self.backbone_path))
        else:
            self.init_weights()
Example #2
def squeezenet(pretrained=True):
    model = SqueezeNet()
    if pretrained:
        from torchvision.models.squeezenet import squeezenet1_1
        source_state = squeezenet1_1(pretrained=True).features.state_dict()
        load_weights_sequential(model, source_state)
    return model
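The helper load_weights_sequential used above is not defined in the snippet. A minimal sketch of such a helper, assuming it pairs the pretrained parameters with the target model's parameters in registration order and skips shape mismatches, could look like this:

def load_weights_sequential(target, source_state):
    # Hypothetical helper: walk the target's state_dict and the source values in
    # order, copying only the entries whose shapes match.
    new_state = {}
    for (name, param), src in zip(target.state_dict().items(), source_state.values()):
        if param.shape == src.shape:
            new_state[name] = src
    target.load_state_dict(new_state, strict=False)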
Example #3
    def __init__(self, pretrained=False):
        super(SqueezeNet, self).__init__()

        self.feat_1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(inplace=True)
        )
        self.feat_2 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            Fire(64, 16, 64, 64),
            Fire(128, 16, 64, 64)
        )
        self.feat_3 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            Fire(128, 32, 128, 128, 2),
            Fire(256, 32, 128, 128, 2)
        )
        self.feat_4 = nn.Sequential(
            Fire(256, 48, 192, 192, 4),
            Fire(384, 48, 192, 192, 4),
            Fire(384, 64, 256, 256, 4),
            Fire(512, 64, 256, 256, 4)
        )
        if pretrained:
            weights = squeezenet1_1(pretrained=True).features.state_dict()
            load_weights_sequential(self, weights)
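These Fire(...) calls take a fifth argument, which torchvision's Fire does not accept, so the example relies on a custom module defined elsewhere. A minimal sketch of a dilated Fire block consistent with the calls above, assuming the fifth argument is the dilation of the 3x3 expand branch, might be:

import torch
import torch.nn as nn

class Fire(nn.Module):
    def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes, dilation=1):
        super(Fire, self).__init__()
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)
        # padding=dilation keeps the spatial size of the dilated 3x3 branch.
        self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3,
                                   padding=dilation, dilation=dilation)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.relu(self.squeeze(x))
        return torch.cat([self.relu(self.expand1x1(x)),
                          self.relu(self.expand3x3(x))], 1)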
Example #4
    def __init__(self, feature_levels=(3, 4, 5), pretrained=True):
        super().__init__()
        _check_levels(feature_levels)
        self.forward_levels = tuple(range(1, feature_levels[-1] + 1))
        self.feature_levels = feature_levels
        from torchvision.models.squeezenet import squeezenet1_1, Fire
        backbone = squeezenet1_1(pretrained=pretrained)
        del backbone.classifier
        backbone = backbone.features
        backbone[0].padding = (1, 1)

        self.layer1 = backbone[:2]
        self.layer2 = backbone[2:5]
        self.layer3 = backbone[5:8]
        self.layer4 = backbone[8:]

        if 5 in feature_levels:
            self.layer5 = nn.Sequential(
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),
            )

        channels = [64, 128, 256, 512, 512]

        self.out_channels = [channels[i - 1] for i in feature_levels]
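The forward pass of this backbone is not shown. A plausible sketch, assuming it should return the feature maps of the requested levels in order, is:

    def forward(self, x):
        outs = []
        for level in self.forward_levels:
            # layer1..layer5 are defined above; keep only the requested levels.
            x = getattr(self, 'layer{}'.format(level))(x)
            if level in self.feature_levels:
                outs.append(x)
        return outs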
Example #5
    def __init__(self, pretrained=True):
        super(FurnitureSqueezeNet350, self).__init__()
        model = squeezenet1_1(pretrained=pretrained)
        self.features = model.features

        # Final convolution is initialized differently from the rest
        final_conv = Conv2d(512, 128, kernel_size=1)
        self.classifier = Sequential(Dropout(p=0.5), final_conv,
                                     ReLU(inplace=True), AdaptiveAvgPool2d(1))

        for m in final_conv.modules():
            normal_(m.weight, mean=0.0, std=0.01)
            if m.bias is not None:
                constant_(m.bias, 0.0)
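The forward method is omitted here; since the classifier ends with AdaptiveAvgPool2d(1), a plausible sketch (an assumption, not part of the original) is:

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        # Flatten the (batch, 128, 1, 1) pooled output to (batch, 128).
        return x.view(x.size(0), -1)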
Example #6
def get_small_squeezenet_v1_1(num_classes):

    model = squeezenet1_1(num_classes=num_classes, pretrained=False)
    # As the input image size is small (64x64), we modify the first layers:
    # - replace Conv2d(3, 64, (3, 3), stride=(2, 2))
    #   with Conv2d(3, 64, (3, 3), stride=(1, 1), padding=1)
    # - replace MaxPool2d(kernel_size=(3, 3), stride=(2, 2), dilation=(1, 1))
    #   with MaxPool2d(kernel_size=(2, 2), stride=(1, 1))
    layers = list(model.features)
    layers[0] = Conv2d(3, 64, kernel_size=(3, 3), padding=1)
    layers[2] = MaxPool2d(kernel_size=2, stride=1)
    model.features = Sequential(*layers)
    # Replace the last AvgPool2d -> AdaptiveAvgPool2d
    layers = list(model.classifier)
    layers[-1] = AdaptiveAvgPool2d(1)
    model.classifier = Sequential(*layers)
    return model
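A quick shape check of the modified network on 64x64 inputs (a hypothetical usage example; num_classes=200 is arbitrary):

import torch

model = get_small_squeezenet_v1_1(num_classes=200)
with torch.no_grad():
    logits = model(torch.randn(2, 3, 64, 64))
print(logits.shape)  # expected: torch.Size([2, 200])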
Example #7
def recordSqueeze(info):
    global SKIP
    import torchvision.models.squeezenet as sqGen

    if not (SKIP and 'squeezenet1_0' in info['name_list']):
        INFO("proceeding for squeezenet1_0")
        net = sqGen.squeezenet1_0(pretrained=True).cuda()
        sum = __summary(net, [3, 224, 224], verbose=True)
        __writeInfoJSON(sum, 'squeezenet1_0')
    else:
        INFO("Skip squeezenet1_0")

    if not (SKIP and 'squeezenet1_1' in info['name_list']):
        INFO("proceeding for squeezenet1_1")
        net = sqGen.squeezenet1_1(pretrained=True).cuda()
        sum = __summary(net, [3, 224, 224], verbose=True)
        __writeInfoJSON(sum, 'squeezenet1_1')
    else:
        INFO("Skip squeezenet1_1")
Example #8
    def __init__(self, pretrained=True, n_crops=6):
        super(FurnitureSqueezeNetOnCrops, self).__init__()
        model = squeezenet1_1(pretrained=pretrained)
        self.features = model.features
        self.crop_classifiers = []
        for i in range(n_crops):
            # Final convolution is initialized differently from the rest
            final_conv = Conv2d(512, 512, kernel_size=1, bias=False)
            self.crop_classifiers.append(
                Sequential(Dropout(p=0.5), final_conv, ReLU(inplace=True),
                           AdaptiveAvgPool2d(1)))
            for m in final_conv.modules():
                normal_(m.weight, mean=0.0, std=0.01)
                if m.bias is not None:
                    constant_(m.bias, 0.0)

        self.crop_classifiers = ModuleList(self.crop_classifiers)
        self.final_classifier = Linear(512, 128)

        for m in self.final_classifier.modules():
            normal_(m.weight, mean=0.0, std=0.01)
            if m.bias is not None:
                constant_(m.bias, 0.0)
Example #9
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models.squeezenet import squeezenet1_1
from PIL import Image

# `tf` below is assumed to be an external helper library (e.g. TorchFusion),
# imported elsewhere in the original script.

INFER_FOLDER  = r"C:\Users\Moses\Documents\Moses\W7\AI\ImageAI\ImageAI 1.0.2 Repo\images"
MODEL_PATH = r"C:\Users\Moses\Documents\Prime Project\AI\PyTorch\tests\squeezenet.pth"

transformations = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

infer_set = tf.ImagesFromPaths([INFER_FOLDER], recursive=False, transformations=transformations)
infer_loader = DataLoader(infer_set, batch_size=2)

net = squeezenet1_1()

model = tf.StandardModel(net)
model.load_model(MODEL_PATH)

def predict_loader(data_loader):
    predictions = model.predict(data_loader,apply_softmax=True)
    print(len(predictions))

    for pred in predictions:
        class_index = torch.argmax(pred)
        class_name = tf.decode_imagenet(class_index)
        confidence = torch.max(pred)
        print("Prediction: {} , Accuracy: {} ".format(class_name, confidence))

def predict_image(image_path):
Example #10
if BACKBONE == 'resnet18':
    net = resnet18(num_classes=num_classes).to(device)
elif BACKBONE == 'resnet34':
    net = resnet34(num_classes=num_classes).to(device)
elif BACKBONE == 'resnet50':
    net = resnet50(num_classes=num_classes).to(device)
elif BACKBONE == 'densenet121':
    net = densenet121(num_classes=num_classes).to(device)
elif BACKBONE == 'mobilenet_v2':
    net = mobilenet_v2(num_classes=num_classes, width_mult=1.0, inverted_residual_setting=None, round_nearest=8).to(device)
elif BACKBONE == 'shufflenet_v2_x1_5':
    net = shufflenet_v2_x1_5(num_classes=num_classes).to(device)
elif BACKBONE == 'squeezenet1_0':
    net = squeezenet1_0(num_classes=num_classes).to(device)
elif BACKBONE == 'squeezenet1_1':
    net = squeezenet1_1(num_classes=num_classes).to(device)
elif BACKBONE == 'mnasnet0_5':
    net = mnasnet0_5(num_classes=num_classes).to(device)
elif BACKBONE == 'mnasnet1_0':
    net = mnasnet1_0(num_classes=num_classes).to(device)
elif BACKBONE == 'mobilenet_v1':
    net = MobileNetV1(num_classes=num_classes).to(device)
else:
    raise Exception('unknown backbone: {}'.format(BACKBONE))

for m in net.modules():
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_normal_(m.weight.data)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
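The snippet is cut off at the BatchNorm2d branch; a typical completion of this initialization loop (an assumption, not shown in the source) sets the affine parameters to identity:

        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)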
Example #11
def loss_selector(loss_net):
    #base
    if loss_net == "vgg16":
        from torchvision.models.vgg import vgg16
        net = vgg16(pretrained=True)
        loss_network = nn.Sequential(*list(net.features)[:31]).eval()
        return loss_network
    elif loss_net == "vgg16_bn":
        from torchvision.models.vgg import vgg16_bn
        net = vgg16_bn(pretrained=True)
        loss_network = nn.Sequential(*list(net.features)[:44]).eval()
        return loss_network
    elif loss_net == "resnet50":
        from torchvision.models.resnet import resnet50
        net=resnet50(pretrained=True)
        loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval()
        return loss_network
    elif loss_net == "resnet101":
        from torchvision.models.resnet import resnet101
        net=resnet101(pretrained=True)
        loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval()
        return loss_network
    elif loss_net == "resnet152":
        from torchvision.models.resnet import resnet152
        net=resnet152(pretrained=True)
        loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval()
        return loss_network
    elif loss_net == "squeezenet1_1":
        from torchvision.models.squeezenet import squeezenet1_1
        net=squeezenet1_1(pretrained=True)
        classifier=[item for item in net.classifier.modules()][1:-1]
        loss_network=nn.Sequential(*[net.features,*classifier]).eval()
        return loss_network
    elif loss_net == "densenet121":
        from torchvision.models.densenet import densenet121
        net=densenet121(pretrained=True)
        loss_network=nn.Sequential(*[net.features,nn.ReLU()]).eval()
        return loss_network
    elif loss_net == "densenet169":
        from torchvision.models.densenet import densenet169
        net=densenet169(pretrained=True)
        loss_network=nn.Sequential(*[net.features,nn.ReLU()]).eval()
        return loss_network
    elif loss_net == "densenet201":
        from torchvision.models.densenet import densenet201
        net=densenet201(pretrained=True)
        loss_network=nn.Sequential(*[net.features,nn.ReLU()]).eval()
        return loss_network        
    elif loss_net == "mobilenet_v2":
        from torchvision.models.mobilenet import mobilenet_v2
        net=mobilenet_v2(pretrained=True)
        loss_network=nn.Sequential(*[net.features]).eval()
        return loss_network                
    elif loss_net == "resnext50_32x4d":
        from torchvision.models.resnet import resnext50_32x4d
        net=resnext50_32x4d(pretrained=True)
        loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval()
        return loss_network      
    elif loss_net == "resnext101_32x8d":
        from torchvision.models.resnet import resnext101_32x8d
        net=resnext101_32x8d(pretrained=True)
        loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval()
        return loss_network
    elif loss_net == "wide_resnet50_2":
        from torchvision.models.resnet import wide_resnet50_2
        net=wide_resnet50_2(pretrained=True)
        loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval()
        return loss_network
    elif loss_net == "wide_resnet101_2":
        from torchvision.models.resnet import wide_resnet101_2
        net=wide_resnet101_2(pretrained=True)
        loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval()
        return loss_network
    elif loss_net == "inception_v3":
Example #12
def base_model():
    full_model = squeezenet1_1(pretrained=True)
    feature_model = nn.Sequential(*list(full_model.features.children())[:-1])
    return feature_model
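A hypothetical usage check for this feature extractor (it assumes the pretrained weights can be downloaded):

import torch

features = base_model()
with torch.no_grad():
    out = features(torch.randn(1, 3, 224, 224))
print(out.shape)  # expected: torch.Size([1, 512, 13, 13])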
Example #13
#from .semantic.DeepSqueeze.models import DeepSqueeze as DeepSqueeze_
from torchvision.models._utils import IntermediateLayerGetter
import torchvision.models.resnet as resnet

from torch.nn import functional as F
import os

from collections import OrderedDict
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
from torch import nn, hub
from torchvision.models.squeezenet import squeezenet1_1
import torch
import torchvision

squeeze = squeezenet1_1(pretrained=False).eval()


class DeepSqueeze_(nn.Module):
    def __init__(self, num_classes):
        super(DeepSqueeze_, self).__init__()
        backbone = squeeze.features

        classifier = DeepLabHead(512, num_classes)

        self.backbone = backbone
        self.classifier = classifier
        self.aux_classifier = None

    def forward(self, x):
        input_shape = x.shape[-2:]
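The forward method is truncated here; a sketch of a typical DeepLabV3-style completion (an assumption modelled on torchvision's segmentation wrappers) is:

        # Run the SqueezeNet features, apply the DeepLab head, and upsample the
        # logits back to the input resolution.
        x = self.backbone(x)
        x = self.classifier(x)
        x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
        return x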
Example #14
from ignite.engine import Engine, Events, _prepare_batch
import torch.nn as nn
from torchvision.models.squeezenet import squeezenet1_1
from torch.optim import SGD

device = "cpu"
model = squeezenet1_1(pretrained=False, num_classes=81)
model.classifier[-1] = nn.AdaptiveAvgPool2d(1)
model = model.to(device)
optimizer = SGD(model.parameters(), lr=0.01, momentum=0.5)
criterion = nn.CrossEntropyLoss()


def process_function(engine, batch):
    model.train()
    optimizer.zero_grad()
    x, y = _prepare_batch(batch, device=device)
    y_pred = model(x)
    loss = criterion(y_pred, y)
    loss.backward()
    optimizer.step()
    return loss.item()


trainer = Engine(process_function)

log_interval = 50
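log_interval is presumably consumed by a logging handler; a minimal sketch of attaching one with Ignite's event API (an assumption, not part of the original) is:

@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
    # engine.state.output is the loss value returned by process_function.
    if engine.state.iteration % log_interval == 0:
        print("Epoch {} Iteration {}: loss = {:.4f}".format(
            engine.state.epoch, engine.state.iteration, engine.state.output))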