Example #1
def test_inceptionv4_model(input_var, pool, assert_equal_outputs):
    original_model = pretrainedmodels.inceptionv4(pretrained='imagenet',
                                                  num_classes=1000)
    finetune_model = make_model(
        'inception_v4',
        num_classes=1000,
        pool=pool,
        pretrained=True,
    )
    copy_module_weights(original_model.last_linear, finetune_model._classifier)
    assert_equal_outputs(input_var, original_model, finetune_model)
Example #2
def test_nasnetalarge_model(input_var, pool, assert_equal_outputs):
    original_model = pretrainedmodels.nasnetalarge(pretrained='imagenet',
                                                   num_classes=1000)
    finetune_model = make_model(
        'nasnetalarge',
        num_classes=1000,
        pool=pool,
        pretrained=True,
    )
    copy_module_weights(original_model.last_linear, finetune_model._classifier)
    assert_equal_outputs(input_var, original_model, finetune_model)
Example #3
def construct_dcnn(model_name):
    global model, optimizer, scheduler, criterion
    model = make_model(model_name, num_classes=len(classes),
                       pretrained=args.pre_trained_model,
                       dropout_p=args.dropout_p,
                       input_size=input_size)
    model = model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay, nesterov=use_nestrov_moment)
    # dampening = 0.005
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_milestones, gamma=0.95)
    criterion = nn.CrossEntropyLoss()
Example #4
def get_net():
    net = make_model('se_resnext101_32x4d', num_classes=2, pretrained=True)
    # Alternative: EfficientNet backbones (in_features varies by variant):
    # net = EfficientNet.from_pretrained('efficientnet-b6')
    # net._fc = nn.Linear(in_features=2304, out_features=2, bias=True)
    # net._fc = nn.Linear(in_features=2048, out_features=2, bias=True)
    # net._fc = nn.Linear(in_features=1792, out_features=2, bias=True)
    # net._fc = nn.Linear(in_features=1536, out_features=2, bias=True)
    # net._fc = nn.Linear(in_features=1408, out_features=2, bias=True)
    # net._fc = nn.Linear(in_features=1280, out_features=2, bias=True)

    return net
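The commented-out alternatives above hardcode a different in_features for each EfficientNet variant (b0/b1 = 1280, b2 = 1408, b3 = 1536, b4 = 1792, b5 = 2048, b6 = 2304). A minimal sketch, assuming the efficientnet_pytorch package, that reads the width from the existing classifier instead of hardcoding it:

from efficientnet_pytorch import EfficientNet
import torch.nn as nn

def get_efficientnet(variant='efficientnet-b6', num_classes=2):
    net = EfficientNet.from_pretrained(variant)
    # The classifier's input width varies per variant; read it directly.
    net._fc = nn.Linear(net._fc.in_features, num_classes)
    return net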
Example #5
def test_senet_models(input_var, model_name, pool, assert_equal_outputs):
    original_model = getattr(pretrainedmodels,
                             model_name)(pretrained='imagenet',
                                         num_classes=1000)
    finetune_model = make_model(
        model_name,
        num_classes=1000,
        pool=pool,
        pretrained=True,
    )
    copy_module_weights(original_model.last_linear, finetune_model._classifier)
    assert_equal_outputs(input_var, original_model, finetune_model)
Example #6
def test_load_state_dict():
    torch.manual_seed(42)
    model1 = make_model('resnet18', num_classes=10)
    model1_state = model1.state_dict()

    torch.manual_seed(84)
    model2 = make_model('resnet18', num_classes=10)
    model2_state = model2.state_dict()

    assert not all(
        torch.equal(weights1, weights2) for weights1, weights2
        in zip(model1_state.values(), model2_state.values())
    )

    model2.load_state_dict(model1_state)
    model2_state = model2.state_dict()

    assert all(
        torch.equal(weights1, weights2) for weights1, weights2 in
        zip(model1_state.values(), model2_state.values())
    )
Example #7
def test_state_dict_features_and_classifier():
    model = make_model('resnet18', num_classes=10)
    model_state_keys = model.state_dict().keys()

    assert '_classifier.weight' in model_state_keys
    assert '_classifier.bias' in model_state_keys

    features_keys = [
        key for key in model_state_keys
        if key.startswith('_features') and key.endswith(('weight', 'bias', 'running_mean', 'running_var'))
    ]
    assert len(features_keys) == 100
Example #8
    def __init__(self, extractor_name):
        super(Extractor, self).__init__()
        basenet = make_model(model_name=extractor_name,
                             num_classes=1000,
                             input_size=(224, 224),
                             pretrained=True)
        # print(basenet)

        self.n_features = basenet._classifier.in_features
        self.pool = GeM()

        self.extractor = basenet._features
Example #9
def test_models_without_pretrained_weights_dont_have_model_info(model_name):
    input_size = None
    if (model_name == 'alexnet' or model_name.startswith('vgg')
            or model_name.startswith('squeezenet')):
        input_size = (224, 224)
    model = make_model(
        model_name,
        pretrained=False,
        num_classes=10,
        dropout_p=0.5,
        input_size=input_size,
    )
    assert model.original_model_info is None
Example #10
    def __init__(self, encoder, pretrained, num_classes, activation):
        super().__init__()
        self.net = make_model(model_name=encoder,
                              pretrained=pretrained,
                              num_classes=1000)
        in_features = self.net._classifier.in_features
        if activation == "Mish":
            to_Mish(self.net)
            print("Mish activation added!")
        self.head_grapheme_root = Head(in_features, num_classes[0])
        self.head_vowel_diacritic = Head(in_features, num_classes[1])
        self.head_consonant_diacritic = Head(in_features, num_classes[2])
        print(self.net)
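The Head module used above is not defined in this example; a plausible minimal definition (an assumption, not the original code) is a single linear projection from the shared backbone features to per-task logits:

import torch.nn as nn

class Head(nn.Module):
    """Per-task classification head: backbone features -> logits (sketch)."""
    def __init__(self, in_features, num_classes):
        super().__init__()
        self.fc = nn.Linear(in_features, num_classes)

    def forward(self, x):
        return self.fc(x)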
Example #11
def cnn(image):
    global warray

    PATH = 'direct2642_noisy_resnet2.pt' #direct2642_noisy_resnet2.pt, direct2642_noisy.pt
    # If RoS is being used
    # listener()
    # model,saber,imgtopic = initialize()
    model = make_model('resnet18', num_classes=1, pretrained=True, input_size=(227, 227))

    use_gpu = 0  # default so the CPU path below does not hit an undefined name
    if torch.cuda.is_available():
        device = torch.device("cuda")
        use_gpu = 1
        print("CUDA Available")
    else:
        device = torch.device("cpu")

    if use_gpu == 1:
        model = nn.DataParallel(model).cuda()

    if path.exists(PATH):
        # map_location lets a GPU-saved checkpoint load on CPU as well
        model.load_state_dict(torch.load(PATH, map_location=device))
    else:
        print("Did not find Model!")

    model = model.to(device)

    # print ("\nInitialization complete")

    # print ("q value is {}".format(q.get()))
    # transform = transforms.Compose([
    #     transforms.ToPILImage(),
    #     transforms.Resize((227,227)),
    #     transforms.ToTensor(),
    # ])

    if image.shape[2] > 3:
        image = image[:, :, :3]

    image = image_loader(image)

    model.eval()

    with torch.no_grad():
        w = model(image)

    # print ("Before {}".format(w))

    w = w.cpu().numpy().flatten()
    # affine rescaling of the raw network output
    w = w * 0.7897025648 - 0.0967433495
    return w
Example #12
def build_model(args, gpus):
    # Build the model to run
    print("Building a model...")
    if args.task == 1:
        #from se_resnet import se_resnet_custom
        #model = nn.DataParallel(se_resnet_custom(size=args.model_num_blocks,
        #                                             dropout_p=args.dropout_p, num_classes=23),
        #                                             device_ids=gpus)
        pass  # TODO make model here
    elif args.task == 2:
        # TODO make this use MSE and have 3 heads, one for X,Y,Z
        model = make_model(args.model,
                           num_classes=3,
                           dropout_p=args.dropout_p,
                           pretrained=True)
    elif args.task == 3 or args.task == 4:
        model = make_model(args.model,
                           num_classes=23,
                           dropout_p=args.dropout_p,
                           pretrained=True)
        #model = make_model('resnet18', num_classes=23, dropout_p=args.dropout_p, pretrained=True)
        #model = make_model('resnext101_32x4d', num_classes=23, dropout_p=args.dropout_p, pretrained=True)

    return model
Example #13
def initialize():
    global saber
    PATH = 'direct2642_noisy_resnet2.pt'  #direct2642_noisy_resnet2.pt, direct2642_noisy.pt
    quality = "qhd"

    print("\n Initializing Kinect ...")

    # command="roslaunch kinect2_bridge kinect2_bridge.launch"
    # os.system("gnome-terminal -e 'bash -c \"roslaunch kinect2_bridge kinect2_bridge.launch; exec bash\"'")

    imgtopic = "/kinect2/{}/image_color".format(quality)

    print("\nDetecting sabertooth....\n")
    pl = list(port.comports())
    print(pl)
    address = ''
    for p in pl:
        print(p)
        if 'Sabertooth' in str(p):
            address = str(p).split(" ")
    print("\nAddress found @")
    print(address[0])

    saber = Sabertooth(address[0], baudrate=9600, address=128, timeout=0.1)

    use_gpu = 0  # default so the CPU path below does not hit an undefined name
    if torch.cuda.is_available():
        device = torch.device("cuda")
        use_gpu = 1
        print("CUDA Available")
    else:
        device = torch.device("cpu")

    model = cnn_finetune.make_model('resnet18',
                                    num_classes=1,
                                    pretrained=True,
                                    input_size=(227, 227))

    if use_gpu == 1:
        model = nn.DataParallel(model).cuda()

    if path.exists(PATH):
        model.load_state_dict(torch.load(PATH, map_location=device))

    model = model.to(device)

    print("\n Initialization complete")

    return model, saber, imgtopic
Example #14
def test_dpn_models(input_var, model_name, pool, assert_equal_outputs):
    if model_name in {'dpn68b', 'dpn92', 'dpn107'}:
        pretrained = 'imagenet+5k'
    else:
        pretrained = 'imagenet'
    original_model = getattr(pretrainedmodels,
                             model_name)(pretrained=pretrained,
                                         num_classes=1000)
    finetune_model = make_model(
        model_name,
        num_classes=1000,
        pool=pool,
        pretrained=True,
    )
    copy_module_weights(original_model.classifier, finetune_model._classifier)
    assert_equal_outputs(input_var, original_model, finetune_model)
Example #15
    def __init__(self, cnn_finetune, num_country, country_emb_dim=10):
        super().__init__()
        num_classes = cnn_finetune['num_classes']
        cnn_finetune = make_model(**cnn_finetune)
        self.features = cnn_finetune._features
        self.pool = cnn_finetune.pool
        self.dropout = cnn_finetune.dropout

        img_in_features = cnn_finetune._classifier.in_features

        self.country_emb = nn.Embedding(num_country, country_emb_dim)

        self.fc_1 = nn.Linear(img_in_features + country_emb_dim, 512)
        self.fc_2 = nn.Linear(512, num_classes)

        self.relu = nn.ReLU(inplace=True)
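The forward pass is not included in this example; a minimal sketch of how the parts above would plausibly combine (an assumption, since the original forward is not shown) is to concatenate the pooled image features with the country embedding before the two fully connected layers:

    def forward(self, image, country):
        x = self.features(image)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        if self.dropout is not None:
            x = self.dropout(x)
        # concatenate image features with the learned country embedding
        x = torch.cat([x, self.country_emb(country)], dim=1)
        x = self.relu(self.fc_1(x))
        return self.fc_2(x)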
Example #16
    def load_model(self, modelname="resnet50", pretrained=True):
        """load model return model"""
        if not modelname:
            modelname = "resnet50"
        model = make_model(modelname,
                           num_classes=self.num_classes,
                           pretrained=pretrained)
        # set the model to training mode
        model.train()

        model = model.to(self.device)

        if "cuda" in str(self.device):
            model = torch.nn.DataParallel(model)
        self.model = model
        print(modelname)
        return model
Example #17
def get_model(model: str,
              num_classes: int,
              pretrained: bool,
              input_size: int,
              use_cuda: bool = None) -> nn.Module:
    if model.startswith('efficientnet'):
        model = EfficientNet.from_name(model)
        # read the classifier width from the model; a hardcoded 1536 would
        # only be correct for efficientnet-b3
        model._fc = nn.Linear(model._fc.in_features, num_classes)
    else:
        model = make_model(model,
                           num_classes,
                           pretrained,
                           input_size=(input_size, input_size),
                           pool=AvgPool())
    if use_cuda:
        model = model.cuda()
    return model
Example #18
    def __init__(self, cnn_finetune, arcface, embedding_size):
        super().__init__()
        num_classes = cnn_finetune['num_classes']
        cnn_finetune = make_model(**cnn_finetune)
        self.features = cnn_finetune._features
        self.pool = cnn_finetune.pool
        self.dropout = cnn_finetune.dropout

        in_features = cnn_finetune._classifier.in_features
        self.fc_1 = nn.Linear(in_features, 1024)
        self.fc_2 = nn.Linear(1024, embedding_size)
        self.relu = nn.ReLU(inplace=True)
        self.bn = nn.BatchNorm1d(embedding_size)

        self.arcface = ArcMarginProduct(in_features=embedding_size,
                                        out_features=num_classes,
                                        **arcface)
Example #19
def initialize():
    global rdeftrain, rdeftest, tta, toa, model_ft_best, model_ft
    rdeftrain = 0
    rdeftest = 0
    tta = []
    toa = []
    model_ft = make_model('resnet18',
                          num_classes=1,
                          pretrained=True,
                          input_size=(224, 224))  # dropout_p=0.5
    model_ft_best = copy.deepcopy(model_ft)

    model_ft = model_ft.to(device)
    model_ft_best = model_ft_best.to(device)

    if use_gpu == 1:
        model_ft = nn.DataParallel(model_ft).cuda()
        model_ft_best = nn.DataParallel(model_ft_best).cuda()
Example #20
def test_every_pretrained_model_has_model_info(model_name):
    input_size = get_default_input_size_for_model(model_name)
    model = make_model(
        model_name,
        pretrained=True,
        num_classes=10,
        dropout_p=0.5,
        input_size=input_size,
    )
    original_model_info = model.original_model_info
    assert original_model_info

    assert_iterable_length_and_type(original_model_info.input_space, 3, str)
    assert_iterable_length_and_type(original_model_info.input_size, 3, int)
    assert_iterable_length_and_type(original_model_info.input_range, 2,
                                    (int, float))
    assert_iterable_length_and_type(original_model_info.mean, 3, (int, float))
    assert_iterable_length_and_type(original_model_info.std, 3, (int, float))
Example #21
    def __init__(
        self,
        arch="se_resnet50",
        n_class=6,
        pretrained=True,
        image_size=256,
    ):
        super(Finetune, self).__init__()
        self.model = make_model(
            model_name=arch,
            num_classes=n_class,
            pretrained=pretrained,
            input_size=(image_size, image_size),
        )

        self.extract_feature = False
        self.model_name = arch
        self.n_class = n_class
Example #22
def train():
    # GPU
    device = torch.device("cuda" if GPU else "cpu")

    # model
    model = make_model('vgg16', num_classes=num_classes, pretrained=True, input_size=(img_height, img_width))
    model = model.to(device)
    opt = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    model.train()

    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 16
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(100):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
        t = torch.tensor(ts[mb_ind], dtype=torch.long).to(device)

        opt.zero_grad()
        y = model(x)
        y = F.log_softmax(y, dim=1)
        # CrossEntropyLoss applies log_softmax internally, so it must not be
        # fed log-probabilities; use NLLLoss on the log_softmax output instead.
        loss = F.nll_loss(y, t)
        loss.backward()
        opt.step()
    
        pred = y.argmax(dim=1, keepdim=True)
        acc = pred.eq(t.view_as(pred)).sum().item() / mb
        
        print("iter >>", i+1, ',loss >>', loss.item(), ',accuracy >>', acc)

    torch.save(model.state_dict(), 'cnn.pt')
Example #23
    def __init__(
        self,
        model_name='resnet50',
        num_classes=6,
        pretrained=True,
        n_channels=4,
    ):
        super(DSResnet, self).__init__()
        # honor the constructor's pretrained flag instead of hardcoding True
        self.model = make_model(model_name=model_name,
                                num_classes=num_classes,
                                pretrained=pretrained,
                                dropout_p=0.3)
        # print(self.model)
        conv1 = self.model._features[0]
        self.model._features[0] = nn.Conv2d(in_channels=n_channels,
                                            out_channels=conv1.out_channels,
                                            kernel_size=conv1.kernel_size,
                                            stride=conv1.stride,
                                            padding=conv1.padding,
                                            bias=conv1.bias is not None)

        # copy pretrained weights: first 3 channels verbatim, extra channels
        # reuse the leading pretrained channels
        self.model._features[0].weight.data[:, :3, :, :] = conv1.weight.data
        self.model._features[0].weight.data[:, 3:n_channels, :, :] = \
            conv1.weight.data[:, :n_channels - 3, :, :]

        # self.deepsuper_1 = nn.Sequential(
        #     nn.AdaptiveAvgPool2d(),
        #     Flatten(),
        #     nn.BatchNorm1d(256),
        #     nn.Linear(256, num_classes)
        # )

        self.deepsuper_2 = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(),
                                         nn.BatchNorm1d(512),
                                         nn.Linear(512, num_classes))

        self.deepsuper_3 = nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten(),
                                         nn.BatchNorm1d(1024),
                                         nn.Linear(1024, num_classes))

        self.is_infer = False
Example #24
    def __init__(self,
                 model_name="seresnext26_32x4d",
                 num_classes=1108,
                 n_channels=6,
                 num_samples=4,
                 weight=None):
        super(SENetCellMultipleDropout, self).__init__()

        self.model = make_model(model_name=model_name,
                                num_classes=31,
                                pretrained=True)
        print("*" * 100)
        print("SENetGrouplevel")
        conv1 = self.model._features[0].conv1
        self.model._features[0].conv1 = nn.Conv2d(
            in_channels=n_channels,
            out_channels=conv1.out_channels,
            kernel_size=conv1.kernel_size,
            stride=conv1.stride,
            padding=conv1.padding,
            bias=conv1.bias is not None)

        # copy pretrained weights: first 3 channels verbatim, extra channels
        # reuse the leading pretrained channels
        self.model._features[0].conv1.weight.data[:, :3, :, :] = conv1.weight.data
        self.model._features[0].conv1.weight.data[:, 3:n_channels, :, :] = \
            conv1.weight.data[:, :n_channels - 3, :, :]

        if weight:
            model_state_dict = torch.load(weight)['model_state_dict']
            self.model.load_state_dict(model_state_dict)
            print(
                f"\n\n******************************* Loaded checkpoint {weight}"
            )

        in_features = self.model._classifier.in_features
        self.num_samples = num_samples

        self.classifier = nn.Linear(in_features, num_classes)
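The class name and num_samples argument point to multi-sample dropout over the pooled features; the original forward is not shown, so the following is only a sketch of that pattern (the pooling choice and dropout rate are assumptions):

    def forward(self, x):
        x = self.model._features(x)
        x = F.adaptive_avg_pool2d(x, 1).flatten(1)
        # Multi-sample dropout: average the classifier output over several
        # independent dropout masks of the same pooled features.
        logits = torch.stack([
            self.classifier(F.dropout(x, p=0.5, training=self.training))
            for _ in range(self.num_samples)
        ], dim=0).mean(dim=0)
        return logits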
Example #25
def test():
    device = torch.device("cuda" if GPU else "cpu")
    model = make_model('vgg16', num_classes=num_classes, pretrained=True, input_size=(img_height, img_width))
    model = model.to(device)
    # load the trained weights before switching to eval mode
    model.load_state_dict(torch.load('cnn.pt', map_location=device))
    model.eval()

    xs, ts, paths = data_load('../Dataset/test/images/')

    for i in range(len(paths)):
        x = xs[i]
        t = ts[i]
        path = paths[i]
        
        x = np.expand_dims(x, axis=0)
        x = torch.tensor(x, dtype=torch.float).to(device)
        
        pred = model(x)
        pred = F.softmax(pred, dim=1).detach().cpu().numpy()[0]
    
        print("in {}, predicted probabilities >> {}".format(path, pred))
Example #26
def main(args):
    if args.model_name == 'mobilenetv2':
        model = MobileNetV2(num_classes=1000)
    elif args.model_name == 'mobilenetv3large':
        model = MobileNetV3Large(n_classes=1000)
    elif args.model_name == 'mobilenetv3small':
        model = MobileNetV3Small(n_classes=1000)
    else:
        model = make_model(args.model_name, num_classes=1000, pretrained=True)

    # NOTE: original_model_info is set by make_model; the MobileNet* branches
    # above may need an explicitly constructed input size instead.
    input_size = (1, ) + tuple(model.original_model_info.input_size)
    x_dummy = torch.rand(input_size)

    input_names = ['input_0']
    output_names = ['output_0']
    torch.onnx.export(model,
                      x_dummy,
                      args.model_save_path,
                      export_params=True,
                      input_names=input_names,
                      output_names=output_names)
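After exporting, the resulting file can be sanity-checked and run once with onnxruntime (a sketch; assumes the onnx and onnxruntime packages are installed):

import onnx
import onnxruntime as ort

onnx_model = onnx.load(args.model_save_path)
onnx.checker.check_model(onnx_model)

session = ort.InferenceSession(args.model_save_path)
outputs = session.run(['output_0'], {'input_0': x_dummy.numpy()})
print(outputs[0].shape)  # (1, 1000) for the ImageNet-sized classifiers above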
Example #27
    def __init__(self, grad_layer, num_classes):
        super(GAIN, self).__init__()
        self.model = make_model(model_name='resnet50',
                                pretrained=True,
                                num_classes=num_classes)
        # print(self.model)
        self.grad_layer = grad_layer

        self.num_classes = num_classes

        # Feed-forward features
        self.feed_forward_features = None
        # Backward features
        self.backward_features = None

        # Register hooks
        self._register_hooks(grad_layer)

        # sigma, omega for making the soft-mask
        self.sigma = 0.25
        self.omega = 100
Example #28
def test_every_pretrained_model_has_model_info(model_name):
    input_size = None
    if (model_name == 'alexnet' or model_name.startswith('vgg')
            or model_name.startswith('squeezenet')):
        input_size = (224, 224)
    model = make_model(
        model_name,
        pretrained=True,
        num_classes=10,
        dropout_p=0.5,
        input_size=input_size,
    )
    original_model_info = model.original_model_info
    assert original_model_info

    assert_iterable_length_and_type(original_model_info.input_space, 3, str)
    assert_iterable_length_and_type(original_model_info.input_size, 3, int)
    assert_iterable_length_and_type(original_model_info.input_range, 2,
                                    (int, float))
    assert_iterable_length_and_type(original_model_info.mean, 3, (int, float))
    assert_iterable_length_and_type(original_model_info.std, 3, (int, float))
Example #29
    def __init__(self,
                 model_name,
                 in_channels=3,
                 out_dim=10,
                 hdim=512,
                 use_dropout=True,
                 activation=F.leaky_relu,
                 use_bn=True,
                 pretrained=False,
                 kernel_size=3,
                 stride=1,
                 padding=1):
        super(PretrainedCNN, self).__init__()
        print("Architecture: ", model_name)
        is_remote = os.getcwd() != "/kaggle/working"
        if (model_name in get_official_names()
                or model_name in get_pretrained_names()):
            self.base_model = cnn_finetune.make_model(
                model_name,
                num_classes=out_dim,
                pretrained=is_remote and pretrained)
        elif model_name in get_timm_names():
            self.base_model = timm.create_model(
                model_name,
                num_classes=out_dim,
                pretrained=is_remote and pretrained)
        else:
            raise ValueError(f"Not supported architecture: {model_name}")
        if not use_dropout:
            dropout_replace(self.base_model)

        self.conv0 = nn.Conv2d(in_channels,
                               3,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               bias=True)
Example #30
    def __init__(self,
                 arch="resnet18",
                 n_class=10,
                 pretrained=True,
                 num_embeddings=750,
                 embedding_dim=10):
        super(BaselineModel, self).__init__()
        resnet = make_model(
            model_name=arch,
            num_classes=n_class,
            pretrained=pretrained,
        )
        print(resnet)

        self.encoder1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )

        self.encoder2 = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
            resnet._features[1],
        )

        self.encoder3 = resnet._features[2]
        self.encoder4 = resnet._features[3]
        self.encoder5 = resnet._features[4]

        # Embedding layers
        self.embedding = nn.Sequential(
            nn.Embedding(num_embeddings=num_embeddings + 1,
                         embedding_dim=embedding_dim),
            nn.Dropout(0.25),
            GlobalConcatPool1d(),
        )

        self.logit = nn.Linear(2068, n_class)
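GlobalConcatPool1d is not defined in this example; a plausible implementation (an assumption, consistent with the extra 2 * embedding_dim = 20 inputs feeding the 2068-wide final linear layer) concatenates global average and max pooling over the sequence dimension:

import torch
import torch.nn as nn

class GlobalConcatPool1d(nn.Module):
    """Concatenate mean- and max-pooling over the sequence dimension (sketch)."""
    def forward(self, x):
        # x: (batch, seq_len, embedding_dim) -> (batch, 2 * embedding_dim)
        return torch.cat([x.mean(dim=1), x.max(dim=1).values], dim=1)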