Example #1
 def test_chicken(self):
     chicken_img = Image.open(
         pathlib.Path(__file__).parent / 'efficientnet/Chicken.jpg')
     model = EfficientNet(number=0)
     model.load_from_pretrained()
     label = _infer(model, chicken_img)
     self.assertEqual(label, "hen",
                      f"Expected hen but got {label} for number=0")
Example #2
def EfficientNet_B8(pretrained=True, num_class=5, onehot=1, onehot2=0):
    if pretrained:
        model = EfficientNet.from_pretrained('efficientnet-b8', num_classes=num_class, onehot=onehot, onehot2=onehot2)
        for name, param in model.named_parameters():
            if 'fc' not in name:
                param.requires_grad = False
    else:
        model = EfficientNet.from_name('efficientnet-b8', onehot=onehot, onehot2=onehot2)
    model.name = "EfficientNet_B8"    
    print("EfficientNet B7 Loaded!")

    return model
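A minimal usage sketch (not from the original source) for the factory above, assuming the repo-specific EfficientNet import it relies on; only the parameters left trainable (the fc head) are handed to the optimizer:

# hedged usage sketch: fine-tune only the parameters left trainable above
import torch

model = EfficientNet_B8(pretrained=True, num_class=5)
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-3)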
Example #3
def EfficientNet_B6(pretrained=True, num_class=5, advprop=False, onehot=1, onehot2=0):
    if pretrained:
        model = EfficientNet.from_pretrained('efficientnet-b6', num_classes=num_class, onehot=onehot, onehot2=onehot2)
        for name, param in model.named_parameters():
            if 'fc' not in name:  # and 'blocks.24' not in name and 'blocks.25' not in name
                param.requires_grad = False
    else:
        model = EfficientNet.from_name('efficientnet-b6', onehot=onehot, onehot2=onehot2)
    
    model.name = "EfficientNet_B6"
    print("EfficientNet B6 Loaded!")

    return model
Example #4
    def __init__(self, cfg = None, phase = 'train'):
        """
        :param cfg:  Network related settings.
        :param phase: train or test.
        """
        super(EfficientDet, self).__init__()
        self.phase = phase
        self.num_classes = 2
        self.base = EfficientNet.from_pretrained('efficientnet-b3')
        self.fpn_flag = True
        if self.fpn_flag:
            in_channels_list = [48,96,136,232]
            out_channels = 128
            self.ssh1 = DetectModule(out_channels)
            self.ssh2 = DetectModule(out_channels)
            self.ssh3 = DetectModule(out_channels)
            self.ssh4 = DetectModule(out_channels)
 
            '''
            self.ssh1 = SSH(out_channels, out_channels)
            self.ssh2 = SSH(out_channels, out_channels)
            self.ssh3 = SSH(out_channels, out_channels)
            self.ssh4 = SSH(out_channels, out_channels)
            '''
 
            self.fpn = FPN(in_channels_list, out_channels)
 
        self.loc, self.conf, self.landm = self.multibox(self.num_classes)
Example #5
    def __init__(self,
                 num_classes,
                 network='efficientdet-d0',
                 D_bifpn=3,
                 W_bifpn=88,
                 D_class=3,
                 is_training=True,
                 threshold=0.5,
                 iou_threshold=0.5,
                 gpu=1):
        super(EfficientDet, self).__init__()
        self.backbone = EfficientNet.from_pretrained(MODEL_MAP[network])
        self.is_training = is_training
        self.neck = BIFPN(in_channels=self.backbone.get_list_features()[-5:],
                          out_channels=W_bifpn,
                          stack=D_bifpn,
                          num_outs=5)
        self.bbox_head = RetinaHead(num_classes=num_classes,
                                    in_channels=W_bifpn)

        self.anchors = Anchors()
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.threshold = threshold
        self.iou_threshold = iou_threshold
        self.gpu = gpu
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.freeze_bn()
        self.criterion = FocalLoss()
Example #6
 def __init__(self,
              net='xception',
              feature_layer='b3',
              num_classes=2,
              dropout_rate=0.5,
              pretrained=False):
     super().__init__()
     self.num_classes = num_classes
     if 'xception' in net:
         self.net = xception(num_classes, escape=feature_layer)
     elif net.split('-')[0] == 'efficientnet':
         self.net = EfficientNet.from_pretrained(net,
                                                 advprop=True,
                                                 num_classes=num_classes,
                                                 escape=feature_layer)
     self.feature_layer = feature_layer
     with torch.no_grad():
         layers = self.net(torch.zeros(1, 3, 100, 100))
     num_features = layers[self.feature_layer].shape[1]
     if pretrained:
         a = torch.load(pretrained, map_location='cpu')
         keys = {
             i: a['state_dict'][i]
             for i in a.keys() if i.startswith('net')
         }
         if not keys:
             keys = a['state_dict']
         load_state(self.net, keys)
     self.pooling = nn.AdaptiveAvgPool2d(1)
     self.texture_enhance = Texture_Enhance_v2(num_features, 1)
     self.num_features = self.texture_enhance.output_features
     self.fc = nn.Linear(self.num_features, self.num_classes)
     self.dropout = nn.Dropout(dropout_rate)
Example #7
    def __init__(self,
                 num_classes,
                 network='efficientdet-d0',
                 D_bifpn=3,
                 W_bifpn=88):
        super(EfficientDetBiFPN, self).__init__()
        self.backbone = EfficientNet.get_network_from_name(MODEL_MAP[network])
        self.neck = BIFPN(in_channels=self.backbone.get_list_features()[-5:],
                          out_channels=W_bifpn,
                          stack=D_bifpn,
                          num_outs=5)

        self.bbox_head = RetinaHead(num_classes=num_classes,
                                    in_channels=W_bifpn)

        self.anchors = Anchors()

        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.freeze_bn()
Example #8
    def __init__(self,
                 num_classes,
                 levels=3,
                 num_channels=128,
                 model_name='efficientdet-d0',
                 is_training=True,
                 threshold=0.5):
        super(EfficientDet, self).__init__()
        self.efficientnet = EfficientNet.from_pretrained(MODEL_MAP[model_name])
        self.is_training = is_training
        self.BIFPN = BIFPN(in_channels=self.efficientnet.get_list_features()[2:],
                                out_channels=256,
                                num_outs=5)
        self.regressionModel = RegressionModel(256)
        self.classificationModel = ClassificationModel(256, num_classes=num_classes)
        self.anchors = Anchors()
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.threshold = threshold

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        prior = 0.01
        
        self.classificationModel.output.weight.data.fill_(0)
        self.classificationModel.output.bias.data.fill_(-math.log((1.0-prior)/prior))
        self.regressionModel.output.weight.data.fill_(0)
        self.regressionModel.output.bias.data.fill_(0)
        self.freeze_bn()
Example #9
def main():

    exp_name = f'baseline_{now()}'
    device, log, result_dir = setup(exp_name, conf)

    train_df = load_csv(conf.train_csv)
    if conf.npy:
        train_images = np.load(conf.train_images)
    else:
        train_images = pd.read_parquet(conf.train_images)

    test_df = load_csv(conf.test_csv)
    if conf.npy:
        test_images = np.load(conf.test_images)
    else:
        test_images = pd.read_parquet(conf.test_images)

    log.info('done')
    for i in range(5):
        if i != conf.fold:
            continue

        if "resnet" in conf.arch or "resnext" in conf.arch:
            model_ft = ResNet(conf,
                              arch_name=conf.arch,
                              input_size=conf.image_size)
            model_ft.load_state_dict(
                torch.load("result/baseline_2020_03_21_13_01_08/model_0.pkl"))
        elif "densenet" in conf.arch:
            model_ft = DenseNet(conf,
                                arch_name=conf.arch,
                                input_size=conf.image_size)
        elif "efficientnet" in conf.arch:
            model_ft = EfficientNet(conf, arch_name=conf.arch)

        criterion = [
            nn.CrossEntropyLoss(reduction="none"),
            nn.CrossEntropyLoss(reduction="none"),
            nn.CrossEntropyLoss(reduction="none")
        ]
        criterion = [c.to(device) for c in criterion]

        model_ft, val_preds = train_model(train_df,
                                          train_images,
                                          test_df,
                                          test_images,
                                          model_ft,
                                          criterion,
                                          log,
                                          device,
                                          result_dir,
                                          fold=i,
                                          num_epoch=conf.num_epoch)

        torch.save(model_ft.state_dict(), result_dir / f'model_{i}.pkl')
        np.save(result_dir / f'val_preds_{i}.npy', val_preds)
Example #10
    def __init__(self, model, ema_model, lr=0.002, alpha=0.999,
                 model_version='efficient', efficient_version='b0',
                 num_classes=7, device='cuda'):
        self.model = model
        self.ema_model = ema_model
        self.alpha = alpha
        self.lr = lr
        self.device = device

        if model_version == 'resnet':
            self.tmp_model = WideResNet(num_classes=num_classes)
        else:
            self.tmp_model = EfficientNet(version=efficient_version,
                                          num_classes=num_classes)
        self.wd = 0.02 * self.lr

        for param, ema_param in zip(self.model.parameters(), \
                                    self.ema_model.parameters()):
            ema_param.data.copy_(param.data)
Example #11
    def __init__(self, backbone, lr_channels=17, hr_channels=5):
        super(isCNN, self).__init__()

        self.model_name = backbone
        self.lr_backbone = EfficientNet.from_name(backbone)
        self.hr_backbone = EfficientNet.from_name(backbone)

        self.lr_final = nn.Sequential(double_conv(self.n_channels, 64), nn.Conv2d(64, lr_channels, 1))

        self.up_conv1 = up_conv(2*self.n_channels+lr_channels, 512)
        self.double_conv1 = double_conv(self.size[0], 512)
        self.up_conv2 = up_conv(512, 256)
        self.double_conv2 = double_conv(self.size[1], 256)
        self.up_conv3 = up_conv(256, 128)
        self.double_conv3 = double_conv(self.size[2], 128)
        self.up_conv4 = up_conv(128, 64)
        self.double_conv4 = double_conv(self.size[3], 64)
        self.up_conv_input = up_conv(64, 32)
        self.double_conv_input = double_conv(self.size[4], 32)
        self.hr_final = nn.Conv2d(self.size[5], hr_channels, kernel_size=1)
Example #12
def create_model(args, model, efficient_version='b0', ema=False):

    if model == 'resnet':
        model = WideResNet(num_classes=args.num_classes)
    elif model == 'efficient':
        model = EfficientNet(version=efficient_version,
                             num_classes=args.num_classes)
    model = model.to(args.device)

    if ema:
        for param in model.parameters():
            param.detach_()

    return model
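A hedged sketch (not part of the original snippet) of pairing create_model for mean-teacher style training, mirroring the WeightEMA helper shown in Example #21; the args object with num_classes and device fields is assumed:

# hedged usage sketch: build a student/teacher pair; the ema copy is detached
# above, so the optimizer never updates it directly
model = create_model(args, 'efficient', efficient_version='b0')
ema_model = create_model(args, 'efficient', efficient_version='b0', ema=True)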
Example #13
    def __init__(self,
                 num_classes=21,
                 levels=3,
                 num_channels=128,
                 model_name='efficientnet-b0'):
        super(EfficientDet, self).__init__()
        self.efficientnet = EfficientNet.from_pretrained(model_name)

        self.BIFPN = BIFPN(in_channels=[40, 80, 112, 192, 320],
                                out_channels=256,
                                num_outs=5)
        self.regressionModel = RegressionModel(256)
        self.classificationModel = ClassificationModel(256, num_classes=num_classes)
        self.anchors = Anchors()
Example #14
 def __init__(self, version, num_classes, implementation="EN-torch", pretrained=True):
     super().__init__()
     if implementation == "ours":
         self.model = EfficientNet(version, num_classes)
     elif implementation == "EN-torch":
         if pretrained:
             self.model = EN_torch.from_pretrained(f"efficientnet-{version}",
                                                   num_classes=num_classes)
         else:
             self.model = EN_torch.from_name(f"efficientnet-{version}",
                                             num_classes=num_classes)
     else:
         raise NotImplementedError(
             "The implementation must be either [ours, EN-torch]."
             f"\nyour input: {implementation}")
Example #15
    def __init__(self,
                num_class = 21,
                levels = 3,
                num_channels = 128,
                model_name = 'efficientnet-b0'):
        super(EfficientDet, self).__init__()
        self.num_class = num_class 
        self.levels = levels
        self.num_channels = num_channels
        self.efficientnet = EfficientNet.from_pretrained(model_name)
        print('efficientnet: ', self.efficientnet)
        self.bifpn = BiFPN(num_channels = self.num_channels)

        self.cfg = (coco, voc)[num_class == 21]
        self.priorbox = PriorBox(self.cfg)
        self.priors = Variable(self.priorbox.forward(), volatile=True)
Example #16
def main(args):
    # Step 1: parse args config
    logging.basicConfig(
        format=
        '[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] [%(levelname)s] %(message)s',
        level=logging.INFO,
        handlers=[
            logging.FileHandler(args.log_file, mode='w'),
            logging.StreamHandler()
        ])
    print_args(args)

    # Step 2: model, criterion, optimizer, scheduler
    # model = MobileNetV3(mode='large').to(args.device)

    model = EfficientNet.from_name(args.arch).to(args.device)
    # auxiliarynet = AuxiliaryNet().to(args.device)
    auxiliarynet = None

    checkpoint = torch.load(args.model_path)
    model.load_state_dict(checkpoint['model'])

    # step 3: data
    # argumetion
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # mpiidataset = MPIIDatasets(args.dataroot, train=True, transforms=transform)
    # train_dataset = GazeCaptureDatasets(args.dataroot, train=True, transforms=transform)

    # mpii_val_dataset = MPIIDatasets(args.val_dataroot, train=False, transforms=transform)
    val_dataset = GazeCaptureDatasets(args.val_dataroot,
                                      train=True,
                                      transforms=transform)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.val_batchsize,
                                shuffle=False,
                                num_workers=args.workers)

    # step 4: run
    val_loss, val_error = validate(args, val_dataloader, model, auxiliarynet,
                                   1)
    print("val_loss: '{}' val_error: '{}'".format(val_loss, val_error))
Example #17
 def __init__(self, net='xception', feature_layer='b3', attention_layer='final',
              num_classes=2, M=8, mid_dims=256, dropout_rate=0.5,
              drop_final_rate=0.5, pretrained=False, alpha=0.05, size=(380, 380),
              margin=1, inner_margin=[0.01, 0.02]):
     super(MAT, self).__init__()
     self.num_classes = num_classes
     self.M = M
     if 'xception' in net:
         self.net = xception(num_classes)
     elif net.split('-')[0] == 'efficientnet':
         self.net = EfficientNet.from_pretrained(net,
                                                 advprop=True,
                                                 num_classes=num_classes)
     self.feature_layer = feature_layer
     self.attention_layer = attention_layer
     with torch.no_grad():
         layers = self.net(torch.zeros(1, 3, size[0], size[1]))
     num_features = layers[self.feature_layer].shape[1]
     self.mid_dims = mid_dims
     if pretrained:
         a = torch.load(pretrained, map_location='cpu')
         keys = {
             i: a['state_dict'][i]
             for i in a.keys() if i.startswith('net')
         }
         if not keys:
             keys = a['state_dict']
         self.net.load_state_dict(keys, strict=False)
     self.attentions = AttentionMap(layers[self.attention_layer].shape[1],
                                    self.M)
     self.atp = AttentionPooling()
     self.texture_enhance = Texture_Enhance_v2(num_features, M)
     self.num_features = self.texture_enhance.output_features
     self.num_features_d = self.texture_enhance.output_features_d
     self.projection_local = nn.Sequential(
         nn.Linear(M * self.num_features, mid_dims), nn.Hardswish(),
         nn.Linear(mid_dims, mid_dims))
     self.project_final = nn.Linear(layers['final'].shape[1], mid_dims)
     self.ensemble_classifier_fc = nn.Sequential(
         nn.Linear(mid_dims * 2, mid_dims), nn.Hardswish(),
         nn.Linear(mid_dims, num_classes))
     self.auxiliary_loss = Auxiliary_Loss_v2(M, self.num_features_d,
                                             num_classes, alpha, margin,
                                             inner_margin)
     self.dropout = nn.Dropout2d(dropout_rate, inplace=True)
     self.dropout_final = nn.Dropout(drop_final_rate, inplace=True)
Example #18
    def __init__(self,
                 num_classes,
                 network='efficientdet-d0',
                 D_bifpn=3,
                 W_bifpn=88,
                 D_class=3,
                 is_training=True,
                 threshold=0.01,
                 iou_threshold=0.5):
        super(EfficientDet, self).__init__()
        # self.backbone = EfficientNet.from_pretrained(MODEL_MAP[network])
        self.backbone = EfficientNet.get_network_from_name(MODEL_MAP[network])

        # print backbone parameters
        # params = list(self.backbone.named_parameters())
        # for param_key, param_value in params:
        #     print("{},   {}".format(param_key, param_value.shape))
        #
        # for features in self.backbone.get_list_features():
        #     print(features)

        self.is_training = is_training
        self.neck = BIFPN(in_channels=self.backbone.get_list_features()[-5:],
                          out_channels=W_bifpn,
                          stack=D_bifpn,
                          num_outs=5)

        self.bbox_head = RetinaHead(num_classes=num_classes,
                                    in_channels=W_bifpn)

        self.anchors = Anchors()
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.threshold = threshold
        self.iou_threshold = iou_threshold
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.freeze_bn()
        self.criterion = FocalLoss()
Example #19
    def __init__(self,
                 num_classes,
                 network='efficientdet-d0',
                 D_bifpn=3,
                 W_bifpn=88,
                 D_class=3,
                 is_training=True,
                 threshold=0.5,
                 iou_threshold=0.5):
        super(EfficientDet, self).__init__()
        # self.efficientnet = EfficientNet.from_pretrained(MODEL_MAP[network])
        self.efficientnet = EfficientNet.from_name(
            MODEL_MAP[network], override_params={'num_classes': num_classes})
        self.is_training = is_training
        self.BIFPN = BIFPN(
            in_channels=self.efficientnet.get_list_features()[-5:],
            out_channels=W_bifpn,
            stack=D_bifpn,
            num_outs=5)
        self.regressionModel = RegressionModel(W_bifpn)
        self.classificationModel = ClassificationModel(W_bifpn,
                                                       num_classes=num_classes)
        self.anchors = Anchors()
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.threshold = threshold
        self.iou_threshold = iou_threshold

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        prior = 0.01

        self.classificationModel.output.weight.data.fill_(0)
        self.classificationModel.output.bias.data.fill_(-math.log(
            (1.0 - prior) / prior))
        self.regressionModel.output.weight.data.fill_(0)
        self.regressionModel.output.bias.data.fill_(0)
        self.freeze_bn()
Example #20
    def __init__(self,
                 num_class=21,
                 levels=3,
                 num_channels=128,
                 model_name='efficientnet-b0'):
        super(EfficientDet, self).__init__()
        self.num_class = num_class
        self.levels = levels
        self.num_channels = num_channels
        self.efficientnet = EfficientNet.from_pretrained(model_name)

        self.cfg = (coco, voc)[num_class == 21]
        self.priorbox = PriorBox(self.cfg)
        self.priors = Variable(self.priorbox.forward(), volatile=True)
        self.num_anchor = 9
        self.class_module = list()
        self.regress_module = list()
        for _ in range(3, 8):
            self.class_module.append(
                nn.Sequential(
                    nn.Conv2d(in_channels=self.num_channels,
                              out_channels=64,
                              kernel_size=2,
                              stride=1),
                    nn.Conv2d(in_channels=64,
                              out_channels=self.num_anchor * num_class,
                              kernel_size=2,
                              stride=1)))
            self.regress_module.append(
                nn.Sequential(
                    nn.Conv2d(in_channels=self.num_channels,
                              out_channels=64,
                              kernel_size=2,
                              stride=1),
                    nn.Conv2d(in_channels=64,
                              out_channels=self.num_anchor * 4,
                              kernel_size=2,
                              stride=1)))
            self.BIFPN = BIFPN(in_channels=[40, 80, 112, 192, 320],
                               out_channels=self.num_channels,
                               num_outs=5)
            self.sigmoid = nn.Sigmoid()
Example #21
class WeightEMA(object):
    def __init__(self, model, ema_model, lr=0.002, alpha=0.999,
                 model_version='efficient', efficient_version='b0',
                 num_classes=7, device='cuda'):
        self.model = model
        self.ema_model = ema_model
        self.alpha = alpha
        self.lr = lr
        self.device = device

        if model_version == 'resnet':
            self.tmp_model = WideResNet(num_classes=num_classes)
        else:
            self.tmp_model = EfficientNet(version=efficient_version,
                                          num_classes=num_classes)
        self.wd = 0.02 * self.lr

        for param, ema_param in zip(self.model.parameters(), \
                                    self.ema_model.parameters()):
            ema_param.data.copy_(param.data)

    def step(self, bn=False):
        self.model.to('cpu')
        self.ema_model.to('cpu')
        self.tmp_model.to('cpu')
        if bn:
            # copy batchnorm stats to ema model
            for ema_param, tmp_param in zip(self.ema_model.parameters(), \
                                            self.tmp_model.parameters()):

                tmp_param.data.copy_(ema_param.data.detach())

            self.ema_model.load_state_dict(self.model.state_dict())

            for ema_param, tmp_param in zip(self.ema_model.parameters(), \
                                            self.tmp_model.parameters()):
                ema_param.data.copy_(tmp_param.data.detach())
        else:
            one_minus_alpha = 1.0 - self.alpha
            for param, ema_param in zip(self.model.parameters(), \
                                        self.ema_model.parameters()):
                ema_param.data.mul_(self.alpha)
                ema_param.data.add_(param.data.detach() * one_minus_alpha)
                # customized weight decay
                param.data.mul_(1 - self.wd)
        self.model.to(self.device)
        self.ema_model.to(self.device)
        self.tmp_model.to(self.device)
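A minimal training-loop sketch (an assumption, not from the source) showing how WeightEMA.step() is typically driven after each optimizer update; model, ema_model, optimizer, criterion, loader and device are assumed to exist:

# hedged usage sketch for WeightEMA; every name besides WeightEMA is assumed
ema_opt = WeightEMA(model, ema_model, lr=0.002, alpha=0.999)
for images, labels in loader:
    loss = criterion(model(images.to(device)), labels.to(device))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema_opt.step()  # EMA update of the teacher weights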
Example #22
    def __init__(self, backbone, out_channels=2, concat_input=True):
        super().__init__()

        self.model_name = backbone
        self.backbone = EfficientNet.from_name(backbone)
        self.concat_input = concat_input

        self.up_conv1 = up_conv(self.n_channels, 512)
        self.double_conv1 = double_conv(self.size[0], 512)
        self.up_conv2 = up_conv(512, 256)
        self.double_conv2 = double_conv(self.size[1], 256)
        self.up_conv3 = up_conv(256, 128)
        self.double_conv3 = double_conv(self.size[2], 128)
        self.up_conv4 = up_conv(128, 64)
        self.double_conv4 = double_conv(self.size[3], 64)

        if self.concat_input:
            self.up_conv_input = up_conv(64, 32)
            self.double_conv_input = double_conv(self.size[4], 32)

        self.final_conv = nn.Conv2d(self.size[5], out_channels, kernel_size=1)
Example #23
def _infer(model: EfficientNet, img):
    # preprocess image
    aspect_ratio = img.size[0] / img.size[1]
    img = img.resize((int(224 * max(aspect_ratio, 1.0)),
                      int(224 * max(1.0 / aspect_ratio, 1.0))))

    img = np.array(img)
    y0, x0 = (np.asarray(img.shape)[:2] - 224) // 2
    img = img[y0:y0 + 224, x0:x0 + 224]

    # low level preprocess
    img = np.moveaxis(img, [2, 0, 1], [0, 1, 2])
    img = img.astype(np.float32)[:3].reshape(1, 3, 224, 224)
    img /= 255.0
    img -= np.array([0.485, 0.456, 0.406]).reshape((1, -1, 1, 1))
    img /= np.array([0.229, 0.224, 0.225]).reshape((1, -1, 1, 1))

    # run the net
    out = model.forward(Tensor(img)).cpu()
    class_id = np.argmax(out.data)
    return _LABELS[class_id]
Example #24
    def __init__(self):
        super(EfficientBase, self).__init__()
        self.model_name = 'efficientnet-b0'
        self.encoder = EfficientNet.from_name('efficientnet-b0')
        self.size = [1280, 80, 40, 24, 16]

        # sum
        self.decoder5 = self._make_deconv_layer(DecoderBlock,
                                                self.size[0],
                                                self.size[1],
                                                stride=2)
        self.decoder4 = self._make_deconv_layer(DecoderBlock,
                                                self.size[1],
                                                self.size[2],
                                                stride=2)
        self.decoder3 = self._make_deconv_layer(DecoderBlock,
                                                self.size[2],
                                                self.size[3],
                                                stride=2)
        self.decoder2 = self._make_deconv_layer(DecoderBlock,
                                                self.size[3],
                                                self.size[4],
                                                stride=2)
        self.decoder1 = self._make_deconv_layer(DecoderBlock, self.size[4],
                                                self.size[4])

        self.final = nn.Sequential(
            nn.ConvTranspose2d(16,
                               8,
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(8, 8, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(8, 2, kernel_size=1))
Example #25
import torch
import cv2
import numpy as np
import torchvision.transforms as tfs
import matplotlib.pyplot as plt
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader

from models.efficientnet import EfficientNet
from models.efficientdet import EfficientDet
from models.loss import FocalLoss

mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
#model = EfficientNet.from_pretrained('efficientnet-b0',False,True)
parameters = torch.load('weights/efficientnet-b0.pth')
model = EfficientNet.from_name('efficientnet-b0')
parameters = [v for _, v in parameters.items()]
model_state_dict = model.state_dict()
# remap by position: this assumes the saved file and model.state_dict()
# enumerate parameters in exactly the same order
for i, (k, v) in enumerate(model_state_dict.items()):
    model_state_dict[k] = parameters[i]
torch.save(model_state_dict, 'weights/efficientnet-b0.pth')
# the exploratory code below is kept disabled inside a triple-quoted block
'''
data_dir = 'data'
imgs_path = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]

imgs = [cv2.resize((cv2.imread(i)[...,::-1]/255 - mean)/std,(608,608)) for i in imgs_path]
imgs = torch.stack([torch.from_numpy(i.astype(np.float32)) for i in imgs], 0).permute(0, 3, 1, 2)

imgs = imgs[:4].cuda()
features = model.extract_features(imgs)
print(features.size())
'''
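A quick follow-up sketch (not in the original example) that reloads the re-keyed weights into a fresh model; it assumes the file written above and the same from_name constructor:

# sketch: reload the converted weights to confirm the keys now match the model
check_model = EfficientNet.from_name('efficientnet-b0')
check_model.load_state_dict(torch.load('weights/efficientnet-b0.pth'))
check_model.eval()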
Example #26
  img /= np.array([0.229, 0.224, 0.225]).reshape((1,-1,1,1))

  # run the net
  out = model.forward(Tensor(img)).cpu()

  # if you want to look at the outputs
  """
  import matplotlib.pyplot as plt
  plt.plot(out.data[0])
  plt.show()
  """
  return out, retimg

if __name__ == "__main__":
  # instantiate my net
  model = EfficientNet(int(os.getenv("NUM", "0")))
  model.load_from_pretrained()

  # category labels
  import ast
  lbls = fetch("https://gist.githubusercontent.com/yrevar/942d3a0ac09ec9e5eb3a/raw/238f720ff059c1f82f368259d1ca4ffa5dd8f9f5/imagenet1000_clsidx_to_labels.txt")
  lbls = ast.literal_eval(lbls.decode('utf-8'))

  # load image and preprocess
  from PIL import Image
  url = sys.argv[1]
  if url == 'webcam':
    import cv2
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    while 1:
Example #27
def test_model(args):
    # create model
    num_classes = 2
    if args.arch == 'efficientnet_b0':
        if args.pretrained:
            model = EfficientNet.from_pretrained("efficientnet-b0",
                                                 quantize=args.quantize,
                                                 num_classes=num_classes)
        else:
            model = EfficientNet.from_name(
                "efficientnet-b0",
                quantize=args.quantize,
                override_params={'num_classes': num_classes})
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'mobilenet_v1':
        model = mobilenet_v1(quantize=args.quantize, num_classes=num_classes)
        model = torch.nn.DataParallel(model).cuda()

        if args.pretrained:
            checkpoint = torch.load(args.resume)
            state_dict = checkpoint['state_dict']

            if num_classes != 1000:
                new_dict = {
                    k: v
                    for k, v in state_dict.items() if 'fc' not in k
                }
                state_dict = new_dict

            res = model.load_state_dict(state_dict, strict=False)

            for missing_key in res.missing_keys:
                assert 'quantize' in missing_key or 'fc' in missing_key

    elif args.arch == 'mobilenet_v2':
        model = mobilenet_v2(pretrained=args.pretrained,
                             num_classes=num_classes,
                             quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet18':
        model = resnet18(pretrained=args.pretrained,
                         num_classes=num_classes,
                         quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet50':
        model = resnet50(pretrained=args.pretrained,
                         num_classes=num_classes,
                         quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet152':
        model = resnet152(pretrained=args.pretrained,
                          num_classes=num_classes,
                          quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet164':
        model = resnet_164(num_classes=num_classes, quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'vgg11':
        model = vgg11(pretrained=args.pretrained,
                      num_classes=num_classes,
                      quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'vgg19':
        model = vgg19(pretrained=args.pretrained,
                      num_classes=num_classes,
                      quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    else:
        logging.info('No such model.')
        sys.exit()

    if args.resume and not args.pretrained:
        if os.path.isfile(args.resume):
            logging.info('=> loading checkpoint `{}`'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            logging.info('=> loaded checkpoint `{}` (epoch: {})'.format(
                args.resume, checkpoint['epoch']))
        else:
            logging.info('=> no checkpoint found at `{}`'.format(args.resume))

    cudnn.benchmark = False
    test_loader = prepare_test_data(dataset=args.dataset,
                                    datadir=args.datadir,
                                    batch_size=args.batch_size,
                                    shuffle=False,
                                    num_workers=args.workers)
    criterion = nn.CrossEntropyLoss().cuda()

    with torch.no_grad():
        prec1 = validate(args, test_loader, model, criterion, 0)
Example #28
    return total_flops


if __name__ == '__main__':
    if 'cifar10' in args.dataset:
        num_classes = 10
        input_res = 32
    elif 'cifar100' in args.dataset:
        num_classes = 100
        input_res = 32
    elif 'imagenet' in args.dataset:
        num_classes = 1000
        input_res = 224

    if args.arch == 'efficientnet_b0':
        model = EfficientNet.from_name(
            "efficientnet-b0", override_params={'num_classes': num_classes})

    elif args.arch == 'mobilenet_v1':
        model = mobilenet_v1(num_classes=num_classes)

    elif args.arch == 'mobilenet_v2':
        model = mobilenet_v2(num_classes=num_classes)

    elif args.arch == 'resnet18':
        model = resnet18(num_classes=num_classes)

    elif args.arch == 'resnet50':
        model = resnet50(num_classes=num_classes)

    elif args.arch == 'resnet152':
        model = resnet152(num_classes=num_classes)
Example #29
 def __init__(self, net_type='efficientnet-b0', num_classes=2):
     super(efficientnet, self).__init__()
     self.net = EfficientNet.from_pretrained(net_type,
                                             num_classes=num_classes)
Example #30
 def test_efficientnet(self):
     model = EfficientNet(0)
     X = np.zeros((BS, 3, 224, 224), dtype=np.float32)
     Y = np.zeros((BS), dtype=np.int32)
     train_one_step(model, X, Y)