Example #1
    def __init__(self,
                 net='xception',
                 feature_layer='b3',
                 num_classes=2,
                 dropout_rate=0.5,
                 pretrained=False):
        super().__init__()
        self.num_classes = num_classes
        if 'xception' in net:
            self.net = xception(num_classes, escape=feature_layer)
        elif net.split('-')[0] == 'efficientnet':
            self.net = EfficientNet.from_pretrained(net,
                                                    advprop=True,
                                                    num_classes=num_classes,
                                                    escape=feature_layer)
        self.feature_layer = feature_layer
        # Probe the backbone with a dummy input to discover the channel
        # count of the requested feature layer.
        with torch.no_grad():
            layers = self.net(torch.zeros(1, 3, 100, 100))
        num_features = layers[self.feature_layer].shape[1]
        if pretrained:  # `pretrained` doubles as a checkpoint path
            a = torch.load(pretrained, map_location='cpu')
            # Keep only the backbone weights (keys prefixed with 'net').
            keys = {
                i: a['state_dict'][i]
                for i in a['state_dict'] if i.startswith('net')
            }
            if not keys:
                keys = a['state_dict']
            load_state(self.net, keys)
        self.pooling = nn.AdaptiveAvgPool2d(1)
        self.texture_enhance = Texture_Enhance_v2(num_features, 1)
        self.num_features = self.texture_enhance.output_features
        self.fc = nn.Linear(self.num_features, self.num_classes)
        self.dropout = nn.Dropout(dropout_rate)
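
The dummy-forward trick above (pushing torch.zeros through the backbone and reading off a layer's channel count) lets the head adapt to any backbone without hard-coding feature sizes. A minimal, self-contained sketch of the same pattern, using a torchvision ResNet purely as a stand-in backbone:

import torch
import torch.nn as nn
from torchvision.models import resnet18

# Strip the pooling and fc head so the backbone returns feature maps.
backbone = nn.Sequential(*list(resnet18().children())[:-2])
with torch.no_grad():
    feat = backbone(torch.zeros(1, 3, 100, 100))  # dummy forward pass
num_features = feat.shape[1]  # channel count discovered at runtime (512 here)
head = nn.Linear(num_features, 2)
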
Example #2
    def __init__(self,
                 num_classes,
                 levels=3,
                 num_channels=128,
                 model_name='efficientdet-d0',
                 is_training=True,
                 threshold=0.5):
        super(EfficientDet, self).__init__()
        self.efficientnet = EfficientNet.from_pretrained(MODEL_MAP[model_name])
        self.is_training = is_training
        self.BIFPN = BIFPN(in_channels=self.efficientnet.get_list_features()[2:],
                           out_channels=256,
                           num_outs=5)
        self.regressionModel = RegressionModel(256)
        self.classificationModel = ClassificationModel(256, num_classes=num_classes)
        self.anchors = Anchors()
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.threshold = threshold

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        prior = 0.01
        
        self.classificationModel.output.weight.data.fill_(0)
        self.classificationModel.output.bias.data.fill_(-math.log((1.0-prior)/prior))
        self.regressionModel.output.weight.data.fill_(0)
        self.regressionModel.output.bias.data.fill_(0)
        self.freeze_bn()
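
The head initialization above is the focal-loss scheme from RetinaNet: with zero weights and bias b = -log((1 - prior) / prior), every anchor initially predicts foreground probability `prior`, which keeps the sea of easy negatives from dominating the first training steps. A quick sanity check of the arithmetic:

import math

prior = 0.01
bias = -math.log((1.0 - prior) / prior)      # ≈ -4.595
start_prob = 1.0 / (1.0 + math.exp(-bias))   # sigmoid(bias) ≈ prior ≈ 0.01
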
Example #3
    def __init__(self, cfg = None, phase = 'train'):
        """
        :param cfg:  Network related settings.
        :param phase: train or test.
        """
        super(EfficientDet, self).__init__()
        self.phase = phase
        self.num_classes = 2
        self.base = EfficientNet.from_pretrained('efficientnet-b3')
        self.fpn_flag = True
        if self.fpn_flag:
            in_channels_list = [48, 96, 136, 232]
            out_channels = 128
            self.ssh1 = DetectModule(out_channels)
            self.ssh2 = DetectModule(out_channels)
            self.ssh3 = DetectModule(out_channels)
            self.ssh4 = DetectModule(out_channels)
 
            '''
            self.ssh1 = SSH(out_channels, out_channels)
            self.ssh2 = SSH(out_channels, out_channels)
            self.ssh3 = SSH(out_channels, out_channels)
            self.ssh4 = SSH(out_channels, out_channels)
            '''
 
            self.fpn = FPN(in_channels_list, out_channels)
 
        self.loc, self.conf, self.landm = self.multibox(self.num_classes)
Example #4
    def __init__(self,
                 num_classes,
                 network='efficientdet-d0',
                 D_bifpn=3,
                 W_bifpn=88,
                 D_class=3,
                 is_training=True,
                 threshold=0.5,
                 iou_threshold=0.5,
                 gpu=1):
        super(EfficientDet, self).__init__()
        self.backbone = EfficientNet.from_pretrained(MODEL_MAP[network])
        self.is_training = is_training
        self.neck = BIFPN(in_channels=self.backbone.get_list_features()[-5:],
                          out_channels=W_bifpn,
                          stack=D_bifpn,
                          num_outs=5)
        self.bbox_head = RetinaHead(num_classes=num_classes,
                                    in_channels=W_bifpn)

        self.anchors = Anchors()
        self.regressBoxes = BBoxTransform()
        self.clipBoxes = ClipBoxes()
        self.threshold = threshold
        self.iou_threshold = iou_threshold
        self.gpu = gpu
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.freeze_bn()
        self.criterion = FocalLoss()
Example #5
def EfficientNet_B8(pretrained=True, num_class=5, onehot=1, onehot2=0):
    if pretrained:
        model = EfficientNet.from_pretrained('efficientnet-b8', num_classes=num_class, onehot=onehot, onehot2=onehot2)
        for name, param in model.named_parameters():
            if 'fc' not in name:
                param.requires_grad = False
    else:
        model = EfficientNet.from_name('efficientnet-b8', onehot=onehot, onehot2=onehot2)
    model.name = "EfficientNet_B8"    
    print("EfficientNet B7 Loaded!")

    return model
Example #6
def EfficientNet_B6(pretrained=True, num_class=5, advprop=False, onehot=1, onehot2=0):
    if pretrained:
        model = EfficientNet.from_pretrained('efficientnet-b6', num_classes=num_class, onehot=onehot, onehot2=onehot2)
        for name, param in model.named_parameters():
            if 'fc' not in name:  # and 'blocks.24' not in name and 'blocks.25' not in name
                param.requires_grad = False
    else:
        model = EfficientNet.from_name('efficientnet-b6', onehot=onehot, onehot2=onehot2)
    
    model.name = "EfficientNet_B6"
    print("EfficientNet B6 Loaded!")

    return model
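
Both factories freeze everything except the final fc layer, so only a small subset of parameters should reach the optimizer. A minimal sketch of that pairing (the optimizer and its hyperparameters are illustrative assumptions, not part of the original code):

import torch

model = EfficientNet_B6(pretrained=True, num_class=5)
trainable = [p for p in model.parameters() if p.requires_grad]  # fc only
optimizer = torch.optim.SGD(trainable, lr=1e-3, momentum=0.9)
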
Example #7
    def __init__(self,
                 num_classes=21,
                 levels=3,
                 num_channels=128,
                 model_name='efficientnet-b0'):
        super(EfficientDet, self).__init__()
        self.efficientnet = EfficientNet.from_pretrained(model_name)

        self.BIFPN = BIFPN(in_channels=[40, 80, 112, 192, 320],
                           out_channels=256,
                           num_outs=5)
        self.regressionModel = RegressionModel(256)
        self.classificationModel = ClassificationModel(256, num_classes=num_classes)
        self.anchors = Anchors()
Example #8
    def __init__(self,
                 num_class=21,
                 levels=3,
                 num_channels=128,
                 model_name='efficientnet-b0'):
        super(EfficientDet, self).__init__()
        self.num_class = num_class
        self.levels = levels
        self.num_channels = num_channels
        self.efficientnet = EfficientNet.from_pretrained(model_name)
        print('efficientnet: ', self.efficientnet)
        self.bifpn = BiFPN(num_channels=self.num_channels)

        self.cfg = (coco, voc)[num_class == 21]
        self.priorbox = PriorBox(self.cfg)
        # `volatile` was removed in PyTorch 0.4; build priors under no_grad instead.
        with torch.no_grad():
            self.priors = self.priorbox.forward()
Example #9
    def __init__(self,
                 net='xception',
                 feature_layer='b3',
                 attention_layer='final',
                 num_classes=2,
                 M=8,
                 mid_dims=256,
                 dropout_rate=0.5,
                 drop_final_rate=0.5,
                 pretrained=False,
                 alpha=0.05,
                 size=(380, 380),
                 margin=1,
                 inner_margin=[0.01, 0.02]):
        super(MAT, self).__init__()
        self.num_classes = num_classes
        self.M = M
        if 'xception' in net:
            self.net = xception(num_classes)
        elif net.split('-')[0] == 'efficientnet':
            self.net = EfficientNet.from_pretrained(net,
                                                    advprop=True,
                                                    num_classes=num_classes)
        self.feature_layer = feature_layer
        self.attention_layer = attention_layer
        # Probe feature shapes with a dummy forward pass.
        with torch.no_grad():
            layers = self.net(torch.zeros(1, 3, size[0], size[1]))
        num_features = layers[self.feature_layer].shape[1]
        self.mid_dims = mid_dims
        if pretrained:  # `pretrained` doubles as a checkpoint path
            a = torch.load(pretrained, map_location='cpu')
            # Keep only the backbone weights (keys prefixed with 'net').
            keys = {
                i: a['state_dict'][i]
                for i in a['state_dict'] if i.startswith('net')
            }
            if not keys:
                keys = a['state_dict']
            self.net.load_state_dict(keys, strict=False)
        self.attentions = AttentionMap(layers[self.attention_layer].shape[1],
                                       self.M)
        self.atp = AttentionPooling()
        self.texture_enhance = Texture_Enhance_v2(num_features, M)
        self.num_features = self.texture_enhance.output_features
        self.num_features_d = self.texture_enhance.output_features_d
        self.projection_local = nn.Sequential(
            nn.Linear(M * self.num_features, mid_dims), nn.Hardswish(),
            nn.Linear(mid_dims, mid_dims))
        self.project_final = nn.Linear(layers['final'].shape[1], mid_dims)
        self.ensemble_classifier_fc = nn.Sequential(
            nn.Linear(mid_dims * 2, mid_dims), nn.Hardswish(),
            nn.Linear(mid_dims, num_classes))
        self.auxiliary_loss = Auxiliary_Loss_v2(M, self.num_features_d,
                                                num_classes, alpha, margin,
                                                inner_margin)
        self.dropout = nn.Dropout2d(dropout_rate, inplace=True)
        self.dropout_final = nn.Dropout(drop_final_rate, inplace=True)
Example #10
    def __init__(self,
                 num_class=21,
                 levels=3,
                 num_channels=128,
                 model_name='efficientnet-b0'):
        super(EfficientDet, self).__init__()
        self.num_class = num_class
        self.levels = levels
        self.num_channels = num_channels
        self.efficientnet = EfficientNet.from_pretrained(model_name)

        self.cfg = (coco, voc)[num_class == 21]
        self.priorbox = PriorBox(self.cfg)
        # `volatile` was removed in PyTorch 0.4; build priors under no_grad instead.
        with torch.no_grad():
            self.priors = self.priorbox.forward()
        self.num_anchor = 9
        # Use nn.ModuleList (not a plain list) so the heads register as submodules.
        self.class_module = nn.ModuleList()
        self.regress_module = nn.ModuleList()
        for _ in range(3, 8):
            self.class_module.append(
                nn.Sequential(
                    nn.Conv2d(in_channels=self.num_channels,
                              out_channels=64,
                              kernel_size=2,
                              stride=1),
                    nn.Conv2d(in_channels=64,
                              out_channels=self.num_anchor * num_class,
                              kernel_size=2,
                              stride=1)))
            self.regress_module.append(
                nn.Sequential(
                    nn.Conv2d(in_channels=self.num_channels,
                              out_channels=64,
                              kernel_size=2,
                              stride=1),
                    nn.Conv2d(in_channels=64,
                              out_channels=self.num_anchor * 4,
                              kernel_size=2,
                              stride=1)))
        # Build the BiFPN once, outside the loop; the original rebuilt it on
        # every iteration and only the last copy survived.
        self.BIFPN = BIFPN(in_channels=[40, 80, 112, 192, 320],
                           out_channels=self.num_channels,
                           num_outs=5)
        self.sigmoid = nn.Sigmoid()
Example #11
def test_model(args):
    # create model
    num_classes = 2
    if args.arch == 'efficientnet_b0':
        if args.pretrained:
            model = EfficientNet.from_pretrained("efficientnet-b0",
                                                 quantize=args.quantize,
                                                 num_classes=num_classes)
        else:
            model = EfficientNet.from_name(
                "efficientnet-b0",
                quantize=args.quantize,
                override_params={'num_classes': num_classes})
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'mobilenet_v1':
        model = mobilenet_v1(quantize=args.quantize, num_classes=num_classes)
        model = torch.nn.DataParallel(model).cuda()

        if args.pretrained:
            checkpoint = torch.load(args.resume)
            state_dict = checkpoint['state_dict']

            if num_classes != 1000:
                new_dict = {
                    k: v
                    for k, v in state_dict.items() if 'fc' not in k
                }
                state_dict = new_dict

            res = model.load_state_dict(state_dict, strict=False)

            for missing_key in res.missing_keys:
                assert 'quantize' in missing_key or 'fc' in missing_key

    elif args.arch == 'mobilenet_v2':
        model = mobilenet_v2(pretrained=args.pretrained,
                             num_classes=num_classes,
                             quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet18':
        model = resnet18(pretrained=args.pretrained,
                         num_classes=num_classes,
                         quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet50':
        model = resnet50(pretrained=args.pretrained,
                         num_classes=num_classes,
                         quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet152':
        model = resnet152(pretrained=args.pretrained,
                          num_classes=num_classes,
                          quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'resnet164':
        model = resnet_164(num_classes=num_classes, quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'vgg11':
        model = vgg11(pretrained=args.pretrained,
                      num_classes=num_classes,
                      quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    elif args.arch == 'vgg19':
        model = vgg19(pretrained=args.pretrained,
                      num_classes=num_classes,
                      quantize=args.quantize)
        model = torch.nn.DataParallel(model).cuda()

    else:
        logging.info('No such model.')
        sys.exit()

    if args.resume and not args.pretrained:
        if os.path.isfile(args.resume):
            logging.info('=> loading checkpoint `{}`'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            logging.info('=> loaded checkpoint `{}` (epoch: {})'.format(
                args.resume, checkpoint['epoch']))
        else:
            logging.info('=> no checkpoint found at `{}`'.format(args.resume))

    cudnn.benchmark = False
    test_loader = prepare_test_data(dataset=args.dataset,
                                    datadir=args.datadir,
                                    batch_size=args.batch_size,
                                    shuffle=False,
                                    num_workers=args.workers)
    criterion = nn.CrossEntropyLoss().cuda()

    with torch.no_grad():
        prec1 = validate(args, test_loader, model, criterion, 0)
Example #12
def main():
    args = parser.parse_args()
    log = logger(args)
    log.write('V' * 50 + " configs " + 'V' * 50 + '\n')
    log.write(args)
    log.write('')
    log.write('Λ' * 50 + " configs " + 'Λ' * 50 + '\n')

    # load data
    input_size = (224, 224)
    dataset = DataLoader(args, input_size)
    train_data, val_data = dataset.load_data()

    num_classes = dataset.num_classes
    classes = dataset.classes
    log.write('\n\n')
    log.write('V' * 50 + " data " + 'V' * 50 + '\n')
    log.info('successfully loaded data.')
    log.info('num classes: %s' % num_classes)
    log.info('classes: ' + str(classes) + '\n')
    log.write('Λ' * 50 + " data " + 'Λ' * 50 + '\n')

    # Random seed
    if args.manual_seed is None:
        args.manual_seed = random.randint(1, 10000)
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    np.random.seed(args.manual_seed)
    log.write('random seed is %s' % args.manual_seed)

    # pretrained or not
    log.write('\n\n')
    log.write('V' * 50 + " model " + 'V' * 50 + '\n')
    if args.pretrained:
        log.info("using pre-trained model")
    else:
        log.info("creating model from initial")

    # model
    log.info('using model: %s' % args.arch)
    log.write('')
    log.write('Λ' * 50 + " model " + 'Λ' * 50 + '\n')

    # resume model
    if args.resume:
        log.info('using resume model: %s' % args.resume)
        states = torch.load(args.resume)
        model = states['model']
        model.load_state_dict(states['state_dict'])
    else:
        log.info('not using resume model')
        if args.arch.startswith('dla'):
            model = eval(args.arch)(args.pretrained, num_classes)

        elif args.arch.startswith('efficientnet'):
            if args.pretrained:
                model = EfficientNet.from_pretrained(args.arch,
                                                     num_classes=num_classes)
            else:
                model = EfficientNet.from_name(args.arch,
                                               num_classes=num_classes)
        else:
            model = make_model(model_name=args.arch,
                               num_classes=num_classes,
                               pretrained=args.pretrained,
                               pool=nn.AdaptiveAvgPool2d(output_size=1),
                               classifier_factory=None,
                               input_size=input_size,
                               original_model_state_dict=None,
                               catch_output_size_exception=True)

    # cuda
    have_cuda = torch.cuda.is_available()
    use_cuda = args.use_gpu and have_cuda
    log.info('using cuda: %s' % use_cuda)
    if have_cuda and not use_cuda:
        log.info(
            '\nWARNING: found a GPU but not using it; enable it with -ug or --use-gpu\n'
        )

    multi_gpus = False
    if use_cuda:
        torch.backends.cudnn.benchmark = True
        if args.multi_gpus:
            gpus = torch.cuda.device_count()
            multi_gpus = gpus > 1

    if multi_gpus:
        log.info('using multi gpus, found %d gpus.' % gpus)
        model = torch.nn.DataParallel(model).cuda()
    elif use_cuda:
        model = model.cuda()

    # criterion
    log.write('\n\n')
    log.write('V' * 50 + " criterion " + 'V' * 50 + '\n')
    if args.label_smoothing > 0 and args.mixup == 1:
        criterion = CrossEntropyWithLabelSmoothing()
        log.info('using label smoothing criterion')

    elif args.label_smoothing > 0 and args.mixup < 1:
        criterion = CrossEntropyWithMixup()
        log.info('using label smoothing and mixup criterion')

    elif args.mixup < 1 and args.label_smoothing == 0:
        criterion = CrossEntropyWithMixup()
        log.info('using mixup criterion')

    else:
        criterion = nn.CrossEntropyLoss()
        log.info('using normal cross entropy criterion')

    if use_cuda:
        criterion = criterion.cuda()

    log.write('using criterion: %s' % str(criterion))
    log.write('')
    log.write('Λ' * 50 + " criterion " + 'Λ' * 50 + '\n')
    # optimizer
    log.write('\n\n')
    log.write('V' * 50 + " optimizer " + 'V' * 50 + '\n')
    if args.linear_scaling:
        args.lr = 0.1 * args.train_batch / 256
    log.write('initial lr: %4f\n' % args.lr)
    if args.no_bias_decay:
        log.info('using no bias weight decay')
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            args.weight_decay
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]
        optimizer = optim.SGD(optimizer_grouped_parameters,
                              lr=args.lr,
                              momentum=args.momentum)

    else:
        log.info('using bias weight decay')
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)

    if args.resume:
        optimizer.load_state_dict(states['optimizer'])
    log.write('using optimizer: %s' % str(optimizer))
    log.write('')
    log.write('Λ' * 50 + " optimizer " + 'Λ' * 50 + '\n')
    # low precision
    use_low_precision_training = args.low_precision_training
    if use_low_precision_training:
        from apex import amp
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

    # lr scheduler
    iters_per_epoch = int(np.ceil(len(train_data) / args.train_batch))
    total_iters = iters_per_epoch * args.epochs
    log.write('\n\n')
    log.write('V' * 50 + " lr_scheduler " + 'V' * 50 + '\n')
    if args.warmup:
        log.info('using warmup scheduler, warmup epochs: %d' %
                 args.warmup_epochs)
        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, iters_per_epoch * args.warmup_epochs, eta_min=1e-6)
    elif args.cosine:
        log.info('using cosine lr scheduler')
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                         T_max=total_iters)

    else:
        log.info('using normal lr decay scheduler')
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         factor=0.5,
                                                         patience=10,
                                                         min_lr=1e-6,
                                                         mode='min')

    log.write('using lr scheduler: %s' % str(scheduler))
    log.write('')
    log.write('Λ' * 50 + " lr_scheduler " + 'Λ' * 50 + '\n')

    log.write('\n\n')
    log.write('V' * 50 + " training start " + 'V' * 50 + '\n')
    best_acc = 0
    start = time.time()
    log.info('\nstart training ...')
    for epoch in range(1, args.epochs + 1):
        lr = optimizer.param_groups[-1]['lr']
        train_loss, train_acc = train_one_epoch(
            log, scheduler, train_data, model, criterion, optimizer, use_cuda,
            use_low_precision_training, args.label_smoothing, args.mixup)
        test_loss, test_acc = val_one_epoch(log, val_data, model, criterion,
                                            use_cuda)
        end = time.time()
        log.info(
            'epoch: [%d / %d], time spent(s): %.2f, mean time: %.2f, lr: %.4f, train loss: %.4f, train acc: %.4f, '
            'test loss: %.4f, test acc: %.4f' %
            (epoch, args.epochs, end - start, (end - start) / epoch, lr,
             train_loss, train_acc, test_loss, test_acc))
        states = dict()
        states['arch'] = args.arch
        if multi_gpus:
            states['model'] = model.module
            states['state_dict'] = model.module.state_dict()
        else:
            states['model'] = model
            states['state_dict'] = model.state_dict()
        states['optimizer'] = optimizer.state_dict()
        states['test_acc'] = test_acc
        states['train_acc'] = train_acc
        states['epoch'] = epoch
        states['classes'] = classes
        is_best = test_acc > best_acc
        if is_best:
            best_acc = test_acc
        log.save_checkpoint(states, is_best)

    log.write('\ntraining finished.')
    log.write('Λ' * 50 + " training finished " + 'Λ' * 50 + '\n')
    log.log_file.close()
    log.writer.close()
Example #13
def main(args):
    # Step 1: parse args config
    logging.basicConfig(
        format=
        '[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] [%(levelname)s] %(message)s',
        level=logging.INFO,
        handlers=[
            logging.FileHandler(args.log_file, mode='w'),
            logging.StreamHandler()
        ])
    print_args(args)

    # Step 2: model, criterion, optimizer, scheduler
    # model = MobileNetV3(mode='large').to(args.device)

    if args.pretrained:
        model = EfficientNet.from_pretrained(args.arch).to(args.device)
        print("=> using pre-trained model '{}'".format(args.arch))
    else:
        print("=> creating model '{}'".format(args.arch))
        model = EfficientNet.from_name(args.arch).to(args.device)
    auxiliarynet = AuxiliaryNet().to(args.device)
    # auxiliarynet = AuxiliaryNet()
    criterion = GazeLoss()
    optimizer = torch.optim.Adam([{
        'params': model.parameters()
    }, {
        'params': auxiliarynet.parameters()
    }],
                                 lr=args.base_lr,
                                 weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode='min',
        patience=args.lr_patience,
        verbose=True,
        min_lr=args.min_lr)

    # optionally resume from a checkpoint
    min_error = 1e6
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            min_error = checkpoint['error']
            model.load_state_dict(checkpoint['model'])
            auxiliarynet.load_state_dict(checkpoint['aux'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {}) {:.3f}".format(
                args.resume, checkpoint['epoch'], min_error))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # step 3: data
    # augmentation
    # transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.ToTensor(),
    ])

    # train_dataset = MPIIDatasets(args.dataroot, train=True, transforms=transform)
    train_dataset = GazeCaptureDatasets(args.dataroot,
                                        train=True,
                                        transforms=transform)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.train_batchsize,
                                  shuffle=True,
                                  num_workers=args.workers,
                                  drop_last=True)

    # val_dataset = MPIIDatasets(args.val_dataroot, train=False, transforms=transform)
    val_dataset = GazeCaptureDatasets(args.val_dataroot,
                                      train=False,
                                      transforms=transform)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.val_batchsize,
                                shuffle=False,
                                num_workers=args.workers)

    # step 4: run
    writer = SummaryWriter(args.tensorboard)
    for epoch in range(args.start_epoch, args.end_epoch + 1):
        train_loss, train_error = train(args, train_dataloader, model,
                                        auxiliarynet, criterion, optimizer,
                                        epoch)

        val_loss, val_error = validate(args, val_dataloader, model,
                                       auxiliarynet, criterion, epoch)
        filename = os.path.join(str(args.snapshot),
                                "checkpoint_epoch_" + str(epoch) + '.pth.tar')
        is_best = min_error > val_error
        min_error = min(min_error, val_error)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': model.state_dict(),
                'aux': auxiliarynet.state_dict(),
                'optimizer': optimizer.state_dict(),
                'error': min_error,
            }, is_best, filename)
        scheduler.step(val_loss)
        writer.add_scalars('data/error', {
            'val error': val_error,
            'train error ': train_error
        }, epoch)
        writer.add_scalars('data/loss', {
            'val loss': val_loss,
            'train loss': train_loss
        }, epoch)
    writer.close()
Example #14
import torch
from models import EfficientDet
from models.efficientnet import EfficientNet

if __name__ == '__main__':

    # Test EfficientNet
    model = EfficientNet.from_pretrained('efficientnet-b0')
    inputs = torch.randn(4, 3, 512, 512)
    P = model(inputs)
    for idx, p in enumerate(P):
        print('P{}: {}'.format(idx, p.size()))

    # print('model: ', model)

    # Test inference
    model = EfficientDet(num_classes=20, is_training=False)
    output = model(inputs)
    for out in output:
        print(out.size())
Example #15
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res


data_dir = '/media/zhoun/新加卷1/data/ILSVRC/val'

import platform
if platform.system() == 'Windows':
    data_dir = r'E:\data\ILSVRC\val'
model_path = 'weights/efficientnet-b0.pth'

img_tfs = tfs.Compose([
    tfs.Resize((224, 224)),
    tfs.ToTensor(),
    tfs.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

if __name__ == '__main__':

    valset = ImageFolder(data_dir, transform=img_tfs)
    loader = DataLoader(valset, 100, False, num_workers=6, pin_memory=True)
    criterion = torch.nn.CrossEntropyLoss()
    net = EfficientNet.from_pretrained('efficientnet-b0', False)
    net.load_state_dict(torch.load(model_path))

    top1 = validate(loader, net, criterion)
Example #16
        .split_from_df()
        .label_from_df(cols='Detected',label_cls=FloatList)
        .transform(tfms)
        .databunch(bs=bs,num_workers=4)
        .normalize(IMAGE_STATS_GLOBAL2)
       )


from fastai.vision.models import resnet50
from models.efficientnet import EfficientNet
#making model
arch = 'efficientnet-b0'
model_name = f'{arch}-v1'
# Parameters for the entire model (stem, all blocks, and head)

md_ef = EfficientNet.from_pretrained(arch, num_classes=1, dropout_rate=0.5)
# md_ef = resnet50(pretrained=False, num_classes=1)

learn = Learner(data, md_ef, opt_func=optar,
                metrics=[accuracy_thresh],
                model_dir='fastai-class1').to_fp16()
learn.path = Path(DATA_BASE_PATH)


# First
learn.unfreeze()
learn.fit_one_cycle(10, max_lr=1e-2)
learn.save(f'{model_name}')


Example #17
    def __init__(self, net_type='efficientnet-b0', num_classes=2):
        super(efficientnet, self).__init__()
        self.net = EfficientNet.from_pretrained(net_type,
                                                num_classes=num_classes)
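
A thin wrapper like this is used as a drop-in classifier. A minimal usage sketch; the excerpt does not show a forward method, so calling self.net directly is an assumption about how the class is used:

import torch

model = efficientnet(net_type='efficientnet-b0', num_classes=2)
logits = model.net(torch.randn(1, 3, 224, 224))  # shape: (1, 2)
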
Example #18
    def build_backbone(self,
                       base_model_name: str,
                       pretrained: bool,
                       finetune: bool,
                       layer_freezed=3,
                       num_classes: int = 4):
        base_model_accepted = [
            "mobilenetv2",
            "vgg16",
            "resnet18",
            "resnet50",
            "resnext50",
            "seresnext50",
            "seresnext101",
        ]

        efficientnetns = ["efficientnetnsb" + str(i) for i in range(1, 8)]
        efficientnet = ["efficientnetb" + str(i) for i in range(1, 8)]
        base_model_accepted += efficientnetns + efficientnet

        # Mobilenet v2
        if base_model_name == "mobilenetv2":
            backbone = torchvision.models.mobilenet_v2(pretrained).features
            if finetune:
                self.set_grad_for_finetunning(backbone, 7)
            num_ftrs = backbone.classifier[-1].in_features
            backbone.classifier[-1] = torch.nn.Linear(num_ftrs, num_classes)
        # VGG 16
        elif base_model_name == "vgg16":
            backbone = torchvision.models.vgg16(pretrained).features
            if finetune:
                self.set_grad_for_finetunning(backbone, 10)
            num_ftrs = backbone.classifier[-1].in_features
            backbone.classifier[-1] = torch.nn.Linear(num_ftrs, num_classes)
        # ResNet 18
        elif base_model_name == "resnet18":
            backbone = torchvision.models.resnet18(pretrained)
            if finetune:
                self.set_grad_for_finetunning(backbone, 7)
            num_ftrs = backbone.fc.in_features
            backbone.fc = torch.nn.Linear(num_ftrs, num_classes)
        # ResNet 50
        elif base_model_name == "resnet50":
            backbone = torchvision.models.resnet50(pretrained)
            if finetune:
                self.set_grad_for_finetunning(backbone, 7)
            num_ftrs = backbone.fc.in_features
            backbone.fc = torch.nn.Linear(num_ftrs, num_classes)
        # ResNext 50
        elif base_model_name == "resnext50":
            backbone = torchvision.models.resnext50_32x4d(pretrained)
            if finetune:
                self.set_grad_for_finetunning(backbone, 7)
            num_ftrs = backbone.fc.in_features
            backbone.fc = torch.nn.Linear(num_ftrs, num_classes)
        # EfficientNet
        elif base_model_name[:-1] == "efficientnetb":
            n = base_model_name[-1]
            backbone = CustomEfficientNet.from_pretrained("efficientnet-b" +
                                                          str(n))
            if finetune:
                self.set_grad_for_finetunning(backbone, 3)
            num_ftrs = backbone._fc.in_features
            backbone._fc = torch.nn.Linear(num_ftrs, num_classes)
        # EfficientNet Noisy Student
        elif base_model_name[:-1] == "efficientnetnsb":
            n = base_model_name[-1]
            # Honor the `pretrained` flag instead of hard-coding it; the
            # classifier is replaced just below, so num_classes here is moot.
            backbone = timm.create_model(f"tf_efficientnet_b{n}_ns",
                                         num_classes=num_classes,
                                         pretrained=pretrained)
            if finetune:
                self.set_grad_for_finetunning(backbone, layer_freezed)
            num_ftrs = backbone.classifier.in_features
            backbone.classifier = torch.nn.Linear(num_ftrs, num_classes)
        # SE ResNeXt50
        elif base_model_name == "seresnext50":
            backbone = pretrainedmodels.se_resnext50_32x4d()
            if finetune:
                self.set_grad_for_finetunning(backbone, 3)
            num_ftrs = backbone.last_linear.in_features
            backbone.last_linear = torch.nn.Linear(num_ftrs, num_classes)
        # SE ResNeXt101
        elif base_model_name == "seresnext101":
            backbone = pretrainedmodels.se_resnext101_32x4d()
            if finetune:
                self.set_grad_for_finetunning(backbone, 3)
            num_ftrs = backbone.last_linear.in_features
            backbone.last_linear = torch.nn.Linear(num_ftrs, num_classes)
        else:
            print("Backbone model should be one of the following list: ")
            for name in base_model_accepted:
                print("     - {}".format(name))
            raise NotImplementedError
        return backbone
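
A usage sketch, assuming build_backbone lives on some factory object (the enclosing class is not shown in the excerpt, so `factory` here is a placeholder):

# Hypothetical call site; only the keyword names come from the signature above.
backbone = factory.build_backbone(base_model_name="resnet18",
                                  pretrained=True,
                                  finetune=True,
                                  num_classes=4)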