Example #1
for i in range(5):
    print(f'check {i}')
    test_image_dataset = data_fraction(
        datasets.ImageFolder(
            INPUT_PATH, (new_transform if GREEN_ESCAPE else test_transform)))
    test_set_size = len(test_image_dataset)
    max_batch_idx = test_set_size // BATCH_SIZE

    test_image_loaded = DataLoader(test_image_dataset,
                                   batch_size=BATCH_SIZE,
                                   shuffle=True,
                                   num_workers=4)

    device = torch.device("cuda:0")
    model = EfficientNet.from_pretrained(f'efficientnet-b{effi_version}',
                                         num_classes=num_classes)
    model.load_state_dict(torch.load(WEIGHTS_FILE))
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    # the optimizer and scheduler below belong to training; kept here but unused for inference
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
    # inference run
    since = time.time()
    running_loss = 0.0
    running_corrects = 0
    for batch_idx, (inputs, labels) in enumerate(test_image_loaded):
        inputs = inputs.to(device)
        labels = labels.to(device)
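        # -- The snippet is truncated here; a hedged sketch of how such an
        # -- inference loop typically continues (assumed, not part of the original):
        with torch.no_grad():
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            _, preds = torch.max(outputs, 1)
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
    print(f'loss: {running_loss / test_set_size:.4f}  '
          f'acc: {running_corrects.double() / test_set_size:.4f}  '
          f'({time.time() - since:.1f}s elapsed)')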
Example #2
    image_size = EfficientNet.get_image_size(model_name)  # 224

    # Open image
    img = Image.open(
        '../datasets/ilsvrc2012/images/val/ILSVRC2012_val_00000001.JPEG')
    # Preprocess image
    tfms = transforms.Compose([
        transforms.Resize(image_size),
        transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    img = tfms(img).unsqueeze(0)
    print(img.shape)
    # Classify with EfficientNet
    model = EfficientNet.from_pretrained(model_name)
    model.eval()
    with torch.no_grad():
        print('predicting')
        logits = model(img)
        print(type(logits))
        print(logits.shape)
    preds = torch.topk(logits, k=5).indices.squeeze(0)
    print(preds)
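
    # A hedged sketch of mapping the top-5 indices to readable names (assumed,
    # not part of the original snippet); `labels_map.json` is a hypothetical
    # index -> class-name mapping file.
    import json
    labels_map = json.load(open('labels_map.json'))
    probs = torch.softmax(logits, dim=1)
    for idx in preds:
        print(f'{labels_map[str(idx.item())]:<40} ({probs[0, idx].item() * 100:.2f}%)')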

    # parser = argparse.ArgumentParser()
    # parser.add_argument(
    #     "--batch_size", "-batch_size", help="Batch size", type=int, default=128
    # )
    # args = parser.parse_args()
    # print(args)
Example #3
                                  batch_size=config['batch_size'],
                                  shuffle=True,
                                  num_workers=8,
                                  drop_last=True,
                                  pin_memory=True)

    val_dataset = IDRND_dataset(mode=config['mode'].replace('train', 'val'),
                                use_face_detection=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=config['batch_size'],
                            shuffle=True,
                            num_workers=4,
                            drop_last=False)

    # model = Model(base_model = fishnet99())
    model = Model(base_model=EfficientNet.from_pretrained('efficientnet-b3'))
    #model = Model(base_model=resnet34(pretrained=True))
    summary(model, (3, 224, 224), device="cpu")

    dataowner = DataOwner(train_loader, val_loader, None)
    # criterion = torch.nn.BCELoss()
    # criterion = WeightedBCELoss(weights=[0.49, 0.51])  # does not converge
    criterion = FocalLoss()

    shutil.rmtree('../output/logs')
    os.mkdir('../output/logs')

    keker = Keker(
        model=model,
        dataowner=dataowner,
        criterion=criterion,
Example #4
def main(args):
    logger = bit_common.setup_logger(args)

    # Lets cuDNN benchmark conv implementations and choose the fastest.
    # Only good if sizes stay the same within the main loop!
    torch.backends.cudnn.benchmark = True

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    logger.info(f"Going to train on {device}")

    classes = 5

    train_set, valid_set, train_loader, valid_loader = mktrainval(args, logger)

    logger.info(f"Loading model from {args.model}.npz")
    #model = models.KNOWN_MODELS[args.model](head_size=classes, zero_head=True)
    #model.load_from(np.load(f"{args.model}.npz"))

    model = EfficientNet.from_pretrained(args.model, num_classes=classes)
    #model._fc = torch.nn.Linear(in_features=model._fc.in_features, out_features=classes)
    logger.info("Moving model onto all GPUs")
    model = torch.nn.DataParallel(model)

    # Optionally resume from a checkpoint.
    # Load it to CPU first as we'll move the model to GPU later.
    # This way, we save a little bit of GPU memory when loading.

    # Note: no weight-decay!
    optim = torch.optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
    savename = pjoin(args.logdir, args.name, "efficientdet.pth.tar")
    # Resume fine-tuning if we find a saved model.
    step = 0  # checkpoint-resume logic is omitted in this excerpt, so start at step 0

    model = model.to(device)
    optim.zero_grad()

    model.train()
    #mixup = bit_hyperrule.get_mixup(len(train_set))
    mixup = -1
    cri = torch.nn.CrossEntropyLoss().to(device)

    logger.info("Starting training!")
    chrono = lb.Chrono()
    accum_steps = 0
    mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1
    end = time.time()

    with lb.Uninterrupt() as u:
        for x, y in recycle(train_loader):
            # measure data loading time, which is spent in the `for` statement.
            chrono._done("load", time.time() - end)

            if u.interrupted:
                break

            # Schedule sending to GPU(s)
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)

            # Update learning-rate, including stop training if over.
            lr = bit_hyperrule.get_lr(step, len(train_set), args.base_lr)
            if lr is None:
                break
            for param_group in optim.param_groups:
                param_group["lr"] = lr

            if mixup > 0.0:
                x, y_a, y_b = mixup_data(x, y, mixup_l)

            # compute output
            with chrono.measure("fprop"):
                logits = model(x)
                if mixup > 0.0:
                    c = mixup_criterion(cri, logits, y_a, y_b, mixup_l)
                else:
                    c = cri(logits, y)
                c_num = float(
                    c.data.cpu().numpy())  # Also ensures a sync point.

            # Accumulate grads
            with chrono.measure("grads"):
                (c / args.batch_split).backward()
                accum_steps += 1

            accstep = f" ({accum_steps}/{args.batch_split})" if args.batch_split > 1 else ""
            logger.info(
                f"[step {step}{accstep}]: loss={c_num:.5f} (lr={lr:.1e})")  # pylint: disable=logging-format-interpolation
            logger.flush()

            # Update params
            if accum_steps == args.batch_split:
                with chrono.measure("update"):
                    optim.step()
                    optim.zero_grad()
                step += 1
                accum_steps = 0
                # Sample new mixup ratio for next batch
                mixup_l = np.random.beta(mixup, mixup) if mixup > 0 else 1

                # Run evaluation and save the model.
                if args.eval_every and step % args.eval_every == 0:
                    run_eval(model, valid_loader, device, chrono, logger, step)
                    if args.save:
                        torch.save(
                            {
                                "step": step,
                                "model": model.state_dict(),
                                "optim": optim.state_dict(),
                            }, savename)

            end = time.time()

        # Final eval at end of training.
        run_eval(model, valid_loader, device, chrono, logger, step='end')

    logger.info(f"Timings:\n{chrono}")
Example #5
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F

from efficientnet_pytorch import EfficientNet

model = EfficientNet.from_pretrained('efficientnet-b3')


class RFB(nn.Module):
    def __init__(self, in_channel, out_channel):
        super(RFB, self).__init__()
        self.relu = nn.ReLU(True)
        self.branch0 = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 1),
            nn.Conv2d(out_channel, out_channel, 3, padding=1, dilation=1),
        )
        self.branch1 = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 1),
            nn.Conv2d(out_channel,
                      out_channel,
                      kernel_size=(1, 3),
                      padding=(0, 1)),
            nn.Conv2d(out_channel,
                      out_channel,
                      kernel_size=(3, 1),
                      padding=(1, 0)),
            nn.Conv2d(out_channel, out_channel, 3, padding=1, dilation=1))
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, 1),
Example #6
def get_model(model_name, dataset_name, pretrained=False):
    """Retrieve an appropriate architecture."""
    if 'CIFAR' in dataset_name or 'MNIST' in dataset_name:
        if pretrained:
            raise ValueError(
                'Loading pretrained models is only supported for ImageNet.')
        in_channels = 1 if dataset_name == 'MNIST' else 3
        num_classes = 10 if dataset_name in ['CIFAR10', 'MNIST'] else 100
        if 'ResNet' in model_name:
            model = resnet_picker(model_name, dataset_name)
        elif 'efficientnet-b' in model_name.lower():
            from efficientnet_pytorch import EfficientNet
            model = EfficientNet.from_name(model_name.lower())
        elif model_name == 'ConvNet':
            model = convnet(width=32,
                            in_channels=in_channels,
                            num_classes=num_classes)
        elif model_name == 'ConvNet64':
            model = convnet(width=64,
                            in_channels=in_channels,
                            num_classes=num_classes)
        elif model_name == 'ConvNet128':
            model = convnet(width=128,
                            in_channels=in_channels,
                            num_classes=num_classes)
        elif model_name == 'ConvNetBN':
            model = ConvNetBN(width=64,
                              in_channels=in_channels,
                              num_classes=num_classes)
        elif model_name == 'Linear':
            model = linear_model(dataset_name, num_classes=num_classes)
        elif model_name == 'alexnet-mp':
            model = alexnet_metapoison(in_channels=in_channels,
                                       num_classes=num_classes,
                                       batchnorm=False)
        elif model_name == 'alexnet-mp-bn':
            model = alexnet_metapoison(in_channels=in_channels,
                                       num_classes=num_classes,
                                       batchnorm=True)
        elif 'VGG' in model_name:
            model = VGG(model_name)
        elif model_name == 'MobileNetV2':
            model = MobileNetV2(num_classes=num_classes,
                                train_dp=0,
                                test_dp=0,
                                droplayer=0,
                                bdp=0)
        else:
            raise ValueError(
                f'Architecture {model_name} not implemented for dataset {dataset_name}.'
            )

    elif 'ImageNet' in dataset_name:
        in_channels = 3
        num_classes = 1000
        if 'efficientnet-b' in model_name.lower():
            from efficientnet_pytorch import EfficientNet
            if pretrained:
                model = EfficientNet.from_pretrained(model_name.lower())
            else:
                model = EfficientNet.from_name(model_name.lower())
        elif model_name == 'Linear':
            model = linear_model(dataset_name, num_classes=num_classes)
        else:
            if 'densenet' in model_name.lower():
                extra_args = dict(
                    memory_efficient=False
                )  # memory_efficient->checkpointing -> incompatible with autograd.grad
            else:
                extra_args = dict()

            try:
                model = getattr(torchvision.models,
                                model_name.lower())(pretrained=pretrained,
                                                    **extra_args)
            except AttributeError:
                raise NotImplementedError(
                    f'ImageNet model {model_name} not found at torchvision.models.'
                )

    return model
Example #7
def net(model, pretrained):
    return EfficientNet.from_pretrained(
        model) if pretrained else EfficientNet.from_name(model)
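
# Hedged usage sketch of the helper above; the model name is illustrative only:
backbone = net('efficientnet-b0', pretrained=True)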
Example #8
def build_model(args, rot=False, dropout=None):

    json_options = json_file_to_pyobj(args.config)
    training_configurations = json_options.training

    modelName = training_configurations.model.lower() if not rot else 'rot' + training_configurations.model.lower()
    depth = int(training_configurations.depth)
    pretrained = training_configurations.pretrained == 'True'
    out_classes = training_configurations.out_classes
    print(out_classes)

    if modelName == 'wideresnet':
        from models.WideResNet import WideResNet
        if not pretrained:
            net = WideResNet(d=40, k=4, n_classes=out_classes, input_features=1, output_features=16, strides=[1, 1, 2, 2])
        else:
            net = WideResNet(d=40, k=4, n_classes=out_classes, input_features=3, output_features=16, strides=[1, 1, 2, 2])
        return net
    elif modelName == 'densenet':
        from models.DenseNet import DenseNet
        net = DenseNet(depth=121, growthRate=32, nClasses=out_classes)
        return net
    elif modelName == 'efficientnet':
        if depth in range(8):
            from efficientnet_pytorch import EfficientNet
            model = EfficientNet.from_pretrained('efficientnet-b{}'.format(depth))
            net = deepcopy(model)
            for param in net.parameters():
                param.requires_grad = True
            if not pretrained:
                net._conv_stem = nn.Conv2d(1, net._conv_stem.out_channels, kernel_size=3, stride=2, bias=False)
            net._fc = nn.Linear(model._fc.in_features, out_classes)
            if dropout is not None:
                net._dropout = torch.nn.Dropout(p=dropout)
            return net
        else:
            raise NotImplementedError('net not implemented')
    elif modelName == 'rotefficientnet':
        if depth in range(8):
            from efficientnet_pytorch.rot_model import RotEfficientNet
            model = RotEfficientNet.from_pretrained('efficientnet-b{}'.format(depth))
            net = deepcopy(model)
            for param in net.parameters():
                param.requires_grad = True
            net._fc = nn.Linear(model._fc.in_features, out_classes)
            return net
        else:
            raise NotImplementedError('net not implemented')
    elif modelName == 'genodinefficientnet':
        gen_odin_mode = training_configurations.gen_odin_mode
        if depth in range(8):
            from efficientnet_pytorch.gen_odin_model import GenOdinEfficientNet
            model = GenOdinEfficientNet.from_pretrained('efficientnet-b{}'.format(depth), mode=gen_odin_mode)
            from efficientnet_pytorch.gen_odin_model import CosineSimilarity
            model._fc_nominator = CosineSimilarity(feat_dim=1280, num_centers=out_classes)
            net = deepcopy(model)
            for param in net.parameters():
                param.requires_grad = True
            return net
        else:
            raise NotImplementedError('net not implemented')
    else:
        raise NotImplementedError('net not implemented')
Example #9
def run(tb, vb, lr, epochs, writer):
    device = os.environ['main-device']
    logging.info('Training program start!')
    logging.info('Configuration:')
    logging.info('\n' + json.dumps(INFO, indent=2))

    # ------------------------------------
    # 1. Define dataloader
    train_loader, train4val_loader, val_loader, num_of_images, mapping = get_dataloaders(
        tb, vb)
    # train_loader, train4val_loader, val_loader, num_of_images = get_dataloaders(tb, vb)
    weights = (1 / num_of_images) / ((1 / num_of_images).sum().item())
    # weights = (1/num_of_images)/(1/num_of_images + 1/(num_of_images.sum().item()-num_of_images))
    weights = weights.to(device=device)

    # ------------------------------------
    # 2. Define model
    model = EfficientNet.from_pretrained(
        'efficientnet-b3', num_classes=INFO['dataset-info']['num-of-classes'])
    model = carrier(model)

    # ------------------------------------
    # 3. Define optimizer
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    ignite_scheduler = LRScheduler(scheduler)

    # ------------------------------------
    # 4. Define metrics

    class CenterLoss(nn.Module):
        """Center loss.
    Reference:
    Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
    Args:
        num_classes (int): number of classes.
        feat_dim (int): feature dimension.
    """
        def __init__(self, num_classes=10, feat_dim=2):
            super(CenterLoss, self).__init__()
            self.num_classes = num_classes
            self.feat_dim = feat_dim
            self.centers = torch.randn(self.num_classes,
                                       self.feat_dim).to(device=device)

        def forward(self, x, labels):
            """
      Args:
        x: feature matrix with shape (batch_size, feat_dim).
        labels: ground truth labels with shape (batch_size).
      """
            batch_size = x.size(0)
            distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
                      torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
            distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
            classes = torch.arange(self.num_classes).long()
            classes = classes.to(device=device)
            labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
            mask = labels.eq(classes.expand(batch_size, self.num_classes))
            dist = distmat * mask.float()
            loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
            return loss

    class MixLoss(nn.Module):
        def __init__(self):
            super(MixLoss, self).__init__()

        def forward(self, x, y):
            _lambda = 0.5
            center = CenterLoss(x.shape[0], x.shape[1])
            ce = nn.CrossEntropyLoss(weight=weights)
            center = center(x, y)
            ce = ce(x, y)
            # print('{:.4f} | {:.4f}'.format(center, ce))
            return _lambda * center + ce

    class EntropyPrediction(metric.Metric):
        def __init__(self, threshold=1.0):
            super(EntropyPrediction, self).__init__()
            self.threshold = threshold
            self.prediction = torch.tensor([], dtype=torch.int)
            self.y = torch.tensor([], dtype=torch.int)

        def reset(self):
            # self.threshold = 0.5
            self.prediction = torch.tensor([])
            self.y = torch.tensor([])
            super(EntropyPrediction, self).reset()

        def update(self, output):
            y_pred, y = output
            softmax = torch.exp(y_pred) / torch.exp(y_pred).sum(1)[:, None]
            entropy_base = math.log(y_pred.shape[1])
            entropy = (-softmax * torch.log(softmax)).sum(1) / entropy_base
            values, inds = softmax.max(1)
            prediction = torch.where(entropy < self.threshold, inds,
                                     torch.tensor([-1]).to(device=device))
            self.prediction = torch.cat(
                (self.prediction.type(torch.LongTensor).to(device=device),
                 torch.tensor([mapping[x.item()]
                               for x in prediction]).to(device=device)))
            self.y = torch.cat(
                (self.y.type(torch.LongTensor).to(device=device),
                 y.to(device=device)))
            # return self.prediction, self.y

        def compute(self):
            return self.prediction, self.y

    train_metrics = {
        'accuracy':
        Accuracy(),
        'loss':
        Loss(MixLoss()),
        'precision_recall':
        MetricsLambda(PrecisionRecallTable, Precision(), Recall(),
                      train_loader.dataset.classes),
        'cmatrix':
        MetricsLambda(CMatrixTable,
                      ConfusionMatrix(INFO['dataset-info']['num-of-classes']),
                      train_loader.dataset.classes)
    }

    val_metrics = {
        'accuracy':
        MetricsLambda(Labels2Acc, EntropyPrediction(1.0)),
        'precision_recall':
        MetricsLambda(Labels2PrecisionRecall, EntropyPrediction(1.0),
                      val_loader.dataset.classes),
        'cmatrix':
        MetricsLambda(Labels2CMatrix, EntropyPrediction(1.0),
                      val_loader.dataset.classes)
    }

    # ------------------------------------
    # 5. Create trainer
    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        MixLoss(),
                                        device=device)

    # ------------------------------------
    # 6. Create evaluator
    train_evaluator = create_supervised_evaluator(model,
                                                  metrics=train_metrics,
                                                  device=device)
    val_evaluator = create_supervised_evaluator(model,
                                                metrics=val_metrics,
                                                device=device)

    desc = 'ITERATION - loss: {:.4f}'
    pbar = tqdm(initial=0,
                leave=False,
                total=len(train_loader),
                desc=desc.format(0))

    # ------------------------------------
    # 7. Create event hooks

    # Update process bar on each iteration completed.
    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        log_interval = 1
        iter = (engine.state.iteration - 1) % len(train_loader) + 1
        if iter % log_interval == 0:
            pbar.desc = desc.format(engine.state.output)
            pbar.update(log_interval)

    @trainer.on(Events.EPOCH_STARTED)
    def refresh_pbar(engine):
        torch.cuda.empty_cache()
        print('Finish epoch {}'.format(engine.state.epoch))
        pbar.refresh()
        pbar.n = pbar.last_print_n = 0

    # Compute metrics on train data on each epoch completed.
    # cpe = CustomPeriodicEvent(n_epochs=50)
    # cpe.attach(trainer)
    # @trainer.on(cpe.Events.EPOCHS_50_COMPLETED)
    def log_training_results(engine):
        print('Checking on training set.')
        train_evaluator.run(train4val_loader)
        metrics = train_evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_loss = metrics['loss']
        precision_recall = metrics['precision_recall']
        cmatrix = metrics['cmatrix']
        prompt = """
      Training Results - Epoch: {}
      Avg accuracy: {:.4f}
      Avg loss: {:.4f}
      precision_recall: \n{}
      confusion matrix: \n{}
      """.format(engine.state.epoch, avg_accuracy, avg_loss,
                 precision_recall['pretty'], cmatrix['pretty'])
        tqdm.write(prompt)
        logging.info('\n' + prompt)
        writer.add_text(os.environ['run-id'], prompt, engine.state.epoch)
        writer.add_scalars('Aggregate/Acc', {'Train Acc': avg_accuracy},
                           engine.state.epoch)
        writer.add_scalars('Aggregate/Loss', {'Train Loss': avg_loss},
                           engine.state.epoch)

    # Compute metrics on val data on each epoch completed.
    # cpe = CustomPeriodicEvent(n_epochs=50)
    # cpe.attach(trainer)
    # @trainer.on(cpe.Events.EPOCHS_50_COMPLETED)
    def log_validation_results(engine):
        pbar.clear()
        print('* - * - * - * - * - * - * - * - * - * - * - * - *')
        print('Checking on validation set.')
        val_evaluator.run(val_loader)
        metrics = val_evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        precision_recall = metrics['precision_recall']
        cmatrix = metrics['cmatrix']
        prompt = """
      Validating Results - Epoch: {}
      Avg accuracy: {:.4f}
      precision_recall: \n{}
      confusion matrix: \n{}
      """.format(engine.state.epoch, avg_accuracy, precision_recall['pretty'],
                 cmatrix['pretty'])
        tqdm.write(prompt)
        logging.info('\n' + prompt)
        writer.add_text(os.environ['run-id'], prompt, engine.state.epoch)
        writer.add_scalars('Aggregate/Acc', {'Val Acc': avg_accuracy},
                           engine.state.epoch)
        writer.add_scalars(
            'Aggregate/Score', {
                'Val avg precision': precision_recall['data'][0, -1],
                'Val avg recall': precision_recall['data'][1, -1]
            }, engine.state.epoch)

    cpe = CustomPeriodicEvent(n_epochs=50)
    cpe.attach(trainer)
    # @trainer.on(cpe.Events.EPOCHS_50_COMPLETED)
    trainer.add_event_handler(cpe.Events.EPOCHS_50_COMPLETED,
                              log_training_results)
    trainer.add_event_handler(cpe.Events.EPOCHS_50_COMPLETED,
                              log_validation_results)
    trainer.add_event_handler(Events.STARTED, log_training_results)
    trainer.add_event_handler(Events.STARTED, log_validation_results)

    # Save model ever N epoch.
    save_model_handler = ModelCheckpoint(os.environ['savedir'],
                                         '',
                                         save_interval=10,
                                         n_saved=2)
    trainer.add_event_handler(Events.EPOCH_COMPLETED, save_model_handler,
                              {'model': model})

    # Update learning-rate due to scheduler.
    trainer.add_event_handler(Events.EPOCH_STARTED, ignite_scheduler)

    # ------------------------------------
    # Run
    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()
Example #10
def get_effecientnet(train_labels, pretrained=True, model_path=None):
    model_ft = EfficientNet.from_pretrained('efficientnet-b5')
    num_ftrs = model_ft._fc.in_features
    model_ft._fc = nn.Linear(num_ftrs, train_labels.shape[1])
    return load_pretrained_model(model_ft,
                                 model_path) if model_path else model_ft
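
# Hedged usage sketch (assumed, not from the source): `train_labels` is expected
# to be a (num_samples, num_classes) one-hot matrix, so its second dimension
# sizes the new classification head; a small NumPy stand-in is used here.
import numpy as np
dummy_labels = np.zeros((10, 5))
model = get_effecientnet(dummy_labels)   # builds a b5 backbone with a 5-way head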
Example #11
def initialize_model(model_name,
                     num_classes,
                     feature_extract,
                     use_pretrained=True):
    # https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
    # Initialize these variables which will be set in this if statement. Each of these
    #   variables is model specific.
    model_ft = None
    input_size = 0

    def set_parameter_requires_grad(model, feature_extracting):
        if feature_extracting:
            for param in model.parameters():
                param.requires_grad = False

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet34(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg16(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512,
                                           num_classes,
                                           kernel_size=(1, 1),
                                           stride=(1, 1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet161(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    elif model_name == 'effecientnet':
        # https://pypi.org/project/efficientnet-pytorch/
        # https://www.kaggle.com/ateplyuk/pytorch-efficientnet
        # https://www.kaggle.com/akasharidas/plant-pathology-2020-in-pytorch
        model_ft = EfficientNet.from_pretrained('efficientnet-b5')

        set_parameter_requires_grad(model_ft, feature_extract)

        num_ftrs = model_ft._fc.in_features
        model_ft._fc = nn.Linear(num_ftrs, num_classes)
        # model_ft._fc = nn.Sequential(nn.Linear(num_ftrs, 1000, bias=True),
        #                              nn.ReLU(),
        #                              nn.Dropout(p=0.5),
        #                              nn.Linear(1000, num_classes, bias=True))

        input_size = 224

    else:
        print("Invalid model name, exiting...")
        # exit()

    return model_ft, input_size
Example #12
 def __init__(self, backbone='efficientnet-b0', num_classes=6):
     super(EnetV1, self).__init__()
     self.enet = EfficientNet.from_pretrained(backbone)
     self.myfc = nn.Linear(self.enet._fc.in_features, num_classes)
     self.enet._fc = nn.Identity()
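
 # Hedged sketch of the forward pass implied by the head above (assumed, not
 # part of the original snippet): _fc is Identity, so calling the backbone
 # yields pooled features that feed the new linear head.
 def forward(self, x):
     x = self.enet(x)
     return self.myfc(x)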
Example #13
 def __init__(self, backbone='efficientnet-b0', num_classes=6):
     super(EnetV2, self).__init__()
     self.enet = EfficientNet.from_pretrained(backbone)
     self.head = nn.Sequential(
         AdaptiveConcatPool2d(), nn.Flatten(),
         nn.Linear(2 * self.enet._fc.in_features, num_classes))
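
 # Hedged sketch of the matching forward pass (assumed): the concat-pool head
 # expects a 4-D feature map, so extract_features is used instead of __call__.
 def forward(self, x):
     x = self.enet.extract_features(x)
     return self.head(x)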
Example #14
 def __init__(self):
     super(EfficientNetModel, self).__init__()
     self.Effinet = EfficientNet.from_pretrained('efficientnet-b7')
     self.classifier = nn.Linear(1000, 26)
     
     nn.init.xavier_normal_(self.classifier.weight)
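
 # Hedged sketch of the forward pass (assumed, not from the source):
 def forward(self, x):
     x = self.Effinet(x)           # 1000-dim ImageNet logits from the pretrained b7
     return self.classifier(x)     # project to the 26 target classes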
Example #15
def get_model(model_name, from_old_model, device, model_path, output_channel):

    if model_name == "efficientnet":
        if from_old_model:
            net = EfficientNet.from_name('efficientnet-b4')
            #net._fc.out_features = output_channel
            net._fc = nn.Linear(net._fc.in_features, output_channel)
            net.load_state_dict(torch.load(model_path))
            net = net.to(device)
        else:
            net = EfficientNet.from_pretrained('efficientnet-b4')
            # net._fc.out_features = output_channel
            net._fc = nn.Linear(net._fc.in_features, output_channel)
            net = net.to(device)

    elif model_name == "resnet50":
        if from_old_model:
            net = models.resnet50(pretrained=False)
            #net.fc.out_features = output_channel
            net.fc = nn.Linear(net.fc.in_features, output_channel)
            net.load_state_dict(torch.load(model_path))
            net = net.to(device)
        else:
            net = models.resnet50(pretrained=True)
            # net.fc.out_features = output_channel
            net.fc = nn.Linear(net.fc.in_features, output_channel)
            net = net.to(device)

    elif model_name == "resnext50_32x4d":

        class CustomResNext(nn.Module):
            def __init__(self, model_name='resnext50_32x4d', pretrained=False):
                super().__init__()
                self.model = timm.create_model(model_name,
                                               pretrained=pretrained)
                n_features = self.model.fc.in_features
                self.model.fc = nn.Linear(n_features, output_channel)

            def forward(self, x):
                x = self.model(x)
                return x

        if from_old_model:
            net = CustomResNext()
            net.load_state_dict(torch.load(model_path))
            net = net.to(device)
        else:
            net = CustomResNext()
            net = net.to(device)

    elif model_name == "custom":

        class CustomResNext(nn.Module):
            def __init__(self):
                super().__init__()
                self.res_model = timm.create_model('resnext50_32x4d',
                                                   pretrained=False)
                self.eff_model = EfficientNet.from_pretrained(
                    'efficientnet-b4')
                self.classifier = nn.Sequential(
                    nn.Linear(1000, 1000),
                    nn.Dropout(0.6),
                    nn.Linear(1000, output_channel),
                )

            def forward(self, x):
                x = self.res_model(x) + self.eff_model(x)
                x = self.classifier(x)

                return x

        if from_old_model:
            net = CustomResNext()
            net.load_state_dict(torch.load(model_path))
            net = net.to(device)
        else:
            net = CustomResNext()
            net = net.to(device)
    return net
Example #16
def main():
    train_ds = DRDataset(
        images_folder="train/images_preprocessed_1000/",
        path_to_csv="train/trainLabels.csv",
        transform=config.val_transforms,
    )
    val_ds = DRDataset(
        images_folder="train/images_preprocessed_1000/",
        path_to_csv="train/valLabels.csv",
        transform=config.val_transforms,
    )
    test_ds = DRDataset(
        images_folder="test/images_preprocessed_1000",
        path_to_csv="train/trainLabels.csv",
        transform=config.val_transforms,
        train=False,
    )
    test_loader = DataLoader(test_ds,
                             batch_size=config.BATCH_SIZE,
                             num_workers=6,
                             shuffle=False)
    train_loader = DataLoader(
        train_ds,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
        pin_memory=config.PIN_MEMORY,
        shuffle=False,
    )
    val_loader = DataLoader(
        val_ds,
        batch_size=config.BATCH_SIZE,
        num_workers=2,
        pin_memory=config.PIN_MEMORY,
        shuffle=False,
    )
    loss_fn = nn.MSELoss()

    model = EfficientNet.from_pretrained("efficientnet-b3")
    model._fc = nn.Linear(1536, 1)
    model = model.to(config.DEVICE)
    optimizer = optim.Adam(model.parameters(),
                           lr=config.LEARNING_RATE,
                           weight_decay=config.WEIGHT_DECAY)
    scaler = torch.cuda.amp.GradScaler()

    if config.LOAD_MODEL and config.CHECKPOINT_FILE in os.listdir():
        load_checkpoint(torch.load(config.CHECKPOINT_FILE), model, optimizer,
                        config.LEARNING_RATE)

    # Run after training is done and you've achieved good result
    # on validation set, then run train_blend.py file to use information
    # about both eyes concatenated
    get_csv_for_blend(val_loader, model, "../train/val_blend.csv")
    get_csv_for_blend(train_loader, model, "../train/train_blend.csv")
    get_csv_for_blend(test_loader, model, "../train/test_blend.csv")
    make_prediction(model, test_loader, "submission_.csv")
    import sys
    sys.exit()
    #make_prediction(model, test_loader)

    for epoch in range(config.NUM_EPOCHS):
        train_one_epoch(train_loader, model, optimizer, loss_fn, scaler,
                        config.DEVICE)

        # get on validation
        preds, labels = check_accuracy(val_loader, model, config.DEVICE)
        print(
            f"QuadraticWeightedKappa (Validation): {cohen_kappa_score(labels, preds, weights='quadratic')}"
        )

        # get on train
        #preds, labels = check_accuracy(train_loader, model, config.DEVICE)
        #print(f"QuadraticWeightedKappa (Training): {cohen_kappa_score(labels, preds, weights='quadratic')}")

        if config.SAVE_MODEL:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
            }
            save_checkpoint(checkpoint, filename=f"b3_{epoch}.pth.tar")
Example #17
    def __init__(self, num_classes, model_name):
        super(EfficientNet, self).__init__()

        self.model = efn.from_pretrained(model_name, num_classes=num_classes)
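
    # Hedged sketch of the delegating forward pass (assumed, not from the source):
    def forward(self, x):
        return self.model(x)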
Example #18
a = "./proxyless_nas/config/net0.config"
print(a[-11:-7])
import os
model_path = './save/student_model'
model_name = 'S:b:{}_{}'.format(0, 1)
save_folder = os.path.join(model_path, model_name)

print(str(save_folder))
save_file = os.path.join(save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=1))
print(str(save_file))
import torch
from efficientnet_pytorch import EfficientNet
from proxyless_nas.jj import get_proxyless_model
model_s = get_proxyless_model(net_config_path="./config/net0.config")
model_t = EfficientNet.from_pretrained(
    'efficientnet-b0',
    weights_path='./pretrain_efficientNet/pretrain_efficientNet.pth')
data = torch.randn(2, 3, 224, 224)
model_s.eval()
model_t.eval()
feat_s, ls = model_s(data, is_feat=True)
feat_t, l = model_t(data, is_feat=True)
print(l.shape)
print(ls.shape)

if not os.path.exists('./save/log/'):
    os.mkdir('./save/log/')
Example #19
model.load_state_dict(checkpoint['state_dict'])

# In[114]:

### VGGNet
import torchvision.models as models
model = models.vgg16(pretrained=True)
model = model.cuda()
modelname = 'vgg16'

# In[139]:

### efficientNet
from efficientnet_pytorch import EfficientNet
model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=2)
model = model.cuda()
modelname = 'efficientNet-b0'

# model = EfficientNet.from_name('efficientnet-b1').cuda()
# modelname = 'efficientNet_random'

# train
bs = batchsize
votenum = 10
import warnings
warnings.filterwarnings('ignore')
Example #20
                           'w')  # open a text file in write mode
    elapsedTimeFile.write(
        'EfficientNet-b0\n{:.0f}m {:.0f}s\nBest val accuracy: {:.4f}'.
        format(time_elapsed // 60, time_elapsed % 60,
               best_acc))  # elapsed time is written to a text file
    elapsedTimeFile.close()  # close the text file

    # load best model weights
    model.load_state_dict(best_model_wts)
    torch.save(best_model_wts,
               "./saved_models_efficientNet/" + best_model + ".pth")
    return model


# Transfer learning
model_conv = EfficientNet.from_pretrained('efficientnet-b0')
model_conv.set_swish(memory_efficient=False)
for param in model_conv.parameters():
    param.requires_grad = False

# Parameters of newly constructed modules have requires_grad=True by default
num_ftrs = model_conv._fc.in_features
model_conv._fc = nn.Linear(num_ftrs, len(class_names))

model_conv = model_conv.to(device)

pytorch_total_params = sum(p.numel() for p in model_conv.parameters())
print(pytorch_total_params)

#print(model_conv)
criterion = nn.CrossEntropyLoss()
Example #21
 def __init__(self):
     super().__init__()
     self.net = EfficientNet.from_pretrained('efficientnet-b0')
     in_features = self.net._fc.in_features
     self.last_linear = nn.Linear(in_features, 4)
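
 # Hedged sketch of the forward pass this head implies (assumed, not from the
 # source): pooled backbone features feed the new 4-way linear layer.
 def forward(self, x):
     x = self.net.extract_features(x)
     x = x.mean(dim=[2, 3])        # global average pooling
     return self.last_linear(x)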
Example #22
 def __init__(self, in_channels):
     super(EfficientNet_MultiLabel, self).__init__()
     self.network = EfficientNet.from_pretrained('efficientnet-b0',
                                                 in_channels=in_channels)
     self.output_layer = nn.Linear(1000, 26)
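
 # Hedged sketch of a multi-label forward pass matching the layers above
 # (assumed, not part of the original snippet):
 def forward(self, x):
     x = self.network(x)                          # 1000-dim backbone output
     return torch.sigmoid(self.output_layer(x))   # 26 independent label scores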
Example #23
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    if 'efficientnet' in args.arch:  # NEW
        if args.pretrained:
            model = EfficientNet.from_pretrained(args.arch)
            print("=> using pre-trained model '{}'".format(args.arch))
        else:
            print("=> creating model '{}'".format(args.arch))
            model = EfficientNet.from_name(args.arch)

    else:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            model = models.__dict__[args.arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    if 'efficientnet' in args.arch:
        image_size = EfficientNet.get_image_size(args.arch)
        val_transforms = transforms.Compose([
            transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            normalize,
        ])
        print('Using image size', image_size)
    else:
        val_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        print('Using image size', 224)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir, val_transforms),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        res = validate(val_loader, model, criterion, args)
        with open('res.txt', 'w') as f:
            print(res, file=f)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best, args.output_dir)
Example #24
    def __init__(self, opt):
        super(PCB_Effi, self).__init__()
        self.class_num = opt.nclasses
        self.part = opt.nparts
        self.single_cls = opt.single_cls
        self.model = EfficientNet.from_pretrained('efficientnet-b0')
        self.avgpool = nn.AdaptiveAvgPool2d((self.part, 1))
        self.dropout = nn.Dropout(p=0.5)

        self.feature_dim = self.model._fc.in_features

        self.bottleneck = nn.BatchNorm1d(self.feature_dim)
        self.bottleneck.bias.requires_grad_(False)
        self.bottleneck.apply(weights_init_kaiming)

        if self.single_cls:
            name = 'classifier'
            setattr(
                self, name,
                ClassBlock(self.part * self.feature_dim,
                           self.class_num,
                           droprate=0.5,
                           relu=False,
                           bnorm=True,
                           num_bottleneck=256))
        else:
            for i in range(self.part):
                name = 'classifierA' + str(i)
                setattr(
                    self, name,
                    ClassBlock(self.feature_dim,
                               self.class_num,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=128))

            for i in range(self.part - 1):
                name = 'classifierB' + str(i)
                setattr(
                    self, name,
                    ClassBlock(2 * 1280,
                               self.class_num,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.part - 1):
                name = 'classifierB' + str(i + self.part - 1)
                setattr(
                    self, name,
                    ClassBlock(2 * 1280,
                               self.class_num,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.part - 2):

                name = 'classifierC' + str(i)
                setattr(
                    self, name,
                    ClassBlock(3 * 1280,
                               self.class_num,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.part - 2):
                name = 'classifierC' + str(i + self.part - 2)
                setattr(
                    self, name,
                    ClassBlock(3 * 1280,
                               self.class_num,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.part - 3):
                name = 'classifierD' + str(i)
                setattr(
                    self, name,
                    ClassBlock(4 * 1280,
                               self.class_num,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))
Example #25
    def __init__(self, save_model_path, param_file, model_to_use='my'):
        if model_to_use == 'my':
            with open(param_file) as json_params:
                params = json.load(json_params)
            if 'input_size' not in params:
                params['input_size'] = 'default'
            if params['input_size'] == 'default':
                im_size = (64, 64)
                config_path = 'configs.json'
            elif params['input_size'] == 'bigimg':
                im_size = (128, 128)
                config_path = 'configs_big_img.json'
            elif params['input_size'] == 'biggerimg':
                im_size = (192, 168)
                config_path = 'configs_bigger_img.json'
            with open(config_path) as config_params:
                configs = json.load(config_params)
            removed_conns = defaultdict(list)
            removed_conns['shortcut'] = defaultdict(list)
            if_replace_by_zeroes = False  # True
            if False:
                print('Some connections were disabled')
                # removed_conns[5] = [(5, 23), (5, 25), (5, 58)]
                # removed_conns[8] = [(137, 143)]
                # removed_conns[9] = [(142, 188), (216, 188)]
                removed_conns[10] = [(188, 104),
                                     # (86, 104)
                                     ]
                # removed_conns['label'] = [(481, 3),(481, 7)]
                # removed_conns[9] = [#(216, 181)
                #                     (142, 188),
                #                     (216, 188),
                #                     (187, 86),
                #                     (224, 86)
                #                     ]
                # removed_conns[10] = [(181, 279)]
                # removed_conns[11] = [(28, 421)] + [(331, 392)]
                # removed_conns[12] = [(406, 204)]
                # removed_conns['label'] = [  # (356, 9),
                #     # (204, 9),
                #     (126, 9),
                #     (187, 9),
                #     (123, 9),
                #     # (134, 9),
                #     #  (400, 9),
                #     #  (383, 9)
                # ]
                removed_conns['shortcut'][8] = [(86, 86)]
                # removed_conns['shortcut'][10] = [#(193, 125)
                #                                   (118, 125)
                #                                  ]

            if False:
                save_model_path = save_model_path[:save_model_path.find('.pkl')] + '_avgadditives' + '.pkl'
                try:
                    trained_model = load_trained_model(param_file, save_model_path, if_additives_user=True,
                                                       if_store_avg_activations_for_disabling=True,
                                                       conns_to_remove_dict=removed_conns,
                                                       replace_with_avgs_last_layer_mode='restore',
                                                       if_replace_by_zeroes=if_replace_by_zeroes)
                except:
                    print('assume problem where cifar networks ignored the enable_bias parameter')
                    BasicBlockAvgAdditivesUser.id = 0 #need to reset
                    trained_model = load_trained_model(param_file, save_model_path, if_additives_user=True,
                                                       if_store_avg_activations_for_disabling=True,
                                                       conns_to_remove_dict=removed_conns,
                                                       replace_with_avgs_last_layer_mode='restore',
                                                       if_actively_disable_bias=True,
                                                       if_replace_by_zeroes=if_replace_by_zeroes)
            else:
                try:
                    trained_model = load_trained_model(param_file, save_model_path)
                except:
                    print('assume problem where cifar networks ignored the enable_bias parameter')
                    trained_model = load_trained_model(param_file, save_model_path, if_actively_disable_bias=True)

            model = trained_model
            self.params = params
            self.configs = configs
            use_my_model = True
        else:
            im_size = (224, 224)
            if model_to_use == 'resnet18':
                trained_model = torchvision.models.__dict__['resnet18'](pretrained=True).to(device)
                # trained_model = torchvision.models.__dict__['resnet34'](pretrained=True).to(device)
                # trained_model = models.__dict__['vgg19_bn'](pretrained=True).to(device)
            if model_to_use == 'mobilenet':
                trained_model = torchvision.models.__dict__['mobilenet_v2'](pretrained=True).to(device)
            if model_to_use == 'efficientnet':
                trained_model = EfficientNet.from_pretrained('efficientnet-b3').to(device)
            model = trained_model
            self.params = None
            self.configs = None
            use_my_model = False

        self.model = model
        self.use_my_model = use_my_model
        self.size_0 = im_size[0]
        self.size_1 = im_size[1]

        if self.use_my_model:
            for m in self.model:
                model[m].zero_grad()
                model[m].eval()

            self.feature_extractor = self.model['rep']
        else:
            self.model.eval()
            self.model.zero_grad()
            self.feature_extractor = self.model
        for param in self.feature_extractor.parameters():
            param.requires_grad_(False)
Example #26
from efficientnet_pytorch import EfficientNet
import torch
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, DataLoader, Dataset
import tokenizers
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
import numpy as np
import pandas as pd

model = EfficientNet.from_pretrained('efficientnet-b7', advprop=False)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cpu = torch.device('cpu')
print(f"使用デバイス: {device}")
model.to(device)


class ThumbnailDataset(Dataset):
    def __init__(self, df):
        self.df = df
        self.tfms = transforms.Compose([
            transforms.Resize((90, 120)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def __getitem__(self, index):
        data = {}
        row = self.df.iloc[index]
Example #27
 def __init__(self, b=0):
     super().__init__()
     model_str = f'efficientnet-b{b}'
     self.efficient_net = EffNet.from_pretrained(model_str, num_classes=1)
     self.sig = nn.Sigmoid()
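
 # Hedged sketch of the forward pass (assumed): a single sigmoid-activated score.
 def forward(self, x):
     return self.sig(self.efficient_net(x))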
Example #28
 def __init__(self, input_shape, num_classes=23, pretrained=True, b=0):
     super().__init__()
     self.input_shape = input_shape
     model_str = f'efficientnet-b{b}'
     self.efficient_net = EffNet.from_pretrained(model_str, num_classes=num_classes) \
             if pretrained else EffNet.from_name(model_str, num_classes=num_classes)
Example #29
    def __init__(self, opt):
        super(PCB_Effi, self).__init__()
        self.opt = opt
        self.model = EfficientNet.from_pretrained('efficientnet-b0')
        self.avgpool = nn.AdaptiveAvgPool2d((self.opt.nparts, 1))
        self.dropout = nn.Dropout(p=0.5)

        self.feature_dim = self.model._fc.in_features

        if self.opt.single_cls:
            name = 'classifier'
            setattr(
                self, name,
                ClassBlock(self.opt.nparts * self.feature_dim,
                           self.opt.nclasses,
                           droprate=0.5,
                           relu=False,
                           bnorm=True,
                           num_bottleneck=256))
        else:
            for i in range(self.opt.nparts):
                name = 'classifierA' + str(i)
                setattr(
                    self, name,
                    ClassBlock(self.feature_dim,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 1):
                name = 'classifierB' + str(i)
                setattr(
                    self, name,
                    ClassBlock(2 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 1):
                name = 'classifierB' + str(i + self.opt.nparts - 1)
                setattr(
                    self, name,
                    ClassBlock(2 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 2):
                name = 'classifierC' + str(i)
                setattr(
                    self, name,
                    ClassBlock(3 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 2):
                name = 'classifierC' + str(i + self.opt.nparts - 2)
                setattr(
                    self, name,
                    ClassBlock(3 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 3):
                name = 'classifierD' + str(i)
                setattr(
                    self, name,
                    ClassBlock(4 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))
Example #30
def BuildFeatureExtractorCnn2D(model_name, pretrained_dataset='imagenet'):
    if 'efficientnet' not in model_name:
        return pretrainedmodels.__dict__[model_name](
            num_classes=1000, pretrained=pretrained_dataset), 'features'
    else:
        return EfficientNet.from_pretrained(model_name), 'extract_features'
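
# Hedged usage sketch of the factory above; the model name and input shape are
# illustrative, and torch is assumed to be imported alongside EfficientNet:
model, feature_attr = BuildFeatureExtractorCnn2D('efficientnet-b0')
features = getattr(model, feature_attr)(torch.randn(1, 3, 224, 224))
print(features.shape)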