Example #1
 def from_name(name: str, in_channels: int, num_classes: int,
               saving_path: str) -> EfficientNet:
     return EfficientNetWrapper(EfficientNet.from_name(
         name, in_channels=in_channels, num_classes=num_classes),
                                saving_path=saving_path)
Example #2
                    default=0.1,
                    help='set decay rate for learning rate')
parser.add_argument('--smooth', action='store_true', help='use smoothout')
args = parser.parse_args()

model_file = args.network + "_cifar10.ckpt"
fig_file = args.network + "_cifar10.png"
network = NET.index(args.network)
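# NET is assumed (from the surrounding script) to be the list of supported
# architecture names; its index drives the input-pipeline choice below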
if network <= 1 or args.use32:
    transform = transforms.ToTensor()
elif network <= 7:
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor()])
elif network <= 11:
    size = EfficientNet.get_image_size('efficientnet-b{}'.format(network - 8))
    transform = transforms.Compose(
        [transforms.Resize((size, size)),
         transforms.ToTensor()])
else:
    sys.exit(1)

# prepare data
train_data = dsets.CIFAR10(root='../data_cifar10',
                           train=True,
                           transform=transform,
                           download=False)

test_data = dsets.CIFAR10(root='../data_cifar10',
                          train=False,
                          transform=transform,
                          download=False)
Example #3
def EfficientNetB7():
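    # data.c is assumed to be a fastai-style data object exposing the class count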
    return EfficientNet.from_pretrained('efficientnet-b7', num_classes=data.c)
Example #4
# train
pth_map = {
    'efficientnet-b0': 'efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'efficientnet-b7-dcc49843.pth',
}
# automatically download the pretrained weights
# model = EfficientNet.from_pretrained('efficientnet-b0')
# load pretrained weights offline; the .pth file must be downloaded beforehand
model_ft = EfficientNet.from_name(net_name)
net_weight = 'eff_weights/' + pth_map[net_name]
state_dict = torch.load(net_weight)
model_ft.load_state_dict(state_dict)
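# the offline checkpoint carries the default 1000-class ImageNet head, which
# matches from_name's default, so load_state_dict succeeds before the head is
# replaced below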

# replace the fully connected layer
num_ftrs = model_ft._fc.in_features
model_ft._fc = nn.Linear(num_ftrs, class_num)

criterion = nn.CrossEntropyLoss()
if use_gpu:
    model_ft = model_ft.cuda()
    criterion = criterion.cuda()

optimizer = optim.SGD((model_ft.parameters()),
                      lr=lr,
Example #5
        train_dataloader = DataLoader(dataset,
                                      batch_size=cfg.batch_size,
                                      shuffle=False,
                                      num_workers=8,
                                      drop_last=True,
                                      sampler=AgrinetDatasetSampler(dataset))
        #train_dataloader = DataLoader(dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=8, drop_last=True)

        val_dataloader = DataLoader(dataset_val,
                                    batch_size=cfg.batch_size,
                                    shuffle=False,
                                    num_workers=1)

        ## -- LOAD MODEL -- ##

        backbone = EfficientNet.from_pretrained('efficientnet-b7',
                                                num_classes=cfg.num_class)
        model = AudioClassifier(backbone, cfg.resample_freq, window_size,
                                hop_size, mel_bins, fmin, fmax,
                                cfg.num_class).to(cfg.device)

        optimizer = torch.optim.Adam(
            model.parameters(), lr=1e-3, amsgrad=False
        )  # alternative: torch.optim.SGD(model.parameters(), lr=1e-3, momentum=5e-4, nesterov=True)
        reducer = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                             mode='min',
                                                             factor=0.5,
                                                             patience=7,
                                                             verbose=True,
                                                             min_lr=1e-6)
        best_score_bce = np.inf
        best_score_ce = np.inf
Example #6
 def __init__(self):
     super().__init__()
     self.net = EfficientNet.from_pretrained('efficientnet-b7')
     n_features = self.net._fc.in_features
     self.net._fc = nn.Linear(in_features=n_features, out_features=1, bias=True)
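A single-logit head like this pairs naturally with a binary objective; a minimal, self-contained sketch (the dummy batch and the BCEWithLogitsLoss pairing are assumptions, not part of the original snippet):
import torch
import torch.nn as nn
from efficientnet_pytorch import EfficientNet

# hypothetical stand-ins for the surrounding module and a data batch
net = EfficientNet.from_pretrained('efficientnet-b7')
net._fc = nn.Linear(net._fc.in_features, 1)
images = torch.randn(4, 3, 224, 224)           # dummy batch
targets = torch.randint(0, 2, (4, 1)).float()  # binary labels in {0, 1}

criterion = nn.BCEWithLogitsLoss()  # pairs with a single raw logit
loss = criterion(net(images), targets)
loss.backward()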
Example #7
 def __init__(self, net_version, num_classes):
     super(Net, self).__init__()
     self.backbone = EfficientNet.from_pretrained('efficientnet-' +
                                                  net_version)
     self.backbone._fc = nn.Sequential(nn.Linear(1280, num_classes), )
Example #8
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import time
import copy
import random
import torchvision
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset
from torchvision import transforms, datasets

from efficientnet_pytorch import EfficientNet

model_name = 'efficientnet-b7'  # b5

image_size = EfficientNet.get_image_size(model_name)
print(image_size)
model = EfficientNet.from_pretrained(model_name, num_classes=2)

## load the data!!
batch_size = 128
random_seed = 555
random.seed(random_seed)
torch.manual_seed(random_seed)

## make dataset
data_path = 'shape/'  # ImageFolder walks the per-class subfolders and assigns labels automatically
president_dataset = datasets.ImageFolder(
    data_path,
    transforms.Compose([
        transforms.Resize((224, 224)),
Example #9
 def __init__(self, name: str):
     super().__init__()
     self.model = EfficientNet.from_pretrained(name)
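     # NOTE: 1280 matches efficientnet-b0/b1 only; for larger variants use
     # self.model._fc.in_features instead of hard-coding the width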
     self.classifier = nn.Linear(1280, 4)
Example #10
def initialize_model(model_name, feature_extract=False, use_pretrained=False):

    '''
    params:
    -------
    model_name: name of the model to train
    feature_extract: default = False; if True, keep the pretrained weights
                     frozen and update only the final layer from which we
                     derive predictions
    use_pretrained: default = False; if True, initialize with pretrained
                    weights instead of training all parameters from scratch
    '''

    num_classes = len(config.CLASS_NAMES)

    # all input size of 224
    if model_name == "resnet18":
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif model_name == "resnet34":
        model_ft = models.resnet34(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif model_name == "resnet152":
        model_ft = models.resnet152(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)

    elif model_name == "alexnet":
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == "vgg16":
        model_ft = models.vgg16_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == "vgg19":
        model_ft = models.vgg19_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)

    elif model_name == "squeezenet":
        model_ft = models.squeezenet1_1(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(
            512, num_classes, kernel_size=(7, 7), stride=(2, 2)
        )
        # model_ft.num_classes = num_classes

    elif model_name == "densenet169":
        model_ft = models.densenet169(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)

    elif model_name == "densenet201":
        model_ft = models.densenet201(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)

    elif model_name == "efficient":
        model_ft = EfficientNet.from_name("efficientnet-b0")
    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft
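A hypothetical call site for the factory above ("resnet18" and the flag values are illustrative; config.CLASS_NAMES and set_parameter_requires_grad are assumed to be defined elsewhere in the repo):
model = initialize_model("resnet18", feature_extract=True, use_pretrained=True)
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(trainable)  # with feature_extract=True only the new fc layer remains trainable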
Example #11
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print('running on device:', device)
    if not os.path.exists(params.model_dir):
        os.mkdir(params.model_dir)
    f_id = None
    if args.record == 'true':
        loss_filename = params.model_dir + 'training_loss.txt'
        f_id = open(loss_filename, 'w')

    # model = models.resnet50(pretrained=True)  # type:nn.Module
    # num_ftrs = model.fc.in_features
    # model.fc = nn.Linear(num_ftrs, params.num_classes)
    # model = nets.resnext101_elastic(num_classes=params.num_classes)  # type:nn.Module
    model = EfficientNet.from_pretrained('efficientnet-b5', num_classes=params.num_classes,
                                         elastic=(args.elastic == 'true'),
                                         cbam=(args.cbam == 'true'))
    print('model type: {}'.format(type(model)))
    if args.hard_sample_mining == 'true':
        print('use hard sample mining strategy')
        criterion = nn.CrossEntropyLoss(reduction='none')
    else:
        criterion = nn.CrossEntropyLoss()
    if args.label_smooth == 'true':
        print('use label smoothing')
        criterion = label_smooth.LabelSmoothSoftmaxCE()
    # optimizer = torch.optim.SGD(model.parameters(), params.lr if args.model_path is None else 0.001,
    #                             momentum=params.momentum, weight_decay=params.weight_decay)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3, betas=(0.9, 0.999), eps=1e-9)
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.7, patience=3, verbose=True)
Example #12
 def __init__(self):
     super(EfficientNetB4, self).__init__()
     self.efficientnet = EfficientNet.from_pretrained('efficientnet-b4')
Example #13
def get_net():
    net = EfficientNet.from_pretrained('efficientnet-b5', num_classes=4)
    return net
Example #14
    if model_num == 152:
        model = torch.load('models/resnet152.model').to(device)
    elif model_num == 101:
        model = models.resnet101().to(device)
        model.fc = nn.Linear(2048, 42).to(device)
    else:
        model = models.resnet152().to(device)
        model.fc = nn.Linear(2048, 42).to(device)

elif model_type == 'd':
    model = models.densenet169().to(device)
    model.classifier = nn.Linear(1664, 42).to(device)

elif model_type == 'e':
    if model_num == 1:
        model = EfficientNet.from_pretrained('efficientnet-b1',
                                             num_classes=42).to(device)
    elif model_num == 3:
        model = EfficientNet.from_pretrained('efficientnet-b3',
                                             num_classes=42).to(device)
    elif model_num == 4:
        model = EfficientNet.from_pretrained('efficientnet-b4',
                                             num_classes=42).to(device)
    elif model_num == 5:
        model = EfficientNet.from_pretrained('efficientnet-b5',
                                             num_classes=42).to(device)
    elif model_num == 7:
        model = EfficientNet.from_pretrained('efficientnet-b7',
                                             num_classes=42).to(device)

elif model_type == 'v':
    model = models.vgg16_bn().to(device)
Example #15
    def trial_process(self, trial, optimizer, learning_rate, horizontal_flip,
                      horizontal_shift_ratio, vertical_shift_ratio,
                      random_erasing):
        self.best_pred = 0.0
        self.start_run(trial)

        # log per-trial information to mlflow
        self.log_trial(trial)

        self.model = EfficientNet.from_pretrained(self.args.backbone)

        # Unfreeze model weights
        for param in self.model.parameters():
            param.requires_grad = True

        num_ftrs = self.model._fc.in_features
        self.model._fc = nn.Linear(num_ftrs, self.args.nclass)

        if self.args.smry_viz:
            from torchinfo import summary
            from torchviz import make_dot
            dummy_image = torch.zeros((2, 3, 32, 32))
            dummy_output = self.model(dummy_image)
            make_dot(dummy_output, params=dict(self.model.named_parameters())).render("torchviz", format="png")
            summary(self.model, (1, 3, 32, 32))
            import sys
            sys.exit()

        if self.args.cuda:
            self.model = self.model.to('cuda')

        if optimizer == 'SGD':
            self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate)
        elif optimizer == 'Adam':
            self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)


        pipeline = [
            T.ToTensor(),
            T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]

        if strtobool(horizontal_flip) == 1:
            pipeline.append(T.RandomHorizontalFlip(p=0.5))

        pipeline.append(T.RandomAffine(0, translate=(horizontal_shift_ratio, vertical_shift_ratio)))

        if strtobool(random_erasing) == 1:
            pipeline.append(T.RandomErasing())

        transform = T.Compose(pipeline)
                
        train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                                download=True, transform=transform)
        self.train_loader = torch.utils.data.DataLoader(train_set, batch_size=self.args.batch_size,
                                                shuffle=True, num_workers=self.args.workers)

        val_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                                download=True, transform=transform)
        self.val_loader = torch.utils.data.DataLoader(val_set, batch_size=self.args.batch_size,
                                                shuffle=False, num_workers=self.args.workers)

        self.criterion = nn.CrossEntropyLoss()

        best_score = 0.0  # ensure best_score exists even if validation never runs
        for epoch in range(self.args.start_epoch, self.args.epochs):
            self.training(epoch)
            if not self.args.no_val and epoch % self.args.eval_interval == (self.args.eval_interval - 1):
                best_score = self.validating(epoch)

        self.end_run()

        # scoring by best
        return 1.0 - best_score
Example #16
def main(config):

    # For Reproducibility #
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(config.seed)

    # Weights and Plots Path #
    paths = [config.weights_path, config.plots_path]
    for path in paths:
        make_dirs(path)

    # Prepare Data Loader #
    if config.dataset == 'cifar':
        train_loader, val_loader, test_loader = cifar_loader(
            config.num_classes, config.batch_size)
        input_size = 32
    else:
        raise NotImplementedError

    # Prepare Networks #
    if config.model == 'vit':
        model = VisionTransformer(in_channels=config.in_channels,
                                  embed_dim=config.embed_dim,
                                  patch_size=config.patch_size,
                                  num_layers=config.num_layers,
                                  num_heads=config.num_heads,
                                  mlp_dim=config.mlp_dim,
                                  dropout=config.drop_out,
                                  input_size=input_size,
                                  num_classes=config.num_classes).to(device)

    elif config.model == 'efficient':
        model = EfficientNet.from_name(
            'efficientnet-b0', num_classes=config.num_classes).to(device)

    elif config.model == 'resnet':
        model = resnet34(pretrained=False).to(device)
        model.fc = nn.Linear(config.mlp_dim, config.num_classes).to(device)

    else:
        raise NotImplementedError

    # Weight Initialization #
    if config.model != 'efficient':
        if config.init == 'normal':
            model.apply(init_weights_normal)
        elif config.init == 'xavier':
            model.apply(init_weights_xavier)
        elif config.init == 'he':
            model.apply(init_weights_kaiming)
        else:
            raise NotImplementedError

    # Train #
    if config.phase == 'train':

        # Loss Function #
        criterion = nn.CrossEntropyLoss()

        # Optimizers #
        if config.num_classes == 10:
            optimizer = torch.optim.Adam(model.parameters(),
                                         lr=config.lr,
                                         betas=(0.5, 0.999))
            optimizer_scheduler = get_lr_scheduler(config.lr_scheduler,
                                                   optimizer)
        elif config.num_classes == 100:
            optimizer = torch.optim.SGD(model.parameters(),
                                        lr=config.lr,
                                        momentum=0.9,
                                        weight_decay=5e-4)
            optimizer_scheduler = get_lr_scheduler('step', optimizer)
        else:
            raise NotImplementedError

        # Constants #
        best_top1_acc = 0

        # Lists #
        train_losses, val_losses = list(), list()
        train_top1_accs, train_top5_accs = list(), list()
        val_top1_accs, val_top5_accs = list(), list()

        # Train and Validation #
        print("Training {} has started.".format(model.__class__.__name__))
        for epoch in range(config.num_epochs):

            # Train #
            train_loss, train_top1_acc, train_top5_acc = train(
                train_loader, model, optimizer, criterion, epoch, config)

            # Validation #
            val_loss, val_top1_acc, val_top5_acc = validate(
                val_loader, model, criterion, epoch, config)

            # Add items to Lists #
            train_losses.append(train_loss)
            val_losses.append(val_loss)

            train_top1_accs.append(train_top1_acc)
            train_top5_accs.append(train_top5_acc)

            val_top1_accs.append(val_top1_acc)
            val_top5_accs.append(val_top5_acc)

            # If Best Top 1 Accuracy #
            if val_top1_acc > best_top1_acc:
                best_top1_acc = val_top1_acc

                # Save Models #
                print("The best model is saved!")
                torch.save(
                    model.state_dict(),
                    os.path.join(
                        config.weights_path,
                        'BEST_{}_{}_{}.pkl'.format(model.__class__.__name__,
                                                   str(config.dataset).upper(),
                                                   config.num_classes)))

            print("Best Top 1 Accuracy {:.2f}%\n".format(best_top1_acc))

            # Optimizer Scheduler #
            optimizer_scheduler.step()

        # Plot Losses and Accuracies #
        losses = (train_losses, val_losses)
        accs = (train_top1_accs, train_top5_accs, val_top1_accs, val_top5_accs)
        plot_metrics(losses, accs, config.plots_path, model, config.dataset,
                     config.num_classes)

        print("Training {} using {} {} finished.".format(
            model.__class__.__name__,
            str(config.dataset).upper(), config.num_classes))

    # Test #
    elif config.phase == 'test':

        test(test_loader, model, config)

    else:
        raise NotImplementedError
Example #17
def main():
    # if a GPU is available, use it
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print("Use " + str(device))

    # create dataset
    file_list = None
    for path, dirs, files in os.walk(test_path, topdown=False):
        file_list = list(files)

    # preprocessing steps
    transform = transforms.Compose([
        transforms.Resize((512, 512)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    test_dataset = Leaf_test_Dataset(file_list, test_path, transform)
    test_loader = DataLoader(dataset=test_dataset, batch_size=batchSize)

    print("Start testing:")

    # net model
    eff_models = []
    for model_path in eff_model_paths:
        eff_net = EfficientNet.from_name('efficientnet-b4')
        eff_net._fc = nn.Linear(eff_net._fc.in_features, 5)
        eff_net.load_state_dict(torch.load(model_path))
        eff_net = eff_net.to(device)
        eff_net.eval()
        eff_models.append(eff_net)

    preds = []
    result = None

    with torch.no_grad():
        batch_num = len(test_loader)
        for index, image in enumerate(test_loader):
            image = image.to(device)

            eff_result = []
            for eff_net in eff_models:
                output = eff_net(image)
                output = output.to('cpu')
                pred = output.argmax(dim=1, keepdim=True).flatten()
                eff_result.append(pred)

            if len(preds) == 0:
                preds = np.dstack(eff_result)[0]
            else:
                preds = np.vstack([preds, np.dstack(eff_result)[0]])
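            # preds accumulates one row per test image holding the five
            # models' predicted class ids (the ensemble's raw votes)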

        # start train combine model
        df = pd.read_csv(pred_train_csv)

        # drop rows where every model predicted incorrectly
        # get the pred acc for this line
        def get_acc(pred_csv, index):
            label = pred_csv.loc[index, 'label']
            acc = sum(0.2 for i in range(5)
                      if pred_csv.loc[index, 'pred_{}'.format(i)] == label)
            return round(acc, 1)

        delete_index = []
        for index in range(len(df)):
            acc = get_acc(df, index)
            # remove noise data
            if acc <= 0:
                delete_index.append(index)

        df = df.drop(delete_index)
        df = df.reset_index(drop=True)

        X = np.array(df[["pred_0", "pred_1", "pred_2", "pred_3", "pred_4"]])
        y = np.array(df[["label"]]).flatten()
        from sklearn.neural_network import MLPClassifier
        # a small MLP serves as the stacking meta-learner over the five
        # models' votes; named `mlp` to avoid shadowing torch.nn (`nn`)
        mlp = MLPClassifier(max_iter=2000)
        mlp.fit(X, y)
        result = mlp.predict(preds)

    pred_result = pd.concat([
        pd.DataFrame(file_list, columns=['image_id']),
        pd.DataFrame(result, columns=['label'])
    ], axis=1)
    pred_result.to_csv(output_path + "submission.csv", index=False, sep=',')

    print("Done.")
from __future__ import absolute_import, division, print_function, unicode_literals

import json
from PIL import Image
import torch
from torchvision import transforms

from efficientnet_pytorch import EfficientNet
model = EfficientNet.from_pretrained('efficientnet-b0')

image_path = 'examples/simple/img.jpg'
labels_map = 'examples/simple/labels_map.txt'

# Preprocess image
tfms = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img = tfms(Image.open(image_path)).unsqueeze(0)
print(img.shape) # torch.Size([1, 3, 224, 224])

# Load ImageNet class names
labels_map = json.load(open(labels_map))
labels_map = [labels_map[str(i)] for i in range(1000)]

# Classify
model.eval()
with torch.no_grad():
    outputs = model(img)

# Print predictions
print('-----')
for idx in torch.topk(outputs, k=5).indices.squeeze(0).tolist():
    prob = torch.softmax(outputs, dim=1)[0, idx].item()
    print('{label:<75} ({p:.2f}%)'.format(label=labels_map[idx], p=prob * 100))
Example #19
    def __init__(self, config):
        super(FrameWork, self).__init__()
        self.inputs = None
        self.labels = None
        self.config = config
        if torch.cuda.is_available() and self.config['System']['GPU'] and int(
                self.config['System']['GPU_ID']) > -1:
            self.device = torch.device('cuda:{}'.format(
                self.config['System']['GPU_ID']))
        else:
            self.device = torch.device('cpu')
        self.image_channel = self.config['Model']['ImageChannel']
        self.resize = self.config['Model']['RESIZE']
        self.cnn_type = self.config['Train']['CNN']['NAME']
        self.paramters = []
        if self.cnn_type == CNN.MobileNetV2.value:
            from torchvision.models import mobilenet_v2
            self.cnn = mobilenet_v2().features
            self.cnn[0][0] = torch.nn.Conv2d(int(self.image_channel),
                                             32, (3, 3),
                                             stride=(2, 2),
                                             padding=(1, 1),
                                             bias=False)
            self.cnn.to(device=self.device)
            self.paramters.append({'params': self.cnn.parameters()})
            self.out_size = 1280
        elif self.cnn_type == CNN.EfficientNetb0.value:
            from efficientnet_pytorch import EfficientNet
            self.cnn = EfficientNet.from_name('efficientnet-b0')
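            # swap the stem so the backbone accepts ImageChannel inputs; note
            # that the plain Conv2d replacement drops the library's static
            # same-padding behavior, which shifts feature-map sizes slightly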
            self.cnn._conv_stem = torch.nn.Conv2d(int(self.image_channel),
                                                  32,
                                                  kernel_size=3,
                                                  stride=2,
                                                  bias=False)
            self.cnn.to(device=self.device)
            self.paramters.append({'params': self.cnn.parameters()})
            self.out_size = 1280
        else:
            raise CnnNotFoundError("CNN Name not found!")

        rnn = self.config['Train']['RNN']['NAME']
        self.hidden_num = int(self.config['Train']['LSTM']['HIDDEN_NUM'])
        dropout = float(self.config['Train']['LSTM']['DROPOUT'])  # nn.LSTM expects a float probability
        if rnn == RNN.LSTM.value:
            self.lstm = nn.LSTM(input_size=self.out_size,
                                hidden_size=self.hidden_num,
                                num_layers=2,
                                bidirectional=True,
                                dropout=dropout)
            self.lstm.to(device=self.device)
            self.paramters.append({'params': self.lstm.parameters()})
        else:
            raise RnnNotFoundError("RNN Name not found!")

        self.charset = self.config['Model']['CharSet']
        self.charset = json.loads(self.charset)
        self.charset_len = len(self.charset)

        self.fc = nn.Linear(in_features=self.hidden_num * 2,
                            out_features=self.charset_len)
        self.fc.to(device=self.device)
        self.paramters.append({'params': self.fc.parameters()})

        self.ctc_loss = nn.CTCLoss(blank=0, reduction='mean')
        self.ctc_loss.to(device=self.device)
        self.paramters.append({'params': self.ctc_loss.parameters()})

        optimizer = self.config['Train']['OPTIMIZER']
        self.lr = self.config['Train']['LR']
        if optimizer == OPTIMIZER.Momentum.value:
            self.optimizer = optim.SGD(self.paramters,
                                       lr=self.lr,
                                       momentum=0.9)
        elif optimizer == OPTIMIZER.Adma.value:
            self.optimizer = optim.Adam(self.paramters,
                                        lr=self.lr,
                                        betas=(0.9, 0.99))
        else:
            raise OptimizerNotFoundError("Optimizer Name not found!")

        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer,
                                                          gamma=0.98)
Example #20
def EfficientNetB4(num_classes=8):
    model = efficientnet.from_name('efficientnet-b4')
    model._fc = nn.Linear(model._fc.in_features, num_classes)
    return model
Example #21
    def __init__(self, cnn_encoder):
        super(EncoderCNNefficientnet, self).__init__()
        self.cnn = EfficientNet.from_pretrained(cnn_encoder)

        for param in self.cnn.parameters():
            param.requires_grad = True
Example #22
    def __init__(self, opt):
        super(PCB_Effi, self).__init__()
        self.opt = opt
        self.model = EfficientNet.from_pretrained('efficientnet-b0')
        self.avgpool = nn.AdaptiveAvgPool2d((self.opt.nparts, 1))
        self.dropout = nn.Dropout(p=0.5)

        self.feature_dim = self.model._fc.in_features

        if self.opt.single_cls:
            name = 'classifier'
            setattr(
                self, name,
                ClassBlock(self.opt.nparts * self.feature_dim,
                           self.opt.nclasses,
                           droprate=0.5,
                           relu=False,
                           bnorm=True,
                           num_bottleneck=256))
        else:
            for i in range(self.opt.nparts):
                name = 'classifierA' + str(i)
                setattr(
                    self, name,
                    ClassBlock(self.feature_dim,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 1):
                name = 'classifierB' + str(i)
                setattr(
                    self, name,
                    ClassBlock(2 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 1):
                name = 'classifierB' + str(i + self.opt.nparts - 1)
                setattr(
                    self, name,
                    ClassBlock(2 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 2):
                name = 'classifierC' + str(i)
                setattr(
                    self, name,
                    ClassBlock(3 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 2):
                name = 'classifierC' + str(i + self.opt.nparts - 2)
                setattr(
                    self, name,
                    ClassBlock(3 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))

            for i in range(self.opt.nparts - 3):
                name = 'classifierD' + str(i)
                setattr(
                    self, name,
                    ClassBlock(4 * 1280,
                               self.opt.nclasses,
                               droprate=0.5,
                               relu=False,
                               bnorm=True,
                               num_bottleneck=256))
Example #23
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--seed', type=int, default=42)
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    device = args.device

    use_ensemble_model_session = [
        {'session': 'team_27/airush1/372', 'checkpoint': '1'},    # 'efficientnet-b4'
        {'session': 'team_27/airush1/354', 'checkpoint': '9'},    # se_resnext50_32x4d
        {'session': 'team_27/airush1/377', 'checkpoint': '14'},   # 'nasnetamobile'
    ]

    modelA = EfficientNet.from_name('efficientnet-b4', override_params={'num_classes': args.output_size})
    modelB = make_model('se_resnext50_32x4d', num_classes=args.output_size, pretrained=False, pool=nn.AdaptiveAvgPool2d(1))
    modelC = make_model('nasnetamobile', num_classes=args.output_size, pretrained=False, pool=nn.AdaptiveAvgPool2d(1))
    # DONOTCHANGE: They are reserved for nsml
    bind_model(modelA)
    re_train_info = use_ensemble_model_session[0]  # 'efficientnet-b4'
    nsml.load(checkpoint=re_train_info['checkpoint'], session=re_train_info['session'])
    bind_model(modelB)
    re_train_info = use_ensemble_model_session[1]  # 'se_resnext50_32x4d'
    nsml.load(checkpoint=re_train_info['checkpoint'], session=re_train_info['session'])
    bind_model(modelC)
    re_train_info = use_ensemble_model_session[2]  # 'nasnetamobile'
    nsml.load(checkpoint=re_train_info['checkpoint'], session=re_train_info['session'])

    model = MyEnsembleTTA(modelA, modelB, modelC)
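    # fold BatchNorm layers into the preceding convolutions; safe for
    # inference-only use and speeds up the three-model ensemble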
    model = fuse_bn_recursively(model)
Example #24
def train():
    data_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.CenterCrop((200, 200)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.3),
        transforms.RandomRotation(30),
        transforms.RandomAffine(degrees=0, translate=(0.2, 0.2)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
        #transforms.Lambda(lambda img: img * 2.0 - 1.0)
    ])
    train_set = IMAGE_Dataset(Path(DATASET_ROOT_train), data_transform)
    data_loader = DataLoader(dataset=train_set,
                             batch_size=16,
                             shuffle=True,
                             num_workers=1)
    model = EfficientNet.from_pretrained('efficientnet-b7')
    model = model.cuda(CUDA_DEVICES)
    model.train()

    best_model_params = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    num_epochs = 200
    criterion = nn.CrossEntropyLoss()

    stepsize = 20
    base_lr = 0.001
    max_lr = 0.01
    base_mm = 0.8
    max_mm = 0.99

    for epoch in range(num_epochs):
        #newlr = get_triangular_lr(epoch,stepsize,base_lr,max_lr)
        #mm=get_dynamic_momentum(epoch,stepsize,base_mm,max_mm)
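        # NOTE: re-creating the optimizer every epoch resets SGD's momentum
        # buffers; constructing it once above the loop would preserve them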
        optimizer = torch.optim.SGD(params=model.parameters(),
                                    lr=0.001,
                                    momentum=0.9)
        print(f'Epoch: {epoch + 1}/{num_epochs}')
        print('-' * len(f'Epoch: {epoch + 1}/{num_epochs}'))

        training_loss = 0.0
        training_corrects = 0

        for i, (inputs, labels) in enumerate(data_loader):
            # Variable is a no-op wrapper since PyTorch 0.4; move to GPU directly
            inputs = inputs.cuda(CUDA_DEVICES)
            labels = labels.cuda(CUDA_DEVICES)

            optimizer.zero_grad()

            outputs = model(inputs)

            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            training_loss += loss.item() * inputs.size(0)
            # print(training_loss)
            # note: loss.data[0] was replaced by loss.item()
            training_corrects += torch.sum(preds == labels.data)
            # print(f'training_corrects: {training_corrects}')

        training_loss = training_loss / len(train_set)
        training_acc = training_corrects.double() / len(train_set)
        print(
            f'Training loss: {training_loss:.4f}\taccuracy: {training_acc:.4f}\n'
        )

        test_acc = test(model)

        if test_acc > best_acc:
            best_acc = test_acc
            best_model_params = copy.deepcopy(model.state_dict())

    model.load_state_dict(best_model_params)
    torch.save(model, f'model-{best_acc:.02f}-best_train_acc.pth')
Example #25
    def __init__(self,
                 model_name='efficientnet-b3',
                 n_channels=3,
                 n_classes=None,
                 pretrained=False):
        super(ENet, self).__init__()
        model_encoder = {
            # 'efficientnet-b0': {
            #     'model': EfficientNet.from_name('efficientnet-b0'),
            #     'pretrained_model': EfficientNet.from_pretrained('efficientnet-b0', advprop=True),
            #     'out_channels': 1280
            # },
            # 'efficientnet-b1': {
            #     'model': EfficientNet.from_name('efficientnet-b1'),
            #     'pretrained_model': EfficientNet.from_pretrained('efficientnet-b1', advprop=True),
            #     'out_channels': 1280
            # },
            # 'efficientnet-b2': {
            #     'model': EfficientNet.from_name('efficientnet-b2'),
            #     'pretrained_model': EfficientNet.from_pretrained('efficientnet-b2', advprop=True),
            #     'out_channels': 1408
            # },
            'efficientnet-b3': {
                'model': EfficientNet.from_name('efficientnet-b3'),
                'pretrained_model': EfficientNet.from_pretrained('efficientnet-b3', advprop=True),
                'out_channels': 1536
            },
            # 'efficientnet-b4': {
            #     'model': EfficientNet.from_name('efficientnet-b4'),
            #     'pretrained_model': EfficientNet.from_pretrained('efficientnet-b4', advprop=True),
            #     'out_channels': 1792
            # },
            # 'efficientnet-b5': {
            #     'model': EfficientNet.from_name('efficientnet-b5'),
            #     'pretrained_model': EfficientNet.from_pretrained('efficientnet-b5', advprop=True),
            #     'out_channels': 2048
            # },
            # 'efficientnet-b6': {
            #     'model': EfficientNet.from_name('efficientnet-b6'),
            #     'pretrained_model': EfficientNet.from_pretrained('efficientnet-b6', advprop=True),
            #     'out_channels': 2304
            # },
            # 'efficientnet-b7': {
            #     'model': EfficientNet.from_name('efficientnet-b7'),
            #     'pretrained_model': EfficientNet.from_pretrained('efficientnet-b7', advprop=True),
            #     'out_channels': 2560
            # },
        }

        if pretrained:
            self.model = model_encoder[model_name]['pretrained_model']
        else:
            self.model = model_encoder[model_name]['model']
        self.out_channels = model_encoder[model_name]['out_channels']

        # Expand the stem conv's weights so the backbone actually accepts
        # n_channels inputs (overwriting .in_channels alone does not resize the
        # weight tensor, and the original in-place slice copies fail for
        # n_channels > 3). Pretrained RGB filters are tiled across the extra
        # channels, matching the original copy scheme for up to 6 channels.
        conv_stem = self.model._conv_stem
        if n_channels != 3:
            old_weight = conv_stem.weight.data.clone()  # shape: (out, 3, k, k)
            new_weight = old_weight.new_zeros(old_weight.size(0), n_channels,
                                              *old_weight.shape[2:])
            for c in range(n_channels):
                new_weight[:, c, :, :] = old_weight[:, c % 3, :, :]
            conv_stem.weight = nn.Parameter(new_weight)
            conv_stem.in_channels = n_channels

        self.model._avg_pooling = GeM()
        self.model._fc = nn.Sequential(
            nn.BatchNorm1d(self.out_channels,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True))
        self.model._fc.out_channels = self.out_channels

        self._fc_gr = nn.Sequential(
            nn.BatchNorm1d(self.out_channels,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True),
            nn.Linear(in_features=self.out_channels,
                      out_features=n_classes[0],
                      bias=True))
        self._fc_vo = nn.Sequential(
            nn.BatchNorm1d(self.out_channels,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True),
            nn.Linear(in_features=self.out_channels,
                      out_features=n_classes[1],
                      bias=True))
        self._fc_co = nn.Sequential(
            nn.BatchNorm1d(self.out_channels,
                           eps=1e-05,
                           momentum=0.1,
                           affine=True,
                           track_running_stats=True),
            nn.Linear(in_features=self.out_channels,
                      out_features=n_classes[2],
                      bias=True))
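The constructor defines three task heads but the excerpt omits forward; a plausible sketch, assuming the gutted _fc leaves self.model(x) returning pooled, batch-normalized features (only the attribute names are taken from the constructor, the rest is an assumption):
    def forward(self, x):
        feat = self.model(x)  # backbone -> GeM pooling -> BatchNorm1d head
        return self._fc_gr(feat), self._fc_vo(feat), self._fc_co(feat)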
Example #26
def predict_class_probabilities(options):
    """Predicts class probabilities and optionally evaluates accuracy, precision, 
    recall and f1 score if labels are provided

    Args:
        options: parsed arguments
    """
    # Initialize model
    num_classes = 196
    num_attentions = 64

    if options.feature_net_name == 'resnet152cbam':
        feature_net = resnet152_cbam(pretrained=True)
    elif options.feature_net_name == 'efficientnetb3':
        feature_net = EfficientNet.from_pretrained('efficientnet-b3')
    elif options.feature_net_name == 'inception':
        feature_net = inception_v3(pretrained=True)
    else:
        raise RuntimeError('Invalid model name')

    net = WSDAN(num_classes=num_classes, M=num_attentions, net=feature_net)

    # Load ckpt and get state_dict
    checkpoint = torch.load(options.ckpt_path)
    state_dict = checkpoint['state_dict']
    # Load weights
    net.load_state_dict(state_dict)
    logging.info('Network loaded from {}'.format(options.ckpt_path))
    # load feature center
    feature_center = checkpoint['feature_center'].to(torch.device(device))
    logging.info('feature_center loaded from {}'.format(options.ckpt_path))

    # Use cuda
    cudnn.benchmark = True
    net.to(torch.device(device))
    net = nn.DataParallel(net)

    # Load dataset
    dataset, data_loader, image_list = prepare_dataloader(options)
    # Default Parameters
    theta_c = 0.5
    crop_size = image_size  # size of cropped images for 'See Better'
    # metrics initialization
    batches = 0
    epoch_loss = 0
    epoch_acc = np.array([0, 0, 0], dtype='float')  # top-1, top-3, top-5
    loss = nn.CrossEntropyLoss()

    start_time = time.time()
    net.eval()
    y_pred_average = np.zeros((len(dataset), 196))

    with torch.no_grad():
        for i, sample in enumerate(tqdm(data_loader)):
            if options.do_eval:
                X, y = sample
                y = y.to(torch.device(device))
            else:
                X = sample
            X = X.to(torch.device(device))

            # Raw Image
            y_pred_raw, feature_matrix, attention_map = net(X)
            # Object Localization and Refinement
            crop_mask = F.upsample_bilinear(
                attention_map, size=(X.size(2), X.size(3))) > theta_c
            crop_images = []
            for batch_index in range(crop_mask.size(0)):
                nonzero_indices = torch.nonzero(crop_mask[batch_index, 0, ...])
                height_min = nonzero_indices[:, 0].min()
                height_max = nonzero_indices[:, 0].max()
                width_min = nonzero_indices[:, 1].min()
                width_max = nonzero_indices[:, 1].max()
                crop_images.append(
                    F.upsample_bilinear(X[batch_index:batch_index + 1, :,
                                          height_min:height_max,
                                          width_min:width_max],
                                        size=crop_size))
            crop_images = torch.cat(crop_images, dim=0)
            y_pred_crop, _, _ = net(crop_images)

            y_pred = (y_pred_raw + y_pred_crop) / 2
            y_pred_average[i * options.batch_size:(i + 1) *
                           options.batch_size] = y_pred.cpu().numpy()
            batches += 1
            if options.do_eval:
                # loss
                batch_loss = loss(y_pred, y)
                epoch_loss += batch_loss.item()

                # metrics: top-1, top-3, top-5 error
                epoch_acc += accuracy(y_pred, y, topk=(1, 3, 5))

    end_time = time.time()
    if options.do_eval:
        epoch_loss /= batches
        epoch_acc /= batches
        logging.info(
            'Valid: Loss %.5f,  Accuracy: Top-1 %.4f, Top-3 %.4f, Top-5 %.4f, Time %3.2f'
            % (epoch_loss, epoch_acc[0], epoch_acc[1], epoch_acc[2],
               end_time - start_time))
        ground_truth = [sample[1] for sample in dataset.samples]
        precision, recall, f1, _ = precision_recall_fscore_support(
            ground_truth, np.argmax(y_pred_average, axis=1), average='macro')
        logging.info(
            f'Precision: {precision}, Recall: {recall}, Macro F1: {f1}')
        y_pred_average = softmax(y_pred_average, axis=1)
        save_predictions(image_list,
                         y_pred_average,
                         options,
                         ground_truth=ground_truth)
    else:
        save_predictions(image_list, y_pred_average, options)
Example #27
def get_net():
    net = EfficientNet.from_pretrained('efficientnet-b5')
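    # 2048 is efficientnet-b5's final feature width; net._fc.in_features
    # would avoid hard-coding it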
    net._fc = nn.Linear(in_features=2048, out_features=4, bias=True)
    return net
Example #28
training_generator = data.DataLoader(training_set, **params)

# validation data generator
validation_set = Dataset(os.path.join(BASE_VAL_PATH,
                                      'regular-fundus-validation',
                                      'regular-fundus-validation.csv'),
                         BASE_VAL_PATH,
                         transform=transform_train)

validation_generator = data.DataLoader(validation_set, **params)

use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=5)

model.to(device)

print(summary(model, input_size=(3, 512, 512)))

PATH_SAVE = '../Weights/'
if (not os.path.exists(PATH_SAVE)):
    os.mkdir(PATH_SAVE)

criterion = nn.CrossEntropyLoss()
lr_decay = 0.99
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# 5x5 identity matrix, handy for one-hot encoding the 5 class labels
eye = torch.eye(5).to(device)
Example #29
valset = JoinDataset_val()
val_loader = DataLoader(dataset=valset, batch_size=64, shuffle=True)

dataloders = {'train': train_loader, 'val': val_loader}

# specify the path to the trained model's weights
weight_path = 'advprop_efficientnet-b0.pth'
trained_weights_path = os.path.join(
    '../input/efficientnetpytorch/EfficientNet-PyTorch/efficientnet_weights',
    weight_path)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True

# create the model
model = EfficientNet.from_name('efficientnet-b0')
model.load_state_dict(
    torch.load(trained_weights_path, map_location=torch.device(device)))
model._fc = nn.Linear(model._fc.in_features, out_features=1)

# # freeze weights up to a chosen layer (used for transfer learning)
# for i,param in enumerate(model.parameters()):
#     param.requires_grad = False
#     print(i)
#     if i == 50:
#         break

# # append extra dense layers to the FC head (used for transfer learning)
# fc = nn.Sequential(
#         nn.Linear(1280, 512),
#         nn.ReLU(),
Example #30
def main():
    model_choice = input()
    num_bands = int(input())

    if model_choice == 'Resnet-18':
        model = models.resnet18(pretrained=True)
        num_feat = model.fc.in_features
        model.fc = nn.Linear(num_feat, 4)

        model.conv1 = nn.Sequential(
            nn.ConvTranspose2d(num_bands, 32, 2, 1, 2, bias=False),
            nn.Conv2d(32, 3, 1, bias=False), nn.ReLU(inplace=True),
            nn.Conv2d(3, 64, 7, 2, 3, bias=False))

        model.to(device)

    elif model_choice == 'Resnet-50':
        model = models.resnet50(pretrained=True)
        num_feat = model.fc.in_features
        model.fc = nn.Linear(num_feat, 4)

        model.conv1 = nn.Sequential(
            nn.ConvTranspose2d(num_bands, 32, 2, 1, 2, bias=False),
            nn.Conv2d(32, 3, 1, bias=False), nn.ReLU(inplace=True),
            nn.Conv2d(3, 64, 7, 2, 3, bias=False))
        model.to(device)

    elif model_choice == 'EfficientNet-B4':
        model = EfficientNet.from_pretrained('efficientnet-b4', num_classes=4)
        model = model.to(device)

    base_opt = torch.optim.Adam(model.parameters(), 1.)  ## base optimizer

    ## Set up the CLR
    step_size = 2
    max_lr = 1e-4
    base_lr = 1e-5
    cyclic_lr = triangular_lr(step_size, base_lr, max_lr)
    scheduler = LambdaLR(base_opt, cyclic_lr)

    ## Setup the SWA
    opt = torchcontrib.optim.SWA(base_opt)

    ## Training Loop
    n_epochs = 40
    train_loader, valid_loader = load_data(weighted=False)
    valid_acc_max = -np.Inf
    train_losses, valid_losses = [], []
    val_acc = []
    idx = 0
    betas = [0.99, 0.99999]

    for epoch in range(1, n_epochs + 1):

        train_loss = 0.0  ## running losses
        valid_loss = 0.0

        if epoch > 25 and idx == 0:
            idx += 1
            valid_acc_max = -np.Inf
            correct, total = acc_by_class(valid_loader, model)

        beta = betas[idx]
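        # class-balanced re-weighting via the "effective number of samples"
        # (Cui et al., CVPR 2019): weight_c is proportional to
        # (1 - beta) / (1 - beta ** n_c)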
        effective_num = 1.0 - np.power(beta, cls_num_list)
        per_cls_weights = (1.0 - beta) / np.array(effective_num)
        per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(
            cls_num_list)
        per_cls_weights = torch.FloatTensor(per_cls_weights).to(device)

        criterion = LDAMLoss(cls_num_list=cls_num_list,
                             weight=per_cls_weights)  ## Loss

        model.train()  ## training mode
        for data, labels in train_loader:

            data, labels = data.to(device), labels.to(device)
            batch_size = data.size(0)

            opt.zero_grad()  ## clear the gradient
            output = model(data)  ## get the output
            loss = criterion(output, labels)  ## compute the loss
            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           1)  ## gradient clipping
            opt.step()
            train_loss += loss.item() * batch_size

        model.eval()
        with torch.no_grad():  # no gradients needed for validation
            for data, labels in valid_loader:
                data, labels = data.to(device), labels.to(device)
                output = model(data)
                loss = criterion(output, labels)
                # use this batch's size, not the stale one from the train loop
                valid_loss += loss.item() * data.size(0)

        train_loss /= len(train_loader.sampler)
        valid_loss /= len(valid_loader.sampler)

        scheduler.step()
        valid_acc = accuracy(valid_loader, model)

        valid_losses.append(valid_loss)
        train_losses.append(train_loss)

        val_acc.append(valid_acc)

        print(
            'Epoch : {} \tTraining Loss : {} \tValidation Loss : {} \tValidation Acc : {}'
            .format(epoch, train_loss, valid_loss, valid_acc))
        print('-' * 100)
        if valid_acc >= valid_acc_max:
            print('Validation Acc. increased ({:.6f} --> {:.6f})'.format(
                valid_acc_max, valid_acc))
            valid_acc_max = valid_acc