Esempio n. 1
0
def main_predict(cfg, model_name, verbose=False):
    """Run inference with the requested model and write ``submission.csv``.

    Args:
        cfg: configuration dict (keys used: "models", "DATASET_DIR",
            "BATCH_SIZE", "IMAGE_SIZE", "TARGET_COLS", "IMAGE_COL").
        model_name: "resnet", "efficientnet", or any other value to build
            an ensemble of both backbones.
        verbose: when True, print a model summary and the head of the
            resulting predictions.
    """
    device = get_device()

    def _load(model_cls, name):
        """Instantiate ``model_cls`` and restore its checkpoint from cfg."""
        model_path = cfg["models"][name]
        net = model_cls(cfg=cfg)
        # Load on CPU first so GPU-trained checkpoints restore anywhere
        # (the original else-branch inconsistently used ``device`` here).
        net.load_state_dict(
            torch.load(model_path, map_location=torch.device('cpu')))
        print(f'Loaded model from file: {model_path}')
        return net

    try:
        if model_name == "resnet":
            model = _load(Resnet, "resnet")
        elif model_name == "efficientnet":
            model = _load(EfficientNet, "efficientnet")
        else:
            # Any other name selects the ensemble of both backbones.
            model = Ensemble(resnet=_load(Resnet, "resnet"),
                             efficientnet=_load(EfficientNet, "efficientnet"))
    except KeyError:
        # Bug fix: a bare ``except:`` used to hide every failure (including
        # checkpoint/IO errors) behind "Wrong model name". Only a missing
        # cfg["models"] entry means the name is wrong.
        print("Wrong model name")
        return

    # Bug fix: the ensemble was never moved to the device before.
    model.to(device)
    model.eval()
    df_test = pd.read_csv(f'{cfg["DATASET_DIR"]}/sample_submission.csv')

    test_dataset = DatasetTransformer_prediction(cfg=cfg, df=df_test)
    # Bug fix: shuffle must be OFF for prediction, otherwise the rows of
    # ``predictions`` no longer line up with the rows of ``df_test`` that
    # they are assigned to below.
    test_dataset_loader = torch.utils.data.DataLoader(
        dataset=test_dataset, batch_size=cfg["BATCH_SIZE"], shuffle=False)

    if verbose and model_name != "ensemble":
        summary(
            model,
            (3, cfg["IMAGE_SIZE"][model_name], cfg["IMAGE_SIZE"][model_name]))

    predictions = get_prediction(model, test_dataset_loader, device,
                                 model_name)

    df_test[cfg["TARGET_COLS"]] = predictions
    df_test[[cfg["IMAGE_COL"]] + cfg["TARGET_COLS"]].to_csv('submission.csv',
                                                            index=False)

    if verbose:
        print(df_test.head(20))
Esempio n. 2
0
def get_model(model_name, num_classes):
    """Instantiate a backbone selected by (substring) name.

    Any name containing 'resnet' maps to Resnet, any containing
    'efficientnet' maps to EfficientNet; every other name is rejected.

    Raises:
        ValueError: if ``model_name`` matches neither family.
    """
    if 'resnet' in model_name:
        return Resnet(model_name, num_classes)
    if 'efficientnet' in model_name:
        return EfficientNet(model_name, num_classes)
    raise ValueError(f'Undefined model name: {model_name}')
Esempio n. 3
0
    def __init__(self, compound_coef):
        """Build an EfficientNet-b<compound_coef> feature extractor.

        The pretrained backbone is loaded and its classification head
        (conv head, final batch-norm, pooling, dropout, fc) is removed so
        that only the convolutional feature layers remain.
        """
        super(EfficientNetFeatures, self).__init__()

        backbone = EfficientNet.from_pretrained(
            f'efficientnet-b{compound_coef}', False)
        # Strip every head module; only the feature trunk is kept.
        for head_attr in ('_conv_head', '_bn1', '_avg_pooling',
                          '_dropout', '_fc'):
            delattr(backbone, head_attr)
        self.model = backbone
Esempio n. 4
0
class EfficientNetMnistTrainer(object):
    """Thin Keras training wrapper around an EfficientNet classifier."""

    def __init__(self, input_shape, output_dim):
        self.model = EfficientNet(input_shape, output_dim)
        optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.1)
        self.model.compile(optimizer=optimizer,
                           loss='categorical_crossentropy',
                           metrics=['acc'])

    def train(
        self,
        x_train,
        t_train,
        x_val,
        t_val,
        epochs,
        batch_size,
    ):
        """Fit the model and store the Keras ``History`` in ``self.history``.

        Bug fix: the ``epochs`` argument was previously ignored — the call
        hard-coded ``epochs=30``.
        """
        self.history = self.model.fit(x_train,
                                      t_train,
                                      batch_size=batch_size,
                                      epochs=epochs,
                                      validation_data=(x_val, t_val))
Esempio n. 5
0
def Efficientnet(model_name, num_classes, test=False):
    """Build an EfficientNet and replace its classifier head.

    Args:
        model_name: 'efficientnet-b0' through 'efficientnet-b7'.
        num_classes: output dimension of the new fully-connected head.
        test: when True, skip loading pretrained weights entirely.

    Returns:
        The model with ``_fc`` replaced by a fresh ``nn.Linear``.
    """
    model = EfficientNet.from_name(model_name)
    if not test:
        # Prefer a local checkpoint; fall back to downloading the weights.
        # Bug fix: comparison with None now uses ``is None`` (PEP 8),
        # not ``== None``.
        if LOCAL_PRETRAINED[model_name] is None:
            state_dict = load_state_dict_from_url(model_urls[model_name],
                                                  progress=True)
        else:
            state_dict = torch.load(LOCAL_PRETRAINED[model_name])
        model.load_state_dict(state_dict)
    # Swap the final layer for one matching the target class count.
    fc_features = model._fc.in_features
    model._fc = nn.Linear(fc_features, num_classes)
    return model
Esempio n. 6
0
def model_from_dataset(dataset, **kwargs):
    """Return the model appropriate for ``dataset``.

    Args:
        dataset: dataset identifier; for 'celeba', ``kwargs['model_name']``
            selects the backbone.
        **kwargs: forwarded to the model constructor.

    Raises:
        ValueError: for an unknown dataset, or an unsupported
            ``model_name`` under 'celeba' (this previously fell through
            and silently returned ``None``).
    """
    if dataset in ('adult', 'credit', 'compass'):
        # All three tabular datasets share the same architecture.
        return FullyConnected(**kwargs)
    elif dataset in ('multi_mnist', 'multi_fashion_mnist', 'multi_fashion'):
        return MultiLeNet(**kwargs)
    elif dataset == 'celeba':
        if 'efficientnet' in kwargs['model_name']:
            return EfficientNet.from_pretrained(**kwargs)
        elif kwargs['model_name'] == 'resnet18':
            return ResNet.from_name(**kwargs)
        raise ValueError("Unknown model name {} for celeba".format(
            kwargs['model_name']))
    else:
        # Bug fix: the message used to say "model name" while formatting
        # the dataset.
        raise ValueError("Unknown dataset {}".format(dataset))
Esempio n. 7
0
 def __init__(self,
              input_shape,
              encode_dim,
              output_dim,
              model='efficient_net',
              loss='emd'):
     """Construct and compile the selected network.

     model: 'efficient_net' (default) or 'wide_res_net'.
     loss:  'emd' (uses the EMD loss function) or
            'categorical_crossentropy'.
     Raises Exception for any other model or loss name.
     """
     self.model = None
     if model == 'efficient_net':
         self.model = EfficientNet(input_shape, encode_dim, output_dim)
     elif model == 'wide_res_net':
         self.model = WideResNet(input_shape, output_dim)
     else:
         raise Exception('no match model name')

     sgd_optimizer = tf.keras.optimizers.SGD(learning_rate=0.1,
                                             momentum=0.1)
     if loss == 'emd':
         chosen_loss = EMD
     elif loss == 'categorical_crossentropy':
         chosen_loss = 'categorical_crossentropy'
     else:
         raise Exception('no match loss function')

     self.model.compile(optimizer=sgd_optimizer,
                        loss=chosen_loss,
                        metrics=['acc'])
def load_model(config, num_classes):
    """Instantiate the network described by ``config['model']``.

    Args:
        config: dict with ``config['model']['type']`` selecting the family
            and ``config['model']['arch']`` selecting the variant.
        num_classes: number of output classes for every architecture.

    Returns:
        The constructed (untrained) network.

    Raises:
        ValueError: unknown model type or unknown architecture.
        NotImplementedError: for the 'assembled' type. (Bug fix: this
            branch was ``pass``, so ``return net`` then crashed with
            UnboundLocalError.)
    """
    model_type = config['model']['type']
    arch = config['model'].get('arch')

    def _bad_arch():
        """Raise the uniform unknown-architecture error."""
        raise ValueError('Unsupported architecture: ' + str(arch))

    if model_type == 'resnet':
        if arch == 'resnet50':
            net = ResNet.resnet50(pretrained=False,
                                  progress=False,
                                  num_classes=num_classes)
        elif arch == 'resnext50':
            net = ResNet.resnext50_32x4d(pretrained=False,
                                         progress=False,
                                         num_classes=num_classes)
        elif arch == 'resnet50d':
            net = ResNetD.resnet50d(pretrained=False,
                                    progress=False,
                                    num_classes=num_classes)
        else:
            _bad_arch()
    elif model_type == 'tresnet':
        tresnet_builders = {
            'tresnetm': TResNet.TResnetM,
            'tresnetl': TResNet.TResnetL,
            'tresnetxl': TResNet.TResnetXL,
        }
        if arch not in tresnet_builders:
            _bad_arch()
        net = tresnet_builders[arch](num_classes=num_classes)
    elif model_type == 'regnet':
        # (depth, w0, wa, wm, group_w, se_on) for each published variant;
        # values are identical to the original per-branch assignments.
        regnet_params = {
            'regnetx-200mf': (13, 24, 36.44, 2.49, 8, False),
            'regnetx-600mf': (16, 48, 36.97, 2.24, 24, False),
            'regnetx-4.0gf': (23, 96, 38.65, 2.43, 40, False),
            'regnetx-6.4gf': (17, 184, 60.83, 2.07, 56, False),
            'regnety-200mf': (13, 24, 36.44, 2.49, 8, True),
            'regnety-600mf': (15, 48, 32.54, 2.32, 16, True),
            'regnety-4.0gf': (22, 96, 31.41, 2.24, 64, True),
            'regnety-6.4gf': (25, 112, 33.22, 2.27, 72, True),
        }
        if arch not in regnet_params:
            _bad_arch()
        depth, w0, wa, wm, group_w, se_on = regnet_params[arch]
        net = RegNet.RegNet({
            'depth': depth,
            'w0': w0,
            'wa': wa,
            'wm': wm,
            'group_w': group_w,
            'se_on': se_on,
            'num_classes': num_classes,
        })
    elif model_type == 'resnest':
        if arch == 'resnest50':
            net = ResNest.resnest50(pretrained=False, num_classes=num_classes)
        elif arch == 'resnest101':
            net = ResNest.resnest101(pretrained=False, num_classes=num_classes)
        else:
            _bad_arch()
    elif model_type == 'efficient':
        if arch not in ('b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6'):
            _bad_arch()
        # Factory names follow the pattern efficientnet_b<N>.
        net = getattr(EfficientNet, 'efficientnet_' + arch)(
            pretrained=False, num_classes=num_classes)
    elif model_type == 'assembled':
        raise NotImplementedError(
            "'assembled' model type is not implemented")
    elif model_type == 'shufflenet':
        if arch not in ('v2_x0_5', 'v2_x1_0', 'v2_x1_5', 'v2_x2_0'):
            _bad_arch()
        # Factory names follow the pattern shufflenet_v2_x<scale>.
        net = getattr(ShuffleNetV2, 'shufflenet_' + arch)(
            pretrained=False, progress=False, num_classes=num_classes)
    elif model_type == 'mobilenet':
        if arch not in ('small_075', 'small_100', 'large_075', 'large_100'):
            _bad_arch()
        # Factory names follow the pattern mobilenetv3_<size>_<width>.
        net = getattr(Mobilenetv3, 'mobilenetv3_' + arch)(
            pretrained=False, num_classes=num_classes)
    elif model_type == 'rexnet':
        rexnet_widths = {
            'rexnet1.0x': 1.0,
            'rexnet1.5x': 1.5,
            'rexnet2.0x': 2.0,
        }
        if arch not in rexnet_widths:
            _bad_arch()
        net = ReXNet.rexnet(num_classes=num_classes,
                            width_multi=rexnet_widths[arch])
    else:
        # Bug fix: this message previously said "Unsupported architecture"
        # while reporting the model *type*.
        raise ValueError('Unsupported model type: ' + str(model_type))

    return net
Esempio n. 9
0
    print('loss: %2.3f, top1: %2.3f, top5: %2.3f' %
          (tloss / len(loader), top1 / len(loader), top5 / len(loader)))


# Evaluation transform: resize to 512x512, convert to tensor, and normalize
# with the standard ImageNet channel statistics.
img_tfs = tfs.Compose([
    tfs.Resize((512, 512)),
    tfs.ToTensor(),
    tfs.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
])

if __name__ == '__main__':
    # net = Classifier()
    '''
    net = resnet50(True)
    '''
    # Build an EfficientNet-b0 from scratch; if a checkpoint exists at
    # args.model, restore its weights.
    net = EfficientNet.from_name('efficientnet-b0')
    if os.path.exists(args.model):
        net.load_state_dict(torch.load(args.model))
    imgnet = ImageFolder(args.data, transform=img_tfs)
    # NOTE(review): shuffle=True on an evaluation loader is harmless for
    # aggregate metrics but looks unintentional — confirm.
    loader = DataLoader(imgnet,
                        args.batch_size,
                        shuffle=True,
                        num_workers=6,
                        pin_memory=True)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), args.learning_rate)
    #optimizer=torch.optim.SGD(net.parameters(),args.learning_rate,args.momentum,weight_decay=args.weight_decay)
    '''
    train(loader,net,criterion,optimizer,args.epochs)
    '''
    # Only validation is executed; the training call above is disabled.
    validate(loader, net, criterion)
Esempio n. 10
0
from sklearn.metrics import accuracy_score, roc_auc_score
from tqdm import tqdm
import time
from PIL import ImageFile
# Allow PIL to load partially corrupted / truncated image files instead of
# raising an error mid-epoch.
ImageFile.LOAD_TRUNCATED_IMAGES = True

opt = TestOptions().parse(print_options=False)
print("{} from {} model testing on {}".format(opt.arch, opt.source_dataset,
                                              opt.target_dataset))

# Pin the visible GPU before any CUDA context is created.
gpu_id = opt.gpu_id
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
use_cuda = torch.cuda.is_available()
print("GPU device %d:" % (gpu_id), use_cuda)

# Build the network from its architecture name; weights are restored below
# only when --resume is given.
model = EfficientNet.from_name(opt.arch, num_classes=opt.classes)

if opt.resume:
    pretrained = opt.resume
    print("=> using pre-trained model '{}'".format(pretrained))
    model.load_state_dict(torch.load(pretrained)['state_dict'])

# NOTE(review): assumes CUDA is available; this will fail on CPU-only hosts.
model.to('cuda')
cudnn.benchmark = True
print('Total params: %.2fM' % (sum(p.numel()
                                   for p in model.parameters()) / 1000000.0))

criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)

def fit(data, fold=None, log=True):
    """Train an EfficientNet-b0 on one fold and return the trained model.

    Relies on module-level globals: ``device``, ``FLAGS`` (hyper-parameters),
    ``get_datasets``, ``train_one_epoch``, ``val_one_epoch``, ``save_upload``,
    ``DataLoader``, ``GradScaler`` and ``EfficientNet``.

    Args:
        data: raw dataset, passed through to get_datasets().
        fold: optional fold index, forwarded to save_upload() for naming.
        log: kept for the (currently commented-out) neptune logging hooks.

    Returns:
        The trained model.
    """

    best_score = 0.0
    # NOTE(review): this local ``model`` shadows any module-level one;
    # ``device`` comes from module scope.
    model = EfficientNet("tf_efficientnet_b0_ns").to(device)
    # model.load_state_dict(
    #     torch.load("/content/siim-isic_efficientnet_b0_2.ckpt")[
    #         "model_state_dict"
    #     ]
    # )
    # if log:
    #    neptune.init("utsav/SIIM-ISIC", api_token=NEPTUNE_API_TOKEN)
    #    neptune.create_experiment(
    #        FLAGS["exp_name"],
    #        exp_description,
    #        params=FLAGS,
    #        upload_source_files="*.txt",
    #    )

    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=FLAGS["learning_rate"],
        weight_decay=FLAGS["weight_decay"],
    )

    # Halve the LR when the monitored quantity plateaus (stepped below on
    # the scaled validation error, mode="min").
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        factor=0.5,
        cooldown=0,
        mode="min",
        patience=3,
        verbose=True,
        min_lr=1e-8,
    )

    datasets = get_datasets(data)

    # sampler
    # labels_vcount = y_train["target"].value_counts()
    # class_counts = [
    #     labels_vcount[0].astype(np.float32),
    #     labels_vcount[1].astype(np.float32),
    # ]
    # num_samples = sum(class_counts)
    # class_weights = [
    #     num_samples / class_counts[i] for i in range(len(class_counts))
    # ]
    # weights = [
    #     class_weights[y_train["target"].values[i]]
    #     for i in range(int(num_samples))
    # ]
    # sampler = WeightedRandomSampler(
    #     torch.DoubleTensor(weights), int(num_samples)
    # )

    # loaders
    train_loader = DataLoader(
        datasets["train"],
        batch_size=FLAGS["batch_size"],
        num_workers=FLAGS["num_workers"],
        shuffle=True,  # sampler=sampler,
        pin_memory=True,
    )
    # Validation uses a doubled batch size (no gradients held) and
    # drop_last=True.
    val_loader = DataLoader(
        datasets["valid"],
        batch_size=FLAGS["batch_size"] * 2,
        shuffle=False,
        num_workers=FLAGS["num_workers"],
        drop_last=True,
    )

    # Mixed-precision gradient scaler for AMP training.
    scaler = GradScaler()
    # train loop
    for epoch in range(0, FLAGS["num_epochs"]):

        print("-" * 27 + f"Epoch #{epoch+1} started" + "-" * 27)

        train_loss = train_one_epoch(
            train_loader,
            model,
            optimizer,
            epoch,
            scheduler=None,
            scaler=scaler,
            log=log,
        )

        print(f"\nAverage loss for epoch #{epoch+1} : {train_loss:.5f}")
        val_output = val_one_epoch(val_loader, model)
        val_loss, auc_score, roc_plot, hist, error_scaled = val_output
        # Plateau scheduler steps on the scaled validation error.
        scheduler.step(error_scaled)

        # logs
        # if log:
        #     neptune.log_metric("AUC/val", auc_score)
        #     neptune.log_image("ROC/val", roc_plot)
        #     neptune.log_metric("Loss/val", val_loss)
        #     neptune.log_image("hist/val", hist)

        # checkpoint+upload
        # Save when a new best is reached OR the score is within 0.025 of
        # the best so far (keeps near-best epochs too).
        if (auc_score > best_score) or (best_score - auc_score < 0.025):
            if auc_score > best_score:
                best_score = auc_score
            save_upload(
                model,
                optimizer,
                best_score,
                epoch,
                fold,
                exp_name=FLAGS["exp_name"],
            )

        print("-" * 28 + f"Epoch #{epoch+1} ended" + "-" * 28)

    # if log:
    #    neptune.stop()

    return model
Esempio n. 12
0
def initialize_model(model_name,
                     num_classes,
                     learning_rate,
                     use_pretrained=True):
    """Create a model plus its optimizer, loss and input size for fine-tuning.

    Args:
        model_name: one of the supported torchvision / EfficientNet names
            (e.g. "resnet101", "densenet161", "mobilenet", "inception",
            "efficientnet-b3").
        num_classes: size of the replacement classification head.
        learning_rate: optimizer learning rate.
        use_pretrained: load ImageNet weights when True.

    Returns:
        (model, optimizer, criterion, input_size) tuple.

    Terminates the process (SystemExit via ``exit()``) on an unknown model
    name, matching the original behavior.
    """

    def _with_adam(model, input_size):
        # All torchvision variants below share Adam + cross-entropy.
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        return model, optimizer, nn.CrossEntropyLoss(), input_size

    # ResNet-family models: classifier lives in `.fc`; 224x224 input.
    # Attribute names are looked up lazily so an invalid model_name never
    # touches `models`.
    resnet_arch = {
        "resnet101": "resnet101",
        "resnet152": "resnet152",
        "resnet101-next": "resnext101_32x8d",
        "resnet50-next": "resnext50_32x4d",
        "wide-resnet101-2": "wide_resnet101_2",
    }
    if model_name in resnet_arch:
        model = getattr(models, resnet_arch[model_name])(
            pretrained=use_pretrained)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        return _with_adam(model, 224)

    # DenseNet models: classifier lives in `.classifier`; 224x224 input.
    if model_name in ("densenet161", "densenet201"):
        model = getattr(models, model_name)(pretrained=use_pretrained)
        model.classifier = nn.Linear(model.classifier.in_features,
                                     num_classes)
        return _with_adam(model, 224)

    if model_name == "mobilenet":
        model = models.mobilenet_v2(pretrained=use_pretrained)
        # MobileNetV2's classifier is a Sequential; replace its Linear.
        model.classifier[1] = nn.Linear(model.last_channel, num_classes)
        return _with_adam(model, 224)

    if model_name == "inception":
        # Inception v3 expects 299x299 inputs and has an auxiliary head;
        # both heads must be resized.
        model = models.inception_v3(pretrained=use_pretrained)
        model.AuxLogits.fc = nn.Linear(model.AuxLogits.fc.in_features,
                                       num_classes)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        return _with_adam(model, 299)

    if model_name == "efficientnet-b3":
        # EfficientNet is the only variant trained with RMSprop here.
        model = EfficientNet.from_pretrained(model_name)
        model._fc = nn.Linear(model._fc.in_features, num_classes)
        optimizer = optim.RMSprop(model.parameters(), lr=learning_rate)
        return model, optimizer, nn.CrossEntropyLoss(), 224

    print("Invalid model name, exiting...")
    exit()
Esempio n. 13
0
def main_train(cfg, model_name, verbose=False):
    """Train a single model ("resnet" or "efficientnet") end to end.

    Splits the data into train/validation, builds transforms and loaders,
    trains for cfg["EPOCHS"] epochs with Adam + ReduceLROnPlateau (on the
    validation loss), checkpoints every epoch, and displays the train and
    validation curves at the end.

    Args:
        cfg: configuration dict (keys used: "TRAIN_VALIDATION_FRAC",
            "IMAGE_SIZE", "BATCH_SIZE", "LR", "WEIGHT_DECAY", "EPOCHS").
        model_name: "resnet" or "efficientnet"; anything else aborts.
        verbose: print the model summary and scheduler LR messages.
    """
    device = get_device()

    data = get_data(cfg=cfg)
    if data is None:
        # No data available; nothing to train on.
        return
    train_data = data.sample(frac=cfg["TRAIN_VALIDATION_FRAC"])
    validation_data = data.drop(train_data.index)

    # Only the training split gets data augmentation.
    train_transform = get_transform(image_size=cfg["IMAGE_SIZE"][model_name],
                                    model_name=model_name,
                                    cfg=cfg,
                                    augmented=True)
    validation_transform = get_transform(
        image_size=cfg["IMAGE_SIZE"][model_name],
        model_name=model_name,
        cfg=cfg,
    )

    train_dataset = DatasetTransformer(train_data,
                                       transform=train_transform,
                                       cfg=cfg)
    validation_dataset = DatasetTransformer(validation_data,
                                            transform=validation_transform,
                                            cfg=cfg)

    train_dataset_loader = torch.utils.data.DataLoader(
        dataset=train_dataset, batch_size=cfg["BATCH_SIZE"], shuffle=True)
    validation_dataset_loader = torch.utils.data.DataLoader(
        dataset=validation_dataset, batch_size=cfg["BATCH_SIZE"], shuffle=True)

    # (Dead ``pass`` statement removed from the resnet branch.)
    if model_name == "resnet":
        model = Resnet(cfg=cfg, pretrained=True)
    elif model_name == "efficientnet":
        model = EfficientNet(cfg=cfg, pretrained=True)
    else:
        print("Wrong model name")
        return

    model.to(device)

    if verbose:
        summary(
            model,
            (3, cfg["IMAGE_SIZE"][model_name], cfg["IMAGE_SIZE"][model_name]))

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=cfg["LR"],
                                 weight_decay=cfg["WEIGHT_DECAY"])

    # Halve the LR when validation loss stops improving by > 0.02.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                           patience=3,
                                                           factor=0.5,
                                                           verbose=verbose,
                                                           mode='min',
                                                           threshold=2 * 1e-2)

    loss = nn.BCELoss()

    train_updater = GraphUpdater(type="Train")
    validation_updater = GraphUpdater(type="Validation")
    model_checkpoint = ModelCheckpoint(model)

    print(f"Training on {len(train_dataset)} images")

    for t in range(cfg["EPOCHS"]):
        print(f'Epoch: {t}')

        train_loss, train_auc = train(model=model,
                                      data_loader=train_dataset_loader,
                                      loss_function=loss,
                                      optimizer=optimizer,
                                      device=device)

        train_updater.update(**{"loss": train_loss, "accuracy": train_auc})

        print(f'\tTraining step: Loss: {train_loss}, AUC: {train_auc}',
              end='\n')

        val_loss, val_auc = test(model=model,
                                 data_loader=validation_dataset_loader,
                                 loss_function=loss,
                                 device=device)
        validation_updater.update(**{"loss": val_loss, "accuracy": val_auc})

        print(f'\tValidation step: Loss: {val_loss}, AUC: {val_auc}', end='\n')

        # A full checkpoint is written every epoch regardless of quality;
        # model_checkpoint separately tracks the best validation loss.
        torch.save(model.state_dict(), f'checkpoint_epoch_{t}.pth')
        print('\tModel saved')

        model_checkpoint.update(loss=val_loss)
        scheduler.step(val_loss)

    train_updater.display()
    validation_updater.display()
Esempio n. 14
0
from utils.aug import data_augment, rand_bbox
from utils.train_utils import save_checkpoint, adjust_learning_rate

from PIL import ImageFile
# Allow PIL to load truncated/corrupted images without raising mid-epoch.
ImageFile.LOAD_TRUNCATED_IMAGES = True
from options.base import BaseOptions

opt = BaseOptions().parse(print_options=False)
#print("{} from {} model testing on {}".format(opt.arch, opt.source_dataset, opt.target_dataset))

# Restrict CUDA to the requested GPU before any context is created.
gpu_id = opt.gpu_id
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
use_cuda = torch.cuda.is_available()
print("GPU device %d:" %(gpu_id), use_cuda)

# Dropout and drop-connect rates are overridden from the command line.
model = EfficientNet.from_name(opt.arch, num_classes=opt.classes,
                              override_params={'dropout_rate': opt.dropout, 'drop_connect_rate': opt.dropconnect})
    
# NOTE(review): assumes CUDA is available; fails on CPU-only hosts.
model.to('cuda')
cudnn.benchmark = True
best_acc = 0

# Training augmentation: the custom data_augment runs first (on the PIL
# image), then resize/flip/tensor/normalize with ImageNet statistics.
data_dir = opt.source_dataset
train_dir = os.path.join(data_dir, 'train')
train_aug = transforms.Compose([
    transforms.Lambda(lambda img: data_augment(img, opt)),
    transforms.Resize(opt.size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
#     transforms.RandomErasing(p=0.3, scale=(0.02, 0.10), ratio=(0.3, 3.3), value=0, inplace=True),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
Esempio n. 15
0
# Experiment hyper-parameters; several string values ("focal", "AdamW",
# "ReduceLROnPlateau") appear to be descriptive labels consumed elsewhere.
FLAGS["num_workers"] = 8
FLAGS["learning_rate"] = 4e-4
FLAGS["num_epochs"] = 40
FLAGS["weight_decay"] = 1e-4
FLAGS["log_steps"] = 20
FLAGS["img_size"] = IMG_SIZE
FLAGS["loss"] = "focal"
FLAGS["optimizer"] = "AdamW"
FLAGS["scheduler"] = "ReduceLROnPlateau"
FLAGS["exp_name"] = "enet_b0"
FLAGS["fold"] = [0]  # , 1, 2, 3, 4]
FLAGS["val_freq"] = 1
FLAGS["num_cores"] = 8
FLAGS["seed"] = 42

# Build the model once on CPU and wrap it for torch_xla multiprocessing:
# each XLA process later moves its own copy to a TPU core.
model_cpu = EfficientNet("tf_efficientnet_b0_ns")
WRAPPED_MODEL = xmp.MpModelWrapper(model_cpu)
SERIAL_EXEC = xmp.MpSerialExecutor()

for fold_no in FLAGS["fold"]:
    X_train = df_train[df_train["fold"] != fold_no][[
        col for col in df_train.columns if col != "target"
    ]]
    X_val = df_train[df_train["fold"] == fold_no][[
        col for col in df_train.columns if col != "target"
    ]]
    y_train = df_train[df_train["fold"] != fold_no][[
        col for col in df_train.columns if col == "target"
    ]]
    y_val = df_train[df_train["fold"] == fold_no][[
        col for col in df_train.columns if col == "target"
Esempio n. 16
0
 def __init__(self, input_shape, output_dim):
     """Create the EfficientNet classifier and compile it with SGD
     (lr=0.1, momentum=0.1) + categorical cross-entropy, tracking accuracy."""
     self.model = EfficientNet(input_shape, output_dim)
     sgd_opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.1)
     self.model.compile(optimizer=sgd_opt,
                        loss='categorical_crossentropy',
                        metrics=['acc'])