Ejemplo n.º 1
0
    def features(self) -> Tuple[nn.Module, nn.Module, int, int]:
        """Build a ResNeXt-101 64x4d backbone split into feature and hidden stages.

        Returns a tuple of (feature extractor, hidden stage, feature channel
        count, hidden channel count). The early feature stages are frozen.
        """
        # pretrainedmodels expects the string "imagenet" (or None), not a bool.
        weights = "imagenet" if self._pretrained is True else None

        backbone = pretrainedmodels.resnext101_64x4d(pretrained=weights)
        stages = list(backbone.children())

        # All children except the last two form the convolutional feature
        # extractor; the second-to-last child is kept as the "hidden" stage.
        feature_stages = stages[:-2]
        hidden = stages[-2]

        # Freeze the early stages (indices 0..4) so only later layers train.
        for index, stage in enumerate(feature_stages):
            if index <= 4:
                for param in stage.parameters():
                    param.requires_grad = False

        # Both the feature extractor and the hidden stage emit 2048 channels.
        return nn.Sequential(*feature_stages), hidden, 2048, 2048
Ejemplo n.º 2
0
 def __init__(self, num_classes):
     """Wrap a pretrained ResNeXt-101 64x4d backbone with a fresh classifier head."""
     super(ResNext, self).__init__()
     self.name = "ResNext"
     backbone = model_zoo.resnext101_64x4d()
     # Propagate the backbone's normalization statistics to the shared config.
     cfg.mean = backbone.mean
     cfg.std = backbone.std
     self.features = backbone.features
     # 2048-dim backbone features -> num_classes logits.
     self.fc = nn.Linear(2048, num_classes)
Ejemplo n.º 3
0
    def __init__(self,
                 num_classes=1,
                 num_filters=32,
                 pretrained=True,
                 is_deconv=False):
        """U-Net-style decoder on top of a ResNeXt-101 64x4d encoder.

        :param num_classes: number of output channels of the final 1x1 conv
        :param num_filters: base channel width of the decoder blocks
        :param pretrained:
            False - no pre-trained network is used
            True  - encoder is pre-trained
            NOTE(review): this docstring originally said "resnet34", but the
            encoder built below is resnext101_64x4d; also the flag is not
            forwarded to the encoder constructor -- confirm intent.
        :is_deconv:
            False: bilinear interpolation is used in decoder
            True: deconvolution is used in decoder
        """
        super().__init__()
        self.num_classes = num_classes

        # ImageNet normalization statistics expected by the encoder.
        self.mean = (0.485, 0.456, 0.406)
        self.std = (0.229, 0.224, 0.225)

        self.pool = nn.MaxPool2d(2, 2)

        self.encoder = resnext101_64x4d()

        self.relu = nn.ReLU(inplace=True)

        # Stem: features[0..2] followed by our own 2x2 max-pool. Note that
        # features[3] is deliberately skipped and replaced by self.pool.
        self.conv1 = nn.Sequential(self.encoder.features[0],
                                   self.encoder.features[1],
                                   self.encoder.features[2],
                                   self.pool)

        # Remaining encoder stages, reused below as skip connections.
        self.conv2 = self.encoder.features[4]

        self.conv3 = self.encoder.features[5]

        self.conv4 = self.encoder.features[6]

        self.conv5 = self.encoder.features[7]

        # Decoder: the channel sums below (2048 + ..., 1024 + ..., 512 + ...,
        # 256 + ...) reflect concatenation with the conv5..conv2 skip outputs.
        self.center = DecoderBlockV2(2048, num_filters * 8 * 2, num_filters * 8, is_deconv)

        self.dec5 = DecoderBlockV2(2048 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
        self.dec4 = DecoderBlockV2(1024 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
        self.dec3 = DecoderBlockV2(512 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)
        self.dec2 = DecoderBlockV2(256 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)
        self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
        self.dec0 = ConvRelu(num_filters, num_filters)
        # Final 1x1 projection to the requested number of classes.
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
Ejemplo n.º 4
0
    def __init__(self,
                 feature_cascade=(512, 256, 64, 32),
                 classes=262,
                 training=None):
        """Colorization classifier: ResNeXt-101 64x4d encoder + conv decoder.

        :param feature_cascade: output channel widths of the four decoder convs
        :param classes: number of output classes of the final conv
        :param training: accepted but unused here -- NOTE(review): confirm
            whether any caller relies on this parameter.
        """
        super(ResNextColorizeClassifier, self).__init__()
        fc = feature_cascade
        self.upsample = nn.Upsample(scale_factor=2)
        # Encoder channel widths at the tapped stages, deepest first
        # (matched against ft4..ft1 below).
        fe = (1024, 512, 256, 64)

        # Weights are not loaded (pretrained=None). The first conv's RGB
        # kernels are summed over the input-channel dim so the network
        # accepts single-channel input (e.g. a grayscale/luminance image).
        resnet = pretrainedmodels.resnext101_64x4d(pretrained=None)
        resnet.features[0].weight = nn.Parameter(
            resnet.features[0].weight.sum(dim=1).unsqueeze(1).data)
        res_children = list(resnet.features.children())

        self.ft1 = nn.Sequential(*res_children[0:3])  #64 filters,
        self.ft2 = nn.Sequential(*res_children[3:5])  # 256 filters
        self.ft3 = res_children[5]  # 512 filters
        self.ft4 = res_children[6]  # 1024 filters

        # Decoder head for the deepest features (fe[0] = 1024 channels in).
        self.bn4 = nn.BatchNorm2d(fe[0])
        self.conv4 = nn.Conv2d(fe[0],
                               fc[0],
                               kernel_size=3,
                               stride=1,
                               padding=1)

        # Each subsequent step expects the previous decoder output
        # concatenated with the matching encoder skip (fc[i] + fe[i+1]).
        sum3 = fc[0] + fe[1]
        self.bn3 = nn.BatchNorm2d(sum3)
        self.conv3 = nn.Conv2d(sum3, fc[1], kernel_size=3, stride=1, padding=1)

        sum2 = fc[1] + fe[2]
        self.bn2 = nn.BatchNorm2d(sum2)
        self.conv2 = nn.Conv2d(sum2, fc[2], kernel_size=3, stride=1, padding=1)

        sum1 = fc[2] + fe[3]
        self.bn1 = nn.BatchNorm2d(sum1)
        self.conv1 = nn.Conv2d(sum1, fc[3], kernel_size=3, stride=1, padding=1)

        # Final classification conv over the last decoder width.
        self.bn0 = nn.BatchNorm2d(fc[3])
        self.conv0 = nn.Conv2d(fc[3],
                               classes,
                               kernel_size=3,
                               stride=1,
                               padding=1)
Ejemplo n.º 5
0
def resnext101_64x4d_(pretrained='imagenet', **kwargs):
    """Return a ResNeXt-101 64x4d adapted to a 10-class task.

    :param pretrained: weight set forwarded to ``pretrainedmodels``
        ('imagenet' or None).
    :param kwargs: extra keyword arguments forwarded to the backbone
        constructor (previously accepted but silently ignored -- that was a
        bug; they are now passed through).
    :return: the adapted model.
    """
    model = pretrainedmodels.resnext101_64x4d(pretrained=pretrained, **kwargs)
    # Non-square pooling window for non-square input feature maps.
    model.avg_pool = nn.AvgPool2d((2, 5), stride=(2, 5))
    # Re-head for 10 classes; read the feature width from the existing head
    # (2048 == 512 * 4) instead of hard-coding it.
    model.last_linear = nn.Linear(model.last_linear.in_features, 10)
    return model
Ejemplo n.º 6
0
def build_model(phase, num_classes=2):
    """Assemble an S3FD detector on top of a ResNeXt-101 64x4d backbone.

    NOTE(review): ``num_classes`` is currently unused -- confirm whether it
    was meant to be forwarded to ``multibox()`` or ``S3FD``.
    """
    backbone = list(pm.resnext101_64x4d().children())[0]
    return S3FD(phase, backbone, multibox())
Ejemplo n.º 7
0
 def __init__(self, pretrained="imagenet"):
     """ResNeXt-101 64x4d backbone with a single-logit head (binary output)."""
     super(ResNext101_64x4d, self).__init__()
     self.model = pretrainedmodels.resnext101_64x4d(pretrained=pretrained)
     # 2048-dim pooled features -> one logit.
     self.l0 = nn.Linear(2048, 1)

# In[15]:


batch_size = 10
num_workers = 4

# NOTE(review): neither loader shuffles or uses a sampler -- confirm that
# deterministic ordering of the training data is intentional.
train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True)
test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=num_workers, pin_memory=True)


# In[16]:


# ResNeXt-101 64x4d with the library's default weight setting.
model = pretrainedmodels.resnext101_64x4d()


# In[17]:


# Replace the average pooling with a (5, 10) window -- presumably matching
# the conv output size for this dataset's input resolution; TODO confirm.
model.avg_pool = nn.AvgPool2d((5,10))


# In[18]:


# Swap the ImageNet head for NUM_CLASSES outputs, preserving the input width.
model.last_linear = nn.Linear(model.last_linear.in_features, NUM_CLASSES)


# In[19]:
Ejemplo n.º 9
0
def waveResnext101_64x4d(pretrained='imagenet', num_classes=41):
    """Build a WaveResnext model from the first child of ResNeXt-101 64x4d.

    :param pretrained: weight set forwarded to ``pretrainedmodels``.
    :param num_classes: number of output classes for the WaveResnext head.
    """
    backbone = pretrainedmodels.resnext101_64x4d(pretrained=pretrained)
    # Only the first child module (the feature stack) is reused.
    feature_stack = list(backbone.children())[0]
    return WaveResnext(feature_stack, num_classes)
Ejemplo n.º 10
0
def Model_builder(configer):
    """Construct a classifier from a config object and place it on device(s).

    Reads the architecture name, class count, pretraining flag and
    parallelism options from ``configer``, swaps the ImageNet head for one
    sized to the target dataset, and wraps the model in DataParallel /
    DistributedDataParallel when configured.

    :param configer: project config object exposing ``model``,
        ``dataset_cfg`` and ``train_cfg`` mappings.
    :return: the constructed model, moved to ``device``.
    :raises ImportError: if the configured architecture is not supported.
    """
    model_name = configer.model['name']
    No_classes = configer.dataset_cfg["id_cfg"]["num_classes"]
    model_pretrained = configer.model['pretrained']
    model_dataparallel = configer.model["DataParallel"]
    model_gpu_replica = configer.model["Multi_GPU_replica"]
    gpu_ids = configer.train_cfg["gpu"]

    # Every pretrainedmodels backbone below shares the exact same head-swap
    # recipe, so a dispatch table replaces the original repetitive
    # if/elif ladder.
    backbones = {
        "Inceptionv3": PM.inceptionv3,
        "Xception": PM.xception,
        "VGG_19": PM.vgg19,
        "Resnet18": PM.resnet18,
        "Resnet50": PM.resnet50,
        "Resnet101": PM.resnet101,
        "Resnet152": PM.resnet152,
        "Resnet34": PM.resnet34,
        "Densenet121": PM.densenet121,
        "ResNeXt101-32": PM.resnext101_32x4d,
        "ResNeXt101-64": PM.resnext101_64x4d,
    }

    if model_name in backbones:
        model = backbones[model_name](num_classes=1000,
                                      pretrained=model_pretrained)
        # Replace the 1000-way ImageNet head with one sized for this dataset.
        model.last_linear = nn.Linear(model.last_linear.in_features,
                                      No_classes)
    elif model_name == "MobilenetV2":
        # MobileNetV2 is a local implementation that takes the class count
        # directly, so no head swap is needed.
        model = MobileNetV2(n_class=No_classes)
    else:
        raise ImportError("Model Architecture not supported")

    # Performing Data Parallelism if configured

    if model_dataparallel:
        model = torch.nn.DataParallel(model.to(device), device_ids=gpu_ids)
    elif model_gpu_replica:
        # NOTE(review): world_size=1 with rank=1 looks inconsistent (ranks
        # are 0-based) -- confirm this distributed setup ever worked.
        torch.distributed.init_process_group(backend='nccl',
                                             world_size=1,
                                             rank=1)
        model = torch.nn.DistributedDataParallel(model.to(device),
                                                 device_ids=gpu_ids)
    else:
        model = model.to(device)

    print('---------- Model Loaded')

    return model
Ejemplo n.º 11
0
# Random samplers over the full index range of each directory listing.
train_sampler = SubsetRandomSampler(list(range(len(os.listdir('train')))))
valid_sampler = SubsetRandomSampler(list(range(len(os.listdir('test')))))
batch_size = 10
num_workers = 2
# prepare data loaders (combine dataset and sampler)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=batch_size,
                                           sampler=train_sampler,
                                           num_workers=num_workers)
# NOTE(review): valid_sampler is created above but never used -- confirm
# whether the test loader was meant to take sampler=valid_sampler.
test_loader = torch.utils.data.DataLoader(test_set,
                                          batch_size=10,
                                          num_workers=num_workers)

# get pretrained resnet
# Freeze the whole backbone; only the freshly created last_linear below
# (the replaced avg_pool has no parameters) remains trainable.
model_conv = pretrainedmodels.resnext101_64x4d()
for param in model_conv.parameters():
    param.requires_grad = False
model_conv.avg_pool = nn.AvgPool2d((5, 10))

model_conv.last_linear = nn.Linear(model_conv.last_linear.in_features, 5005)

# NOTE(review): BCEWithLogitsLoss over a 5005-way head expects multi-label /
# one-hot float targets -- confirm the dataset provides that format.
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model_conv.parameters(), lr=0.01)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)

n_epochs = 20
for epoch in range(1, n_epochs + 1):
    print(time.ctime(), 'Epoch:', epoch)

    train_loss = []
Ejemplo n.º 12
0
def main(train_root, train_csv, val_root, val_csv, test_root, test_csv,
         epochs, aug, model_name, batch_size, num_workers, val_samples,
         test_samples, early_stopping_patience, limit_data, images_per_epoch,
         split_id, _run):
    """Train and evaluate a melanoma classifier with test-time augmentation.

    Builds the model named by ``model_name`` (head replaced with 2 outputs),
    trains with SGD + ReduceLROnPlateau and early stopping on validation AUC,
    then evaluates the best and last checkpoints on the test set (with and
    without augmentation) and appends a results row to a shared CSV.

    :param aug: augmentation config dict; ``size``/``mean``/``std`` are
        filled in per-model below.
    :param _run: sacred run object (used for experiment name/id).
    :return: tuple of (no-augmentation test AUC, augmented test AUC).
    """
    assert(model_name in ('inceptionv4', 'resnet152', 'densenet161',
                          'senet154', 'pnasnet5large', 'nasnetalarge',
                          'xception', 'squeezenet', 'resnext', 'dpn',
                          'inceptionresnetv2', 'mobilenetv2'))

    # Avoid OpenCV thread pools fighting with DataLoader workers.
    cv2.setNumThreads(0)

    AUGMENTED_IMAGES_DIR = os.path.join(fs_observer.dir, 'images')
    CHECKPOINTS_DIR = os.path.join(fs_observer.dir, 'checkpoints')
    BEST_MODEL_PATH = os.path.join(CHECKPOINTS_DIR, 'model_best.pth')
    LAST_MODEL_PATH = os.path.join(CHECKPOINTS_DIR, 'model_last.pth')
    RESULTS_CSV_PATH = os.path.join('results', 'results.csv')
    EXP_NAME = _run.meta_info['options']['--name']
    EXP_ID = _run._id

    for directory in (AUGMENTED_IMAGES_DIR, CHECKPOINTS_DIR):
        os.makedirs(directory)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Model construction: each branch replaces the classifier head with a
    # 2-way output and records the input size / normalization the backbone
    # expects into the augmentation config.
    if model_name == 'inceptionv4':
        model = ptm.inceptionv4(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = 299
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'resnet152':
        model = models.resnet152(pretrained=True)
        model.fc = nn.Linear(model.fc.in_features, 2)
        aug['size'] = 224
        aug['mean'] = [0.485, 0.456, 0.406]
        aug['std'] = [0.229, 0.224, 0.225]
    elif model_name == 'densenet161':
        model = models.densenet161(pretrained=True)
        model.classifier = nn.Linear(model.classifier.in_features, 2)
        aug['size'] = 224
        aug['mean'] = [0.485, 0.456, 0.406]
        aug['std'] = [0.229, 0.224, 0.225]
    elif model_name == 'senet154':
        model = ptm.senet154(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'squeezenet':
        model = ptm.squeezenet1_1(num_classes=1000, pretrained='imagenet')
        model.last_conv = nn.Conv2d(
            512, 2, kernel_size=(1, 1), stride=(1, 1))
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'pnasnet5large':
        model = ptm.pnasnet5large(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'nasnetalarge':
        model = ptm.nasnetalarge(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'xception':
        model = ptm.xception(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'dpn':
        # DPN's head is a 1x1 conv, not a linear layer.
        model = ptm.dpn131(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Conv2d(model.last_linear.in_channels, 2,
                                      kernel_size=1, bias=True)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'resnext':
        model = ptm.resnext101_64x4d(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'inceptionresnetv2':
        model = ptm.inceptionresnetv2(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2)
        aug['size'] = model.input_size[1]
        aug['mean'] = model.mean
        aug['std'] = model.std
    elif model_name == 'mobilenetv2':
        model = MobileNetV2()
        model.load_state_dict(torch.load('./auglib/models/mobilenet_v2.pth'))
        model.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(model.last_channel, 2),
        )
        aug['size'] = 224
        aug['mean'] = [0.485, 0.456, 0.406]
        aug['std'] = [0.229, 0.224, 0.225]
    model.to(device)

    augs = Augmentations(**aug)
    model.aug_params = aug

    # 'samples' is a small fixed slice used only to visualize augmentations;
    # 'test_no_aug' / 'test_144' are alternative test-time transforms.
    datasets = {
        'samples': CSVDataset(train_root, train_csv, 'image_id', 'melanoma',
                              transform=augs.tf_augment, add_extension='.jpg',
                              limit=(400, 433)),
        'train': CSVDataset(train_root, train_csv, 'image_id', 'melanoma',
                            transform=augs.tf_transform, add_extension='.jpg',
                            random_subset_size=limit_data),
        'val': CSVDatasetWithName(
            val_root, val_csv, 'image_id', 'melanoma',
            transform=augs.tf_transform, add_extension='.jpg'),
        'test': CSVDatasetWithName(
            test_root, test_csv, 'image_id', 'melanoma',
            transform=augs.tf_transform, add_extension='.jpg'),
        'test_no_aug': CSVDatasetWithName(
            test_root, test_csv, 'image_id', 'melanoma',
            transform=augs.no_augmentation, add_extension='.jpg'),
        'test_144': CSVDatasetWithName(
            test_root, test_csv, 'image_id', 'melanoma',
            transform=augs.inception_crop, add_extension='.jpg'),
    }

    dataloaders = {
        'train': DataLoader(datasets['train'], batch_size=batch_size,
                            shuffle=True, num_workers=num_workers,
                            worker_init_fn=set_seeds),
        'samples': DataLoader(datasets['samples'], batch_size=batch_size,
                              shuffle=False, num_workers=num_workers,
                              worker_init_fn=set_seeds),
    }

    # Save a grid of augmented samples for visual inspection.
    save_images(datasets['samples'], to=AUGMENTED_IMAGES_DIR, n=32)
    sample_batch, _ = next(iter(dataloaders['samples']))
    save_image(make_grid(sample_batch, padding=0),
               os.path.join(AUGMENTED_IMAGES_DIR, 'grid.jpg'))

    criterion = nn.CrossEntropyLoss()

    optimizer = optim.SGD(model.parameters(),
                          lr=0.001,
                          momentum=0.9,
                          weight_decay=0.001)

    # LR is reduced when validation loss plateaus (stepped below per epoch).
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1,
                                                     min_lr=1e-5,
                                                     patience=8)
    metrics = {
        'train': pd.DataFrame(columns=['epoch', 'loss', 'acc', 'auc']),
        'val': pd.DataFrame(columns=['epoch', 'loss', 'acc', 'auc'])
    }

    best_val_auc = 0.0
    best_epoch = 0
    epochs_without_improvement = 0
    if images_per_epoch:
        batches_per_epoch = images_per_epoch // batch_size
    else:
        batches_per_epoch = None

    # Training loop with early stopping on validation AUC.
    # NOTE(review): best_val_result is only bound once an epoch beats
    # best_val_auc (0.0); with epochs == 0 the CSV write below would fail.
    for epoch in range(epochs):
        print('train epoch {}/{}'.format(epoch+1, epochs))
        epoch_train_result = train_epoch(
            device, model, dataloaders, criterion, optimizer,
            batches_per_epoch)

        # NOTE(review): DataFrame.append is removed in pandas >= 2.0; this
        # code assumes an older pandas.
        metrics['train'] = metrics['train'].append(
            {**epoch_train_result, 'epoch': epoch}, ignore_index=True)
        print('train', epoch_train_result)

        epoch_val_result, _ = test_with_augmentation(
            model, datasets['val'], device, num_workers, val_samples)

        metrics['val'] = metrics['val'].append(
            {**epoch_val_result, 'epoch': epoch}, ignore_index=True)
        print('val', epoch_val_result)
        print('-' * 40)

        scheduler.step(epoch_val_result['loss'])

        if epoch_val_result['auc'] > best_val_auc:
            best_val_auc = epoch_val_result['auc']
            best_val_result = epoch_val_result
            best_epoch = epoch
            epochs_without_improvement = 0
            # Saves the entire model object (pickled), not just state_dict.
            torch.save(model, BEST_MODEL_PATH)
        else:
            epochs_without_improvement += 1

        if epochs_without_improvement > early_stopping_patience:
            last_val_result = epoch_val_result
            torch.save(model, LAST_MODEL_PATH)
            break

        if epoch == (epochs-1):
            last_val_result = epoch_val_result
            torch.save(model, LAST_MODEL_PATH)

    # Persist per-epoch metrics next to the experiment artifacts.
    for phase in ['train', 'val']:
        metrics[phase].epoch = metrics[phase].epoch.astype(int)
        metrics[phase].to_csv(os.path.join(fs_observer.dir, phase + '.csv'),
                              index=False)

    # Run testing
    # TODO: reduce code repetition
    test_result, preds = test_with_augmentation(
        torch.load(BEST_MODEL_PATH), datasets['test'], device,
        num_workers, test_samples)
    print('[best] test', test_result)

    test_noaug_result, preds_noaug = test_with_augmentation(
        torch.load(BEST_MODEL_PATH), datasets['test_no_aug'], device,
        num_workers, 1)
    print('[best] test (no augmentation)', test_noaug_result)

    test_result_last, preds_last = test_with_augmentation(
        torch.load(LAST_MODEL_PATH), datasets['test'], device,
        num_workers, test_samples)
    print('[last] test', test_result_last)

    test_noaug_result_last, preds_noaug_last = test_with_augmentation(
        torch.load(LAST_MODEL_PATH), datasets['test_no_aug'], device,
        num_workers, 1)
    print('[last] test (no augmentation)', test_noaug_result_last)

    # Save predictions
    preds.to_csv(os.path.join(fs_observer.dir, 'test-aug-best.csv'),
                 index=False, columns=['image', 'label', 'score'])
    preds_noaug.to_csv(os.path.join(fs_observer.dir, 'test-noaug-best.csv'),
                 index=False, columns=['image', 'label', 'score'])
    preds_last.to_csv(os.path.join(fs_observer.dir, 'test-aug-last.csv'),
                 index=False, columns=['image', 'label', 'score'])
    preds_noaug_last.to_csv(os.path.join(fs_observer.dir, 'test-noaug-last.csv'),
                 index=False, columns=['image', 'label', 'score'])

    # TODO: Avoid repetition.
    #       use ordereddict, or create a pandas df before saving
    with open(RESULTS_CSV_PATH, 'a') as file:
        file.write(','.join((
            EXP_NAME,
            str(EXP_ID),
            str(split_id),
            str(best_epoch),
            str(best_val_result['loss']),
            str(best_val_result['acc']),
            str(best_val_result['auc']),
            str(best_val_result['avp']),
            str(best_val_result['sens']),
            str(best_val_result['spec']),
            str(last_val_result['loss']),
            str(last_val_result['acc']),
            str(last_val_result['auc']),
            str(last_val_result['avp']),
            str(last_val_result['sens']),
            str(last_val_result['spec']),
            str(best_val_auc),
            str(test_result['auc']),
            str(test_result_last['auc']),
            str(test_result['acc']),
            str(test_result_last['acc']),
            str(test_result['spec']),
            str(test_result_last['spec']),
            str(test_result['sens']),
            str(test_result_last['sens']),
            str(test_result['avp']),
            str(test_result_last['avp']),
            str(test_noaug_result['auc']),
            str(test_noaug_result_last['auc']),
            str(test_noaug_result['acc']),
            str(test_noaug_result_last['acc']),
            str(test_noaug_result['spec']),
            str(test_noaug_result_last['spec']),
            str(test_noaug_result['sens']),
            str(test_noaug_result_last['sens']),
            str(test_noaug_result['avp']),
            str(test_noaug_result_last['avp']),
            )) + '\n')

    return (test_noaug_result['auc'],
            test_result['auc'],
            )
Ejemplo n.º 13
0
def resnext(groups, width_per_group):
    if groups == 64 and width_per_group == 4:
        return pretrainedmodels.resnext101_64x4d()