Example #1
    def __init__(self,
                 root,
                 csv_path,
                 part='all',
                 input_size=None,
                 transforms=None,
                 train=True,
                 test=False):
        """
        主要目标: 获取所有图片的地址,并根据训练,验证,测试划分数据

        train set:     train = True,  test = False
        val set:       train = False, test = False
        test set:      train = False, test = True

        part = 'all', 'XR_HAND', XR_ELBOW etc.
        用于提取特定部位的数据。
        """

        with open(csv_path, 'rb') as F:
            d = F.readlines()
            if part == 'all':
                # Storage paths of all images; .strip() drops the trailing '\n'
                imgs = [root + str(x, encoding='utf-8').strip()
                        for x in d]
            else:
                imgs = [
                    root + str(x, encoding='utf-8').strip() for x in d
                    if str(x, encoding='utf-8').strip().split('/')[2] == part
                ]

        self.imgs = imgs
        self.train = train
        self.test = test

        self.max_width = 0
        self.max_height = 0

        if transforms is None:

            if self.train and not self.test:
                # The X-ray images here are 1-channel grayscale
                self.transforms = T.Compose([
                    # T.Lambda(logo_filter),
                    SquarePad(),
                    T.Resize(320),
                    T.RandomCrop(320),
                    # T.RandomResizedCrop(300),
                    T.RandomHorizontalFlip(),
                    T.RandomVerticalFlip(),
                    # T.RandomRotation(30),
                    T.ToTensor(),
                    T.Lambda(lambda x: t.cat([
                        x[0].unsqueeze(0), x[0].unsqueeze(0), x[0].unsqueeze(0)
                    ], 0)),  # replicate the single channel to 3 channels
                    T.Normalize(mean=MURA_MEAN_CROP, std=MURA_STD_CROP),
                ])
            else:
                # Validation and test sets share deterministic transforms.
                # The X-ray images here are 1-channel grayscale
                self.transforms = T.Compose([
                    # T.Lambda(logo_filter),
                    SquarePad(),
                    T.Resize(320),
                    T.CenterCrop(320),
                    T.ToTensor(),
                    T.Lambda(lambda x: t.cat([
                        x[0].unsqueeze(0), x[0].unsqueeze(0), x[0].unsqueeze(0)
                    ], 0)),  # replicate the single channel to 3 channels
                    T.Normalize(mean=MURA_MEAN_CROP, std=MURA_STD_CROP),
                ])
        else:
            self.transforms = transforms
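SquarePad is a custom transform that this example references but does not define. A minimal sketch of what it presumably does (zero-pad the shorter side so the image is square before T.Resize, assuming PIL input) could look like this:

import torchvision.transforms.functional as TF

class SquarePad:
    """Hypothetical reconstruction: zero-pad a PIL image to a square so the
    following Resize does not distort the aspect ratio."""
    def __call__(self, img):
        w, h = img.size
        side = max(w, h)
        pad_left = (side - w) // 2
        pad_top = (side - h) // 2
        # padding order expected by TF.pad: (left, top, right, bottom)
        return TF.pad(img, (pad_left, pad_top,
                            side - w - pad_left, side - h - pad_top), fill=0)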
Example #2
def main():
    ## Run 'python ./faceScrub download.py' first to generate the actors/ folder... quite time consuming

    args = parse_args()

    # Create datasets with equal number of pos and neg classes
    ### The next line creates new datasets with randomly selected images from the actors/ folder
    if args.new_datasets:
        train_set, valid_set, test_set = utils.create_datasets(
            args.train, args.valid, args.test, (args.res, args.res),
            args.verbose - 1 if args.verbose > 0 else 0)

    if args.norm:
        # Approximate mean and standard deviation using the train and validation datasets
        images_mean, images_std, _ = utils.image_normalization(
            traindir, validdir, args.verbose)
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomRotation(degrees=(-90, 90)),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.ToTensor(),  # rescale to [0.0, 1.0]
            transforms.Normalize(mean=images_mean, std=images_std)
        ])
        if args.verbose > 2:
            # Normalized Data Visualization
            utils.visualize_normalization(traindir,
                                          images_mean,
                                          images_std,
                                          batch_size=args.batch_size,
                                          filename='./images/normalized_' +
                                          args.suffix + '.png')
    else:
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomRotation(degrees=(-90, 90)),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.ToTensor(),  # rescale to [0.0, 1.0]
        ])

    # Load data from folders
    train_dataset = datasets.ImageFolder(traindir, trans)
    valid_dataset = datasets.ImageFolder(validdir, trans)

    # Samples count
    if args.verbose > 1:
        print('Training samples: \t%d' % len(train_dataset))
        print('Validation samples: \t%d' % len(valid_dataset))

    # Create data loaders
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers)

    # Use GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if args.verbose > 1:
        print("Device to use is", device)

    # Vanilla CNN architecture
    net = model.CNN(args.res).to(device)  # Send CNN to GPU if available
    if args.verbose > 2:
        print('Model summary:')
        summary(net, input_size=(3, args.res, args.res))

    # Loss function and Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.l2)

    # Save training and validation history in a dictionary
    hist = {
        # Lists for train results
        'train_loss': [],  # Stores train loss at each iteration
        'train_loss_epoch': [],  # Stores train loss per epoch
        'train_acc': [],  # Stores train accuracy per epoch
        # Lists for validation results
        'val_loss_epoch': [],  # Stores validation loss per epoch
        'val_acc': [],  # Stores validation accuracy per epoch
        # List learning rate
        'lr_list': [],
        # Test accuracy
        'test_acc': None
    }

    # List to store prediction labels
    train_predict = []
    val_predict = []

    # Training and validation for a fixed number of epochs
    for epoch in range(args.epochs):
        loss_batch = 0.0
        correct = 0
        total = 0

        if args.lr_decay_rate > 0:
            lr = optimizer.param_groups[0]['lr']
            optimizer.param_groups[0]['lr'] = lr - args.lr_decay_rate * lr
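            # i.e. exponential decay: the learning rate is multiplied by
            # (1 - args.lr_decay_rate) once per epoch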

        # Training step
        for i, (images, labels) in enumerate(train_loader, 0):
            # Copy to GPU if available
            images, labels = images.to(device), labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # Forward pass, loss, and parameter update
            output = net(images)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()

            # Storing loss
            hist['train_loss'].append(loss.item())
            loss_batch += loss.item()

            # Storing accuracy
            _, predicted = torch.max(output.data, 1)
            total += len(labels)
            correct += (predicted == labels).sum().item()
            train_predict += (predicted).tolist()

        # Train loss and accuracy per epoch
        hist['train_loss_epoch'].append(loss_batch / (i + 1))
        hist['train_acc'].append(correct / total)

        # Reset variables for validation
        loss_batch = 0.0
        total = 0
        correct = 0

        # Validation step
        for j, (images, labels) in enumerate(valid_loader):
            with torch.no_grad():
                # Copy to GPU if available
                images, labels = images.to(device), labels.to(device)

                output = net(images)

                # Storing Loss
                loss = criterion(output, labels)
                loss_batch += loss.item()

                # Accuracy
                _, predicted = torch.max(output.data, 1)
                total += len(labels)
                correct += (predicted == labels).sum().item()
                val_predict += (predicted).tolist()

        # Validation loss and accuracy per epoch
        hist['val_loss_epoch'].append(loss_batch / (j + 1))
        hist['val_acc'].append(correct / total)
        loss_batch = 0.0
        total = 0
        correct = 0
        hist['lr_list'].append(optimizer.param_groups[0]['lr'])

        # Print results
        if args.verbose:
            print("Epoch %2d -> train_loss: %.5f, train_acc: %.5f | val_loss: %.5f, val_acc: %.5f | lr: %.5f"
                %(epoch+1,hist['train_loss_epoch'][epoch],hist['train_acc'][epoch], \
                    hist['val_loss_epoch'][epoch],hist['val_acc'][epoch],hist['lr_list'][epoch]))

    if args.verbose > 1:
        print('Training complete!\n')

    # Generate and save plots
    if args.verbose > 2:
        utils.plot_loss(hist['train_loss'],
                        hist['train_loss_epoch'],
                        filename='./images/loss_train_' + args.suffix + '.png',
                        scatter=True)
        utils.plot_loss(hist['train_loss_epoch'],
                        hist['val_loss_epoch'],
                        filename='./images/loss_' + args.suffix + '.png')
        utils.plot_accuracy(hist['train_acc'],
                            hist['val_acc'],
                            filename='./images/accuracy_' + args.suffix +
                            '.png')

    ## Measure performance using the Test dataset

    if args.norm:
        test_trans = transforms.Compose([
            transforms.ToTensor(),  # rescale to [0.0, 1.0]
            transforms.Normalize(mean=images_mean, std=images_std)
        ])
    else:
        test_trans = transforms.Compose([
            transforms.ToTensor(),  # rescale to [0.0, 1.0]
        ])

    # Load data from folder
    test_dataset = datasets.ImageFolder(testdir, test_trans)
    # Samples count
    if args.verbose > 1:
        print('Test samples: \t%d' % len(test_dataset))
    # Create data loader
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers)
    # Test data evaluation
    true_labels = test_loader.dataset.targets
    predict_labels = []
    correct = 0
    total = 0
    with torch.no_grad():
        for (images, labels) in test_loader:
            # Copy to GPU if available
            images, labels = images.to(device), labels.to(device)
            output = net(images)
            # Accuracy
            _, predicted = torch.max(output.data, 1)
            total += len(labels)
            predict_labels += predicted.tolist()
            correct += (predicted == labels).sum().item()
    hist['test_acc'] = 100 * correct / total
    if args.verbose > 1:
        print('Accuracy in test set: %3.3f%%' % (hist['test_acc']))

    if args.verbose > 2:
        # Add test accuracy and save history file
        utils.save_history(hist,
                           './images/CNN_history_' + args.suffix + '.csv')

        # Generate report
        utils.build_report(predict_labels, true_labels)

        # Plot and save confusion matrix
        utils.plot_confusion_matrix(predict_labels,
                                    true_labels,
                                    filename='./images/conf_mtx_' +
                                    args.suffix + '.png')

        # Plot and save some mispredicted images
        utils.plot_mispredictions(predict_labels,
                                  true_labels,
                                  test_dataset,
                                  filename='./images/mispredicted_' +
                                  args.suffix + '.png')
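utils.image_normalization, used near the top of this example, is not shown. A sketch of how per-channel mean and std are commonly approximated over an ImageFolder directory (a hypothetical helper, not the original utils code):

import torch
from torchvision import datasets, transforms

def approximate_mean_std(image_dir, batch_size=64):
    """Rough per-channel mean/std over an image folder (sketch)."""
    dataset = datasets.ImageFolder(image_dir, transforms.ToTensor())
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
    n_pixels = 0.0
    channel_sum = torch.zeros(3)
    channel_sq_sum = torch.zeros(3)
    for images, _ in loader:
        # images: (B, 3, H, W), scaled to [0, 1] by ToTensor
        n_pixels += images.numel() / images.size(1)
        channel_sum += images.sum(dim=[0, 2, 3])
        channel_sq_sum += (images ** 2).sum(dim=[0, 2, 3])
    mean = channel_sum / n_pixels
    std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()  # Var[x] = E[x^2] - E[x]^2
    return mean, std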
Example #3
def main():
    global args, best_prec1
    args = parser.parse_args()

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose(
        [
            transforms.RandomResizedCrop(224, scale=(0.3, 1.)),
            transforms.RandomGrayscale(p=0.5),
            transforms.ColorJitter(0.5, 0.5, 0.5, 0.5),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
            normalize])

    val_transform = transforms.Compose(
        [
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            normalize])

    train_dataset = Foundation_Type_Binary(args.train_data, transform=train_transform, mask_buildings=args.mask_buildings)
    val_dataset = Foundation_Type_Binary(args.val_data, transform=val_transform, mask_buildings=args.mask_buildings)

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    model = resnet50(low_dim=args.low_dim)
    model = torch.nn.DataParallel(model).cuda()

    print('Train dataset instances: {}'.format(len(train_loader.dataset)))
    print('Val dataset instances: {}'.format(len(val_loader.dataset)))

    ndata = len(train_dataset)
    if args.nce_k > 0:
        lemniscate = NCEAverage(args.low_dim, ndata, args.nce_k, args.nce_t, args.nce_m).cuda()
        criterion = NCECriterion(ndata).cuda()
    else:
        lemniscate = LinearAverage(args.low_dim, ndata, args.nce_t, args.nce_m).cuda()
        criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                 momentum=args.momentum,
                                 weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            args.epochs = args.start_epoch + args.epochs
            best_prec1 = checkpoint['best_prec1']

            missing_keys, unexpected_keys = model.load_state_dict(checkpoint['state_dict'], strict=False)
            if len(missing_keys) or len(unexpected_keys):
                print('Warning: Missing or unexpected keys found.')
                print('Missing: {}'.format(missing_keys))
                print('Unexpected: {}'.format(unexpected_keys))

            low_dim_checkpoint = checkpoint['lemniscate'].memory.shape[1]
            if low_dim_checkpoint == args.low_dim:
                lemniscate = checkpoint['lemniscate']
            else:
                print('Chosen low dim does not fit checkpoint. Assuming fine-tuning and not loading memory bank.')
            try:
                optimizer.load_state_dict(checkpoint['optimizer'])
            except ValueError:
                print('Training optimizer does not match the checkpoint optimizer. Assuming fine-tuning and initializing the optimizer from scratch.')

            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    if args.evaluate:
        kNN(0, model, lemniscate, train_loader, val_loader, 200, args.nce_t)
        return

    prec1 = NN(0, model, lemniscate, train_loader, val_loader)
    print('Start out precision: {}'.format(prec1))
    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, lemniscate, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = NN(epoch, model, lemniscate, train_loader, val_loader)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'lemniscate': lemniscate,
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best, args.name)
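save_checkpoint is called above but not defined in this excerpt; a common implementation, sketched after the PyTorch ImageNet example (the filenames are hypothetical):

import shutil
import torch

def save_checkpoint(state, is_best, name, filename='checkpoint.pth.tar'):
    """Persist training state; keep a separate copy of the best model (sketch)."""
    path = '{}_{}'.format(name, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, '{}_model_best.pth.tar'.format(name))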
Example #4
    def __init__(self, mdlParams, indSet, fold=-1):
        """
        Args:
            mdlParams (dict): Configuration for loading
            indSet (string): Indicates train, val, test
        """
        # Mdlparams
        self.mdlParams = mdlParams
        # Current indSet = 'trainInd'/'valInd'/'testInd'
        self.indSet = indSet
        if self.indSet == 'trainInd':
            self.root = mdlParams['dataDir'] + '/train'
            assert (fold > 0 and fold <= mdlParams['fold'])
            self.fold = fold
        elif self.indSet == 'valInd':
            self.root = mdlParams['dataDir'] + '/train'
            assert (fold > 0 and fold <= mdlParams['fold'])
            self.fold = fold
        else:
            self.root = mdlParams['dataDir'] + '/test'
            assert (fold < 0)
        self.names_list = []
        # Number of classes
        self.numClasses = mdlParams['numClasses']
        # Size to crop
        self.crop_size = mdlParams['crop_size']
        # Model input size
        self.input_size = (np.int32(mdlParams['input_size'][0]),
                           np.int32(mdlParams['input_size'][1]))
        # Potential class balancing option
        self.balancing = mdlParams['balance_classes']
        # Potential setMean and setStd
        self.setMean = mdlParams['setMean'].astype(np.float32)
        self.setStd = mdlParams['setStd'].astype(np.float32)
        # Only downsample ('only_downsmaple' is the original, typo'd config key)
        self.only_downsample = mdlParams.get('only_downsmaple', True)
        # Meta csv
        self.meta_path = mdlParams['meta_path']
        self.meta_df = pd.read_pickle(self.meta_path)

        class_label = 0
        self.subsets_size = []
        self.image_path = []
        for class_dir in os.listdir(self.root):
            subset_img_path = []
            subset_name_list = []
            dir_path = os.path.join(self.root, class_dir)
            for image in os.listdir(dir_path):
                subset_name_list.append({
                    'id': image.split('.')[0],
                    'label': class_label,
                })
                subset_img_path.append(os.path.join(dir_path, image))
            subset_size = len(subset_img_path)
            self.names_list.append(subset_name_list)
            self.image_path.append(subset_img_path)
            self.subsets_size.append(subset_size)
            class_label += 1

        if indSet == 'trainInd':
            new_names_list = []
            new_image_path = []
            for i in range(self.numClasses):
                print('Before folding: ' + str(self.subsets_size[i]))
                fold_len = self.subsets_size[i] // self.mdlParams['fold']
                self.names_list[i] = self.names_list[i][:(
                    self.fold - 1) * fold_len] + self.names_list[i][self.fold *
                                                                    fold_len:]
                self.image_path[i] = self.image_path[i][:(
                    self.fold - 1) * fold_len] + self.image_path[i][self.fold *
                                                                    fold_len:]
                self.subsets_size[i] = len(self.names_list[i])
                print('After folding: ' + str(self.subsets_size[i]))
                new_names_list += self.names_list[i]
                new_image_path += self.image_path[i]
            self.names_list = new_names_list
            self.image_path = new_image_path
            all_transforms = []
            if self.only_downsample:
                all_transforms.append(transforms.Resize(self.input_size))
            else:
                all_transforms.append(transforms.Resize(self.crop_size))
                all_transforms.append(transforms.RandomCrop(self.input_size))
            if mdlParams.get('flip_lr_ud', False):
                all_transforms.append(transforms.RandomHorizontalFlip())
                all_transforms.append(transforms.RandomVerticalFlip())
            # Full rot
            if mdlParams.get('full_rot', 0) > 0:
                if mdlParams.get('scale', False):
                    all_transforms.append(
                        transforms.RandomChoice([
                            transforms.RandomAffine(mdlParams['full_rot'],
                                                    scale=mdlParams['scale'],
                                                    shear=mdlParams.get(
                                                        'shear', 0),
                                                    resample=Image.NEAREST),
                            transforms.RandomAffine(mdlParams['full_rot'],
                                                    scale=mdlParams['scale'],
                                                    shear=mdlParams.get(
                                                        'shear', 0),
                                                    resample=Image.BICUBIC),
                            transforms.RandomAffine(mdlParams['full_rot'],
                                                    scale=mdlParams['scale'],
                                                    shear=mdlParams.get(
                                                        'shear', 0),
                                                    resample=Image.BILINEAR)
                        ]))
                else:
                    all_transforms.append(
                        transforms.RandomChoice([
                            transforms.RandomRotation(mdlParams['full_rot'],
                                                      resample=Image.NEAREST),
                            transforms.RandomRotation(mdlParams['full_rot'],
                                                      resample=Image.BICUBIC),
                            transforms.RandomRotation(mdlParams['full_rot'],
                                                      resample=Image.BILINEAR)
                        ]))
            # Color distortion
            if mdlParams.get('full_color_distort') is not None:
                all_transforms.append(
                    transforms.ColorJitter(
                        brightness=mdlParams.get('brightness_aug', 32. / 255.),
                        saturation=mdlParams.get('saturation_aug', 0.5),
                        contrast=mdlParams.get('contrast_aug', 0.5),
                        hue=mdlParams.get('hue_aug', 0.2)))
            else:
                all_transforms.append(
                    transforms.ColorJitter(brightness=32. / 255.,
                                           saturation=0.5))
            # Autoaugment
            if self.mdlParams.get('randaugment', False):
                all_transforms.append(
                    RandAugment(self.mdlParams.get('N'),
                                self.mdlParams.get('M')))
            # Cutout
            if self.mdlParams.get('cutout', 0) > 0:
                all_transforms.append(
                    Cutout_v0(n_holes=1, length=self.mdlParams['cutout']))
            # Normalize
            all_transforms.append(transforms.ToTensor())
            all_transforms.append(
                transforms.Normalize(np.float32(self.mdlParams['setMean']),
                                     np.float32(self.mdlParams['setStd'])))
            # All transforms
            self.composed = transforms.Compose(all_transforms)
        elif indSet == 'valInd':
            new_names_list = []
            new_image_path = []
            for i in range(self.numClasses):
                print('Before folding: ' + str(self.subsets_size[i]))
                fold_len = self.subsets_size[i] // self.mdlParams['fold']
                self.names_list[i] = self.names_list[i][(self.fold - 1) *
                                                        fold_len:self.fold *
                                                        fold_len]
                self.image_path[i] = self.image_path[i][(self.fold - 1) *
                                                        fold_len:self.fold *
                                                        fold_len]
                self.subsets_size[i] = len(self.names_list[i])
                print('After folding: ' + str(self.subsets_size[i]))
                new_names_list += self.names_list[i]
                new_image_path += self.image_path[i]
            self.names_list = new_names_list
            self.image_path = new_image_path

            self.composed = transforms.Compose([
                transforms.Resize(self.input_size),
                transforms.ToTensor(),
                transforms.Normalize(
                    torch.from_numpy(self.setMean).float(),
                    torch.from_numpy(self.setStd).float())
            ])
        else:
            new_names_list = []
            new_image_path = []
            for i in range(self.numClasses):
                new_names_list += self.names_list[i]
                new_image_path += self.image_path[i]
            self.names_list = new_names_list
            self.image_path = new_image_path

            self.composed = transforms.Compose([
                transforms.Resize(self.input_size),
                transforms.ToTensor(),
                transforms.Normalize(
                    torch.from_numpy(self.setMean).float(),
                    torch.from_numpy(self.setStd).float())
            ])
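Cutout_v0 is appended to the pipeline before ToTensor but is not defined in this excerpt. A sketch of the standard Cutout augmentation (DeVries & Taylor, 2017), assuming it receives a PIL image at that point in the pipeline:

import numpy as np
from PIL import Image

class Cutout_v0:
    """Sketch: randomly zero out square patches of the image."""
    def __init__(self, n_holes=1, length=16):
        self.n_holes = n_holes
        self.length = length

    def __call__(self, img):
        arr = np.array(img)
        h, w = arr.shape[0], arr.shape[1]
        for _ in range(self.n_holes):
            y = np.random.randint(h)
            x = np.random.randint(w)
            y1, y2 = max(0, y - self.length // 2), min(h, y + self.length // 2)
            x1, x2 = max(0, x - self.length // 2), min(w, x + self.length // 2)
            arr[y1:y2, x1:x2] = 0  # black out the patch
        return Image.fromarray(arr)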


def my_softmax(data):
    # Row-wise softmax; subtracting the row max keeps np.exp from overflowing
    for i in range(data.shape[0]):
        shifted = data[i] - np.max(data[i])
        data[i] = np.exp(shifted) / np.sum(np.exp(shifted))
    return data
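A vectorized equivalent that avoids the Python loop (a sketch; same row-wise result):

def softmax_vectorized(data):
    shifted = data - data.max(axis=1, keepdims=True)  # for numerical stability
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)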


train_transforms = T.Compose([
    T.Resize((112, 112)),
    T.RandomHorizontalFlip(),
    T.RandomVerticalFlip(),
    # T.RandomRotation(45),
    T.ToTensor(),
    # T.Normalize(mean=[0.3],
    #             std=[0.2])
    T.Normalize(mean=[0.459], std=[0.250])
])

valid_transforms = T.Compose([
    T.Resize((112, 112)),
    T.ToTensor(),
    # T.Normalize(mean=[0.3],
    #             std=[0.2])
    T.Normalize(mean=[0.459], std=[0.250])
])
        rescaled_image_data *= 255
        image = PIL.Image.fromarray(np.uint8(rescaled_image_data))

        labels = self.label_info.iloc[index, 4]
        labels = np.array([labels])
        labels = labels.astype('float32').reshape(-1, 1)
        labels = torch.from_numpy(labels)

        if self.transform:  #if any transforms were given to initialiser
            image = self.transform(image)

        return image, labels


training_transform = transforms.Compose([
    transforms.RandomVerticalFlip(p=0.4),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ColorJitter(contrast=0.2, hue=0.2),
    transforms.ToTensor()
])

validation_transform = transforms.Compose([
    transforms.RandomVerticalFlip(p=0.2),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ColorJitter(contrast=0.2, hue=0.2),
    transforms.ToTensor()
])

batch_size = 128

classes = ('good labels', 'bad labels')
Example #7
def get_trf(trfs: str):
    """
  A function to quickly get required transforms.

  Arguments
  ---------
  trfs : str
         An str that represents what T are needed. See Notes

  Returns
  -------
  trf : torch.transforms
        The transforms as a transforms object from torchvision.

  Notes
  -----
  >>> get_trf('rz256_cc224_tt_normimgnet')
  >>> T.Compose([T.Resize(256),
                          T.CenterCrop(224),
                          T.ToTensor(),
                          T.Normalize([0.485, 0.456, 0.406], 
                                      [0.229, 0.224, 0.225])])
  """
    # TODO Write tests
    # TODO Add more options
    trf_list = []
    for trf in trfs.split('_'):
        if trf.startswith('rz'):
            val = (int(trf[2:]), int(trf[2:]))
            trf_list.append(T.Resize(val))
        elif trf.startswith('cc'):
            val = (int(trf[2:]), int(trf[2:]))
            trf_list.append(T.CenterCrop(val))
        elif trf.startswith('rr'):
            trf_list.append(T.RandomRotation(int(trf[2:])))
        elif trf.startswith('rc'):
            trf_list.append(T.RandomCrop(int(trf[2:])))
        # TODO Add other padding modes
        elif trf.startswith('pad'):
            trf_list.append(T.Pad(int(trf[3:]), padding_mode='reflect'))
        elif trf.startswith('rhf'):
            val = float(trf[3:]) if trf[3:].strip() != '' else 0.5
            trf_list.append(T.RandomHorizontalFlip(val))
        elif trf.startswith('rvf'):
            val = float(trf[3:]) if trf[3:].strip() != '' else 0.5
            trf_list.append(T.RandomVerticalFlip(val))
        # T.ColorJitter
        # TODO Add a way to specify all three values
        # For now we take just one value and pass three equal ones.
        elif trf.startswith('cj'):
            val = [float(trf[2:])] * 3
            trf_list.append(T.ColorJitter(*val))
        elif trf == 'tt':
            trf_list.append(T.ToTensor())
        elif trf == 'normimgnet':
            trf_list.append(
                T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))
        elif trf == 'normmnist':
            trf_list.append(T.Normalize((0.1307, ), (0.3081, )))
        elif trf == 'fm255':
            trf_list.append(T.Lambda(lambda x: x.mul(255)))
        else:
            raise NotImplementedError('Unknown transform token: {}'.format(trf))

    return T.Compose(trf_list)
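For example, a padded random-crop training pipeline and a plain validation pipeline could be requested as (a usage sketch; every token used below is handled by the parser above):

train_trf = get_trf('rz256_pad16_rc224_rhf_rvf_tt_normimgnet')
val_trf = get_trf('rz224_tt_normimgnet')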
Example #8
def main():
    global args, best_prec1
    args = parser.parse_args()
    if args.tensorboard:
        configure("runs/%s" % (args.name))
    print("Launch...")
    print(sys.argv)

    # Global configuration of the datasets
    USE_COLOR = not args.monkey
    kwargs = {'num_workers': 4, 'pin_memory': True}

    ##############################
    # Database loading
    ##############################
    # Data loading code
    # TODO: For now only 224x224 or 32x32 input sizes are supported. More work is needed for other resolutions
    if args.database == 'cifar-10':
        if not USE_COLOR:
            raise ValueError("CIFAR-10 does not handle training with gray images")
        # Data augmentation
        normalize = transforms.Normalize(
            mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
            std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize])

        # CIFAR-10
        data_CIFAR10 = datasets.CIFAR10(args.tempdir,
                                        train=True,
                                        download=True,
                                        transform=transform_train)
        if args.split_data > 0:
            sampler = torch.utils.data.sampler.WeightedRandomSampler(
                weights=[1] * 10000, num_samples=args.split_data)
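            # Note: a weight vector of length 10,000 means the sampler only ever
            # draws (uniformly) from the first 10,000 training images, even though
            # CIFAR-10's train split has 50,000; num_samples sets the epoch size.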
            shuffle = False
        else:
            sampler = None
            shuffle = True

        train_loader = torch.utils.data.DataLoader(data_CIFAR10,
                                                   batch_size=args.batch_size,
                                                   shuffle=shuffle,
                                                   sampler=sampler,
                                                   **kwargs)

        val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
            args.tempdir, train=False, transform=transform_test),
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 **kwargs)

        NUM_CLASS = 10
        INPUT_SIZE = 32
    elif args.database == 'cifar-100':
        if not USE_COLOR:
            raise ValueError("CIFAR-100 does not handle training with gray images")
        # Data augmentation
        # From: https://github.com/meliketoy/wide-resnet.pytorch/blob/master/config.py
        normalize = transforms.Normalize(mean=[0.5071, 0.4867, 0.4408],
                                         std=[0.2675, 0.2565, 0.2761])
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize])
        data_CIFAR100 = datasets.CIFAR100(args.tempdir,
                                          train=True,
                                          download=True,
                                          transform=transform_train)
        if args.split_data > 0:
            sampler = torch.utils.data.sampler.WeightedRandomSampler(
                weights=[1] * 10000, num_samples=args.split_data)
            shuffle = False
        else:
            sampler = None
            shuffle = True

        # CIFAR-100
        train_loader = torch.utils.data.DataLoader(data_CIFAR100,
                                                   batch_size=args.batch_size,
                                                   shuffle=shuffle,
                                                   sampler=sampler,
                                                   **kwargs)

        val_loader = torch.utils.data.DataLoader(datasets.CIFAR100(
            args.tempdir, train=False, transform=transform_test),
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 **kwargs)

        NUM_CLASS = 100
        INPUT_SIZE = 32
    elif args.database == 'kth':
        if not USE_COLOR and args.gcn:
            raise ValueError("It is not possible to use gray images and GCN")
        # Data augmentation
        if args.gcn:
            normalize = GCN()
        else:
            # TODO: Use the same normalization than CIFAR-10
            # TODO: That might be suboptimal...
            if USE_COLOR:
                normalize = transforms.Normalize(
                    mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                    std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
            else:
                normalize = transforms.Normalize(
                    mean=[x / 255.0 for x in [125.3]],
                    std=[x / 255.0 for x in [63.0]])

        add_transform = []
        if not USE_COLOR:
            add_transform += [transforms.Grayscale(num_output_channels=1)]

        transform_train = transforms.Compose(add_transform + [
            transforms.Resize((256, 256)),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(), normalize
        ])
        transform_test = transforms.Compose(add_transform + [
            transforms.Resize((256, 256)),
            transforms.CenterCrop((224, 224)),
            transforms.ToTensor(), normalize
        ])

        kth_train_dataset = datasets.ImageFolder(root=args.traindir,
                                                 transform=transform_train)

        kth_test_dataset = datasets.ImageFolder(root=args.valdir,
                                                transform=transform_test)
        train_loader = torch.utils.data.DataLoader(kth_train_dataset,
                                                   shuffle=True,
                                                   batch_size=args.batch_size,
                                                   **kwargs)

        val_loader = torch.utils.data.DataLoader(kth_test_dataset,
                                                 shuffle=True,
                                                 batch_size=args.batch_size,
                                                 **kwargs)
        NUM_CLASS = args.num_clases
        INPUT_SIZE = 224

    elif args.database == 'imagenet_ECCV':

        if not USE_COLOR:
            raise ValueError("ImageNet does not handle training with gray images")

        _IMAGENET_PCA = {
            'eigval':
            torch.Tensor([0.2175, 0.0188, 0.0045]),
            'eigvec':
            torch.Tensor([
                [-0.5675, 0.7192, 0.4009],
                [-0.5808, -0.0045, -0.8140],
                [-0.5836, -0.6948, 0.4203],
            ])
        }

        if args.gcn:
            normalize = GCN()
        else:
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225])

        transform_train = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomCrop(224),
            transforms.ColorJitter(brightness=0.4,
                                   contrast=0.4,
                                   saturation=0.4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
            normalize
        ])
        transform_test = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.CenterCrop((224, 224)),
            transforms.ToTensor(), normalize
        ])

        train_dataset = datasets.ImageFolder(root=args.traindir,
                                             transform=transform_train)
        test_dataset = datasets.ImageFolder(root=args.valdir,
                                            transform=transform_test)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   shuffle=True,
                                                   batch_size=args.batch_size,
                                                   **kwargs)

        val_loader = torch.utils.data.DataLoader(test_dataset,
                                                 shuffle=True,
                                                 batch_size=args.batch_size,
                                                 **kwargs)
        NUM_CLASS = 1000
        INPUT_SIZE = 224

    else:
        raise ValueError("Unknown database")

    if not USE_COLOR and args.model != "dawn":
        raise ValueError("Only DAWN supports training with gray images")

    ##################################
    # Create model
    ##################################
    # TODO: This condition only work if we have only two image size
    big_input = INPUT_SIZE != 32
    # Only for scattering transform (as coefficient are precomptuted)
    scattering = None
    if args.model == 'densenet':
        no_init_conv = args.no_init_conv
        if INPUT_SIZE > 128:
            # For these input size, init conv are necessary
            no_init_conv = False
        model = dn.DenseNet3(args.layers,
                             NUM_CLASS,
                             args.growth,
                             reduction=args.reduce,
                             bottleneck=args.bottleneck,
                             dropRate=args.droprate,
                             init_conv=not no_init_conv)
    elif args.model == 'vgg':
        model = vgg.VGG(NUM_CLASS, big_input=big_input)
    elif args.model == 'wcnn':
        model = wcnn.WCNN(NUM_CLASS,
                          big_input=big_input,
                          wavelet=args.wavelet,
                          levels=args.levels)
    elif args.model == 'dawn':
        # Our model
        model = dawn.DAWN(NUM_CLASS,
                          big_input=big_input,
                          first_conv=args.first_conv,
                          number_levels=args.levels,
                          kernel_size=args.kernel_size,
                          no_bootleneck=args.no_bootleneck,
                          classifier=args.classifier,
                          share_weights=args.share_weights,
                          simple_lifting=args.simple_lifting,
                          COLOR=USE_COLOR,
                          regu_details=args.regu_details,
                          regu_approx=args.regu_approx,
                          haar_wavelet=args.haar_wavelet)
    elif args.model == 'scatter':
        from kymatio import Scattering2D
        from models.scatter.Scatter_WRN import Scattering2dCNN, ScatResNet

        if INPUT_SIZE == 224:
            # KTH
            scattering = Scattering2D(J=args.scat,
                                      shape=(args.N, args.N),
                                      max_order=args.mode)
            scattering = scattering.cuda()
            model = ScatResNet(args.scat, INPUT_SIZE, NUM_CLASS,
                               args.classifier, args.mode)
        else:
            # Precomputation
            scattering = Scattering2D(J=args.scat,
                                      shape=(args.N, args.N),
                                      max_order=args.mode)
            scattering = scattering.cuda()
            model = Scattering2dCNN(args.classifier,
                                    J=args.scat,
                                    N=args.N,
                                    num_classes=NUM_CLASS,
                                    blocks=args.blocks,
                                    mode=args.mode,
                                    use_avg_pool=args.use_avg_pool)
    elif args.model == 'resnet':
        if big_input:
            import torchvision
            model = torchvision.models.resnet18(pretrained=args.pretrained)
            model.fc = nn.Linear(512, NUM_CLASS)
        else:
            if args.use_normal:
                model = resnet.ResNetCIFARNormal(
                    [args.size_normal, args.size_normal, args.size_normal],
                    num_classes=NUM_CLASS)
            else:
                model = resnet.ResNetCIFAR([2, 2, 2, 2],
                                           num_classes=NUM_CLASS,
                                           levels=args.levels)
    else:
        raise ValueError("Unknown model")

    # get the number of model parameters
    print("Number of model parameters            : {:,}".format(
        sum([p.data.nelement() for p in model.parameters()])))
    print("Number of *trainable* model parameters: {:,}".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))

    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    if args.multigpu:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model = model.cuda()

    # Print network summary
    if args.summary:
        # For display like Keras
        from torchsummary import summary
        summary(model, input_size=(3, INPUT_SIZE, INPUT_SIZE))

    # CSV files statistics
    csv_logger = CSVStats()

    if args.monkey:
        # This is a special condition
        # Only to test and visualize the multi-resolution
        # output
        f = open('./data/baboon.png', 'rb')
        from PIL import Image
        img = Image.open(f)
        img = img.convert('RGB')  # convert() returns a new image; keep the result
        img.show()

        # Do the transformation
        to_pil = transforms.ToPILImage()
        if USE_COLOR:
            unormalize = UnNormalize(
                mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
        else:
            unormalize = UnNormalize(mean=[x / 255.0 for x in [125.3]],
                                     std=[x / 255.0 for x in [63.0]])
        tensor_trans = transform_test(img)
        img_trans = to_pil(torch.clamp(unormalize(tensor_trans), 0.0, 1.0))
        img_trans.show()
        img_trans.save("trans.png")

        # Make pytorch with the batch size
        tensor_trans = tensor_trans[None, :, :, :]

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

            if args.monkey:
                dump_image(tensor_trans, args.start_epoch, model, unormalize)
                raise "stop here!"
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                nesterov=True,
                                weight_decay=args.weight_decay)

    if args.model == 'dawn':
        if args.warmup:
            model.initialization = True
            train_init(train_loader, model, criterion, optimizer)
            model.initialization = False  # Switch

    for epoch in range(args.start_epoch, args.epochs):
        t0 = time.time()
        adjust_learning_rate(optimizer, epoch, args.drop)
        # TODO: Clean this code
        # Define args_model unconditionally; otherwise the comparisons below
        # raise NameError whenever args.model != 'dawn'
        args_model = 'dawn'

        #model, optimizer = amp.initialize(model, optimizer)
        # train for one epoch
        prec1_train, prec5_train, loss_train = train(train_loader,
                                                     model,
                                                     criterion,
                                                     optimizer,
                                                     epoch,
                                                     args.model == args_model,
                                                     scattering=scattering)

        # Optimize cache on cuda
        # This is quite important to avoid memory overflow
        # when training big models. The performance impact
        # seems minimal
        torch.cuda.empty_cache()

        # evaluate on validation set
        prec1_val, prec5_val, loss_val = validate(val_loader,
                                                  model,
                                                  criterion,
                                                  epoch,
                                                  args.model == args_model,
                                                  scattering=scattering)

        if args.monkey:
            # In this case, we will output the Monkey image
            dump_image(tensor_trans, epoch, model, unormalize)

        # Print some statistics inside CSV
        csv_logger.add(prec1_train, prec1_val, prec5_train, prec5_val,
                       loss_train, loss_val)

        # remember best prec@1 and save checkpoint
        is_best = prec1_val > best_prec1
        best_prec1 = max(prec1_val, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            }, is_best)
        csv_logger.write()

        # Final print
        print(
            ' * Train[{:.3f} %, {:.3f} %, {:.3f} loss] Val [{:.3f} %, {:.3f}%, {:.3f} loss] Best: {:.3f} %'
            .format(prec1_train, prec5_train, loss_train, prec1_val, prec5_val,
                    loss_val, best_prec1))
        print('Time for', epoch, "/", args.epochs, time.time() - t0)

    print('Best accuracy: ', best_prec1)
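UnNormalize, used in the monkey-visualization path above, is not defined in this excerpt; a minimal sketch that inverts transforms.Normalize channel by channel:

class UnNormalize:
    """Sketch: undo transforms.Normalize, i.e. x * std + mean per channel."""
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # tensor: (C, H, W), normalized; modified in place and returned
        for channel, m, s in zip(tensor, self.mean, self.std):
            channel.mul_(s).add_(m)
        return tensor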
Example #9
def main():
    # set up data transforms and device
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val':
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    image_datasets = {
        phase: SampleDataset(root=os.path.join(data_dir, phase),
                             transform=data_transforms[phase])
        for phase in PHASES
    }
    dataloaders = {
        phase: torch.utils.data.DataLoader(dataset=image_datasets[phase],
                                           batch_size=BATCH_SIZE,
                                           shuffle=SHUFFLE,
                                           num_workers=NUM_WORKERS)
        for phase in PHASES
    }

    if not os.path.exists(model_path):
        os.mkdir(model_path)

    # set up model
    model, params_to_optimize, _ = initialize_model(model_name=MODEL,
                                                    num_classes=NUM_CLASSES,
                                                    feature_extract=EXTRACTOR,
                                                    use_pretrained=PRETRAINED)
    model = model.to(device)

    criterion = nn.CrossEntropyLoss()  # classification task: cross-entropy loss
    optimizer = optim.SGD(
        params_to_optimize,
        lr=LEARNING_RATE,
        momentum=MOMENTUM,
        weight_decay=WEIGHT_DECAY)  # stochastic gradient descent optimizer
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                           step_size=STEP_SIZE,
                                           gamma=GAMMA)
    print('model training starts...')
    trained_model, _ = train_model(device,
                                   model,
                                   dataloaders,
                                   criterion,
                                   optimizer,
                                   exp_lr_scheduler,
                                   num_epochs=NUM_EPOCHS)
    torch.save(trained_model.state_dict(),
               os.path.join(model_path, model_name))
    print('model training complete...')
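initialize_model and train_model come from a helper module that is not shown. A minimal sketch of the epoch loop train_model presumably runs, assuming the standard PyTorch transfer-learning pattern (the signature mirrors the call above but is otherwise hypothetical):

import torch

def train_model(device, model, dataloaders, criterion, optimizer,
                scheduler, num_epochs=25):
    """Sketch of a train/val loop; not the original implementation."""
    history = []
    for epoch in range(num_epochs):
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss, running_corrects, n = 0.0, 0, 0
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()
                # gradients only needed in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += (outputs.argmax(1) == labels).sum().item()
                n += inputs.size(0)
            if phase == 'train':
                scheduler.step()
            history.append((epoch, phase, running_loss / n, running_corrects / n))
    return model, history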
import os
import random
import numpy as np
from PIL import Image, ImageOps
import torch.utils.data as dt  # assumed alias: the ImageDataset class below uses dt.Dataset
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
from torchvision.transforms.functional import rotate
from scipy.ndimage import zoom
from augmentation import *

to_img = ToPILImage()

last_preprocess = transforms.Compose(
    [transforms.RandomGrayscale(),
     transforms.ToTensor()])

vflip = transforms.Compose([transforms.RandomVerticalFlip(p=1.0)])

hflip = transforms.Compose([transforms.RandomHorizontalFlip(p=1.0)])

colorjitter = transforms.Compose([transforms.ColorJitter()])


class ImageDataset(dt.Dataset):
    def __init__(self, data_path, mask_path, augment=True):

        self.files = os.listdir(data_path)
        self.files.sort()
        self.mask_files = os.listdir(mask_path)
        self.mask_files.sort()
        self.data_path = data_path
        self.mask_path = mask_path
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if 'efficientnet' in args.arch:  # NEW
        if args.pretrained:
            model = EfficientNet.from_pretrained(args.arch, advprop=args.advprop)
            print("=> using pre-trained model '{}'".format(args.arch))
        else:
            print("=> creating model '{}'".format(args.arch))
            model = EfficientNet.from_name(args.arch)

    else:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            if args.arch.find('alexnet') != -1:
                model = models.__dict__[args.arch](pretrained=True)
            elif args.arch.find('inception_v3') != -1:
                model = models.inception_v3(pretrained=True)
            elif args.arch.find('densenet121') != -1:
                model = models.densenet121(pretrained=True)
            elif args.arch.find('resnet') != -1:  # ResNet
                model = models.__dict__[args.arch](pretrained=True)
            else:
                print('### please check args.arch when loading the model for training ###')
                exit(-1)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()

    if args.fine_tuning:
        print("=> transfer-learning mode + fine-tuning (train only the last FC layer)")
        # Freeze Previous Layers(now we are using them as features extractor)
        #jiangjiewei
        # for param in model.parameters():
        #    param.requires_grad = False

        # Fine-tune the last layer for the new task
        # Check which network: alexnet, inception_v3, densenet, resnet50
        if args.arch.find('alexnet') != -1:
            num_ftrs = model.classifier[6].in_features
            model.classifier[6] = nn.Linear(num_ftrs, 3)
        elif args.arch.find('inception_v3') != -1:
            num_ftrs = model.fc.in_features
            num_auxftrs = model.AuxLogits.fc.in_features
            model.fc = nn.Linear(num_ftrs, 3)
            model.AuxLogits.fc = nn.Linear(num_auxftrs, 3)
            model.aux_logits = False
        elif args.arch.find('densenet121') != -1:
            num_ftrs = model.classifier.in_features
            model.classifier = nn.Linear(num_ftrs, 3)
        elif args.arch.find('resnet') != -1: # ResNet
            num_ftrs = model.fc.in_features
            model.fc = nn.Linear(num_ftrs, 3)
        else:
            print("###Error: Fine-tuning is not supported on this architecture.###")
            exit(-1)

        print(model)
    else:
        parameters = model.parameters()
    # name, parma_1 = model.classifier[6].parameters()

    # for name, param in model.named_parameters():
    #     if param.requires_grad:
    #         print(name)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    # criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # jiangjiewei: add class weights to CrossEntropyLoss
    criterion = nn.CrossEntropyLoss(weight=torch.Tensor([1.5, 1.0, 3.0])).cuda(args.gpu)
    # use_cuda = True
    # device = torch.device("cuda" if use_cuda else "cpu")
    # class_weights = torch.FloatTensor([1.0, 0.2, 1.0]).cuda()
    # criterion = nn.CrossEntropyLoss(weight=class_weights).to(device)


    # NOTE: the model.module.* paths assume the model was wrapped in
    # (Distributed)DataParallel above; the bare model.classifier path for
    # alexnet assumes it was not.
    if args.arch.find('alexnet') != -1:
        fine_tune_parameters = list(model.classifier[6].parameters())
    elif args.arch.find('inception_v3') != -1:
        fine_tune_parameters = list(model.module.fc.parameters())
    elif args.arch.find('densenet121') != -1:
        fine_tune_parameters = list(model.module.classifier.parameters())
    elif args.arch.find('resnet') != -1:  # ResNet
        fine_tune_parameters = list(model.module.fc.parameters())
    else:
        print('### please check args.arch: no fine-tune parameters defined for it ###')
        exit(-1)

    # Materialize the parameters as a list first: .parameters() returns a
    # generator, and mapping id() over it would exhaust it before the
    # optimizer ever saw the fine-tune parameters.
    ignored_params = list(map(id, fine_tune_parameters))

    if args.arch.find('alexnet') != -1:
        base_params = filter(lambda p: id(p) not in ignored_params,
                             model.parameters())
    else:
        base_params = filter(lambda p: id(p) not in ignored_params,
                             model.module.parameters())
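    # base_params now excludes the fine-tune parameters; below, the new
    # classifier head gets a 10x learning rate while the pretrained backbone
    # keeps the base LR.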

    optimizer = torch.optim.SGD([{'params': base_params},  #model.parameters()
                                {'params': fine_tune_parameters, 'lr': 10*args.lr}],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True


    # Data loading code
    traindir = os.path.join(args.data, 'train1')
    valdir = os.path.join(args.data, 'val1')
    if args.advprop:
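        # AdvProp-pretrained EfficientNet checkpoints expect inputs scaled to
        # [-1, 1] rather than ImageNet mean/std normalization.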
        normalize = transforms.Lambda(lambda img: img * 2.0 - 1.0)
    else:
        normalize = transforms.Normalize(mean=[0.5765036, 0.34929818, 0.2401832],
                                        std=[0.2179051, 0.19200659, 0.17808074])


    if 'efficientnet' in args.arch:
        image_size = EfficientNet.get_image_size(args.arch)
    else:
        image_size = args.image_size

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            # transforms.Resize((256, 256), interpolation=PIL.Image.BICUBIC),
            # transforms.Resize((224, 224)),
            transforms.Resize((args.image_size, args.image_size), interpolation=PIL.Image.BICUBIC),
            # transforms.RandomResizedCrop((image_size, image_size) ),  #RandomRotation scale=(0.9, 1.0)
            transforms.RandomRotation(90),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    print('classes:', train_dataset.classes)
    # Get number of labels
    labels_length = len(train_dataset.classes)


    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_transforms = transforms.Compose([
        transforms.Resize((args.image_size, args.image_size), interpolation=PIL.Image.BICUBIC),
        # transforms.CenterCrop((image_size,image_size)),
        transforms.ToTensor(),
        normalize,
    ])
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, val_transforms),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        res = validate(val_loader, model, criterion, args)
        with open('res.txt', 'w') as f:
            print(res, file=f)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if args.arch.find('alexnet') != -1:
            pre_name = './alexnet'
        elif args.arch.find('inception_v3') != -1:
            pre_name = './inception_v3'
        elif args.arch.find('densenet121') != -1:
            pre_name = './densenet121'
        elif args.arch.find('resnet50') != -1:
            pre_name = './resnet50'
        else:
            print('### please check args.arch: no checkpoint prefix defined for it ###')
            exit(-1)

        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }, is_best, pre_name)

    # PATH = pre_name + '_fundus_net.pth'
    # torch.save(model.state_dict(), PATH)
    print('Finished Training')

def get_dataloaders(train_batchsize, val_batchsize):
    kwargs = {'num_workers': 20, 'pin_memory': True}
    input_size = INFO['model-info']['input-size']
    base = '{}/{}'.format(os.environ['datadir-base'], INFO['dataset'])
    normalize = T.Normalize(mean=INFO['dataset-info']['normalization']['mean'],
                            std=INFO['dataset-info']['normalization']['std'])
    transform = {
        'train':
        T.Compose([
            T.Resize(tuple([int(x * (4 / 3)) for x in input_size])),  # upscale
            T.RandomResizedCrop(input_size),  # random crop, then resize
            T.RandomHorizontalFlip(0.5),  # random horizontal flip
            T.RandomVerticalFlip(0.5),  # random vertical flip
            T.RandomApply([T.RandomRotation(90)], 0.5),  # p=0.5: rotate by a random angle in [-90, 90]
            T.RandomApply([T.RandomRotation(180)], 0.25),  # p=0.25: rotate by a random angle in [-180, 180]
            T.RandomApply(
                [T.ColorJitter(brightness=np.random.random() / 5 + 0.9)],
                0.5),  # randomly adjust brightness (factor drawn once, at construction)
            T.RandomApply(
                [T.ColorJitter(contrast=np.random.random() / 5 + 0.9)],
                0.5),  # randomly adjust contrast
            T.RandomApply(
                [T.ColorJitter(saturation=np.random.random() / 5 + 0.9)],
                0.5),  # randomly adjust saturation
            T.ToTensor(),
            normalize
        ]),
        'val':
        T.Compose([
            T.Resize(input_size),  # resize to the model input size
            # T.CenterCrop(input_size),
            T.ToTensor(),
            normalize
        ])
    }
    train_dset = dset.ImageFolder('{}/{}'.format(base, 'Train'),
                                  transform=transform['train'])
    train4val_dset = dset.ImageFolder('{}/{}'.format(base, 'Train'),
                                      transform=transform['val'])
    val_dset = dset.ImageFolder('{}/{}'.format(base, 'Val'),
                                transform=transform['val'])

    labels = torch.from_numpy(np.array(train_dset.imgs)[:, 1].astype(int))
    num_of_images_by_class = torch.zeros(len(train_dset.classes))
    for i in range(len(train_dset.classes)):
        num_of_images_by_class[i] = torch.where(
            labels == i, torch.ones_like(labels),
            torch.zeros_like(labels)).sum().item()

    mapping = {}
    for c in train_dset.classes:
        if c in val_dset.classes:
            mapping[train_dset.class_to_idx[c]] = val_dset.class_to_idx[c]
        else:
            mapping[
                train_dset.class_to_idx[c]] = val_dset.class_to_idx['UNKNOWN']
    mapping[-1] = val_dset.class_to_idx['UNKNOWN']

    train_len = train_dset.__len__()
    val_len = val_dset.__len__()

    train_loader = DataLoader(train_dset,
                              batch_size=train_batchsize,
                              sampler=sampler.RandomSampler(range(train_len)),
                              **kwargs)
    train4val_loader = DataLoader(train4val_dset,
                                  batch_size=val_batchsize,
                                  sampler=sampler.SequentialSampler(
                                      range(train_len)),
                                  **kwargs)
    val_loader = DataLoader(val_dset,
                            batch_size=val_batchsize,
                            sampler=sampler.SequentialSampler(range(val_len)),
                            **kwargs)

    imgs = np.array(val_dset.imgs)

    return train_loader, train4val_loader, val_loader, num_of_images_by_class, mapping, imgs
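
A minimal usage sketch for get_dataloaders above, assuming the INFO dict and the 'datadir-base' environment variable are configured as in the surrounding example; the batch sizes are illustrative:

import os
os.environ['datadir-base'] = '/data'  # hypothetical dataset root

train_loader, train4val_loader, val_loader, class_counts, mapping, imgs = \
    get_dataloaders(train_batchsize=64, val_batchsize=128)
print('images per class:', class_counts)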
Exemple #13
0
    def __init__(self, root, ann_file, is_train=True):

        # load annotations
        print('Loading annotations from: ' + os.path.basename(ann_file))
        with open(ann_file) as data_file:
            ann_data = json.load(data_file)

        # set up the filenames and annotations
        self.imgs = [aa['file_name'] for aa in ann_data['images']]
        self.ids = [aa['id'] for aa in ann_data['images']]

        # if we don't have class labels, set them all to 0
        if 'annotations' in ann_data.keys():
            self.classes = [
                aa['category_id'] for aa in ann_data['annotations']
            ]
        else:
            self.classes = [0] * len(self.imgs)

        # load taxonomy
        self.tax_levels = [
            'id', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom'
        ]
        # categories per taxonomic level: 8142, 4412, 1120, 273, 57, 25, 6
        self.taxonomy, self.classes_taxonomic = load_taxonomy(
            ann_data, self.tax_levels, self.classes)

        # print out some stats
        print('\t' + str(len(self.imgs)) + ' images')
        print('\t' + str(len(set(self.classes))) + ' classes')

        self.root = root
        self.is_train = is_train
        self.loader = default_loader

        # augmentation params
        self.im_size = [256, 256]  # can change this to train on higher res
        self.mu_data = [0.485, 0.456, 0.406]
        self.std_data = [0.229, 0.224, 0.225]
        self.brightness = 0.4
        self.contrast = 0.4
        self.saturation = 0.4
        self.hue = 0.25

        # augmentations
        self.center_crop = transforms.CenterCrop(
            (self.im_size[0], self.im_size[1]))
        self.scale_aug = transforms.RandomResizedCrop(size=self.im_size[0])
        self.flip_aug = transforms.RandomHorizontalFlip()
        self.color_aug = transforms.ColorJitter(self.brightness, self.contrast,
                                                self.saturation, self.hue)
        self.tensor_aug = transforms.ToTensor()
        self.norm_aug = transforms.Normalize(mean=self.mu_data,
                                             std=self.std_data)

        self.train_compose = transforms.Compose([
            transforms.Lambda(lambda img: RandomErase(img)),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.Lambda(lambda img: crops_and_random(img)),
            #transforms.Resize((512,512),interpolation=2),
            #transforms.Lambda(lambda img: four_and_random(img)),
            transforms.Lambda(lambda crops: torch.stack([
                transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[n / 255. for n in [129.3, 124.1, 112.4]],
                        std=[n / 255. for n in [68.2, 65.4, 70.4]])
                ])(crop) for crop in crops
            ]))
        ])

        self.valid_compose = transforms.Compose([
            transforms.Lambda(lambda img: val_crops(img)),
            transforms.Lambda(lambda crops: torch.stack([
                transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize(
                        mean=[n / 255. for n in [129.3, 124.1, 112.4]],
                        std=[n / 255. for n in [68.2, 65.4, 70.4]])
                ])(crop) for crop in crops
            ]))
        ])
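
Because both pipelines above end in torch.stack over the generated crops, a single dataset item comes back as a 4-D tensor of shape (num_crops, C, H, W). A sketch of the usual fold into the batch dimension (variable names assumed):

# imgs: (B, num_crops, C, H, W) after the default collate
# imgs = imgs.view(-1, *imgs.shape[2:])  # -> (B * num_crops, C, H, W)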
Exemple #14
0
def main():
    #os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    device = torch.device('cuda')
    doc = sys.argv[1]
    x_train, y_train, x_val, y_val = load_data(doc)

    train_dataset = ImageDataset(
        x_train,
        y_train,
        transforms.Compose([
            #data augmentation
            transforms.ToPILImage(),
            transforms.RandomAffine(degrees=87,
                                    translate=(0.2, 0.2),
                                    scale=(0.87, 1.3),
                                    shear=0.2),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.ToTensor(),
        ]))

    val_dataset = ImageDataset(
        x_val,
        y_val,
        transforms.Compose([
            #data augmentation
            transforms.ToTensor(),
        ]))

    x_val_tensor = val_dataset[0][0]
    y_val_tensor = val_dataset[0][1].reshape(1)

    for i in range(1, len(val_dataset)):
        x_val_tensor = torch.cat((x_val_tensor, val_dataset[i][0]), 0)
        y_val_tensor = torch.cat((y_val_tensor, val_dataset[i][1].reshape(1)),
                                 0)

    x_val_cuda = x_val_tensor.view((2000, 1, 48, 48)).float().to(device)  # assumes exactly 2000 48x48 validation images
    y_val_cuda = y_val_tensor  # kept on the CPU; compared via numpy below
    #print(x_val_tensor)
    #print(x_val_tensor.size())
    #print(y_val_tensor.size())

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=128,
                              shuffle=True)
    print(len(train_dataset))

    model = Model()
    model.to(device)
    optimizer = Adam(model.parameters(), lr=0.001)
    loss_fn = nn.CrossEntropyLoss()

    best_val_acc = 0.0

    for epoch in range(400):
        train_loss = []
        train_acc = []
        model.train()

        for _, (img, target) in enumerate(train_loader):

            img_cuda = img.float().to(device)
            target_cuda = target.to(device)

            optimizer.zero_grad()
            output = model(img_cuda)

            # regularization: squared L2 norm of all parameters
            l2_reg = torch.tensor(0.).to(device)
            for param in model.parameters():
                l2_reg += torch.norm(param)**2

            # fold the L2 term into the loss (the 1e-4 coefficient is an
            # assumed value, not from the source; tune it as a hyperparameter)
            loss = loss_fn(output, target_cuda) + 1e-4 * l2_reg
            loss.backward()
            optimizer.step()

            predict = torch.max(output, 1)[1]
            acc = np.mean((target_cuda == predict).cpu().numpy())

            train_acc.append(acc)
            train_loss.append(loss.item())

        model.eval()
        label = np.array([0])
        for i in range(4):
            j = (i + 1) * 500
            val_output = model(x_val_cuda[i * 500:j])
            pred_y = torch.max(val_output, 1)[1].data.cpu().numpy().squeeze()
            label = np.concatenate((label, pred_y), axis=0)
        label = np.delete(label, 0)
        acc_val = np.mean((np.array(y_val_cuda) == label))

        print("Epoch: {}, Loss: {:.4f}, Acc: {:.4f}, Validation : {}".format(
            epoch + 1, np.mean(train_loss), np.mean(train_acc), acc_val))

        if epoch > 100 and acc_val > best_val_acc:
            best_val_acc = acc_val
            torch.save(model.state_dict(), 'best5.pkl')
            with open('record5.txt', 'a') as f:
                f.write("Epoch: {}, Validation : {}\n".format(epoch + 1, best_val_acc))

    def __init__(self,
                 root,
                 init_size=4,
                 growth_number=8,
                 images_multiplier=0,
                 upright=False,
                 do_rgb=False,
                 preload=True):
        self.extensions = torchvision.datasets.folder.IMG_EXTENSIONS

        classes, class_to_idx = torchvision.datasets.folder.find_classes(root)
        samples = torchvision.datasets.folder.make_dataset(
            root, class_to_idx, self.extensions)
        if len(samples) == 0:
            raise (RuntimeError("Found 0 files in subfolders of: " + root +
                                "\n"
                                "Supported extensions are: " +
                                ",".join(self.extensions)))
        self.name = root
        self.root = root
        self.loader = torchvision.datasets.folder.default_loader

        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples

        self.transforms = []
        self.sizes = []
        self.current_size = 0

        size = init_size

        for i in range(growth_number + 1):
            temp_transforms = []
            temp_transforms.append(
                transforms.ColorJitter(
                    brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1
                ) if do_rgb else transforms.Grayscale())
            temp_transforms.append(
                transforms.Resize(size) if upright else transforms.
                Resize(int(size * 1.5)))
            if upright:
                temp_transforms.append(transforms.CenterCrop(size))
            else:
                temp_transforms.append(transforms.RandomCrop(size))
                temp_transforms.append(transforms.RandomHorizontalFlip())
                temp_transforms.append(transforms.RandomVerticalFlip())
            temp_transforms.append(transforms.ToTensor())
            temp_transforms.append(RandomNoise(0.5))
            temp_transforms.append(
                transforms.Normalize((0.5, 0.5, 0.5), (1.0, 1.0, 1.0)))
            self.transforms.append(transforms.Compose(temp_transforms))
            self.sizes.append(size)
            size = size * 2

        self.transform = self.transforms[self.current_size]

        self.images = {}

        self.preloaded = False
        if preload:
            self.preloaded = True
            for path, _ in samples:
                img = self.loader(path)
                self.images[path] = img

        self.images_multiplier = images_multiplier
        self.images_idx = [
            i for j in range(self.images_multiplier)
            for i in range(len(self.samples))
        ]
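
A sketch of stepping through the progressive-growing schedule built above; the class name is not visible in the snippet, so it is assumed here:

# ds = GrowingImageFolder(root='faces/', init_size=4, growth_number=8)  # hypothetical class name
# ds.current_size = 3                            # select the 32x32 stage (4 * 2**3)
# ds.transform = ds.transforms[ds.current_size]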
Exemple #16
0
    def __init__(self, args, dataset_dir, pairs_file = "train_pairs.csv", datamode = "train", image_height = 128, image_width = 128, n_classes = 20, data_augument = False, onehot = False, debug = False ):
        super(ZalandoDataset, self).__init__()
        self.args = args
        self.dataset_dir = dataset_dir
        self.pairs_file = pairs_file
        self.datamode = datamode
        self.data_augument = data_augument
        self.onehot = onehot
        self.image_height = image_height
        self.image_width = image_width
        self.n_classes = n_classes
        self.debug = debug
        self.df_pairs = pd.read_csv( os.path.join(dataset_dir, pairs_file) )

        # transform
        if( data_augument ):
            self.transform = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.LANCZOS ),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomVerticalFlip(),
                    transforms.RandomAffine( degrees = (-10,10),  translate=(0.25,0.25), scale = (0.80,1.25), resample=Image.BICUBIC ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                    TPSTransform( tps_points_per_dim = 3 ),
                    transforms.ToTensor(),
                    transforms.Normalize( [0.5,0.5,0.5], [0.5,0.5,0.5] ),
                    RandomErasing( probability = 0.5, sl = 0.02, sh = 0.2, r1 = 0.3, mean=[0.5, 0.5, 0.5] ),
                ]
            )

            self.transform_mask = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.NEAREST ),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomVerticalFlip(),
                    transforms.RandomAffine( degrees = (-10,10),  translate=(0.25,0.25), scale = (0.80,1.25), resample=Image.NEAREST ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                    TPSTransform( tps_points_per_dim = 3 ),
                    transforms.ToTensor(),
                    transforms.Normalize( [0.5], [0.5] ),
                    RandomErasing( probability = 0.5, sl = 0.02, sh = 0.2, r1 = 0.3, mean=[0.5] ),
                ]
            )

            self.transform_mask_rgb = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.NEAREST ),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomVerticalFlip(),
                    transforms.RandomAffine( degrees = (-10,10),  translate=(0.25,0.25), scale = (0.80,1.25), resample=Image.NEAREST ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                    TPSTransform( tps_points_per_dim = 3 ),
                    transforms.ToTensor(),
                    transforms.Normalize( [0.5,0.5,0.5], [0.5,0.5,0.5] ),
                    RandomErasing( probability = 0.5, sl = 0.02, sh = 0.2, r1 = 0.3, mean=[0.5, 0.5, 0.5] ),
                ]
            )

            self.transform_mask_woToTensor = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.NEAREST ),
                    transforms.RandomHorizontalFlip(),
                    transforms.RandomVerticalFlip(),
                    transforms.RandomAffine( degrees = (-10,10),  translate=(0.25,0.25), scale = (0.80,1.25), resample=Image.NEAREST ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                    TPSTransform( tps_points_per_dim = 3 ),
                    RandomErasing( probability = 0.5, sl = 0.02, sh = 0.2, r1 = 0.3, mean=[0.5, 0.5, 0.5] ),
                ]
            )
        else:
            self.transform = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.LANCZOS ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                    transforms.ToTensor(),
                    transforms.Normalize( [0.5,0.5,0.5], [0.5,0.5,0.5] ),
                ]
            )
            self.transform_mask = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.NEAREST ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                    transforms.ToTensor(),
                    transforms.Normalize( [0.5], [0.5] ),
                ]
            )
            self.transform_mask_rgb = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.NEAREST ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                    transforms.ToTensor(),
                    transforms.Normalize( [0.5,0.5,0.5], [0.5,0.5,0.5] ),
                ]
            )
            self.transform_mask_woToTensor = transforms.Compose(
                [
                    transforms.Resize( (args.image_height, args.image_width), interpolation=Image.NEAREST ),
                    transforms.CenterCrop( size = (args.image_height, args.image_width) ),
                ]
            )

        if( self.debug ):
            print( self.df_pairs.head() )

        return

    def transform(self, img, seg):
        # Function for data augmentation
        # 1) Affine Augmentations: Rotation (-15 to +15 degrees), Scaling, Flipping.
        # 2) Elastic deformations
        # 3) Intensity augmentations

        ia.seed(int(time.time()))  # Seed for random augmentations

        # Needed for iaa
        img = (img * 255).astype('uint8')
        seg = (seg).astype('uint8')

        if self.aug:  # Augmentation only performed on train set
            img = np.expand_dims(img, axis=0)
            segmap = SegmentationMapsOnImage(seg, shape=img.shape[1:])  # Create segmentation map

            seq_all = iaa.Sequential([
                iaa.Fliplr(0.5),  # Horizontal flips
                iaa.Affine(
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    translate_percent={"x": (0, 0), "y": (0, 0)},
                    rotate=(-10, 10),
                    shear=(0, 0)),  # Scaling, rotating
                iaa.ElasticTransformation(alpha=(0.0, 100.0), sigma=10.0)  # Elastic
            ], random_order=True)

            seq_img = iaa.Sequential([
                iaa.blur.AverageBlur(k=(0, 4)),  # Average blur
                iaa.LinearContrast((0.8, 1.2)),  # Contrast
                iaa.Multiply((0.8, 1.2), per_channel=1),  # Intensity
            ], random_order=True)

            img, seg = seq_all(images=img, segmentation_maps=segmap)  # Rest of augmentations

            mask = np.zeros(img.shape)  # Create mask
            mask[img > 0] = 1

            img = seq_img(images=img)  # Intensity and contrast only on input image

            img = np.squeeze(img, axis=0)
            mask = np.squeeze(mask, axis=0)

            # Get segmentation map
            seg = seg.draw(size=img.shape)[0]
            seg = seg[:, :, 0]
            seg[seg > 0] = 1
        else:
            mask = np.zeros(img.shape)
            mask[img > 0] = 1

        # To PIL for Flip and ToTensor
        img_PIL = Image.fromarray(img)
        seg_PIL = Image.fromarray(seg * 255)
        mask_PIL = Image.fromarray((mask * 255).astype('uint8'))  # fromarray cannot handle float64 arrays

        flip_tensor_trans = transforms.Compose([
            transforms.RandomVerticalFlip(p=1),  # Flipped due to camcan
            transforms.ToTensor()
        ])

        return flip_tensor_trans(img_PIL), flip_tensor_trans(seg_PIL), flip_tensor_trans(mask_PIL)
Exemple #18
0
def main(config, checkpoint_dir=None):
    args = parse_args('train')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.manual_seed(10)

    if tune_hyperparam:
        batch_size = config['batch_size']
        lr = config['lr']
    else:
        batch_size = args.batch_size
        lr = args.lr

    gamma = config['gamma']

    best_f1 = 0

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomRotation(30),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
        GaussianBlur(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        RandomErasing()
    ])

    transform_val = transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
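
    # NOTE: DataLoader below is the project's own dataset class; the torch
    # loader is referenced by its full path torch.utils.data.DataLoader.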

    train_set = DataLoader(args.train_list, transform=transform_train)

    if args.resample:
        train_loader = torch.utils.data.DataLoader(
            train_set,
            batch_size=batch_size,
            sampler=ImbalancedDatasetSampler(
                train_set,
                num_samples=len(train_set),
                callback_get_label=train_set.data),
            num_workers=args.num_workers)
    else:
        train_loader = torch.utils.data.DataLoader(
            train_set,
            batch_size=batch_size,
            shuffle=True,
            num_workers=args.num_workers)

    val_set = DataLoader(args.val_list, transform=transform_val)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.num_workers)

    # Load model
    print("==> Creating model")
    num_classes = args.num_classes
    model = create_model(num_classes, args).to(device)

    # choose loss function
    if args.weighted_loss:
        targets = [i['target'] for i in train_set.data]
        weights = compute_class_weight('balanced',
                                       classes=np.unique(targets),
                                       y=np.array(targets))
        criterion = select_loss_func(choice=args.loss,
                                     weights=torch.tensor(weights,
                                                          dtype=torch.float),
                                     gamma=gamma)
    else:
        criterion = select_loss_func(choice=args.loss, gamma=gamma)

    # choose optimizer
    print('==> {} optimizer'.format(args.optimizer))
    if args.optimizer == 'SAM':
        base_optimizer = torch.optim.SGD
        optimizer = SAM(model.parameters(),
                        base_optimizer,
                        lr=lr,
                        momentum=0.9)
    elif args.optimizer == 'ADAM':
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    else:
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr,
                                    momentum=0.9,
                                    weight_decay=1e-3,
                                    nesterov=True)

    # set up logger
    writer = SummaryWriter(log_dir=args.log_dir)

    start_epoch = args.start_epoch
    if args.dataset == 'renal':
        df = pd.DataFrame(columns=[
            'model', 'lr', 'epoch_num', 'train_loss', 'val_loss', 'train_acc',
            'val_acc', 'normal', 'obsolescent', 'solidified', 'disappearing',
            'non_glom', 'f1'
        ])

    elif args.dataset == 'ham':
        df = pd.DataFrame(columns=[
            'model', 'lr', 'epoch_num', 'train_loss', 'val_loss', 'train_acc',
            'val_acc', 'MEL', 'NV', 'BCC', 'AKIEC', 'BKL', 'DF', 'VASC', 'f1'
        ])
    else:
        raise ValueError('no such dataset exists!')

    # start training
    for epoch in range(start_epoch, args.epochs):
        epoch += 1

        if args.optimizer != 'ADAM':
            cur_lr = adjust_learning_rate(lr, optimizer, epoch)
        else:
            cur_lr = lr

        print('\nEpoch: [%d | %d] LR: %f' % (epoch, args.epochs, cur_lr))

        train_loss, train_acc, train_f1, train_f1s = train(
            train_loader, model, optimizer, criterion, device, args)
        val_loss, val_acc, val_f1, val_f1s = validate(val_loader, model,
                                                      criterion, epoch, device,
                                                      args)

        if tune_hyperparam:
            tune.report(loss=val_loss, accuracy=val_f1)

        writer.add_scalars("loss/", {
            'train': train_loss,
            'val': val_loss
        }, epoch)
        writer.add_scalars("f1/", {'train': train_f1, 'val': val_f1}, epoch)

        # write to csv
        df.loc[epoch] = [
            args.network, cur_lr, epoch, train_loss, val_loss, train_acc,
            val_acc
        ] + val_f1s + [val_f1]

        output_csv_file = os.path.join(args.output_csv_dir, 'output.csv')
        df.to_csv(output_csv_file, index=False)

        # save model
        is_best_f1 = val_f1 > best_f1
        best_f1 = max(val_f1, best_f1)
        save_checkpoint(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'acc': val_acc,
                'best_f1': best_f1,
                'optimizer': optimizer.state_dict(),
            }, is_best_f1, epoch, args.save_model_dir)

    print('Best f1:')
    print(best_f1)
Exemple #19
0
def set_up_dataloaders(model_expected_input_size,
                       dataset_folder,
                       batch_size,
                       workers,
                       disable_dataset_integrity,
                       enable_deep_dataset_integrity,
                       inmem=False,
                       **kwargs):
    """
    Set up the dataloaders for the specified datasets.

    Parameters
    ----------
    model_expected_input_size : tuple
        Specify the height and width that the model expects.
    dataset_folder : string
        Path string that points to the three folder train/val/test. Example: ~/../../data/svhn
    batch_size : int
        Number of datapoints to process at once
    workers : int
        Number of workers to use for the dataloaders
    disable_dataset_integrity : bool
        Flag: skip the dataset integrity verification entirely.
    enable_deep_dataset_integrity : bool
        Flag: perform the deeper (and slower) integrity verification.
    inmem : boolean
        Flag: if False, the dataset is loaded in an online fashion, i.e. only file names are stored and images are
        loaded on demand. This is slower than storing everything in memory.

    Returns
    -------
    train_loader : torch.utils.data.DataLoader
    val_loader : torch.utils.data.DataLoader
    test_loader : torch.utils.data.DataLoader
        Dataloaders for train, val and test.
    int
        Number of classes for the model.
    """

    # Recover dataset name
    dataset = os.path.basename(os.path.normpath(dataset_folder))
    logging.info('Loading {} from:{}'.format(dataset, dataset_folder))

    ###############################################################################################
    # Load the dataset splits as images
    try:
        logging.debug("Try to load dataset as images")
        train_ds, val_ds, test_ds = image_folder_dataset.load_dataset(
            dataset_folder, inmem, workers)

        # Loads the analytics csv and extract mean and std
        mean, std = _load_mean_std_from_file(dataset_folder, inmem, workers)

        # Set up dataset transforms
        logging.debug('Setting up dataset transforms - image classification')
        # TODO: should crop here rather than resize.
        logging.debug(
            '******************************************************************** regular image '
            'classification')
        transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomCrop((768, 768)),
            transforms.Resize(model_expected_input_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])

        transform_test = transforms.Compose([
            transforms.Resize(model_expected_input_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])

        train_ds.transform = transform
        val_ds.transform = transform_test
        test_ds.transform = transform_test

        train_loader, val_loader, test_loader = _dataloaders_from_datasets(
            batch_size, train_ds, val_ds, test_ds, workers)
        logging.info("Dataset loaded as images")
        _verify_dataset_integrity(dataset_folder, disable_dataset_integrity,
                                  enable_deep_dataset_integrity)
        return train_loader, val_loader, test_loader, len(train_ds.classes)

    except RuntimeError:
        logging.debug("No images found in dataset folder provided")

    ###############################################################################################
    # Load the dataset splits as bidimensional
    try:
        logging.debug("Try to load dataset as bidimensional")
        train_ds, val_ds, test_ds = bidimensional_dataset.load_dataset(
            dataset_folder)

        # Loads the analytics csv and extract mean and std
        # TODO: update bidimensional to work with new load_mean_std functions
        mean, std = _load_mean_std_from_file(dataset_folder, inmem, workers)

        # Bring mean and std into range [0:1] from original domain
        mean = np.divide((mean - train_ds.min_coords),
                         np.subtract(train_ds.max_coords, train_ds.min_coords))
        std = np.divide((std - train_ds.min_coords),
                        np.subtract(train_ds.max_coords, train_ds.min_coords))

        # Set up dataset transforms
        logging.debug(
            'Setting up dataset transforms - dataset as bidimensional')
        print(
            "********************************************************************"
        )
        transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize(mean=mean, std=std)])

        train_ds.transform = transform
        val_ds.transform = transform
        test_ds.transform = transform

        train_loader, val_loader, test_loader = _dataloaders_from_datasets(
            batch_size, train_ds, val_ds, test_ds, workers)
        logging.info("Dataset loaded as bidimensional data")
        _verify_dataset_integrity(dataset_folder, disable_dataset_integrity,
                                  enable_deep_dataset_integrity)
        return train_loader, val_loader, test_loader, len(train_ds.classes)

    except RuntimeError:
        logging.debug("No bidimensional found in dataset folder provided")

    ###############################################################################################
    # Verify that eventually a dataset has been correctly loaded
    logging.error(
        "No datasets have been loaded. Verify dataset folder location or dataset folder structure"
    )
    sys.exit(-1)
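
A minimal usage sketch for set_up_dataloaders above; the dataset path is hypothetical, and the folder is expected to contain train/val/test splits:

train_loader, val_loader, test_loader, num_classes = set_up_dataloaders(
    model_expected_input_size=(224, 224),
    dataset_folder='~/data/svhn',
    batch_size=64,
    workers=4,
    disable_dataset_integrity=True,
    enable_deep_dataset_integrity=False)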
# plot the first element in the dataset
#show_data(dataset[0])
# plot the second element in the dataset
show_data(dataset[1])

## Combine two transforms (crop and convert to tensor) and apply the compose to the MNIST dataset

cropTensor_data_transform = transforms.Compose(
    [transforms.CenterCrop(20),
     transforms.ToTensor()])
dataset = dsets.MNIST(root='./data',
                      train=False,
                      download=True,
                      transform=cropTensor_data_transform)
print("The shape of the first element in the first tuple: ",
      dataset[0][0].shape)

#show_data(dataset[0], shape=(20, 20))
#show_data(dataset[1], shape=(20, 20))
""" PRACTICE """
vertical_flip_transform = transforms.Compose(
    [transforms.RandomVerticalFlip(p=1),
     transforms.ToTensor()])
dataset = dsets.MNIST(root='./data',
                      train=False,
                      download=True,
                      transform=vertical_flip_transform)

show_data(dataset[1])
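
# A further practice variant (sketch): a horizontal flip applied with p=1 can
# be composed the same way:
# hflip = transforms.Compose([transforms.RandomHorizontalFlip(p=1),
#                             transforms.ToTensor()])
# show_data(dsets.MNIST(root='./data', train=False, download=True,
#                       transform=hflip)[1])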
Exemple #21
0
    def __init__(self,
                 args,
                 dataset_dir,
                 datamode="train",
                 image_height=512,
                 image_width=512,
                 n_classes=92,
                 data_augument=False,
                 debug=False):
        super(ImaterialistDataset, self).__init__()
        self.args = args
        self.dataset_dir = dataset_dir
        self.datamode = datamode
        self.image_height = image_height
        self.image_width = image_width
        self.n_classes = n_classes
        self.data_augument = data_augument
        self.debug = debug

        self.df_train = pd.read_csv(os.path.join(self.dataset_dir,
                                                 "train.csv"),
                                    index_col='ImageId')
        df_mask = self.df_train.groupby('ImageId')[[
            'EncodedPixels', 'ClassId'
        ]].agg(lambda x: list(x))
        df_size = self.df_train.groupby('ImageId')[['Height', 'Width']].mean()
        self.df_train = df_mask.join(df_size, on='ImageId')

        self.image_names = sorted([
            f
            for f in os.listdir(os.path.join(self.dataset_dir, self.datamode))
            if f.endswith(IMG_EXTENSIONS)
        ])

        # DataFrame of file name, image height and image width for the test data
        image_heights = []
        image_widths = []
        for image_name in self.image_names:
            image = Image.open(
                os.path.join(self.dataset_dir, self.datamode, image_name))
            image_heights.append(image.height)
            image_widths.append(image.width)

        self.df_test = pd.DataFrame(
            {
                'Height': image_heights,
                'Width': image_widths
            },
            index=self.image_names)
        self.df_test.index.names = ['ImageId']

        # transform
        mean = [0.5 for i in range(args.n_in_channels)]
        std = [0.5 for i in range(args.n_in_channels)]
        if (data_augument):
            self.transform = transforms.Compose([
                transforms.Resize((args.image_height, args.image_width),
                                  interpolation=Image.LANCZOS),
                #                    transforms.RandomResizedCrop( (args.image_height, args.image_width) ),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
                transforms.RandomAffine(degrees=(-10, 10),
                                        translate=(0.0, 0.0),
                                        scale=(1.00, 1.00),
                                        resample=Image.BICUBIC),
                transforms.CenterCrop(size=(args.image_height,
                                            args.image_width)),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])

            self.transform_mask = transforms.Compose([
                transforms.Resize((args.image_height, args.image_width),
                                  interpolation=Image.NEAREST),
                #                    transforms.RandomResizedCrop( (args.image_height, args.image_width) ),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
                transforms.RandomAffine(degrees=(-10, 10),
                                        translate=(0.0, 0.0),
                                        scale=(1.00, 1.00),
                                        resample=Image.BICUBIC),
                transforms.CenterCrop(size=(args.image_height,
                                            args.image_width)),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ])

            self.transform_mask_woToTensor = transforms.Compose([
                transforms.Resize((args.image_height, args.image_width),
                                  interpolation=Image.NEAREST),
                #                    transforms.RandomResizedCrop( (args.image_height, args.image_width) ),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
                transforms.RandomAffine(degrees=(-10, 10),
                                        translate=(0.0, 0.0),
                                        scale=(1.00, 1.00),
                                        resample=Image.BICUBIC),
                transforms.CenterCrop(size=(args.image_height,
                                            args.image_width)),
            ])
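
            # NOTE: these transforms draw their random parameters
            # independently for the image and for the masks, so a flipped
            # image can be paired with an un-flipped mask; pixel-aligned
            # augmentation would need shared parameters (e.g. torchvision's
            # functional API).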

        else:
            self.transform = transforms.Compose([
                transforms.Resize((args.image_height, args.image_width),
                                  interpolation=Image.LANCZOS),
                transforms.CenterCrop(size=(args.image_height,
                                            args.image_width)),
                transforms.ToTensor(),
                transforms.Normalize(mean, std),
            ])

            self.transform_mask = transforms.Compose([
                transforms.Resize((args.image_height, args.image_width),
                                  interpolation=Image.NEAREST),
                transforms.CenterCrop(size=(args.image_height,
                                            args.image_width)),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ])

            self.transform_mask_woToTensor = transforms.Compose([
                transforms.Resize((args.image_height, args.image_width),
                                  interpolation=Image.NEAREST),
                transforms.CenterCrop(size=(args.image_height,
                                            args.image_width)),
            ])

        if (self.debug):
            print("self.dataset_dir :", self.dataset_dir)
            print("len(self.image_names) :", len(self.image_names))
            print("self.df_train.head() \n:", self.df_train.head())
            print("self.df_test.head() \n:", self.df_test.head())

        return

    def __init__(self,
                 data_root,
                 index_root,
                 padding,
                 augment=False,
                 cls_num=2):
        self.padding = padding
        self.data = []
        with open(data_root, 'r') as f:
            self.data_root = f.readlines()
        self.text_book = [item.split('\t') for item in self.data_root]
        self.augment = augment
        self.cls_num = cls_num
        self.train_augmentation = transforms.Compose([
            transforms.Resize(288),  # just for the abnormal detector
            transforms.RandomCrop(224),
            #transforms.RandomRotation(45),
            transforms.RandomHorizontalFlip(0.2),
            transforms.RandomVerticalFlip(0.2),
            transforms.RandomAffine(45, translate=(0, 0.2), fillcolor=0),
            transforms.ToTensor(),
            transforms.RandomErasing(p=0.1),
            transforms.Normalize([0, 0, 0], [1, 1, 1])
        ])
        self.test_augmentation = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0, 0, 0], [1, 1, 1])
        ])
        with open(index_root, 'r') as f:
            #list=os.listdir(data_root)
            self.data = f.readlines()

        #for item in list:
        #   self.data.append(item)

        #print('index file:', index_root)
        print('num of data:', len(self.data))
        pa_id = list(set([st.split('/')[-1].split('_')[0]
                          for st in self.data]))
        #pa_id_0=[id[0]=='c' or id[1]=='.' for id in pa_id]
        #print(np.sum(pa_id_0),len(pa_id)-np.sum(pa_id_0))
        if self.cls_num == 2:
            cls = [
                1 - int(
                    data_path.split('/')[-1][0] == 'c'
                    or data_path.split('/')[-1][1] == '.'
                    or data_path.split('/')[-2] == 'masked_ild')
                for data_path in self.data
            ]
        elif self.cls_num == 4:
            cls = []
            for data_path in self.data:
                if data_path.split('/')[-1][0] == 'c':
                    cls.append(0)
                elif 'CAP' in data_path:
                    cls.append(1)
                elif 'ILD' in data_path:
                    cls.append(2)
                else:
                    cls.append(3)  # covid
        elif self.cls_num == 5:
            cls = []
            for data_path in self.data:
                if data_path.split('/')[-1][0] == 'c':
                    cls.append(0)
                elif 'lidc' in data_path:
                    cls.append(1)
                elif 'ild' in data_path:
                    cls.append(2)
                elif 'CAP' in data_path:
                    cls.append(3)
                else:
                    cls.append(4)  # covid
        nums = [np.sum(np.array(cls) == i) for i in range(max(cls) + 1)]
        print(nums)
        self.nums = nums
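
        # A common follow-up (a sketch, not from the source) is to turn these
        # counts into inverse-frequency loss weights:
        # weights = torch.tensor([sum(nums) / n for n in nums], dtype=torch.float)
        # criterion = nn.CrossEntropyLoss(weight=weights)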
Exemple #23
0
    def __init__(self, mdlParams, indSet, index=None):
        """
        Args:
            mdlParams (dict): Configuration for loading
            indSet (string): Indicates train, val, test
        """
        # Mdlparams
        self.mdlParams = mdlParams
        # Current indSet = 'trainInd'/'valInd'/'testInd'
        self.indSet = indSet
        if self.indSet == 'trainInd':
            self.root = mdlParams['dataDir'] + '/train'
            self.index = index
        elif self.indSet == 'valInd':
            self.root = mdlParams['dataDir'] + '/train'
            self.index = index
        else:
            self.root = mdlParams['dataDir'] + '/test'
        self.names_list = []
        # Number of classes
        self.numClasses = mdlParams['numClasses']
        # Size to crop
        self.crop_size = mdlParams['crop_size']
        # Model input size
        self.input_size = (np.int32(mdlParams['input_size'][0]),
                           np.int32(mdlParams['input_size'][1]))
        # Potential class balancing option
        self.balancing = mdlParams['balance_classes']
        # Potential setMean and setStd
        self.setMean = mdlParams['setMean'].astype(np.float32)
        self.setStd = mdlParams['setStd'].astype(np.float32)
        # Only downsample
        self.only_downsample = mdlParams.get('only_downsmaple', True)  # note: the config key itself is misspelled
        # Meta csv
        self.meta_path = mdlParams['meta_path']
        self.meta_df = pd.read_pickle(self.meta_path)

        self.subsets_size = [0, 0, 0, 0, 0, 0, 0]
        self.image_path = []
        self.image_labels = []
        self.image_meta = []
        for img in os.listdir(self.root):
            img_id = img.split('.')[0]
            label = list(self.meta_df.loc[self.meta_df['image_id'] == img_id,
                                          'dx'])[0]
            self.image_labels.append(CLASS_LABELS.index(label))
            self.subsets_size[CLASS_LABELS.index(label)] += 1
            self.image_meta.append(
                self.meta_df.loc[self.meta_df['image_id'] == img_id,
                                 self.mdlParams['meta_features']].to_numpy())
            self.image_path.append(os.path.join(self.root, img))
        self.image_path = np.asarray(self.image_path)
        self.image_labels = np.asarray(self.image_labels)
        self.image_meta = np.asarray(self.image_meta)

        if indSet == 'trainInd':
            if index is not None:
                self.image_path = self.image_path[index]
                self.image_labels = self.image_labels[index]
                self.image_meta = self.image_meta[index]
            all_transforms = []
            if self.only_downsample:
                all_transforms.append(transforms.Resize(self.input_size))
            else:
                all_transforms.append(transforms.Resize(self.crop_size))
                all_transforms.append(transforms.RandomCrop(self.input_size))
            if mdlParams.get('flip_lr_ud', False):
                all_transforms.append(transforms.RandomHorizontalFlip())
                all_transforms.append(transforms.RandomVerticalFlip())
            # Full rot
            if mdlParams.get('full_rot', 0) > 0:
                if mdlParams.get('scale', False):
                    all_transforms.append(
                        transforms.RandomChoice([
                            transforms.RandomAffine(mdlParams['full_rot'],
                                                    scale=mdlParams['scale'],
                                                    shear=mdlParams.get(
                                                        'shear', 0),
                                                    resample=Image.NEAREST),
                            transforms.RandomAffine(mdlParams['full_rot'],
                                                    scale=mdlParams['scale'],
                                                    shear=mdlParams.get(
                                                        'shear', 0),
                                                    resample=Image.BICUBIC),
                            transforms.RandomAffine(mdlParams['full_rot'],
                                                    scale=mdlParams['scale'],
                                                    shear=mdlParams.get(
                                                        'shear', 0),
                                                    resample=Image.BILINEAR)
                        ]))
                else:
                    all_transforms.append(
                        transforms.RandomChoice([
                            transforms.RandomRotation(mdlParams['full_rot'],
                                                      resample=Image.NEAREST),
                            transforms.RandomRotation(mdlParams['full_rot'],
                                                      resample=Image.BICUBIC),
                            transforms.RandomRotation(mdlParams['full_rot'],
                                                      resample=Image.BILINEAR)
                        ]))
            # Color distortion
            if mdlParams.get('full_color_distort') is not None:
                all_transforms.append(
                    transforms.ColorJitter(
                        brightness=mdlParams.get('brightness_aug', 32. / 255.),
                        saturation=mdlParams.get('saturation_aug', 0.5),
                        contrast=mdlParams.get('contrast_aug', 0.5),
                        hue=mdlParams.get('hue_aug', 0.2)))
            else:
                all_transforms.append(
                    transforms.ColorJitter(brightness=32. / 255.,
                                           saturation=0.5))
            # RandAugment
            if self.mdlParams.get('randaugment', False):
                all_transforms.append(
                    RandAugment(self.mdlParams.get('N'),
                                self.mdlParams.get('M')))
            # Cutout
            if self.mdlParams.get('cutout', 0) > 0:
                all_transforms.append(
                    Cutout_v0(n_holes=1, length=self.mdlParams['cutout']))
            # Normalize
            all_transforms.append(transforms.ToTensor())
            all_transforms.append(
                transforms.Normalize(np.float32(self.mdlParams['setMean']),
                                     np.float32(self.mdlParams['setStd'])))
            # All transforms
            self.train_composed = transforms.Compose(all_transforms)
            self.valid_composed = transforms.Compose([
                transforms.Resize(self.input_size),
                transforms.ToTensor(),
                transforms.Normalize(
                    torch.from_numpy(self.setMean).float(),
                    torch.from_numpy(self.setStd).float())
            ])
            self.image_path = duplist(self.image_path)
            self.image_labels = duplist(self.image_labels)
            self.image_meta = duplist(self.image_meta)
        else:
            if index is not None:
                self.image_path = self.image_path[index]
                self.image_labels = self.image_labels[index]
                self.image_meta = self.image_meta[index]
            self.valid_composed = transforms.Compose([
                transforms.Resize(self.input_size),
                transforms.ToTensor(),
                transforms.Normalize(
                    torch.from_numpy(self.setMean).float(),
                    torch.from_numpy(self.setStd).float())
            ])

import torch
from torchvision import transforms
from Caltech101Data import Caltech101Data
from classifier import Classifier
from Trainer import Trainer
from torch.utils.data import DataLoader
from torch.optim import Adam, lr_scheduler
from torch.utils.data import random_split

tr = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.Resize((224,224))])
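# NOTE (assumption): this Compose has no ToTensor(), so Caltech101Data
# presumably converts images to tensors itself.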
model = Classifier(102)  # or load a saved model: torch.load('model_complete.mdl')
cd = Caltech101Data('image_label', tr)
optimizer = Adam(model.parameters())
scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
loss_function = torch.nn.CrossEntropyLoss()
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")

'''
# if you want to load a checkpoint of a model
checkpoint = torch.load('model_checkpoint.mdl')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
'''

dataset_size = {'train': 7000, 'val': 2144}  # 7000 + 2144 = 9144 images in total
data = {}
data['train'], data['val'] = random_split(cd, [dataset_size['train'], dataset_size['val']])
loader = {phase: DataLoader(data[phase], batch_size=20, shuffle=(phase == 'train'))
          for phase in ['train', 'val']}
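The Trainer class above is project-specific and not shown here; as a rough sketch (not Trainer's actual logic), a generic train/validation loop over these loaders could look like this:

model = model.to(device)
for epoch in range(10):  # epoch count is arbitrary in this sketch
    for phase in ['train', 'val']:
        model.train() if phase == 'train' else model.eval()
        running_loss = 0.0
        for inputs, labels in loader[phase]:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            # track gradients only during training
            with torch.set_grad_enabled(phase == 'train'):
                outputs = model(inputs)
                loss = loss_function(outputs, labels)
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
            running_loss += loss.item() * inputs.size(0)
        if phase == 'train':
            scheduler.step()
        print(f"epoch {epoch} {phase} loss: {running_loss / dataset_size[phase]:.4f}")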
Exemple #25
0
print ("outfolder " + opt.outf)

cudnn.benchmark = True


import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend for when no display variable is set
import matplotlib.pyplot as plt

wh = opt.imageSize
rbuf = []
if opt.resizeAugment:
    rbuf += [transforms.RandomRotation(180, resample=False, expand=False)]
    rbuf += [transforms.CenterCrop(wh)]
tbuf = [
    transforms.Resize(size=wh, interpolation=2),  # 2 == PIL.Image.BILINEAR
    transforms.RandomVerticalFlip(),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    # 4-channel normalization (e.g. RGBA input), mapping [0, 1] to [-1, 1]
    transforms.Normalize((0.5, 0.5, 0.5, 0.5), (0.5, 0.5, 0.5, 0.5)),
]
transform = transforms.Compose(rbuf + tbuf)
dataset = NailDataset(transform=transform)

print ("inited live dataset")
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=1,drop_last=True)
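A quick sanity check of the augmentation pipeline (my own sketch: the output filename and the assumption that the dataset yields 4-channel tensors normalized to [-1, 1] are not from the original script):

batch = next(iter(dataloader))
imgs = batch[0] if isinstance(batch, (list, tuple)) else batch
grid = imgs[:4].mul(0.5).add(0.5).clamp(0, 1)  # undo Normalize((0.5, ...), (0.5, ...))
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax, img in zip(axes, grid):
    ax.imshow(np.transpose(img.numpy()[:3], (1, 2, 0)))  # drop the alpha channel for display
    ax.axis('off')
fig.savefig('sample_batch.png')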

ngf = int(opt.ndf)  # generator width tied to opt.ndf here
ndf = int(opt.ndf)
nz = opt.nz
nGL = opt.nGL
nDep = opt.nDep

## TODO: conditional GAN flag (opt.CGAN?)
## would need an extra dim in D and G
def train(optimizer, **kwargs):
    # load training data
    print('Loading and splitting data ...')
    if os.path.isfile(os.path.join(kwargs['data_path'], 'X_train.npy')):
        X_train = np.load(os.path.join(kwargs['data_path'], 'X_train.npy'))
        y_train = np.load(os.path.join(kwargs['data_path'], 'y_train.npy'))
        X_val = np.load(os.path.join(kwargs['data_path'], 'X_val.npy'))
        y_val = np.load(os.path.join(kwargs['data_path'], 'y_val.npy'))
    else:
        X = np.load(os.path.join(kwargs['data_path'], 'X_patches.npy'))
        y = np.load(os.path.join(kwargs['data_path'], 'y_patches.npy'))

        # split into train, val in stratified fashion
        sss = StratifiedShuffleSplit(n_splits=1,
                                     test_size=kwargs['n_val'],
                                     random_state=kwargs['random_seed'])
        train_ind, val_ind = list(sss.split(np.zeros_like(y), y))[0]
        X_train = X[train_ind]
        y_train = y[train_ind]
        X_val = X[val_ind]
        y_val = y[val_ind]
        np.save(os.path.join(kwargs['data_path'], 'X_train.npy'), X_train)
        np.save(os.path.join(kwargs['data_path'], 'y_train.npy'), y_train)
        np.save(os.path.join(kwargs['data_path'], 'X_val.npy'), X_val)
        np.save(os.path.join(kwargs['data_path'], 'y_val.npy'), y_val)

    rng = RNG()
    # noinspection PyTypeChecker
    train_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        # rotate 90 degrees with probability 0.5
        transforms.Lambda(
            lambda img: [img, img.transpose(Image.ROTATE_90)][int(rng.rand() <
                                                                  0.5)]),
        # random gamma jitter
        transforms.Lambda(
            lambda img: adjust_gamma(img, gamma=rng.uniform(0.8, 1.25))),
        # random JPEG re-compression in the 70-100 quality range
        transforms.Lambda(
            lambda img: jpg_compress(img, quality=rng.randint(70, 100 + 1))),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    val_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    train_dataset = make_numpy_dataset(X_train, y_train, train_transform)
    val_dataset = make_numpy_dataset(X_val, y_val, val_transform)

    # define loaders
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=kwargs['batch_size'],
                              shuffle=False,
                              num_workers=4,
                              sampler=StratifiedSampler(
                                  class_vector=y_train,
                                  batch_size=kwargs['batch_size']))
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=kwargs['batch_size'],
                            shuffle=False,
                            num_workers=4)

    print('Starting training ...')
    optimizer.train(train_loader, val_loader)
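The stratified split above keeps the class proportions identical in both subsets; a tiny self-contained illustration of that behavior on toy labels:

import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

y = np.array([0] * 80 + [1] * 20)  # imbalanced toy labels
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.25, random_state=0)
train_ind, val_ind = next(sss.split(np.zeros_like(y), y))
# both splits preserve the 80/20 class ratio
print(np.bincount(y[train_ind]))  # -> [60 15]
print(np.bincount(y[val_ind]))    # -> [20  5]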
Exemple #27
0
        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)  # reshape: the slice may be non-contiguous
            res.append(correct_k.mul_(1.0 / batch_size))
    return res


if __name__ == '__main__':
    root = '/media/space/imagenet/imagenet_object_localization_patched2019/ILSVRC/Data/CLS-LOC/'
    batch_size = 384
    train_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    trainset = torchvision.datasets.ImageFolder(root + 'train',
                                                transform=train_transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True)

def predict(optimizer, **kwargs):
    # load data
    X_test = np.load(os.path.join(kwargs['data_path'], 'X_test.npy'))
    y_test = np.zeros((len(X_test), ), dtype=np.int64)

    test_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    # TTA
    rng = RNG(seed=1337)
    base_transform = transforms.Compose([
        transforms.Lambda(lambda x: Image.fromarray(x)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.Lambda(
            lambda img: [img, img.transpose(Image.ROTATE_90)][int(rng.rand() <
                                                                  0.5)]),
        transforms.Lambda(
            lambda img: adjust_gamma(img, gamma=rng.uniform(0.8, 1.25))),
        transforms.Lambda(
            lambda img: jpg_compress(img, quality=rng.randint(70, 100 + 1))),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    tta_n = 10

    def tta_f(img, n=tta_n - 1):
        # one deterministic view plus n randomly augmented views per test image
        out = [test_transform(img)]
        for _ in range(n):
            out.append(base_transform(img))
        return torch.stack(out, 0)

    tta_transform = transforms.Compose([
        transforms.Lambda(lambda img: tta_f(img)),
    ])

    test_loader = DataLoader(dataset=make_numpy_dataset(
        X_test, y_test, tta_transform),
                             batch_size=kwargs['batch_size'],
                             shuffle=False,
                             num_workers=4)
    test_dataset = KaggleCameraDataset(kwargs['data_path'],
                                       train=False,
                                       lazy=not kwargs['not_lazy'])

    # compute predictions
    logits, _ = optimizer.test(test_loader)

    # compute and save raw probs
    logits = np.vstack(logits)
    proba = softmax(logits)

    # group the K probability rows belonging to each original image and average them
    K = 16 * tta_n
    proba = proba.reshape(len(proba) // K, K, -1).mean(axis=1)  # // for integer division

    fnames = [os.path.split(fname)[-1] for fname in test_dataset.X]
    df = pd.DataFrame(proba)
    df['fname'] = fnames
    df = df[['fname'] + list(range(10))]
    dirpath = os.path.split(kwargs['predict_from'])[0]
    df.to_csv(os.path.join(dirpath, 'proba.csv'), index=False)

    # compute predictions and save in submission format
    index_pred = unhot(one_hot_decision_function(proba))
    data = {
        'fname': fnames,
        'camera':
        [KaggleCameraDataset.target_labels()[int(c)] for c in index_pred]
    }
    df2 = pd.DataFrame(data, columns=['fname', 'camera'])
    df2.to_csv(os.path.join(dirpath, 'submission.csv'), index=False)
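To make the grouping step in predict() concrete, here is a toy version of the reshape-and-mean averaging (the numbers are illustrative only):

import numpy as np

tta_n = 2    # 2 views per patch in this toy example
K = 2 * tta_n  # 2 patches per image instead of 16
proba = np.array([[0.9, 0.1], [0.7, 0.3], [0.8, 0.2], [0.6, 0.4]])  # 4 rows = 1 image
averaged = proba.reshape(len(proba) // K, K, -1).mean(axis=1)
print(averaged)  # -> [[0.75 0.25]]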
Exemple #29
0
else:
    print("CUDA is available. Training on GPU")


# %%
data_dir = '/Users/swastik/ophthalmology/Racial_Bias/Dataset2'
train_dir = os.path.join(data_dir,'training/')
test_dir = os.path.join(data_dir,'test/')

classes = ['DR','Non_DR']


# %%
train_transform = transforms.Compose([transforms.Resize((512,512)),
                                transforms.RandomHorizontalFlip(p=0.5),
                                transforms.RandomVerticalFlip(p=0.5),
                                # a single random rotation up to ±90°; chaining several
                                # RandomRotation calls would resample the image repeatedly
                                transforms.RandomRotation(90),
                                transforms.ToTensor(),
                                # ImageNet normalization statistics
                                transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
                               ])
test_transform = transforms.Compose([transforms.Resize((512,512)),
                                transforms.ToTensor(),
                                transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
                                    ])
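
These transforms can be wired to the directories defined above via torchvision's ImageFolder; a minimal sketch (the batch size and the datasets import are my assumptions):

from torchvision import datasets
from torch.utils.data import DataLoader

train_data = datasets.ImageFolder(train_dir, transform=train_transform)
test_data = datasets.ImageFolder(test_dir, transform=test_transform)
train_loader = DataLoader(train_data, batch_size=16, shuffle=True)
test_loader = DataLoader(test_data, batch_size=16, shuffle=False)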


# %%
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
def main():
    global args, best_acc
    args = parser.parse_args()
    args.cuda = args.cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.cuda.set_device(0)
    global plotter
    plotter = VisdomLinePlotter(env_name=args.name)

    kwargs = {'num_workers': 20, 'pin_memory': True} if args.cuda else {}

    print('==> Preparing data...')

    base_path = "./data/handpose_data_cpm/"
    train_loader = torch.utils.data.DataLoader(
        SimpleImageLoader(
            base_path,
            train=True,
            transform=transforms.Compose([
                transforms.Resize(380),
                transforms.CenterCrop(368),
                transforms.ColorJitter(0.1, 0.05, 0.05, 0.05),
                transforms.RandomVerticalFlip(p=0.1),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.RandomRotation(10),
                transforms.ToTensor(),
                Lighting(0.1, _imagenet_pca['eigval'],
                         _imagenet_pca['eigvec']),
                # transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
            ])),
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=False,
        **kwargs)

    test_loader = torch.utils.data.DataLoader(SimpleImageLoader(
        base_path,
        False,
        transform=transforms.Compose([
            transforms.Resize(380),
            transforms.CenterCrop(368),
            transforms.ToTensor(),
        ])),
                                              batch_size=args.batch_size,
                                              drop_last=False,
                                              **kwargs)

    jnet = CPM4(22)
    if args.cuda:
        jnet.cuda()
        if torch.cuda.device_count() > 1 and args.parallel:
            jnet = nn.DataParallel(jnet, device_ids=[0, 1])
    # This flag allows you to enable the inbuilt cudnn auto-tuner to
    # find the best algorithm to use for your hardware.

    cudnn.benchmark = True
    criterion = torch.nn.MSELoss()
    optimizer = optim.SGD(jnet.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("==> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_prec1']  # restore the running best so later comparisons use it
            jnet.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("==> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("==> no checkpoint found at '{}'".format(args.resume))

    for epoch in range(1, args.epochs + 1):
        # train for one epoch
        adjust_learning_rate(jnet, optimizer, epoch)
        train(train_loader, jnet, criterion, optimizer, epoch)
        # evaluate on validation set
        acc = test(test_loader, jnet, criterion, epoch)

        # remember best acc and save checkpoint
        is_best = acc > best_acc
        best_acc = max(acc, best_acc)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': jnet.state_dict(),
                'best_prec1': best_acc,
                'optimizer': optimizer.state_dict(),
            }, is_best)
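
save_checkpoint is called above but not defined in this excerpt; a common implementation of this pattern looks like the following sketch (the file names are assumptions):

import shutil

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # always persist the latest state; keep a separate copy of the best model so far
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')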