Example #1
0
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle=True,
                 validation_split=0.0,
                 num_batches=0,
                 training=True,
                 num_workers=4,
                 pin_memory=True):
        """Build CIFAR-100 train/val datasets (with synthetic label noise)
        and hand them to the base data loader.

        Args:
            data_dir: root directory for the dataset files.
            batch_size: samples per batch.
            shuffle: shuffle the training set each epoch.
            validation_split: fraction of training data held out for validation.
            num_batches: unused here; kept for interface compatibility.
            training: build the training split when True.
            num_workers: worker processes for data loading.
            pin_memory: pin host memory for faster GPU transfer.
        """
        # Fetch the shared config exactly once (was previously fetched twice).
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']

        transform_train = transforms.Compose([
            #transforms.ColorJitter(brightness= 0.4, contrast= 0.4, saturation= 0.4, hue= 0.1),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # CIFAR-100 per-channel mean / std.
            transforms.Normalize((0.5071, 0.4867, 0.4408),
                                 (0.2675, 0.2565, 0.2761)),
        ])
        transform_val = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4867, 0.4408),
                                 (0.2675, 0.2565, 0.2761)),
        ])
        self.data_dir = data_dir

        # Cache file recording which labels were corrupted, keyed by the
        # configured noise percentage and asymmetry mode.
        noise_file = '%sCIFAR100_%.1f_Asym_%s.json' % (
            config['data_loader']['args']['data_dir'], cfg_trainer['percent'],
            cfg_trainer['asym'])

        self.train_dataset, self.val_dataset = get_cifar100(
            config['data_loader']['args']['data_dir'],
            cfg_trainer,
            train=training,
            transform_train=transform_train,
            transform_val=transform_val,
            noise_file=noise_file)

        super().__init__(self.train_dataset,
                         batch_size,
                         shuffle,
                         validation_split,
                         num_workers,
                         pin_memory,
                         val_dataset=self.val_dataset)
Example #2
0
 def __init__(self, num_examp, num_classes=10, beta=0.3):
     """Set up the ELR loss state: one running soft-target row per example."""
     super(elr_loss, self).__init__()
     self.num_classes = num_classes
     self.config = ConfigParser.get_instance()
     self.USE_CUDA = torch.cuda.is_available()
     # Running per-example target estimate, shape (num_examp, num_classes),
     # kept on GPU when one is available.
     targets = torch.zeros(num_examp, self.num_classes)
     self.target = targets.cuda() if self.USE_CUDA else targets
     self.beta = beta
Example #3
0
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle=True,
                 validation_split=0.0,
                 num_workers=4):
        """Create CIFAR-10 train/val datasets and delegate to the base loader."""
        # CIFAR-10 channel statistics, shared by both pipelines.
        normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                         (0.2023, 0.1994, 0.2010))
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=8),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        transform_val = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])

        self.data_dir = data_dir
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']
        self.train_dataset, self.val_dataset = get_cifar10(
            config['data_loader']['args']['data_dir'],
            cfg_trainer,
            transform_train=transform_train,
            transform_val=transform_val)

        super().__init__(self.train_dataset,
                         batch_size,
                         shuffle,
                         validation_split,
                         num_workers,
                         val_dataset=self.val_dataset)
Example #4
0
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle=True,
                 validation_split=0.0,
                 num_batches=0,
                 training=True,
                 num_workers=4,
                 pin_memory=True,
                 num_class=50):
        """Create WebVision train/val datasets and delegate to the base loader."""
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.num_batches = num_batches
        self.training = training

        # ImageNet channel statistics, shared by all three pipelines.
        normalize = transforms.Normalize((0.485, 0.456, 0.406),
                                         (0.229, 0.224, 0.225))
        self.transform_train = transforms.Compose([
            transforms.RandomCrop(227),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        self.transform_val = transforms.Compose([
            transforms.CenterCrop(227),
            transforms.ToTensor(),
            normalize,
        ])
        # Pipeline for ImageNet-style evaluation images.
        self.transform_imagenet = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(227),
            transforms.ToTensor(),
            normalize,
        ])

        self.data_dir = data_dir
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']
        self.train_dataset, self.val_dataset = get_webvision(
            config['data_loader']['args']['data_dir'],
            cfg_trainer,
            num_samples=self.num_batches * self.batch_size,
            train=training,
            transform_train=self.transform_train,
            transform_val=self.transform_val,
            num_class=num_class)

        super().__init__(self.train_dataset,
                         batch_size,
                         shuffle,
                         validation_split,
                         num_workers,
                         pin_memory,
                         val_dataset=self.val_dataset)
Example #5
0
 def __init__(self, num_examp, num_classes=10, alpha=0.5, lamb=0.7):
     """Initialise the loss state.

     Args:
         num_examp: number of training examples (rows of the history buffer).
         num_classes: number of target classes.
         alpha: sharpening exponent applied to predictions.
         lamb: EMA mixing weight for the prediction history.
     """
     super(our_loss, self).__init__()
     self.num_classes = num_classes
     self.config = ConfigParser.get_instance()
     self.USE_CUDA = torch.cuda.is_available()
     # Per-example running prediction history, initialised to uniform.
     self.pred_hist = torch.full((num_examp, self.num_classes),
                                 1.0 / self.num_classes)
     # Uniform prior distribution over classes.
     self.p = torch.ones(self.num_classes) / self.num_classes
     if self.USE_CUDA:
         # Bug fix: pred_hist was previously moved to CUDA unconditionally
         # (crashing on CPU-only machines) while p was guarded; guard both
         # tensors consistently.
         self.pred_hist = self.pred_hist.cuda()
         self.p = self.p.cuda()
     self.alpha = alpha
     self.lamb = lamb
Example #6
0
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle=True,
                 validation_split=0.0,
                 num_batches=0,
                 training=True,
                 num_workers=4,
                 pin_memory=True):
        """Build CIFAR-10 train/val datasets with standard augmentation and
        hand them to the base data loader.

        Args:
            data_dir: root directory for the dataset files.
            batch_size: samples per batch.
            shuffle: shuffle the training set each epoch.
            validation_split: fraction of training data held out for validation.
            num_batches: unused here; kept for interface compatibility.
            training: build the training split when True.
            num_workers: worker processes for data loading.
            pin_memory: pin host memory for faster GPU transfer.
        """
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']

        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # CIFAR-10 per-channel mean / std.
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
        transform_val = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

        self.data_dir = data_dir

        self.train_dataset, self.val_dataset = get_cifar10(
            config['data_loader']['args']['data_dir'],
            cfg_trainer,
            train=training,
            transform_train=transform_train,
            transform_val=transform_val)

        super().__init__(self.train_dataset,
                         batch_size,
                         shuffle,
                         validation_split,
                         num_workers,
                         pin_memory,
                         val_dataset=self.val_dataset)
Example #7
0
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle=True,
                 validation_split=0.0,
                 num_batches=0,
                 training=True,
                 num_workers=4,
                 pin_memory=True):
        """Create Clothing1M train/val datasets and delegate to the base loader."""
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.num_batches = num_batches
        self.training = training

        # Clothing1M channel statistics, shared by both pipelines.
        normalize = transforms.Normalize((0.6959, 0.6537, 0.6371),
                                         (0.3113, 0.3192, 0.3214))
        self.transform_train = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        self.transform_val = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])

        self.data_dir = data_dir
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']
        # Number of samples drawn per epoch is capped at num_batches batches.
        self.train_dataset, self.val_dataset = get_clothing(
            config['data_loader']['args']['data_dir'],
            cfg_trainer,
            num_samples=self.num_batches * self.batch_size,
            train=training,
            transform_train=self.transform_train,
            transform_val=self.transform_val)

        super().__init__(self.train_dataset,
                         batch_size,
                         shuffle,
                         validation_split,
                         num_workers,
                         pin_memory,
                         val_dataset=self.val_dataset)
Example #8
0
def mycriterion(outputs, soft_targets):
    """Classification loss with prior-regularisation and entropy terms.

    Combines cross-entropy against (soft) targets with a term pushing the
    batch-average prediction towards a uniform 10-class prior, plus a
    negative-entropy term; the weights come from the trainer config.

    Args:
        outputs: raw logits, shape (batch, 10).
        soft_targets: per-class target probabilities, same shape as outputs.

    Returns:
        (probs, loss): softmax probabilities and the combined scalar loss.
    """
    # We introduce a prior probability distribution p, which is a distribution
    # of classes among all training data (uniform over the 10 classes here).
    USE_CUDA = torch.cuda.is_available()
    p = torch.ones(10).cuda() / 10 if USE_CUDA else torch.ones(10) / 10

    # Compute log-softmax once and reuse it (was computed twice before).
    log_probs = F.log_softmax(outputs, dim=1)
    probs = F.softmax(outputs, dim=1)
    avg_probs = torch.mean(probs, dim=0)

    L_c = -torch.mean(torch.sum(log_probs * soft_targets, dim=1))
    L_p = -torch.sum(torch.log(avg_probs) * p)
    L_e = -torch.mean(torch.sum(log_probs * probs, dim=1))

    config = ConfigParser.get_instance()
    cfg_trainer = config['trainer']
    loss = L_c + cfg_trainer['alpha'] * L_p + cfg_trainer['beta'] * L_e
    return probs, loss
Example #9
0
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle=True,
                 validation_split=0.0,
                 num_batches=0,
                 training=True,
                 num_workers=4,
                 pin_memory=True):
        """Create SelfDataset train/val splits and delegate to the base loader."""
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']
        loader_args = config['data_loader']['args']

        # One augmentation pipeline per split; only the train flag differs.
        image_size = (loader_args['image_size'], loader_args['image_size'])
        transforms_train = get_augmentation(
            input_size=image_size,
            train_flag=True,
            normalize_flag=loader_args['normalize_flag'])
        transforms_val = get_augmentation(
            input_size=image_size,
            train_flag=False,
            normalize_flag=loader_args['normalize_flag'])

        self.train_dataset = SelfDataset(
            loader_args['data_dir'] + '/train',
            loader_args['label_name'],
            train=training,
            change_ratio=cfg_trainer['percent'],
            transforms=transforms_train)
        self.val_dataset = SelfDataset(
            loader_args['data_dir'] + '/val',
            loader_args['label_name'],
            train=False,
            change_ratio=cfg_trainer['percent'],
            transforms=transforms_val)

        super().__init__(self.train_dataset,
                         batch_size,
                         shuffle,
                         validation_split,
                         num_workers,
                         pin_memory,
                         val_dataset=self.val_dataset)
Example #10
0
    def forward(self, output, target, epoch, index):
        """Compute the regularised loss for a batch.

        Args:
            output: raw logits, shape (batch, num_classes).
            target: integer class labels, shape (batch,).
            epoch: current epoch (unused here; kept for interface compatibility).
            index: dataset indices of the batch examples, used to address the
                per-example prediction history.

        Returns:
            (loss, hist, entropy): the combined scalar loss, the updated
            prediction-history rows moved to CPU, and the mean prediction
            entropy of the batch.
        """
        y_true = make_one_hot(target, C=self.num_classes)
        y_pred = F.softmax(output, dim=1)
        config = ConfigParser.get_instance()

        # Clamp predictions away from 0 so the later log() stays finite.
        y_pred_clamped = torch.clamp(y_pred, 1e-3, 1.0)

        # Prior-regularisation: batch-average prediction vs the prior self.p.
        avg_probs = torch.mean(y_pred, dim=0)
        L_p = -torch.sum(torch.log(avg_probs) * self.p)

        # EMA update of the per-example prediction history with sharpened
        # (exponent alpha) detached predictions.
        pred_ = y_pred.data.detach()
        self.pred_hist[index] = (
            1 - self.lamb) * self.pred_hist[index] + self.lamb * (
                (pred_**self.alpha) /
                (pred_**self.alpha).sum(dim=1, keepdim=True))

        # Down-weight classes the history is already confident about.
        weight = (1 - self.pred_hist[index])

        # Bug fix: the original referenced `self.weight`, which is never
        # assigned in this class (AttributeError at runtime); the freshly
        # computed local `weight` is clearly the intended operand.
        out = ((weight * y_pred_clamped)).sum(dim=1)

        ce_loss = torch.mean(-torch.sum(
            (y_true) * F.log_softmax(output, dim=1), dim=-1))

        mae_loss = (out.log()).mean()

        Entropy = -(F.softmax(output.data.detach(), dim=1) * F.log_softmax(
            output.data.detach(), dim=1)).sum(dim=1).mean()

        return config['alpha'] * ce_loss + config['beta'] * mae_loss + config[
            'gamma'] * L_p, self.pred_hist[index].cpu(), Entropy
Example #11
0
    def __init__(self,
                 data_dir,
                 batch_size,
                 shuffle=True,
                 validation_split=0.0,
                 num_batches=0,
                 training=True,
                 num_workers=4,
                 pin_memory=True):
        """Create CDON train/val datasets and delegate to the base loader."""
        config = ConfigParser.get_instance()
        cfg_trainer = config['trainer']

        # Dataset channel statistics, shared by both pipelines.
        normalize = transforms.Normalize((0.0036, 0.0038, 0.0040),
                                         (0.0043, 0.0046, 0.0044))
        transform_train = transforms.Compose([
            transforms.Resize(32),
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        transform_val = transforms.Compose([
            transforms.Resize(32),
            transforms.ToTensor(),
            normalize,
        ])

        self.data_dir = data_dir
        self.train_dataset, self.val_dataset = get_new_cdon(
            cfg_trainer, transform_train, transform_val)

        super().__init__(self.train_dataset,
                         batch_size,
                         shuffle,
                         validation_split,
                         num_workers,
                         pin_memory,
                         val_dataset=self.val_dataset)
Example #12
0
    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    log.update({
        met.__name__: total_metrics[i].item() / n_samples
        for i, met in enumerate(metric_fns)
    })
    logger.info(log)


if __name__ == '__main__':
    # Command-line entry point: parse config / checkpoint / device flags,
    # build the singleton config, and run the program.
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-c', '--config', default=None, type=str,
                      help='config file path (default: None)')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')

    config = ConfigParser.get_instance(args, '')
    main(config)
Example #13
0
    # Custom CLI options that override default values from the json config.
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target')
    option_specs = [
        (['--lr', '--learning_rate'], float, ('optimizer', 'args', 'lr')),
        (['--bs', '--batch_size'], int, ('data_loader', 'args', 'batch_size')),
        (['--lamb', '--lamb'], float, ('train_loss', 'args', 'lambda')),
        (['--beta', '--beta'], float, ('train_loss', 'args', 'beta')),
        (['--percent', '--percent'], float, ('trainer', 'percent')),
        (['--asym', '--asym'], bool, ('trainer', 'asym')),
        (['--name', '--exp_name'], str, ('name', )),
        (['--seed', '--seed'], int, ('seed', )),
    ]
    options = [
        CustomArgs(flags, type=typ, target=tgt)
        for flags, typ, tgt in option_specs
    ]
    config = ConfigParser.get_instance(args, options)

    # Seed every RNG source for reproducibility.
    random.seed(config['seed'])
    torch.manual_seed(config['seed'])
    torch.cuda.manual_seed_all(config['seed'])
    main(config)