Example #1
def get_loader(config):
    batch_size = config['batch_size']
    num_workers = config['num_workers']
    use_gpu = config['use_gpu']

    dataset_name = config['dataset']
    assert dataset_name in [
        'CIFAR10', 'CIFAR100', 'MNIST', 'FashionMNIST', 'KMNIST', 'NYU'
    ]

    if dataset_name in ['CIFAR10', 'CIFAR100']:
        dataset = CIFAR(config)
        train_dataset, test_dataset = dataset.get_datasets()
    elif dataset_name in ['MNIST', 'FashionMNIST', 'KMNIST']:
        dataset = MNIST(config)
        train_dataset, test_dataset = dataset.get_datasets()
    else:
        dataset = NYUv2(config)
        train_dataset, test_dataset = dataset.get_datasets()

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        # shuffle is left at its default (False): DataLoader forbids shuffle=True together with a custom sampler
        sampler=ImbalancedDatasetSampler(train_dataset),
        num_workers=num_workers,
        pin_memory=use_gpu,
        drop_last=True,
        worker_init_fn=worker_init_fn,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=False,
        pin_memory=use_gpu,
        drop_last=False,
    )
    return train_loader, test_loader
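A hedged usage sketch (not from the original source): get_loader expects a config dict with at least the keys read above; the CIFAR, MNIST, and NYUv2 wrappers may read additional keys that are not shown here.

config = {
    'batch_size': 64,                      # placeholder values
    'num_workers': 4,
    'use_gpu': torch.cuda.is_available(),
    'dataset': 'CIFAR10',
}
train_loader, test_loader = get_loader(config)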
Example #2
elif args.mode == 'E':  #for evaluation
    df_sep = {'train': df[pivot:], 'test': df[:pivot]}
else:
    raise NotImplementedError

del df
cols = ['clouds', 'temp', 'humidity', 'pressure', 'windspeed', 'rain']
loader = lambda s: FlickrDataLoader(args.image_root, df_sep[s], cols,
                                    transform[s])

train_set = loader('train')
test_set = loader('test')

train_loader = torch.utils.data.DataLoader(
    train_set,
    sampler=ImbalancedDatasetSampler(train_set),
    drop_last=True,
    batch_size=args.batch_size,
    num_workers=args.num_workers)

test_loader = torch.utils.data.DataLoader(
    test_set,
    sampler=ImbalancedDatasetSampler(test_set),
    drop_last=True,
    batch_size=args.batch_size,
    num_workers=args.num_workers)

num_classes = train_set.num_classes

model = models.resnet101(pretrained=False, num_classes=num_classes)
model.cuda()
Example #3
    def build(self):
        args = self.args

        # Models
        print('Build Models...')
        self.inference = Conditional_UNet(num_classes=self.num_classes)
        self.discriminator = SNDisc(num_classes=self.num_classes)
        exist_cp = sorted(glob(os.path.join(args.save_dir, args.name, '*')))
        if len(exist_cp) != 0:
            print('Load checkpoint:{}'.format(exist_cp[-1]))
            sd = torch.load(exist_cp[-1])
            self.inference.load_state_dict(sd['inference'])
            self.discriminator.load_state_dict(sd['discriminator'])
            self.epoch = sd['epoch']
            self.global_step = sd['global_step']
            print('Checkpoint loaded successfully.')
        else:
            print('Initialize training status.')
            self.epoch = 0
            self.global_step = 0

        self.estimator = torch.load(args.estimator_path)
        self.estimator.eval()

        # Models to CUDA
        for net in (self.inference, self.discriminator, self.estimator):
            net.cuda()

        # Optimizer
        self.g_opt = torch.optim.Adam(self.inference.parameters(),
                                      lr=args.lr,
                                      betas=(0.0, 0.999),
                                      weight_decay=args.lr / 20)
        self.d_opt = torch.optim.Adam(self.discriminator.parameters(),
                                      lr=args.lr,
                                      betas=(0.0, 0.999),
                                      weight_decay=args.lr / 20)

        # Do these loaders not need a sampler?
        self.train_loader = torch.utils.data.DataLoader(
            self.train_set,
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.num_workers)

        if args.sampler:
            self.random_loader = torch.utils.data.DataLoader(
                self.train_set,
                batch_size=args.batch_size,
                sampler=ImbalancedDatasetSampler(self.train_set),
                drop_last=True,
                num_workers=args.num_workers)
        else:
            self.random_loader = torch.utils.data.DataLoader(
                self.train_set,
                batch_size=args.batch_size,
                shuffle=True,
                drop_last=True,
                num_workers=args.num_workers)

        if not args.image_only:
            self.test_loader = torch.utils.data.DataLoader(
                self.test_set,
                batch_size=args.batch_size,
                shuffle=True,
                drop_last=True,
                num_workers=args.num_workers)
            test_data_iter = iter(self.test_loader)
            self.test_random_sample = [
                tuple(d.to('cuda') for d in next(test_data_iter))
                for i in range(2)
            ]
            del test_data_iter, self.test_loader

        self.scalar_dict = {}
        self.image_dict = {}
        self.shift_lmda = lambda a, b: (1. - self.lmda) * a + self.lmda * b
        print('Build has been completed.')
Example #4
def main():
    global best_score
    start_epoch = args.start_epoch

    # Data loaders
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')

    data_train = datasets.ImageFolder(
        traindir, transforms.Compose([transforms.ToTensor()]))
    mean_tr, std_tr = get_mean_and_std(data_train)
    data_test = datasets.ImageFolder(
        valdir, transforms.Compose([transforms.ToTensor()]))
    mean_te, std_te = get_mean_and_std(data_test)

    #Note that for imgaug, we should convert the PIL images to NumPy arrays before applying the transforms.
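    # Hedged illustration (not in the original): imgaug works on NumPy arrays, so the
    # conversion mentioned above would typically be a step such as
    #   transforms.Lambda(lambda img: np.asarray(img))
    # placed before any imgaug-based augmentation in the Compose pipeline.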

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean_tr, std=std_tr)
        ]))
    test_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean_te, std=std_te)
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=ImbalancedDatasetSampler(train_dataset),
        batch_size=args.train_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        test_dataset,  # sampler=ImbalancedDatasetSampler(test_dataset)
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    #    test_loader = torch.utils.data.DataLoader(test_dataset #, sampler=ImbalancedDatasetSampler(test_dataset)
    #        ,batch_size=320, shuffle=False,	num_workers=args.workers, pin_memory=True)

    #    for inputs, targets in train_loader:

    #Create Model
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = resnet34()
    model = model.to(device)
    summary(model, (3, 224, 224))
    #    for child in model.named_children():
    #        print(child)
    #    model.fc.weight
    #    (list(model.layer4.children()))[0].conv1.weights

    #Get the number of model parameters
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = torch.nn.DataParallel(model).cuda()
    for name, param in model.named_parameters():
        if param.requires_grad:
            print(name)
    # cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          nesterov=args.nesterov,
                          weight_decay=args.weight_decay)

    title = 'AF'
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint file found!'
        args.checkpoint = os.path.dirname(args.resume)
        checkpoint = torch.load(args.resume)
        best_score = checkpoint['best_score']
        print(best_score)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc 1.',
            'Valid Acc 1.'
        ])

    if args.evaluate:
        print('\nEvaluation only')
        test_loss, test_acc = test(val_loader, model, criterion, start_epoch,
                                   use_cuda)
        print(' Test Loss:  %.8f, Test Acc:  %.2f' % (test_loss, test_acc))
        return

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        #    	print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
        #Adjust Orhto decay rate
        odecay = adjust_ortho_decay_rate(epoch + 1)
        sendecay = adjust_sen_decay(epoch + 1)

        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, use_cuda, odecay,
                                      sendecay)
        test_loss, test_acc = test(val_loader, model, criterion, epoch,
                                   use_cuda)

        # append logger file
        logger.append(
            [state['lr'], train_loss, test_loss, train_acc, test_acc])

        # save model
        is_best = test_acc > best_score
        best_score = max(test_acc, best_score)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': test_acc,
                'best_score': best_score,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            checkpoint=args.checkpoint)

    logger.close()
    logger.plot()
    savefig(os.path.join(args.checkpoint, 'log.eps'))
    print('Best Fscore:')
    print(best_score)
Example #5
learning_rate = 0.001

myTransform = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    transforms.Normalize((0.65, 0.65, 0.65), (0.15, 0.15, 0.15))
])

train_dataset = torchvision.datasets.ImageFolder(root=trainImgFolder,
                                                 transform=myTransform)

# Data loader
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    sampler=ImbalancedDatasetSampler(train_dataset))

model = models.resnet18(num_classes=num_classes)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


# For updating learning rate
def update_lr(optimizer, lr):
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


# Train the model
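The excerpt stops at the comment above. A minimal sketch of the loop it introduces, where num_epochs is an assumed value and update_lr is the helper defined earlier:

num_epochs = 10  # assumed value, not part of the original excerpt
curr_lr = learning_rate
for epoch in range(num_epochs):
    for images, labels in train_loader:
        outputs = model(images)
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

    # decay the learning rate periodically via the update_lr helper above
    if (epoch + 1) % 5 == 0:
        curr_lr /= 3
        update_lr(optimizer, curr_lr)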
Example #6
mini_valid = Dataloader(x_val, y_val, mini_data_transforms['val'])

# densenet201
# cnn = models.densenet201(pretrained=True)
# cnn.classifier = nn.Linear(1920, 8)
# wide_resnet50_2
cnn = models.wide_resnet50_2(pretrained=True)
cnn.fc = nn.Linear(2048, 8)
cnn.to(device)


from torch.utils.data import DataLoader as torch_DataLoader
from sampler import ImbalancedDatasetSampler

# define training and validation data loaders
train_dataloader = torch_DataLoader(train_dataset, batch_size=bs, sampler=ImbalancedDatasetSampler(train_dataset),
                                    shuffle=False)
valid_dataloader = torch_DataLoader(valid_dataset, batch_size=bs, shuffle=False)

# define mini train and valid data loaders
mini_train_dataloader = torch_DataLoader(mini_train, batch_size=bs, sampler=ImbalancedDatasetSampler(mini_train),
                                         shuffle=False)
mini_valid_dataloader = torch_DataLoader(mini_valid, batch_size=bs, shuffle=False)

from Trainer import Trainer
# start mini_train for five epochs
print('Start training with small images')
mini_trainer = Trainer(cnn, mini_train_dataloader, mini_valid_dataloader, epochs=15)
mini_trainer.train_epochs()
# real start
print("开始使用标准大小图片训练")
Example #7
with open(os.path.join(args.log_dir, 'command.sh'), 'w') as f:
    f.write(' '.join(sys.argv))
    f.write('\n')

tr_data_len = None
val_data_len = None
if args.debug:
    tr_data_len = 1000
    val_data_len = 100
train_dataset = SIIMDataset(subset='train', augs=train_transform, preprocessing=preprocessing_transform, 
                            img_size=args.img_size, folds_dir=args.folds_dir, fold_id=args.fold_id, data_len=tr_data_len)
valid_dataset = SIIMDataset(subset='valid', augs=valid_transform, preprocessing=preprocessing_transform, 
                            img_size=args.img_size, folds_dir=args.folds_dir, fold_id=args.fold_id, data_len=val_data_len)


train_dataloader = DataLoader(train_dataset, sampler=ImbalancedDatasetSampler(train_dataset), batch_size=args.batch_size,
                              num_workers=args.num_workers, drop_last=True)
valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=args.batch_size,
                              num_workers=args.num_workers, drop_last=True)


if args.freeze:
    for param in model.encoder.parameters():
        param.requires_grad = False

# for name, param in model.encoder.named_parameters():
#     print(name)
# exit()
#parameters = [p for p in model.parameters() if p.requires_grad]
parameters = [
                {'params': [p for name, p in model.encoder.named_parameters() if 'layer1' in name], 'lr': args.lr/10000},
Example #8
def train_Varmole(genotype,
                  gene_expression,
                  phenotype,
                  GRN,
                  eQTL,
                  H2,
                  H3,
                  BS,
                  L1REG,
                  Opt,
                  LR,
                  epoch,
                  L2REG=None,
                  device='cpu'):

    obs, label, adj = load_data(gene_expression, genotype, phenotype, eQTL,
                                GRN)
    # obs = pd.read_pickle('/gpfs/scratch/ndnguyen/Varmole/new_data/obs_allSamples.pkl')#.sort_index()
    # label = pd.read_pickle('data/label_allSamples.pkl')
    # adj = sp.sparse.load_npz('/gpfs/scratch/ndnguyen/Varmole/new_data/adj_diag.npz')#.sort_index()

    X_train, X_test, y_train, y_test = train_test_split(obs.values.T,
                                                        np.reshape(
                                                            label.values,
                                                            (-1, 1)),
                                                        test_size=0.20,
                                                        random_state=73)

    X_train, X_val, y_train, y_val = train_test_split(X_train,
                                                      y_train,
                                                      test_size=0.20,
                                                      random_state=73)

    X_train, y_train, X_val, y_val = map(torch.tensor,
                                         (X_train, y_train, X_val, y_val))

    train_ds = TensorDataset(X_train, y_train)
    val_ds = TensorDataset(X_val, y_val)

    train_dl = DataLoader(dataset=train_ds,
                          sampler=ImbalancedDatasetSampler(train_ds),
                          batch_size=BS)
    val_dl = DataLoader(dataset=val_ds, batch_size=BS * 2)

    train_dl = WrappedDataLoader(train_dl, preprocess)
    val_dl = WrappedDataLoader(val_dl, preprocess)

    D_in, H1, H2, H3, D_out = X_train.shape[1], adj.shape[1], H2, H3, 1
    a = torch.from_numpy(adj.todense()).float().to(device)
    model = Net(a, D_in, H1, H2, H3, D_out).to(device)

    loss_fn = nn.BCELoss()

    opt = Opt(model.parameters(), lr=LR, weight_decay=L2REG)

    epochs = epoch
    train_loss, train_accuracy, val_loss, val_accuracy = fit(
        epochs, model, loss_fn, opt, train_dl, val_dl)

    # fig, ax = plt.subplots(2, 1, figsize=(12,8))

    # ax[0].plot(train_loss)
    # ax[0].plot(val_loss)
    # ax[0].set_ylabel('Loss')
    # ax[0].set_title('Training Loss')

    # ax[1].plot(train_accuracy)
    # ax[1].plot(val_accuracy)
    # ax[1].set_ylabel('Classification Accuracy')
    # ax[1].set_title('Training Accuracy')

    # plt.tight_layout()
    # plt.show()

    with torch.no_grad():
        x_tensor_test = torch.from_numpy(X_test).float().to(device)
        model.eval()
        yhat = model(x_tensor_test)
        y_hat_class = np.where(yhat.cpu().numpy() < 0.5, 0, 1)
        test_accuracy = balanced_accuracy_score(y_test.reshape(-1, 1),
                                                y_hat_class)
    print("Test Accuracy {:.2f}".format(test_accuracy))
    return test_accuracy
Example #9
def data_preparing(config):
    """1. data preparing """
    data_source = config['target_data']
    if data_source == 'tsx':
        data_train_transform = transform_data.data_transform_dict['tsx_train']
        data_test_transform = transform_data.data_transform_dict['tsx_test']
        data_transforms = {
            'train': data_train_transform(rescale_size=160, crop_size=128, channel=1),
            'val': data_test_transform(rescale_size=160, crop_size=128, channel=1)
        }

        dataloaders = {}
        image_dataset = {
            x: tsx_dataset.TSX_Dataset(txt_file=config['data_txt'][x], root_dir='D:/hzl/data/', data_type='npy',
                                       transform=data_transforms[x])
            for x in ['train', 'val']
        }
        dataloaders['train'] = DataLoader(image_dataset['train'],
                                          batch_size=config['batchsize']['train'],
                                          # shuffle=True,
                                          sampler=ImbalancedDatasetSampler(image_dataset['train']),
                                          num_workers=0)
        dataloaders['val'] = DataLoader(image_dataset['val'],
                                        batch_size=config['batchsize']['val'],
                                        shuffle=True,
                                        num_workers=0)
        return dataloaders

    elif data_source == 'mstar':
        data_train_transform = transform_data.data_transform_dict['mstar_train']
        data_test_transform = transform_data.data_transform_dict['mstar_test']
        data_transforms = {
            'train': data_train_transform(rescale_size=128, crop_size=128, channel=3),
            'val': data_test_transform(rescale_size=128, crop_size=128, channel=3)
        }
        image_dataset = {
            x: tsx_dataset.Target_Dataset(txt_file=config['data_txt'][x], transform=data_transforms[x])
            for x in ['train', 'val']
        }
        return {
                x: DataLoader(image_dataset[x],
                              batch_size=config['batchsize'][x],
                              shuffle=True,
                              num_workers=0)
                for x in ['train', 'val']
        }

    elif data_source == 'opensar':
        data_transform_train_test = transform_data.data_transform_dict['opensar']
        data_transforms = {
            x: data_transform_train_test(channel=1)
            for x in ['train', 'val']
        }
        image_dataset = {
            x: tsx_dataset.Target_Dataset(txt_file=config['data_txt'][x], transform=data_transforms[x])
            for x in ['train', 'val']
        }
        return {
                x: DataLoader(image_dataset[x],
                              batch_size=config['batchsize'][x],
                              shuffle=True,
                              num_workers=0)
                for x in ['train', 'val']
        }
def main():
    global args, best_prec1
    args = parser.parse_args()

    print('img_dir:', args.img_dir)
    print('end2end?:', args.end2end)
    args_img_dir = '/media/sdd/kwang/affectnet/fly_part_affectnet/train/'
    #args_img_dir = '/media/sdb/kwang/attention_center_crop_range_part_face/train/'
    # load data and prepare dataset
    train_list_file = 'train_affectnet8part_list.txt'
    train_label_file = 'train_affectnet8part_label.txt'
    #train_list_file = '/home/kwang/AAAI/attention_part_of_ferplus/ferplus_8/ferplus_train_center_crop_list.txt'
    #train_label_file = '/home/kwang/AAAI/attention_part_of_ferplus/ferplus_8/ferplus_train_center_crop_label.txt'
    #train_list_file = '/home/kwang/AAAI/attention_part_of_ferplus/ferplus_8/ferplus_train_center_crop_range_list.txt'
    #train_label_file = '/home/kwang/AAAI/attention_part_of_ferplus/ferplus_8/ferplus_train_center_crop_range_label.txt'
    caffe_crop = CaffeCrop('train')
    train_dataset = MsCelebDataset(
        args_img_dir, train_list_file, train_label_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))

    args_img_dir_val = '/media/sdd/kwang/affectnet/fly_part_affectnet/val/'
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        sampler=ImbalancedDatasetSampler(train_dataset),
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=True)
    caffe_crop = CaffeCrop('test')
    val_list_file = 'val_affectnet8part_list.txt'
    val_label_file = 'val_affectnet8part_label.txt'
    #val_list_file = '/home/kwang/AAAI/attention_part_of_ferplus/ferplus_8/ferplus_val_center_crop_list.txt'
    #val_label_file = '/home/kwang/AAAI/attention_part_of_ferplus/ferplus_8/ferplus_val_center_crop_label.txt'
    val_dataset = MsCelebDataset(
        args_img_dir_val, val_list_file, val_label_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        sampler=ImbalancedDatasetSampler(val_dataset),
        batch_size=args.batch_size_t,
        num_workers=args.workers,
        pin_memory=True)

    #assert(train_dataset.max_label == val_dataset.max_label)

    # prepare model
    model = None
    assert (args.arch in ['resnet18', 'resnet34', 'resnet101'])
    if args.arch == 'resnet18':
        #model = Res()
        model = resnet18(end2end=args.end2end)
#     model = resnet18(pretrained=False, nverts=nverts_var,faces=faces_var,shapeMU=shapeMU_var,shapePC=shapePC_var,num_classes=class_num, end2end=args.end2end)
    if args.arch == 'resnet34':
        model = resnet34(end2end=args.end2end)
    if args.arch == 'resnet101':
        pass
    #    model = resnet101(pretrained=False,nverts=nverts_var,faces=faces_var,shapeMU=shapeMU_var,shapePC=shapePC_var, num_classes=class_num, end2end=args.end2end)


#    for param in model.parameters():
#        param.requires_grad = False

# for name, p in model.named_parameters():
#      if not ( 'fc' in name or 'alpha' in name or 'beta' in name):

#          p.requires_grad = False
#      else:
#          print 'updating layer :',name
#          print p.requires_grad

#    for param_flow in model.module.resnet18_optical_flow.parameters():
#        param_flow.requires_grad =True
    model = torch.nn.DataParallel(model).cuda()
    #model.module.theta.requires_grad = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    criterion1 = part_attention.MyLoss().cuda()
    #criterion=Cross_Entropy_Sample_Weight.CrossEntropyLoss_weight().cuda()
    optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                       model.parameters()),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    #    optimizer = torch.optim.SGD(model.parameters(), args.lr,
    #                                momentum=args.momentum,
    #                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint

    if args.pretrained:

        #checkpoint = torch.load('../../Data/Model/resnet18_ms1m_ferplus_88.pth.tar')
        checkpoint = torch.load(
            '/media/sdd/kwang/affectnet/naive_train/ijba_res18_naive.pth.tar')

        pretrained_state_dict = checkpoint['state_dict']
        model_state_dict = model.state_dict()

        for key in pretrained_state_dict:
            if key == 'module.fc.weight' or key == 'module.fc.bias':

                #if  ((key=='module.fc.weight')|(key=='module.fc.bias')|(key == 'module.feature.weight')|(key == 'module.feature.bias')):
                pass
            else:
                model_state_dict[key] = pretrained_state_dict[key]

        model.load_state_dict(model_state_dict, strict=False)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True
    print('args.evaluate', args.evaluate)
    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, criterion1, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, criterion1)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1

        best_prec1 = max(prec1.cuda()[0], best_prec1)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best[0])
Example #11
    train_count_dict = {}
    for i in range(cate_num):
        train_count_dict[i] = len(
            dataset['train'].data.loc[dataset['train'].data['label'] == i])

    loss_weight = [
        (1.0 -
         float(train_count_dict[i]) / float(sum(train_count_dict.values()))) *
        cate_num / (cate_num - 1) for i in range(cate_num)
    ]

    dataloaders = {}
    dataloaders['train'] = DataLoader(dataset['train'],
                                      batch_size=batch_size['train'],
                                      sampler=ImbalancedDatasetSampler(
                                          dataset['train']),
                                      num_workers=0)
    dataloaders['val'] = DataLoader(dataset['val'],
                                    batch_size=batch_size['val'],
                                    shuffle=True,
                                    num_workers=0)

    img_model = torch.load('../model/tsx.pth')
    # spe_model = torch.load('../model/slc_spexy_cae_2.pth')
    net_joint = network.SLC_joint2(cate_num)
    net_joint = get_pretrained(img_model, net_joint)
    # net_joint.load_state_dict(torch.load('../model/slc_joint_deeper_' + str(datasetnum) + '_F_con.pth'))

    net_joint.to(device)

    epoch_num = 7000
Exemplo n.º 12
0
    def __len__(self):
        return self.N

if __name__ == "__main__":
    conf = EasyDict()
    conf.workspace = 'workspace/imbalanced_sampler_cross_entropy'
    conf.device = 'cuda'
    conf.max_epochs = 100
    
    log = logger(conf.workspace)
    model = FCN()
    #loss_function = FocalLoss()
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters())
    scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)

    metrics = [ClassificationMeter(2),]
    train_dataset = BiasedDataset(1000, [0.9, 0.1])
    test_dataset = BiasedDataset(1000, [0.5, 0.5])
    loaders = {
            "train": data.DataLoader(train_dataset, sampler=ImbalancedDatasetSampler(train_dataset)),
            "test": data.DataLoader(test_dataset),
            }


    trainer = Trainer(conf, model, optimizer, scheduler, loss_function, loaders, log, metrics)
    trainer.train()
    trainer.evaluate()

Example #13
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    # Hyperparams
    batch_size = 32
    epochs = 30
    input_size = (224,) * 2
    weight_decay = 1e-5
    mixup = False
    print(
        f"Batch size: {batch_size}, input size: {input_size}, wd: {weight_decay}, mixup: {mixup}"
    )

    # Create datasets
    train_indices = np.load("../../data/interim/train_indices.npy")
    test_indices = np.load("../../data/interim/test_indices.npy")
    # valid_indices = np.load("../../data/interim/valid_indices.npy")

    # Merge train and test
    # train_indices = np.concatenate((train_indices, test_indices))
    # test_indices = valid_indices

    # Make sure there's no overlap
    assert not set(train_indices) & set(test_indices)

    # Transforms
    train_set_transforms = transforms.Compose(
        [
            transforms.Grayscale(),
            transforms.Resize(input_size),
            transforms.RandomAffine(
                degrees=180,
                # translate=(0.05, 0.05),
                # scale=(0.95, 1.05),
                resample=2,
                fillcolor=0,
            ),
            # transforms.ColorJitter(
            #     brightness=0.20, contrast=0.15, saturation=0.15, hue=0.04
            # ),
            transforms.RandomVerticalFlip(),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]
    )
    test_set_transforms = transforms.Compose(
        [transforms.Grayscale(), transforms.Resize(input_size), transforms.ToTensor()]
    )

    # Datasets
    train_set = torch.utils.data.Subset(
        datasets.ImageFolder(
            "../../data/interim/TrainingSetImagesDirDuplicatesRemovedClasses/",
            train_set_transforms,
        ),
        train_indices,
    )
    test_set = torch.utils.data.Subset(
        datasets.ImageFolder(
            "../../data/interim/TrainingSetImagesDirDuplicatesRemovedClasses/",
            test_set_transforms,
        ),
        test_indices,
    )
    print("Training set size: ", len(train_set))
    print("Test set size : ", len(test_set))
    print("Total: ", len(train_set) + len(test_set))

    # Dataloaders
    train_loader = torch.utils.data.DataLoader(
        dataset=train_set,
        batch_size=batch_size,
        shuffle=False,  # shuffling is handled by sampler
        sampler=ImbalancedDatasetSampler(train_set, train_indices),
        num_workers=4,
        pin_memory=True,
    )

    test_loader = torch.utils.data.DataLoader(
        dataset=test_set, batch_size=128, shuffle=False, num_workers=4, pin_memory=True
    )

    # Distribution of classes in the test set
    label_counter = {0: 0, 1: 0, 2: 0, 3: 0}
    for indice in test_set.indices:
        label = test_set.dataset.imgs[indice][1]
        label_counter[label] += 1
    weights = dict(sorted(label_counter.items())).values()  # sort by keys
    weights = torch.FloatTensor([1.0 / x for x in weights]).to(device)
    weights = weights / torch.sum(weights)
    print("Class weights: ", weights)

    model = Model(pretrained=True, num_classes=4).to(device)
    print(Model.__name__)

    # Train first and last layer for one epoch
    # model_params = [*model.parameters()]
    # optimizer = torch.optim.SGD(
    #     [model_params[0], model_params[-1]],
    #     lr=1e-2,
    #     momentum=0.9,
    #     weight_decay=weight_decay,
    # )
    # train(model, device, train_loader, optimizer, -1)

    optimizer = torch.optim.SGD(
        model.parameters(), lr=5e-4, momentum=0.9, weight_decay=weight_decay
    )
    print("Optimizer: ", optimizer.__class__.__name__)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[5], gamma=0.1
    )

    train_loss_history = list()
    test_loss_history = list()
    acc_history = list()

    for epoch in range(1, epochs + 1):
        print("################## EPOCH {}/{} ##################".format(epoch, epochs))

        for param_group in optimizer.param_groups:
            print("Current learning rate:", param_group["lr"])

        train_loss = train(model, device, train_loader, optimizer, epoch, mixup=mixup)
        test_loss, acc = validate(model, device, test_loader, weights)

        scheduler.step()

        # Save model
        if epoch > 1 and (test_loss < min(test_loss_history) or acc > max(acc_history)):
            checkpoint(
                model,
                test_loss,
                acc,
                optimizer,
                epoch,
                input_size,
                weight_decay,
                mixup,
                infos="",
            )

        train_loss_history.append(train_loss)
        test_loss_history.append(test_loss)
        acc_history.append(acc)

        # Save history at each epoch (overwrite previous history)
        history = [train_loss_history, test_loss_history, acc_history]
        np.save("history.npy", np.array(history))