Example #1
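Trainer initialization: builds a ConvNet on the configured device, optionally resumes from a checkpoint, and wires up data loaders, cross-entropy loss, AdamW, a reduce-on-plateau LR scheduler, TensorBoard logging, and early stopping.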
    def __init__(self, config):

        self.device = config["device"]

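        # build the model and move it to the configured device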
        self.model = ConvNet(num_classes=config["num-classes"])
        self.model.to(self.device)

        if config["resume"]:
            print("> Loading Checkpoint")
            # map_location keeps CPU-only machines from failing on GPU checkpoints
            self.model.load_state_dict(
                T.load(config["load-path"], map_location=self.device))

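        # train/validation and test data loaders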
        self.train_loader, self.val_loader = get_train_valid_loader(
            config["data-path"], config["num-classes"], config["batch-size"],
            config["val-batch-size"], config["augment"], config["seed"],
            config["valid-size"], config["shuffle"], config["num-workers"])

        self.test_loader = get_test_loader(
            config["data-path"], config["num-classes"], config["batch-size"],
            config["shuffle"], config["num-workers"], config["pin-memory"])

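        # cross-entropy loss optimized with AdamW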
        self.criterion = nn.CrossEntropyLoss()
        self.optim = T.optim.AdamW(self.model.parameters(),
                                   lr=config["lr-init"],
                                   weight_decay=config["weight-decay"])

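        # TensorBoard logging and a reduce-on-plateau LR schedule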
        self.writer = SummaryWriter(
            log_dir=os.path.join("logs", config["run-title"]))
        self.reduce_lr = T.optim.lr_scheduler.ReduceLROnPlateau(
            self.optim,
            factor=config["lr-factor"],
            patience=config["lr-patience"],
            min_lr=config["lr-min"])

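        # early-stopping hyperparameters and checkpoint save path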
        self.stopping_patience = config["stopping-patience"]
        self.stopping_delta = config["stopping-delta"]

        self.filepath = os.path.join(config["save-path"], config["run-title"],
                                     config["run-title"] + ".pt")
Example #2
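Loader selection between FGVC-Aircraft and CIFAR-100, followed by the loss setup with an optional Orthogonal Projection Loss (OPL) auxiliary term.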
            num_workers=4,
            batch_size=args.b,
        )

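        # test loader (shuffling here only changes batch order, not accuracy)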
        test_loader = torch.utils.data.DataLoader(
            dataset=FGVCAircraft(
                root="/home/kanchanaranasinghe/data/raw/fgvc-aircraft-2013b",
                train=False,
                transform=transform_test),
            shuffle=True,
            num_workers=4,
            batch_size=args.b,
        )

    else:
        training_loader, validation_loader = get_train_valid_loader(
            num_workers=4, batch_size=args.b, shuffle=True, valid_size=0.1)

        test_loader = get_test_dataloader(settings.CIFAR100_TRAIN_MEAN,
                                          settings.CIFAR100_TRAIN_STD,
                                          num_workers=4,
                                          batch_size=args.b,
                                          shuffle=True,
                                          imbalance=args.imbalance)

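    # cross-entropy as the main loss; OPL as an optional auxiliary loss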
    loss_function = nn.CrossEntropyLoss()
    params = net.parameters()
    if args.opl:
        aux_loss = OrthogonalProjectionLoss(no_norm=False,
                                            use_attention=False,
                                            gamma=args.opl_gamma)
        # aux_loss = OLELoss()
Example #3
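SVHN training script setup: seeding, an optional validation split, train/valid/test loaders, and an optional checkpoint restore.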
    os.makedirs(args.save)

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

if args.valid:
    valid_len = 60000
else:
    valid_len = 0

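# DataLoader extras (workers, pinned memory) only when CUDA is available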
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader, valid_loader = get_train_valid_loader(data_dir='./data.svhn',
                                                    batch_size=args.batch_size,
                                                    augment=True,
                                                    random_seed=args.seed,
                                                    valid_len=valid_len,
                                                    shuffle=True,
                                                    show_sample=False,
                                                    **kwargs)
test_loader = get_test_loader(data_dir='./data.svhn',
                              batch_size=args.batch_size,
                              shuffle=True,
                              **kwargs)

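# optionally restore a model and its 'cfg' from a saved checkpoint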
last_prec1 = 0
model = None
cfg = None
if args.model:
    if os.path.isfile(args.model):
        checkpoint = torch.load(args.model)
        cfg = checkpoint['cfg']
Example #4
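Training options dict, seeded train/valid loaders with augmentation, and a plain CIFAR-10 test loader.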
    'blockwise': False,
    'hflip': True,
    'randomCrop': 4,
    'imageSize': 32,
    'randomcrop_type': 0,
    'widen_factor': 10,
    'nGPU': 1,
    'data_type': 'torch.CudaTensor',
    'seed': 444
}

torch.manual_seed(opt['seed'])

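# seeded train/validation loaders with augmentation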
trainloader, validloader = get_train_valid_loader(root='./data',
                                                  batch_size=128,
                                                  augment=True,
                                                  random_seed=opt['seed'],
                                                  num_workers=4,
                                                  pin_memory=True)

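# test pipeline: ToTensor + Normalize only, no augmentation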
Normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

test_transform = transforms.Compose([transforms.ToTensor(), Normalize])

testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=test_transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=128,
                                         shuffle=False,
                                         num_workers=4)
Example #5
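Tail of a top-k accuracy helper (the truncated header defines output, target, topk, and maxk), followed by loader and model construction.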
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape, not view: `correct` is non-contiguous after the transpose above
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
#%%
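# train/validation loaders built from args; test loader from TEST_DIR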
train_loader, val_loader, train_dataset = utils.get_train_valid_loader(
    args.train_dir, batch_size=args.batch_size, crop_size=args.crop_size,
    augment=args.augment, random_seed=111, shuffle=True,
    valid_size=args.valid_size, filtering=args.filtering,
    num_channels=args.num_channels, l2_loss=args.l2_loss,
    same_crop=args.same_crop, num_workers=args.num_workers)
test_loader, test_dataset = utils.get_test_loader(
    TEST_DIR, batch_size=args.batch_size, crop_size=args.crop_size,
    filtering=args.filtering, num_channels=args.num_channels,
    l2_loss=args.l2_loss, num_workers=args.num_workers)
#val_loader, val_dataset = utils.get_val_loader(VAL_DIR, batch_size=args.batch_size, crop_size=args.crop_size, filtering=args.filtering, num_channels=args.num_channels, l2_loss=args.l2_loss)
print(train_dataset.classes)


print(args)
if args.arch.startswith('my'):
    model = myresnet.ResNet18()
else:
    #original_model = models.resnet101(pretrained=True)
    original_model = globals()[args.arch](pretrained=args.pretrained)
    if args.finetune:
Example #6
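Minimal CIFAR-10 training script: argument parsing, loaders, a VGG network with optional DataParallel on CUDA, cross-entropy loss, and Adam.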
from net import Net

import os
import time
import argparse

parser = argparse.ArgumentParser(description='Train a VGG classifier on CIFAR-10.')
# argparse's type=bool is a known pitfall (bool("False") is True); use a flag instead
parser.add_argument('--use_cuda', action='store_true', help="Whether to use CUDA")
args = parser.parse_args()

use_cuda = args.use_cuda

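# CIFAR-10 loaders and class names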
trainloader, validloader = get_train_valid_loader()
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

net = VGG()
if use_cuda:
    net = nn.DataParallel(net)
    net.cuda()

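# standard cross-entropy loss; Adam with default hyperparameters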
criterion = nn.CrossEntropyLoss()
#optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
optimizer = optim.Adam(net.parameters())


# Training
def train(epoch):