Example #1
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data_utils

# DataProvider, InceptionV3, worker_init_fn, train, val and
# save_checkpoint are defined elsewhere in the source project; the same
# imports apply to Examples #2 and #3.


def main():
    gpu_num = torch.cuda.device_count()
    train_loader = data_utils.DataLoader(dataset=DataProvider(),
                                         batch_size=60 * gpu_num,
                                         num_workers=18,
                                         worker_init_fn=worker_init_fn)
    val_loader = data_utils.DataLoader(dataset=DataProvider(val=True),
                                       batch_size=60 * gpu_num,
                                       num_workers=18,
                                       worker_init_fn=worker_init_fn)
    best_acc = 0
    model = InceptionV3().cuda()
    model = nn.DataParallel(model)
    # optimizer = optim.Adam(model.module.parameters(), lr=1e-4)
    optimizer = optim.RMSprop(
        model.module.parameters(),
        lr=0.05 / 2,
        alpha=0.9,
        eps=1.0,
        momentum=0.9,
        # weight_decay=0.5,
    )

    criterion = nn.CrossEntropyLoss()
    start_epoch = 0
    resume = 'model_best.pth.tar'
    if os.path.isfile(resume):
        checkpoint = torch.load(resume)
        start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            resume, checkpoint['epoch']))

    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=int(2e6 / len(train_loader)),
        gamma=0.5,
        # last_epoch=start_epoch,
    )
    for epoch in range(start_epoch, 500):
        # reseed NumPy so augmentation randomness differs across epochs
        np.random.seed()
        train(model, optimizer, criterion, train_loader, epoch)
        acc = val(model, criterion, val_loader, epoch)
        # step the scheduler after the epoch's optimizer updates
        # (the required ordering since PyTorch 1.1)
        lr_scheduler.step()
        is_best = acc > best_acc
        best_acc = max(acc, best_acc)
        # also save whenever a new best is reached, so a best result on an
        # odd-numbered epoch is not lost
        if epoch % 2 == 0 or is_best:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    # 'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict(),
                },
                is_best)
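
Both loaders pass a worker_init_fn that the snippet does not define. A minimal sketch of the usual pattern, assuming its job is to give every DataLoader worker its own NumPy seed (the per-epoch np.random.seed() call above does the same for the main process):

import numpy as np
import torch

def worker_init_fn(worker_id):
    # torch hands each worker a distinct base seed; fold it into NumPy's
    # RNG so augmentations are not duplicated across workers
    np.random.seed((torch.initial_seed() + worker_id) % 2**32)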
Example #2
def main():
    train_loader = data_utils.DataLoader(dataset=DataProvider(),
                                         batch_size=120,
                                         num_workers=18,
                                         worker_init_fn=worker_init_fn)
    val_loader = data_utils.DataLoader(dataset=DataProvider(val=True),
                                       batch_size=120,
                                       num_workers=18,
                                       worker_init_fn=worker_init_fn)
    best_acc = 0
    model = InceptionV3().cuda()
    model = nn.DataParallel(model)
    # optimizer = optim.Adam(model.module.parameters(), lr=1e-4)
    optimizer = optim.RMSprop(model.module.parameters(),
                              lr=0.05 / 2,
                              momentum=0.9,
                              weight_decay=0.5)

    criterion = nn.CrossEntropyLoss()
    start_epoch = 0
    resume = 'model_best.pth.tar'
    if os.path.isfile(resume):
        checkpoint = torch.load(resume)
        start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            resume, checkpoint['epoch']))

    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=int(2e6 / len(train_loader)),
        gamma=0.5,
        # last_epoch=start_epoch,
    )
    # evaluation-only variant: train() is commented out, so the optimizer
    # and scheduler have no real effect in this loop
    for epoch in range(start_epoch, 500):
        np.random.seed()
        # train(model, optimizer, criterion, train_loader, epoch)
        val(model, criterion, val_loader, epoch)
        lr_scheduler.step()
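
The step_size expression in Examples #1 and #2 is easy to misread: len(train_loader) is the number of batches per epoch, and the scheduler is stepped once per epoch, so int(2e6 / len(train_loader)) halves the learning rate roughly every two million iterations. A worked illustration with assumed figures:

batches_per_epoch = 5000                              # assumed len(train_loader)
step_size = int(2e6 / batches_per_epoch)              # 400 epochs between decays
iterations_per_decay = step_size * batches_per_epoch  # back to ~2,000,000 batches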
Example #3
def main():
    train_loader = data_utils.DataLoader(dataset=DataProvider(),
                                         batch_size=200,
                                         num_workers=20,
                                         worker_init_fn=worker_init_fn)
    val_loader = data_utils.DataLoader(dataset=DataProvider(val=True),
                                       batch_size=200,
                                       num_workers=20,
                                       worker_init_fn=worker_init_fn)
    best_acc = 0
    model = VGG_FCN().cuda()
    model = nn.DataParallel(model)
    optimizer = optim.Adam(model.module.parameters(), lr=1e-4)
    criterion = nn.CrossEntropyLoss()

    start_epoch = 0
    resume = 'model_best.pth.tar'
    if os.path.isfile(resume):
        checkpoint = torch.load(resume)
        start_epoch = checkpoint['epoch']
        # restore into best_acc, the variable the loop below compares against
        best_acc = checkpoint['best_acc1']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            resume, checkpoint['epoch']))

    for epoch in range(start_epoch, 500):
        np.random.seed()
        train(model, optimizer, criterion, train_loader, epoch)
        acc = val(model, criterion, val_loader, epoch)
        is_best = acc > best_acc
        best_acc = max(acc, best_acc)
        # also save whenever a new best is reached, so a best result on an
        # odd-numbered epoch is not lost
        if epoch % 2 == 0 or is_best:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    # 'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc,
                    'optimizer': optimizer.state_dict(),
                },
                is_best)
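
save_checkpoint is not defined in any of these snippets. A common implementation, along the lines of the official PyTorch ImageNet example (an assumption, not code from this project), which also produces the model_best.pth.tar file that the resume branch loads:

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # always persist the latest state ...
    torch.save(state, filename)
    if is_best:
        # ... and copy the best-scoring checkpoint under the name that
        # the resume logic above expects
        shutil.copyfile(filename, 'model_best.pth.tar')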
Example #4
# np.random.seed(100)
np.random.shuffle(data_list)

# 5-fold cross validation
n_fold = 5
fold_size = len(data_list) // n_fold

overall_acc = []
for fold in range(n_fold):
    fold_output_path = output_path + '/fold_' + str(fold)
    # split the data into 5 folds of 300 samples each; every iteration
    # trains on 1,200 samples and validates on the remaining 300
    valid_list = data_list[fold * fold_size:(fold + 1) * fold_size]
    train_list = [d for d in data_list if d not in valid_list]

    train_provider = DataProvider(train_list, is_shuffle=True)
    valid_provider = DataProvider(valid_list, is_shuffle=False)

    # build network
    net = VGG19(15, retrain=True, weights=None)

    # select optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate)

    # loss function (cross entropy)
    loss_function = tf.nn.softmax_cross_entropy_with_logits

    # check point saver
    ckpt = tf.train.Checkpoint(net=net)

    # calculate iterations each epoch
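
The snippet cuts off before the per-fold training loop. A minimal sketch of the step that would typically follow, assuming train_provider(batch_size) returns a (images, one_hot_labels) batch, net outputs raw logits, and batch_size is a hypothetical name not present in the original:

batch_size = 15  # assumed value
iters_per_epoch = len(train_list) // batch_size

for _ in range(iters_per_epoch):
    xs, ys = train_provider(batch_size)
    with tf.GradientTape() as tape:
        logits = net(xs)
        # softmax_cross_entropy_with_logits returns one value per sample
        loss = tf.reduce_mean(loss_function(labels=ys, logits=logits))
    grads = tape.gradient(loss, net.trainable_variables)
    optimizer.apply_gradients(zip(grads, net.trainable_variables))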
Example #5
import glob

import numpy as np
import tensorflow as tf

from data_loader import DataProvider
from model import ResNet50, VGG19
from dict_map import ClassMap

# hyper-parameter setting
output_path = 'pin/results/'

eval_batch_size = 15

# load data
data_path = 'pin/testing/*.jpg'
data_list = glob.glob(data_path)
assert len(data_list) % eval_batch_size == 0, \
    'test set size must be divisible by eval_batch_size'

test_provider = DataProvider(data_list, need_labels=False, is_shuffle=False)

# build network
net = VGG19(15, retrain=True)

# check point
ckpt = tf.train.Checkpoint(net=net)
ckpt.restore(output_path + 'ckpt/final')  # output_path already ends in '/'

# evaluation on test set
preds = None
for i in range(test_provider.size() // eval_batch_size):
    xs, ys = test_provider(eval_batch_size)
    logits = net(xs)
    sub_preds = tf.nn.softmax(logits)
    sub_preds = np.argmax(sub_preds, -1)
    # accumulate per-batch predictions (the continuation implied by the
    # preds = None initialisation above)
    preds = sub_preds if preds is None else np.concatenate([preds, sub_preds])
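
Because the provider was built with is_shuffle=False, the accumulated predictions line up with data_list. A sketch of pairing and saving them (the CSV name and columns are illustrative, not from the snippet):

import csv

with open('predictions.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['image', 'predicted_class'])
    for path, cls in zip(data_list, preds):
        writer.writerow([path, int(cls)])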