def main(args):
    """Evaluate a saved ResNet-50 classifier on a test set and plot results.

    Expects ``args`` to provide: ``num_classes``, ``saved_model`` (checkpoint
    path), ``image_size``, ``gpu`` (list of device ids; first entry < 0 means
    CPU), ``test_data_dir`` and ``batch_size``.
    """
    # [3, 4, 6, 3] Bottleneck stages == the ResNet-50 layout.
    model = resnet.ResNet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3],
                          args.num_classes)
    saved_state_dict = torch.load(args.saved_model)

    transformations = transforms.Compose([
        transforms.Resize((args.image_size, args.image_size)),
        transforms.ToTensor()
    ])

    if args.gpu[0] >= 0:
        cudnn.enabled = True
        # dim=1 applies softmax over the class dimension explicitly
        # (the dim-less form is deprecated and ambiguous).
        softmax = nn.Softmax(dim=1).cuda()
        model.cuda()
    else:
        softmax = nn.Softmax(dim=1)

    load_filtered_state_dict(model,
                             saved_state_dict,
                             ignore_layer=[],
                             reverse=True)

    test_x, test_y, classes_names = get_dataset(args.test_data_dir)
    test_dataset = DataWrapper(test_x,
                               test_y,
                               transformations,
                               augumentation=False)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=1)

    classes, indices = np.unique(test_y, return_index=True)

    total = len(test_dataset)

    y_pred = np.zeros(total)
    y = np.zeros(total)
    count = 0
    # Fix: switch to eval mode so BatchNorm/Dropout behave deterministically
    # during inference, and disable autograd to avoid building a graph.
    model.eval()
    with torch.no_grad():
        for images, labels, names in test_loader:
            if args.gpu[0] >= 0:
                images = images.cuda()
                labels = labels.cuda()

            label_pred = softmax(model(images))

            batch = images.size(0)
            # Top-1 class index per sample.
            _, label_pred = label_pred.topk(1, 1, True, True)
            y_pred[count:count + batch] = label_pred.view(-1).cpu().numpy()
            y[count:count + batch] = labels.cpu().numpy()
            count += batch

    plot(y, y_pred, classes_names)
# Beispiel #2 (separator text left over from the snippet-collection export;
# commented out so the file remains valid Python)
def main(args):
    """Train a ResNet-50 classifier, evaluating once per epoch.

    Expects ``args`` to provide: ``trainning_data_dir``,
    ``validation_data_dir``, ``batch_size``, ``saved_model`` (optional
    checkpoint to warm-start from), ``lr``, ``save_path`` and ``num_epochs``.
    """
    cudnn.enabled = True

    print('Loading data.')

    # Standard ImageNet preprocessing: resize, random crop, normalize with
    # ImageNet channel statistics.
    transformations = transforms.Compose([
        transforms.Resize(240),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    train_x, train_y, classes_names = get_dataset(args.trainning_data_dir)
    test_x, test_y, _ = get_dataset(args.validation_data_dir)
    num_classes = len(classes_names)

    trainning_dataset = DataWrapper(train_x, train_y, transformations)
    eval_dataset = DataWrapper(test_x, test_y, transformations)

    train_loader = torch.utils.data.DataLoader(dataset=trainning_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=16)

    eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=16)
    n = len(trainning_dataset)
    print(n)

    # ResNet50 structure
    model = resnet.ResNet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3],
                          num_classes)
    if args.saved_model:
        print('Loading model.')
        saved_state_dict = torch.load(args.saved_model)

        # A torchvision checkpoint ('resnet' in the path) needs key
        # translation (reverse=False); project checkpoints load directly.
        if 'resnet' in args.saved_model:
            load_filtered_state_dict(model,
                                     saved_state_dict,
                                     ignore_layer=[],
                                     reverse=False)
        else:
            load_filtered_state_dict(model, saved_state_dict, ignore_layer=[])

    crossEntropyLoss = nn.CrossEntropyLoss().cuda()
    # Fix: removed an unused nn.Softmax() local that was allocated on the GPU
    # but never referenced (CrossEntropyLoss already applies log-softmax).
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # multi-gpu
    # NOTE(review): device ids are hard-coded to [0, 1]; this crashes on
    # single-GPU machines — consider deriving from args.gpu. TODO confirm.
    model = nn.DataParallel(model, device_ids=[0, 1])
    model.cuda()

    Save_model = SaveBestModel(save_dir=args.save_path)
    Writer = SummaryWriter()
    step = 0
    for epoch in range(args.num_epochs):
        # Evaluate first so epoch 0 records the pre-training baseline.
        evaluate(eval_loader, model, Writer, step, Save_model, epoch)
        step = train(train_loader, model, crossEntropyLoss, optimizer, Writer,
                     args.batch_size, epoch, step, n)