Example 1
def evaluate_classifier(weights_path):
    train_loader, val_loader = dataset.get_classifier_dataloaders(
        augment=False, batch_size=10, train_shuffle=False)
    test_loader = dataset.get_test_dataloader(batch_size=10, num_channels=3)
    model = torchvision.models.resnet34().cuda()
    D = torch.load(weights_path)
    model.load_state_dict(D)
    weights_dir = os.path.dirname(weights_path)
    model.eval()

    for name, loader in zip(['training', 'validation', 'test'],
                            [train_loader, val_loader, test_loader]):
        ret = {'label': []}
        for i, (image, _) in enumerate(loader):
            if i % 10 == 0:
                print('Processing batch %d' % i)
            with torch.no_grad():      # inference only; skip autograd bookkeeping
                output = model(image.cuda())
            output = output.cpu()
            predictions = torch.argmax(output, dim=1)
            for prediction in predictions:
                # .item() replaces the deprecated np.asscalar for 0-d tensors
                ret['label'].append(prediction.item())
        # write the split's predictions once, after the whole loader is consumed
        df = pd.DataFrame(ret)
        df.to_csv(weights_dir + '/%s_classifier_pred.csv' % name,
                  index=False)
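
These examples are excerpts, so project-local modules such as dataset are assumed importable. Below is a minimal self-contained sketch of the same evaluation pattern, with torchvision's FakeData standing in for the project dataloader; the dataset size, image shape, and output path are illustrative assumptions.

import torch
import torchvision
import pandas as pd
from torch.utils.data import DataLoader

# hypothetical stand-in for dataset.get_test_dataloader: (image, label) pairs
# shaped like the 3-channel inputs ResNet expects
fake_test = torchvision.datasets.FakeData(
    size=20, image_size=(3, 224, 224),
    transform=torchvision.transforms.ToTensor())
loader = DataLoader(fake_test, batch_size=10, shuffle=False)

model = torchvision.models.resnet34()
model.eval()

labels = []
with torch.no_grad():                  # no autograd graph during evaluation
    for image, _ in loader:
        output = model(image)
        labels.extend(torch.argmax(output, dim=1).tolist())

pd.DataFrame({'label': labels}).to_csv('test_classifier_pred.csv', index=False)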
Example 2
def evaluate_watershed(weights_path):
    '''
    Evaluates the model on each dataset split and saves the predicted
    segmentation maps as .png images. Also saves, to a CSV, the number of
    lenses predicted for each class based on the watershed output.
    '''
    # load the trained UNet once; the same weights are reused for every split
    model = unet.unet_model.UNet(1, NUM_CLASSES)
    model.load_state_dict(torch.load(weights_path))
    model = model.cuda()
    model.eval()
    weights_dir = os.path.dirname(weights_path)

    for ds in ['train', 'validation', 'test']:
        if ds == 'train':
            data_loader, _ = dataset.get_dataloaders(
                batch_size=10,
                augment=False,
                skip_no_lenses_frames=False,
                train_shuffle=False)
        elif ds == 'validation':
            _, data_loader = dataset.get_dataloaders(
                batch_size=10,
                augment=False,
                skip_no_lenses_frames=False,
                train_shuffle=False)
        else:
            data_loader = dataset.get_test_dataloader(batch_size=10,
                                                      num_channels=1)
        os.makedirs(weights_dir + '/%s_prediction/' % ds, exist_ok=True)

        ret = {n: [] for n in range(NUM_CLASSES)}
        img_no = -1
        for i, (image, _) in enumerate(data_loader):
            if i % 10 == 0:
                print('Processing batch %d' % i)
            with torch.no_grad():      # inference only; skip autograd bookkeeping
                output = model(image.cuda())
            output = output.cpu()
            predictions = torch.argmax(output, dim=1)
            for prediction in predictions:
                img_no += 1
                prediction_np = prediction.numpy()
                pil = Image.fromarray(
                    (prediction_np * 255.0 /
                     np.clip(np.max(prediction_np), 1, None)).astype(np.uint8))
                pil.save(weights_dir + '/%s_prediction/%s-%s-prediction.png' %
                         (ds, ds, str(img_no).zfill(5)))
                for n in range(NUM_CLASSES):
                    ret[n].append(watershed_to_num_lenses(prediction_np, n))
        df = pd.DataFrame(ret)
        df.to_csv(weights_dir + '/%s_watershed_pred.csv' % ds, index=False)
        # column 9 (the last watershed class here) is kept as the final
        # per-image label
        df = pd.DataFrame({'label': df[9]})
        df.to_csv(weights_dir + '/%s_pred.csv' % ds, index=False)
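
The PNG-export step above rescales an integer class map to 0-255 grayscale. A stand-alone sketch of just that step; the 64x64 random map is a made-up placeholder.

import numpy as np
from PIL import Image

prediction_np = np.random.randint(0, 10, size=(64, 64))   # hypothetical class map
# clip the divisor so an all-background (all-zero) map does not divide by zero
scaled = prediction_np * 255.0 / np.clip(prediction_np.max(), 1, None)
Image.fromarray(scaled.astype(np.uint8)).save('prediction.png')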
Example 3
def test(l_list, t_list):
    start_time = time.time()
    test_loader = get_test_dataloader(args)
    with torch.no_grad():              # evaluation pass; gradients are not needed
        for batch_index, (ims, labels) in enumerate(test_loader):
            if args.gpu:
                ims = ims.cuda()
                labels = labels.cuda()
            size_ratio = labels[:, -1]
            outs, att = model(ims, size_ratio)
            l_list = np.append(l_list, labels[:, 0:1].cpu().numpy())
            t_list = np.append(t_list, outs.cpu().numpy())

    end_time = time.time()
    time_cost = end_time - start_time
    return time_cost, l_list, t_list
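
One note on the accumulation above: np.append copies the entire array on every call, so the loop is quadratic in the number of batches. A sketch of the usual alternative, collecting per-batch arrays and concatenating once; the shapes are illustrative.

import numpy as np

batches = [np.random.rand(10) for _ in range(100)]   # stand-in for per-batch outputs
out = np.concatenate(batches)                        # one copy instead of one per batch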
Example 4
    #optimizer = torch.optim.Adam(model.parameters(),lr = args.lr, weight_decay = 1e-3)
    #optimizer = torch.optim.SGD(model.parameters(),lr = args.lr,momentum = 0.9,weight_decay=5e-4)
    optimizer = adabound.AdaBound(model.parameters(), lr=args.lr,
                                  final_lr=0.1)  # AdaBound: starts like Adam, converges to SGD
    criterion = RegressionLoss()
    tensorboard_dir = os.path.join(args.runs, args.name)
    writer = SummaryWriter(tensorboard_dir)
    checkpoint_path = os.path.join(args.checkpoints_dir, args.name)
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    checkpoints = os.path.join(checkpoint_path, '{net}-{epoch}-{type}.pth')

    best_criterion = -1
    lr = args.lr
    train_loader, val_loader = get_train_dataloader(args)
    test_loader = get_test_dataloader(args)

    for epoch in range(1, args.epochs + 1):

        y_l = np.array([])
        y_p = np.array([])
        # all parameter groups share one rate here; read the lr back from the optimizer
        for param_group in optimizer.param_groups:
            current_lr = param_group['lr']
        time_cost, y_l, y_p = train(epoch, y_l, y_p)
        print(
            "====================Epoch:{}==================== Learning Rate:{:.5f}"
            .format(epoch, current_lr))
        SROCC, KROCC, PLCC, RMSE, Acc = evaluate(y_l, y_p)
        writer.add_scalar('Train/SROCC', SROCC, epoch)
        print(
            "Training Results - Epoch: {}  Avg accuracy: {:.3f} RMSE: {:.5f}  SROCC: {:.5f} KROCC: {:.5f} PLCC: {:.5f} ***** Time Cost: {:.1f} s"
            .format(epoch, Acc, RMSE, SROCC, KROCC, PLCC, time_cost))
Example 5
    use_cuda = not args.use_cpu and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    bs = args.train_batch_size
    dataset_version = args.dataset_version
    # evaluation batch size depends on the dataset variant
    eval_batch = {'flip': 2, 'single': 1}.get(dataset_version, 6)

    train_dataloader = get_train_dataloader(args.data_dir,
                                            args.train_batch_size,
                                            dataset_version,
                                            shuffle=True,
                                            use_transforms=args.augmentations)
    test_dataloader = get_test_dataloader(args.data_dir,
                                          args.test_batch_size,
                                          dataset_version,
                                          shuffle=True,
                                          use_transforms=args.augmentations)

    metrics_train_dataloader = get_train_dataloader(args.data_dir,
                                                    eval_batch,
                                                    dataset_version,
                                                    shuffle=False,
                                                    use_transforms=False)
    metrics_test_dataloader = get_test_dataloader(args.data_dir,
                                                  eval_batch,
                                                  dataset_version,
                                                  shuffle=False,
                                                  use_transforms=False)

    model = UNet().to(device)
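
get_train_dataloader and get_test_dataloader are project-local helpers. A hypothetical minimal pair backed by random tensors, simplified to batch size and shuffle flag only; the names, shapes, and sizes are assumptions.

import torch
from torch.utils.data import DataLoader, TensorDataset

def get_train_dataloader(batch_size, shuffle=True):
    data = TensorDataset(torch.randn(100, 3, 32, 32), torch.randint(0, 10, (100,)))
    return DataLoader(data, batch_size=batch_size, shuffle=shuffle)

def get_test_dataloader(batch_size):
    data = TensorDataset(torch.randn(20, 3, 32, 32), torch.randint(0, 10, (20,)))
    return DataLoader(data, batch_size=batch_size, shuffle=False)

for images, labels in get_test_dataloader(batch_size=10):
    print(images.shape, labels.shape)  # [10, 3, 32, 32] and [10]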
Example 6

embeddings = []

if __name__ == '__main__':
    args = parse_args()
    print(args)

    use_cuda = not args.use_cpu and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    bs = args.train_batch_size
    best_acc = 0

    train_dataloader = get_train_dataloader(args.data_dir,
                                            args.train_batch_size,
                                            embedding=args.embedding)
    test_dataloader = get_test_dataloader(args.data_dir,
                                          args.train_batch_size,
                                          embedding=args.embedding)

    # metrics_train_dataloader = None  # get_train_dataloader(args.data_dir, eval_batch, dataset_version, shuffle=False, use_transforms=False)
    # metrics_test_dataloader = None  # get_test_dataloader(args.data_dir, eval_batch, dataset_version, shuffle=False, use_transforms=False)

    model = dispatch_model(args, device)

    wandb.init(project=args.project_name, name=args.run_name, config=args)
    wandb.watch(model, log='all')
    config = wandb.config

    loss_function = CrossEntropyLoss(reduction='mean')
    optimizer = dispatch_optimizer(model, args)
    lr_scheduler = dispatch_lr_scheduler(optimizer, args)
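
CrossEntropyLoss with reduction='mean' (the default) averages per-sample losses over the batch and expects raw logits rather than softmax probabilities. A minimal sketch with made-up shapes:

import torch
from torch.nn import CrossEntropyLoss

loss_function = CrossEntropyLoss(reduction='mean')
logits = torch.randn(4, 10)             # batch of 4, 10 classes
targets = torch.randint(0, 10, (4,))
print(loss_function(logits, targets))   # scalar mean loss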
Example 7

from models import SimpleModel
from training import warmup, dispatch_lr_scheduler, get_lr, dispatch_optimizer
from metrics import compute_accuracy, compute_confusion_matrix, compute_loss
from dataset import get_train_dataloader, get_test_dataloader
from utils import parse_args

if __name__ == '__main__':
    args = parse_args()
    use_cuda = not args.use_cpu and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    bs = args.train_batch_size

    train_dataloader = get_train_dataloader(
        os.path.join(args.data_dir, 'train/'), args.train_batch_size,
        args.augmentation)
    test_dataloader = get_test_dataloader(os.path.join(args.data_dir, 'test/'),
                                          args.test_batch_size)

    model = SimpleModel(use_bn=args.use_bn).to(device)

    wandb.init(project="classifying-celebrities", config=args)
    wandb.watch(model, log='all')
    config = wandb.config

    loss_function = CrossEntropyLoss(reduction='mean')
    optimizer = dispatch_optimizer(model, args)
    lr_scheduler = dispatch_lr_scheduler(optimizer, args)

    iteration = 0
    training_accuracy = compute_accuracy(model, train_dataloader, device)
    test_accuracy = compute_accuracy(model, test_dataloader, device)
    wandb.log({'training accuracy': training_accuracy}, step=iteration * bs)
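
compute_accuracy is imported from the project's metrics module. A hypothetical implementation consistent with how it is called above (model, dataloader, device); the body is a sketch, not the project's actual code.

import torch

def compute_accuracy(model, dataloader, device):
    # run the model over the loader without gradients and compare argmax
    # predictions against the integer labels
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in dataloader:
            images, labels = images.to(device), labels.to(device)
            predictions = model(images).argmax(dim=1)
            correct += (predictions == labels).sum().item()
            total += labels.size(0)
    return correct / total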