Example #1
def main():

    best_prec1 = 0
    model = MVConv()
    if use_gpu:
        model.cuda()
    print(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    print(lr, weight_decay)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr,
                                weight_decay=weight_decay)

    cudnn.benchmark = True

    transformed_train_dataset = ModelNetDataset(root_dir=train_data_root,
                                                phase='train',
                                                transform=transforms.Compose(
                                                    [ToTensor()]))

    transformed_valid_dataset = ModelNetDataset(root_dir=train_data_root,
                                                phase='test',
                                                transform=transforms.Compose(
                                                    [ToTensor()]))

    # Loading dataset into dataloader
    train_loader = torch.utils.data.DataLoader(transformed_train_dataset,
                                               batch_size=train_batch_size,
                                               shuffle=True,
                                               num_workers=num_workers)

    val_loader = torch.utils.data.DataLoader(transformed_valid_dataset,
                                             batch_size=test_batch_size,
                                             shuffle=True,
                                             num_workers=num_workers)

    start_time = time.time()

    # Train for all epochs from start to end
    for epoch in range(0, epochs):
        adjust_learning_rate(optimizer, epoch)

        # train on train dataset
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(model.state_dict(), is_best, trained_model_path)

    end_time = time.time()
    duration_hours = (end_time - start_time) / 3600
    print("Duration (hours):", duration_hours)
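Note: `adjust_learning_rate` and `save_checkpoint` are called above but not shown in this snippet. A minimal sketch of typical implementations, assuming a step-decay schedule and that `trained_model_path` is a directory-style prefix (the original helpers may differ):

import shutil

import torch


def adjust_learning_rate(optimizer, epoch, base_lr=0.01, decay_every=30):
    """Step decay: divide the learning rate by 10 every `decay_every` epochs (assumed schedule)."""
    new_lr = base_lr * (0.1 ** (epoch // decay_every))
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr


def save_checkpoint(state, is_best, path, filename='checkpoint.pth.tar'):
    """Save the latest weights and keep a copy of the best-performing ones."""
    torch.save(state, path + filename)
    if is_best:
        shutil.copyfile(path + filename, path + 'model_best.pth.tar')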
Example #2
def get_dataset(directory, img_size, data_size=None):
    if img_size is None:
        return Segmentation(directory, 'training.json',
                            transform=ToTensor(), data_size=data_size)
    else:
        return Segmentation(directory, 'training.json',
                            transform=Compose([
                                RandomCrop((img_size, img_size)),
                                ToTensor(),
                            ]),
                            data_size=data_size)
Example #3
    def __init__(self):
        self.embedding_size = 512
        self.m = 3
        self.lambda_min = 5  # lower bound for lambda
        self.lambda_max = 1000  # upper bound for lambda
        self.epochs = 20
        self.batch_size = 128
        self.optimizer = 'sgd'
        self.momentum = 0.9
        self.dampening = 0
        self.lr = 1e-1
        self.lr_decay = 0
        self.wd = 0.00001
        self.model_dir = './model/sgd/'
        self.final_dir = './final_model/sgd/'
        self.start = None
        self.resume = self.final_dir + 'net.pth'
        self.load_it = True
        self.it = None
        self.load_optimizer = True
        self.seed = 123456

        self.use_out = True
        self.use_embedding = True

        self.test_batch_size = 24
        self.transform = transforms.Compose([
            TruncatedInput(input_per_file=1),
            ToTensor(),
        ])
Example #4
    def __init__(self):
        self.embedding_size = 1024
        self.m = 0.2
        self.s = 8

        self.samples_per_speaker = 8  # number of samples per speaker
        self.epochs = 20
        self.batch_size = 128
        self.optimizer = 'sgd'
        self.momentum = 0.9
        self.dampening = 0
        self.lr = 1e-1
        self.lr_decay = 0
        self.wd = 0.00001
        self.model_dir = './model/sgd/'
        self.final_dir = './final_model/sgd/'
        self.start = None
        self.resume = self.final_dir + 'net.pth'
        self.load_optimizer = True
        self.seed = 123456

        self.use_out = True
        self.use_embedding = True

        self.test_batch_size = 128
        self.transform = transforms.Compose([
            TruncatedInput(input_per_file=1),
            ToTensor(),
        ])
Example #5
    def __init__(self, model_path):
        self.model = SiameseAlexNet()
        # For server deployment, change this: drop map_location and load directly onto the GPU.
        if config.ubuntu:
            checkpoint = torch.load(model_path)
        else:
            checkpoint = torch.load(model_path, map_location="cpu")
        if "model" in checkpoint.keys():
            self.model.load_state_dict(checkpoint["model"])
        else:
            self.model.load_state_dict(checkpoint)
        self.cuda = torch.cuda.is_available()
        self.device = torch.device('cuda:0' if self.cuda else 'cpu')
        self.model = self.model.to(self.device)
        self.model.eval()
        self.transforms = transforms.Compose([ToTensor()])
        valid_scope = config.valid_scope
        self.anchors = generate_anchors(config.total_stride,
                                        config.anchor_base_size,
                                        config.anchor_scales,
                                        config.anchor_ratios, valid_scope)
        # np.outer() builds the 2-D Hanning window; the leading None adds a channel
        # dimension of size 1 before tiling over the anchors.
        self.windows = np.tile(
            np.outer(np.hanning(config.score_size),
                     np.hanning(config.score_size))[None, :],
            [config.anchor_num, 1, 1]).flatten()  # flatten the (17, 17) score maps, starting from channel 0
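Note: a minimal sketch of the cosine-window construction described in the comment above, with assumed values for `config.score_size` and `config.anchor_num` (the real ones come from the tracker config):

import numpy as np

score_size, anchor_num = 17, 5  # hypothetical values
window_2d = np.outer(np.hanning(score_size), np.hanning(score_size))  # (17, 17) cosine window
windows = np.tile(window_2d[None, :], [anchor_num, 1, 1]).flatten()   # (5 * 17 * 17,) = (1445,)
print(window_2d.shape, windows.shape)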
Example #6
def get_dataset(directory, img_size, data_size=None):
  """
  Unlike the train file, this returns the intact image and label regardless
  of the img_size option. It is the work of the main function to crop the
  image and send it as input to the model and stitch back the output
  """
  return Segmentation(directory, 'training.json',
                      transform=ToTensor(), data_size=data_size)
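Note: the docstring says the caller is responsible for cropping the image into patches and stitching the outputs back together. A minimal sketch of that pattern, assuming a model that maps a (1, C, p, p) patch to a prediction with the same spatial size (the real main() may handle overlap and borders differently):

import torch

def stitched_prediction(model, image, patch=256):
    """Run `model` tile by tile over `image` (C, H, W) and reassemble the outputs."""
    _, height, width = image.shape
    output = None
    with torch.no_grad():
        for y in range(0, height, patch):
            for x in range(0, width, patch):
                tile = image[:, y:y + patch, x:x + patch].unsqueeze(0)
                pred = model(tile).squeeze(0)
                if output is None:
                    output = torch.zeros(pred.shape[0], height, width)
                output[:, y:y + patch, x:x + patch] = pred
    return output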
Example #7
    def __getitem__(self, index):
        # print(len(self.gt_box_positive))
        patch = self.patches[index]
        image = _load_image(self.data_images,
                            self.image_files[patch[5]],
                            bits=self.bits_per_channel,
                            mode=self.mode)

        trsf_crop = CropTransform(patch[0], patch[1], self.patch_size,
                                  self.patch_size)
        trsf_hflip = transforms.RandomHorizontalFlip(p=0.5)
        trsf_tensor = transforms.ToTensor() \
            if self.bits_per_channel == 8 else ToTensor()
        trsf_normalize = transforms.Normalize(self.mean, self.std)

        trsf_compose = [trsf_crop]
        if self.training is True:
            trsf_compose.append(trsf_hflip)
        trsf_compose.append(trsf_tensor)
        trsf_compose.append(trsf_normalize)

        data_transforms = transforms.Compose(trsf_compose)
        image = data_transforms(image)

        bboxes = self.gt_box_positive[index]

        pad_v = max(patch[3] - self.image_sizes[patch[5]][1], 0)
        pad_h = max(patch[2] - self.image_sizes[patch[5]][0], 0)

        mask = np.zeros((self.patch_size, self.patch_size), dtype=np.uint8)
        if pad_v > 0:
            mask[self.patch_size - int(pad_v) - 1:self.patch_size, :] = 255
        if pad_h > 0:
            mask[:, self.patch_size - int(pad_h) - 1:self.patch_size] = 255

        for b in bboxes:
            x1, y1, x2, y2, _, _ = self.gt_boxes[b]
            x1 = max(x1 - patch[0], 0)
            y1 = max(y1 - patch[1], 0)
            x2 = min(x2 - patch[0], self.patch_size)
            y2 = min(y2 - patch[1], self.patch_size)
            mask[y1:y2, x1:x2] = 127  # to test/visualize
        mask = Image.fromarray(mask, mode='L')

        if self.training is True:
            mask = trsf_hflip(mask)
        mask = torch.LongTensor(np.array(mask).astype(np.uint8))

        print("Image {} with patch grid of {} by {}".format(
            self.image_files[patch[5]], self.patch_grids[patch[5]][0],
            self.patch_grids[patch[5]][1]))
        return image, mask  # , patch
Example #8
def vgg_eval_model(dataset_root_dir, restore_model: str):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("DEVICE WILL BE USED: ", device)

    net = VggNet(torchvision.models.vgg16_bn(True))
    net = net.to(device)

    classes = ('not hotdog', 'hotdog')

    if restore_model is not None and len(restore_model) > 0:
        # original saved file with DataParallel
        state_dict = torch.load(restore_model, map_location={'cuda:0': 'cpu'})
        net.load_state_dict(state_dict)
        print("Model {} restored".format(restore_model))
    else:
        print("ERROR: no restore model file found!")
        return

    #from torch.autograd import Variable
    #dummy_input = Variable(torch.randn(1, 3, 224, 224), requires_grad=True)
    # input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(12)]
    # output_names = ["output1"]
    #torch.onnx.export(net, dummy_input, "vgg_hot_dog.onnx", export_params=True, verbose=True)
    #print("SUCCESS")

    hot_dog_dataset_test = HotDogsDatasetEval(
        root_dir=dataset_root_dir,
        transform=transforms.Compose([
            Rescale((224, 224)),  #normalize,
            ToTensor(),
        ]))
    test_dataloader = DataLoader(hot_dog_dataset_test,
                                 batch_size=4,
                                 shuffle=True,
                                 num_workers=4)

    for dl, split in zip([test_dataloader], ['test']):
        correct = 0
        total = 0
        with torch.no_grad():
            for data in dl:
                images, names = data['image'].float(), data['name']
                images = images.to(device)

                outputs = net(images)
                total += len(names)
                for idx, prediction in enumerate(outputs.data):
                    res = torch.nn.functional.softmax(prediction, dim=0)
                    _, rid = torch.max(res, 0)
                    print('{} is {}'.format(names[idx], classes[rid]))
                    imshow(torchvision.utils.make_grid(images[idx]))
Example #9
def test(n_class=1,
         in_channel=1,
         load=False,
         img_size=None,
         directory='../Data/train/'):
    global original, dataset, model
    original = Segmentation(directory, 'training.json', ToTensor())
    if img_size is None:
        dataset = Segmentation(directory, 'test.json', ToTensor())
    else:
        dataset = Segmentation(directory, 'test.json', T.Compose([\
          RandomCrop((img_size, img_size)), ToTensor()]))

    model = UNet(n_class=n_class, in_channel=in_channel)
    if load:
        filename = "unet.pth"
        map_location = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        try:
            checkpoint = torch.load(filename, map_location=map_location)
            model.load_state_dict(checkpoint['state_dict'])
            print("Loaded saved model")
        except Exception:
            print("Unable to load saved model")
Example #10
def main():
    parser = argparse.ArgumentParser(
        description='Binary MRI Quality Classification')
    parser.add_argument('--yaml_path',
                        type=str,
                        metavar='YAML',
                        default="config/acdc_binary_classification.yaml",
                        help='Enter the path for the YAML config')
    args = parser.parse_args()

    yaml.add_constructor("!join", yaml_var_concat)

    yaml_path = args.yaml_path
    with open(yaml_path, 'r') as f:
        train_args = yaml.load(f, Loader=yaml.Loader)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    composed = transforms.Compose([
        Resize((224, 224)),
        OneToThreeDimension(),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    acdc_dataset = ACDCDataset(train_args["pos_samps_test"],
                               train_args["neg_samps_test"],
                               transform=composed)

    dataloader = DataLoader(acdc_dataset,
                            batch_size=train_args["batch_size"],
                            shuffle=False,
                            num_workers=4)
    dataset_size = len(acdc_dataset)

    model_ft = get_model(train_args["model"],
                         device,
                         pretrained=train_args["pretrained"])
    state = get_most_recent_model(train_args["model"],
                                  train_args["model_save_dir"])
    model_ft.load_state_dict(state)

    test(model_ft, dataloader, dataset_size, device=device)
Example #11
def main(args):
	device = torch.device(args.device)
	# First check whether a model with the given name exists in the models folder
	model = torch.load(f"{args.model_folder}/{args.model_name}", map_location=device)
	model.eval()


	data_transforms = transforms.Compose([ToTensor()])
	
	dataset = Data(args.filename_x, args.filename_y, args.data_root,transform=data_transforms)

	output = {"Super_resolution": []}


	for sample in dataset:
		lores = sample['x'].to(device).float()
		print(lores.shape)
		sures = model(lores.unsqueeze(0)).squeeze(0)
		output["Super_resolution"].append(sures.detach().cpu().data.numpy())
	
	savemat(f"{args.model_folder}/{args.filename_out}", output)
Example #12
def main():
    """ DataLoader """
    train_data = PartAffordanceDataset(
        'train.csv',
        transform=transforms.Compose([CenterCrop(),
                                      ToTensor(),
                                      Normalize()]))

    test_data = PartAffordanceDataset(
        'test.csv',
        transform=transforms.Compose([CenterCrop(),
                                      ToTensor(),
                                      Normalize()]))

    train_loader = DataLoader(train_data,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=4)
    test_loader = DataLoader(test_data,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=4)

    if args.model == 'FCN8s':
        model = FCN8s(args.in_channel, args.n_classes)
    elif args.model == 'SegNetBasic':
        model = SegNetBasic(args.in_channel, args.n_classes)
    elif args.model == 'UNet':
        model = UNet(args.in_channel, args.n_classes)
    else:
        print('This model doesn\'t exist in the model directory')
        sys.exit(1)

    model.apply(init_weight)
    """ training """

    writer = SummaryWriter(args.result_path) if args.writer else None

    if args.class_weight:
        criterion = nn.CrossEntropyLoss(weight=class_weight.to(args.device))
    else:
        criterion = nn.CrossEntropyLoss()

    model.to(args.device)

    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)

    train_losses = []
    val_iou = []
    mean_iou = []
    best_mean_iou = 0.0

    for epoch in range(args.max_epoch):
        model.train()
        running_loss = 0.0

        for i, sample in tqdm.tqdm(enumerate(train_loader),
                                   total=len(train_loader)):
            optimizer.zero_grad()

            x, y = sample['image'], sample['class']

            x = x.to(args.device)
            y = y.to(args.device)

            h = model(x)
            loss = criterion(h, y)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        train_losses.append(running_loss / (i + 1))

        val_iou.append(
            eval_model(model, test_loader, args.device).to('cpu').float())
        mean_iou.append(val_iou[-1].mean().item())

        if best_mean_iou < mean_iou[-1]:
            best_mean_iou = mean_iou[-1]
            torch.save(model.state_dict(),
                       args.result_path + '/best_mean_iou_model.prm')

        if writer is not None:
            writer.add_scalar("train_loss", train_losses[-1], epoch)
            writer.add_scalar("mean_IoU", mean_iou[-1], epoch)
            writer.add_scalars(
                "class_IoU", {
                    'iou of class 0': val_iou[-1][0],
                    'iou of class 1': val_iou[-1][1],
                    'iou of class 2': val_iou[-1][2],
                    'iou of class 3': val_iou[-1][3],
                    'iou of class 4': val_iou[-1][4],
                    'iou of class 5': val_iou[-1][5],
                    'iou of class 6': val_iou[-1][6],
                    'iou of class 7': val_iou[-1][7]
                }, epoch)

        print('epoch: {}\tloss: {:.5f}\tmean IOU: {:.3f}'.format(
            epoch, train_losses[-1], mean_iou[-1]))

    torch.save(model.state_dict(), args.result_path + "/final_model.prm")
Example #13
import torch
from torch import nn
from torchvision import transforms, utils
from torch.utils.data import DataLoader
from dataset import PosterDataset, Resize, ToTensor
import numpy as np
import warnings
warnings.filterwarnings("ignore")

bs = 32

transformed_dataset = PosterDataset(csv_file='./data.txt',
                                    root_dir='../data/fgsm/FGSM',
                                    transform=transforms.Compose(
                                        [Resize(), ToTensor()]))
data_loader = DataLoader(transformed_dataset, batch_size=bs, shuffle=False)
print('train batches: ', len(data_loader))

device = torch.device('cuda')
org_model = torch.load('../data/models/origin_model.pkl').cuda()
fgsm_model = torch.load('../data/models/fgsm_model.pkl').cuda()
pgd_model = torch.load('../data/models/pgd_model.pkl').cuda()


def props_to_onehot(props):
    if isinstance(props, list):
        props = np.array(props)
    a = np.argmax(props, axis=1)
    b = np.zeros((len(a), props.shape[1]))
    b[np.arange(len(a)), a] = 1
    return b
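Note: a short usage sketch for `props_to_onehot`; each row of class probabilities becomes a one-hot row at the argmax:

probs = [[0.1, 0.7, 0.2],
         [0.8, 0.1, 0.1]]
print(props_to_onehot(probs))
# [[0. 1. 0.]
#  [1. 0. 0.]]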
Example #14
    with torch.no_grad():
        for i, sample in enumerate(test_loader):
            image = sample["image"].to(params["device"]).float()
            pred = model(image)
            pred = pred.cpu().numpy()
            prediction_list.append(pred)
        predictions = np.concatenate(prediction_list, axis=0)
        return predictions


if __name__ == "__main__":
    if not os.path.isdir(params["checkpoint_dir"]):
        os.makedirs(params["checkpoint_dir"])
    # TODO: change back
    train_transform = torchvision.transforms.Compose(
        [ToTensor(), RandomFlip(), RandomRotate()])
    test_transform = torchvision.transforms.Compose([ToTensor()])

    train_dataset = COVID19Dataset(params["train_data_dir"],
                                   transform=train_transform,
                                   channels=params["channels"])

    test_dataset = COVID19Dataset(params["train_data_dir"],
                                  transform=test_transform,
                                  channels=params["channels"])

    print("train:", params["train_data_dir"], len(train_dataset))

    # TODO: change back to True
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=params["batch_size"],
Example #15
parser.add_argument('--out_dir',
                    type=str,
                    default='models/',
                    help='Folder where to save the model')

if __name__ == '__main__':

    # Parse input arguments
    args = parser.parse_args()

    #%% Check device
    # device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    #%% Create dataset
    dataset = CSPDataset(size=args.size, k=args.k, n=args.n, alpha=args.alpha,
                         r=args.r, p=args.p, transform=ToTensor())

    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    # retrieving data
    # n = dataset.csp.n
    n = dataset.csp.n
    m = dataset.csp.m
    n_bad = dataset.csp.n_bad_assgn
    csp_shape = {
        'k': args.k,
        'n': n,
        'd': dataset.csp.d,
        'm': m,
        'n_bad': n_bad
    }
Example #16
    elif args.model == 'UNet':
        model = UNet(args.in_channel, args.n_classes)
    else:
        print('This model doesn\'t exist in the model directory')
        sys.exit(1)

    if args.params_path is not None:
        model.load_state_dict(
            torch.load(args.params_path,
                       map_location=lambda storage, loc: storage))
    """ define DataLoader """

    data = PartAffordanceDataset('test.csv',
                                 transform=transforms.Compose(
                                     [CenterCrop(),
                                      ToTensor(),
                                      Normalize()]))

    data_loader = DataLoader(data, batch_size=args.num_images, shuffle=True)

    for sample in data_loader:
        model.eval()

        predict(model, sample, device=args.device)

        x = sample["image"]
        x = reverse_normalize(x, mean, std)
        save_image(
            x, args.result_path + '/' + 'original_images_with_' + args.model +
            '.jpg')
Example #17
def test(data_dir, csv_path, splits_path, output_dir, target='pa',
         batch_size=1, pretrained=False, min_patients_per_label=100, seed=666,
         model_type='hemis', architecture='densenet121', misc=None):
    assert target in ['pa', 'l', 'joint']

    print(f"\n\nTesting seed {seed}")
    torch.manual_seed(seed)
    np.random.seed(seed)
    extra = misc.extra
    name = output_dir.split("/")[-1].format('') + extra
    output_dir = output_dir.format(seed)
    splits_path = splits_path.format(seed)

    if not exists(splits_path):
        split_dataset(csv_path, splits_path)

    resultsfile = join(output_dir, '..', 'auc-test.csv')
    if not isfile(resultsfile):
        columns = ['expt', 'seed', 'accuracy', 'auc', 'auc_weighted', 'prc', 'prc_weighted']
        test_metrics_df = pd.DataFrame(columns=columns)
    else:
        test_metrics_df = pd.read_csv(resultsfile)

    # Save predictions
    savepreds = {}
    saveauc = {}

    predsdir = join(output_dir, '..', 'test_outs')
    predsfile = join(predsdir, f'preds-{name}{extra}_{seed}-{target}.npz')
    aucfile = join(predsdir, f'auc-{name}{extra}_{seed}-{target}.npz')

    if isfile(predsfile):
        print(f'Loading {predsfile}')
        _arr = np.load(predsfile, allow_pickle=True)
        savepreds = {k: _arr[k] for k in _arr.keys()}

    if isfile(aucfile):
        print(f'Loading {aucfile}')
        _arr = np.load(aucfile, allow_pickle=True)
        saveauc = {k: _arr[k] for k in _arr.keys()}

    print("Test mode: {}".format(target))
    print('Device that will be used is: {0}'.format(DEVICE))

    # Load data
    preprocessing = Compose([Normalize(), ToTensor()])

    testset = PCXRayDataset(data_dir, csv_path, splits_path, transform=preprocessing, dataset='test',
                            pretrained=pretrained, min_patients_per_label=min_patients_per_label)
    testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)

    print("{0} patients in test set.".format(len(testset)))

    # Find best weights
    metricsdf = pd.read_csv(join(output_dir, f'{target}-metrics.csv'))
    best_epoch = int(metricsdf.idxmax()['auc'])
    weights_file = join(output_dir, '{}-e{}.pt'.format(target, best_epoch))

    # Create model and load best weights
    model = create_model(model_type, num_classes=testset.nb_labels, target=target,
                         architecture=architecture, dropout=0.0, otherargs=misc)
    try:
        model.load_state_dict(torch.load(weights_file))
    except Exception:
        # Issue in loading weights if trained on multiple GPUs
        state_dict = torch.load(weights_file, map_location='cpu')
        for key in list(state_dict.keys()):
            if 'conv' in key or 'classifier' in key:
                if '.0.' in key:
                    new_key = key.replace(".0.", '.')
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]

        model.load_state_dict(state_dict)

    model.to(DEVICE)
    model.eval()

    # if misc.test_multi:
    #     y_true, y_preds, _ = get_model_preds(model, dataloader=testloader, target=target, model_type=model_type,
    #                                          vote_at_test=misc.vote_at_test, progress_bar=True)
    #     metrics, per_label_auc, per_label_prc = get_metrics(y_true, y_preds)
    #     row = {'expt': name, 'seed': seed, **metrics}
    #     print(row)
    #
    #     test_metrics_df = test_metrics_df.append(row, ignore_index=True)
    #     test_metrics_df.to_csv(resultsfile, index=False)
    #
    #     savepreds = {'y_true': y_true, 'y_preds': y_preds, 'meta': row}
    #     saveauc = {'auc': per_label_auc, 'prc': per_label_prc, 'meta': row}

    for view in misc.test_on:
        print(f"Testing on only {view}")
        if view == 'pa':
            model.test_only_one = 0
        elif view == 'l':
            model.test_only_one = 1
        else:
            model.test_only_one = None

        y_true, y_preds, _ = get_model_preds(model, dataloader=testloader, target=target,
                                             test_on=view, model_type=model_type,
                                             vote_at_test=misc.vote_at_test, progress_bar=True)

        metrics, per_label_auc, per_label_prc = get_metrics(y_true, y_preds)
        row = {'expt': name + f'{view}_view', 'seed': seed, **metrics}
        print(row)

        test_metrics_df = test_metrics_df.append(row, ignore_index=True)

        savepreds[f'y_true_{view}_view'] = y_true
        savepreds[f'y_preds_{view}_view'] = y_preds
        savepreds[f'meta_{view}_view'] = row
        saveauc[f'auc_{view}_view'] = per_label_auc
        saveauc[f'prc_{view}_view'] = per_label_prc
        saveauc[f'meta_{view}_view'] = row

    test_metrics_df.to_csv(resultsfile, index=False)
    np.savez(join(predsdir, f'preds-{name}{extra}_{seed}-{target}'), **savepreds)
    np.savez(join(predsdir, f'auc-{name}{extra}_{seed}-{target}'), **saveauc)
Example #18
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch')
    parser.add_argument('--batch-size',
                        type=int,
                        default=16,
                        metavar='N',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--random-batch',
                        action='store_true',
                        default=False,
                        help='if true, randomly sample batches')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    kwargs = {
        'num_workers': 1,
        'pin_memory': True,
        'worker_init_fn': init_worker_seed
    }

    train_loader = torch.utils.data.DataLoader(ObjectDataset(
        os.path.dirname(os.path.abspath(__file__)) + '/../kitti',
        'train',
        transform=transforms.Compose([ToTensor(),
                                      RandomHorizontalFlip()])),
                                               batch_size=args.batch_size,
                                               shuffle=args.random_batch,
                                               **kwargs)

    test_loader = torch.utils.data.DataLoader(ObjectDataset(
        os.path.dirname(os.path.abspath(__file__)) + '/../kitti',
        'val',
        transform=transforms.Compose([ToTensor()])),
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              **kwargs)

    model = Network(3).to(device)

    # criterion = nn.CrossEntropyLoss()
    criterion = F.nll_loss
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    print('output log file to', log_directory)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, criterion, epoch)
        validation(args, model, device, test_loader, criterion)

    if (args.save_model):
        torch.save(model.state_dict(), "object_recognition.pt")

    train_writer.close()
    validation_writer.close()
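Note: `init_worker_seed` is passed as `worker_init_fn` in the loader kwargs above but is not defined in this snippet. A minimal sketch, following the standard PyTorch pattern of deriving a per-worker seed for NumPy and `random` (the original may differ):

import random

import numpy as np
import torch


def init_worker_seed(worker_id):
    # torch.initial_seed() already differs per DataLoader worker; reuse it for the other RNGs.
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)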
Example #19
def train(data_dir,
          csv_path,
          splits_path,
          output_dir,
          target='pa',
          nb_epoch=100,
          lr=(1e-4, ),
          batch_size=1,
          optim='adam',
          dropout=None,
          min_patients_per_label=50,
          seed=666,
          data_augmentation=True,
          model_type='hemis',
          architecture='densenet121',
          misc=None):
    assert target in ['pa', 'l', 'joint']

    torch.manual_seed(seed)
    np.random.seed(seed)

    output_dir = output_dir.format(seed)
    splits_path = splits_path.format(seed)

    logger.info(f"Training mode: {target}")

    if not exists(output_dir):
        os.makedirs(output_dir)

    if not exists(splits_path):
        split_dataset(csv_path, splits_path, seed=seed)

    # Find device
    logger.info(f'Device that will be used is: {DEVICE}')

    # Load data
    val_transfo = [Normalize(), ToTensor()]
    if data_augmentation:
        train_transfo = [Normalize(), ToPILImage()]

        if 'rotation' in misc.transforms:
            train_transfo.append(RandomRotation(degrees=misc.rotation_degrees))

        if 'translation' in misc.transforms:
            train_transfo.append(RandomTranslate(translate=misc.translate))

        train_transfo.append(ToTensor())

        if 'noise' in misc.transforms:
            train_transfo.append(GaussianNoise())
    else:
        train_transfo = val_transfo

    dset_args = {
        'datadir': data_dir,
        'csvpath': csv_path,
        'splitpath': splits_path,
        'max_label_weight': misc.max_label_weight,
        'min_patients_per_label': min_patients_per_label,
        'flat_dir': misc.flatdir
    }
    loader_args = {
        'batch_size': batch_size,
        'shuffle': True,
        'num_workers': misc.threads,
        'pin_memory': True
    }

    trainset = PCXRayDataset(transform=Compose(train_transfo), **dset_args)
    valset = PCXRayDataset(transform=Compose(val_transfo),
                           dataset='val',
                           **dset_args)

    trainloader = DataLoader(trainset, **loader_args)
    valloader = DataLoader(valset, **loader_args)
    n_pts = f"{len(trainset)} train,"

    if misc.use_extended:
        ext_args = dset_args.copy()
        ext_args['splitpath'] = None
        ext_args['csvpath'] = misc.csv_path_ext

        extset = PCXRayDataset(transform=Compose(train_transfo),
                               mode='pa_only',
                               use_labels=trainset.labels,
                               **ext_args)
        extset.labels_count = trainset.labels_count
        extset.labels_weights = trainset.labels_weights
        extloader = DataLoader(extset, **loader_args)

        n_pts += f" {len(extset)} ext_train,"

    logger.info(f"Number of patients: {n_pts} {len(valset)} valid.")
    logger.info(
        f"Predicting {len(trainset.labels)} labels: \n{trainset.labels}")
    logger.info(trainset.labels_weights)

    # Load model
    model = create_model(model_type,
                         num_classes=trainset.nb_labels,
                         target=target,
                         architecture=architecture,
                         dropout=dropout,
                         otherargs=misc)
    model.to(DEVICE)
    logger.info(f'Created {model_type} model')

    evaluator = ModelEvaluator(output_dir=output_dir,
                               target=target,
                               logger=logger)

    criterion = nn.BCEWithLogitsLoss(
        pos_weight=trainset.labels_weights.to(DEVICE))
    loss_weights = [1.0] + list(misc.loss_weights)

    if len(misc.mt_task_prob) == 1:
        _mt_task_prob = misc.mt_task_prob[0]
        task_prob = [1 - _mt_task_prob, _mt_task_prob / 2., _mt_task_prob / 2.]
    else:
        _pa_prob, _l_prob = misc.mt_task_prob
        _jt_prob = 1 - (_pa_prob + _l_prob)
        task_prob = [_jt_prob, _pa_prob, _l_prob]

    if model_type in ['singletask', 'multitask', 'dualnet'] and len(lr) > 1:
        # each branch has custom learning rate
        optim_params = [{
            'params': model.frontal_model.parameters(),
            'lr': lr[0]
        }, {
            'params': model.lateral_model.parameters(),
            'lr': lr[1]
        }, {
            'params': model.classifier.parameters(),
            'lr': lr[2]
        }]
    else:
        # one lr for all
        optim_params = [{'params': model.parameters(), 'lr': lr[0]}]

    if misc.learn_loss_coeffs:
        temperature = torch.ones(size=(3, ), requires_grad=True,
                                 device=DEVICE).float()
        temperature_lr = lr[-1] if len(lr) > 3 else lr[0]
        optim_params.append({'params': temperature, 'lr': temperature_lr})

    # Optimizer
    optimizer, scheduler = create_opt_and_sched(optim=optim,
                                                params=optim_params,
                                                lr=lr[0],
                                                other_args=misc)
    start_epoch = 1

    # Resume training if possible
    latest_ckpt_file = join(output_dir, f'{target}-latest.tar')
    if isfile(latest_ckpt_file):
        checkpoint = torch.load(latest_ckpt_file)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        del checkpoint

        evaluator.load_saved()
        start_epoch = int(evaluator.eval_df.epoch.iloc[-1])
        logger.info(f"Resumed at epoch {start_epoch}")

    # Training loop
    for epoch in range(start_epoch,
                       nb_epoch + 1):  # loop over the dataset multiple times
        model.train()
        running_loss = torch.zeros(1, requires_grad=False,
                                   dtype=torch.float).to(DEVICE)
        train_preds, train_true = [], []
        for i, data in enumerate(trainloader, 0):
            if target == 'joint':
                *images, label = data['PA'].to(DEVICE), data['L'].to(
                    DEVICE), data['encoded_labels'].to(DEVICE)
                if model_type == 'stacked':
                    images = torch.cat(images, dim=1)
            else:
                images, label = data[target.upper()].to(
                    DEVICE), data['encoded_labels'].to(DEVICE)

            # Forward
            output = model(images)
            optimizer.zero_grad()
            if model_type == 'multitask':
                # order of returned logits is joint, frontal, lateral
                if misc.learn_loss_coeffs:
                    loss_weights = temperature.pow(-2)

                all_task_losses, weighted_task_losses = [], []
                for idx, _logit in enumerate(output):
                    task_loss = criterion(_logit, label)
                    all_task_losses.append(task_loss)
                    weighted_task_losses.append(task_loss * loss_weights[idx])

                losses_dict = {
                    0: sum(weighted_task_losses),
                    1: all_task_losses[1],
                    2: all_task_losses[2]
                }
                select = np.random.choice([0, 1, 2], p=task_prob)
                loss = losses_dict[select]  # mixing this temp seems bad

                if misc.learn_loss_coeffs:
                    loss += temperature.log().sum()

                output = output[0]
            else:
                loss = criterion(output, label)

            # Backward
            loss.backward()
            optimizer.step()

            # Save predictions
            train_preds.append(torch.sigmoid(output).detach().cpu().numpy())
            train_true.append(label.detach().cpu().numpy())

            # print statistics
            running_loss += loss.detach()
            print_every = max(1, len(trainset) // (20 * batch_size))
            if (i + 1) % print_every == 0:
                running_loss = running_loss.cpu().detach().numpy().squeeze(
                ) / print_every
                logger.info('[{0}, {1:5}] loss: {2:.5f}'.format(
                    epoch, i + 1, running_loss))
                evaluator.store_dict['train_loss'].append(running_loss)
                running_loss = torch.zeros(1, requires_grad=False).to(DEVICE)
            del output, images, data

        if misc.use_extended:
            # Train with only PA images from extended dataset
            for i, data in enumerate(extloader, 0):
                if target == 'joint':
                    *images, label = data['PA'].to(DEVICE), data['L'].to(
                        DEVICE), data['encoded_labels'].to(DEVICE)
                else:
                    images, label = data[target.upper()].to(
                        DEVICE), data['encoded_labels'].to(DEVICE)

                # Forward
                output = model(images)
                optimizer.zero_grad()
                if model_type == 'multitask':
                    # only use PA loss
                    output = output[1]

                loss = criterion(output, label)

                # Backward
                loss.backward()
                optimizer.step()

                # Save predictions
                train_preds.append(
                    torch.sigmoid(output).detach().cpu().numpy())
                train_true.append(label.detach().cpu().numpy())

                # print statistics
                running_loss += loss.detach()
                print_every = max(1, len(trainset) // (20 * batch_size))
                if (i + 1) % print_every == 0:
                    running_loss = running_loss.cpu().detach().numpy().squeeze(
                    ) / print_every
                    logger.info(
                        '[{0}, {1:5}] Extended dataset loss: {2:.5f}'.format(
                            epoch, i + 1, running_loss))
                    evaluator.store_dict['train_loss'].append(running_loss)
                    running_loss = torch.zeros(1,
                                               requires_grad=False).to(DEVICE)
                del output, images, data

        train_preds = np.vstack(train_preds)
        train_true = np.vstack(train_true)

        model.eval()
        val_true, val_preds, val_runloss = get_model_preds(
            model,
            dataloader=valloader,
            loss_fn=criterion,
            target=target,
            model_type=model_type,
            vote_at_test=misc.vote_at_test)

        val_runloss /= (len(valset) / batch_size)
        logger.info(f'Epoch {epoch} - Val loss = {val_runloss:.5f}')
        val_auc, _ = evaluator.evaluate_and_save(val_true,
                                                 val_preds,
                                                 epoch=epoch,
                                                 train_true=train_true,
                                                 train_preds=train_preds,
                                                 runloss=val_runloss)

        if 'reduce' in misc.sched:
            scheduler.step(metrics=val_auc, epoch=epoch)
        else:
            scheduler.step(epoch=epoch)

        _states = {
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict()
        }
        torch.save(_states, latest_ckpt_file)
        torch.save(model.state_dict(),
                   join(output_dir, '{}-e{}.pt'.format(target, epoch)))

        # Remove all batches weights
        weights_files = glob(
            join(output_dir, '{}-e{}-i*.pt'.format(target, epoch)))
        for file in weights_files:
            os.remove(file)
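Note: when `misc.learn_loss_coeffs` is set, the loop above weights each task loss by an inverse squared temperature and adds a log-temperature penalty so the temperatures cannot grow without bound. A minimal, self-contained sketch of that weighting with illustrative numbers:

import torch

temperature = torch.ones(3, requires_grad=True)  # one learnable temperature per task
task_losses = [torch.tensor(0.9), torch.tensor(0.5), torch.tensor(0.7)]  # e.g. joint, frontal, lateral
weights = temperature.pow(-2)
loss = sum(w * l for w, l in zip(weights, task_losses)) + temperature.log().sum()
loss.backward()  # the temperatures receive gradients alongside the model parameters
print(loss.item(), temperature.grad)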
Example #20
        train_via = {
            key: via[key]
            for i, key in enumerate(keys) if (i % args.k) != args.fold
        }
    else:
        train_via = via

    if args.special:
        trainset = VIADataset(train_via, args.path, shuffle=True, size=None)
        trainset = RandomTranspose(trainset)
    else:
        trainset = VIADataset(train_via, args.path, shuffle=True, alt=1)
        trainset = Scale(trainset, args.scale) if args.scale > 1 else trainset
        trainset = RandomTranspose(RandomFilter(ColorJitter(trainset)))

    trainset = ToTensor(trainset)

    print('Training size = {}'.format(len(trainset)))

    # Dataloaders
    trainloader = DataLoader(trainset, batch_size=args.batch, pin_memory=True)

    # Model
    if args.model == 'unet':
        if args.multitask:
            model = MultiTaskUNet(3, 1, R=5)
        else:
            model = UNet(3, 1)
    elif args.model == 'segnet':
        if args.multitask:
            model = MultiTaskSegNet(3, 1, R=5)
Example #21
            flame_proj_lmks.append(flame_proj_lmk)
            shape_norms += torch.norm(shape_params)
            exp_norms += torch.norm(exp_params)


def weight_init(m):
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)


if __name__ == '__main__':
    need_evaluate = False
    composed_transforms = transforms.Compose(
        [ScaleAndCrop(config_img_size),
         ToTensor()])
    dataset = NoWDataset(
        dataset_path=os.path.join('.', 'training_set', 'NoW_Dataset',
                                  'final_release_version'),
        data_folder='iphone_pictures',
        bounding_box_folder='detected_face',
        facepos_folder='openpose',
        # id_txt = 'subjects_idst.txt',
        id_txt='subjects_id.txt',
        R=6,
        transform=composed_transforms)

    if need_evaluate:
        resnet50 = torch.load("./resnet50.pkl")
    else:
        resnet50 = models.resnet50(pretrained=True)
Example #22
    path = "/home/lgraha07/scratch/Paper2021/data/"

    trainpath = path + traindir
    testpath = path + testdir
    validpath = path + validdir

    filename = "fracdata.dat"

    trainfile = trainpath + filename
    testfile = testpath + filename
    validfile = validpath + filename

    train_set = FractalDataset(filename=trainfile,
                               root_dir=trainpath,
                               transform=ToTensor())
    test_set = FractalDataset(filename=testfile,
                              root_dir=testpath,
                              transform=ToTensor())
    valid_set = FractalDataset(filename=validfile,
                               root_dir=validpath,
                               transform=ToTensor())

    # Put each dataset into a pytorch data loader which divides it into batches
    frac_train_loader = data_utils.DataLoader(train_set,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=num_workers)
    frac_test_loader = data_utils.DataLoader(test_set,
                                             batch_size=batch_size,
                                             shuffle=False,
Example #23
    suffix = utils.build_suffix(args.structure)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(torch.cuda.is_available())

    if args.train_set is not None:
        train_set_file = args.train_set
    else:
        train_set_file = "data/labels/train_labels_frame_patches30.csv"

    train_set = FoADataset(train_set_file,
                           "data/inputs",
                           transform=transforms.Compose([
                               RandomTranslation(),
                               RandomPermutations(),
                               Normalization(),
                               ToTensor()
                           ]))

    if args.test_set is not None:
        test_set_file = args.test_set
    else:
        test_set_file = "data/labels/test_labels_frame_patches30.csv"

    test_set = FoADataset(test_set_file,
                          "data/inputs",
                          transform=transforms.Compose(
                              [Normalization(), ToTensor()]))

    train_loader = DataLoader(train_set,
                              batch_size=args.batch_size,
                              shuffle=True,
Example #24
            torch.save(net, model_name)
            eval(valloader, model_name)

    print('Finished Training')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('path')
    args = parser.parse_args()

    traindataset = PhoneDataset(label_path='labels_train.txt',
                                root_dir=args.path,
                                transform=transforms.Compose(
                                    [Rescale((224, 224)),
                                     ToTensor()]))
    trainloader = torch.utils.data.DataLoader(
        traindataset,
        batch_size=4,
        shuffle=False,
    )

    valdataset = PhoneDataset(label_path='labels_val.txt',
                              root_dir=args.path,
                              transform=transforms.Compose(
                                  [Rescale((224, 224)),
                                   ToTensor()]))
    valloader = torch.utils.data.DataLoader(
        valdataset,
        batch_size=1,
        shuffle=False,
Example #25
def main():
    global args
    global sv_name_eval
    # save configuration to file
    sv_name = datetime.strftime(datetime.now(), '%Y%m%d_%H%M%S')
    sv_name_eval = sv_name
    print('saving file name is ', sv_name)

    write_arguments_to_file(args,
                            os.path.join(logs_dir, sv_name + '_arguments.txt'))

    # ----------------------------------- data
    # define mean/std of the training set (for data normalization)
    label_type = args.label_type
    use_s1 = (args.sensor_type == 's1') | (args.sensor_type == 's1s2')
    use_s2 = (args.sensor_type == 's2') | (args.sensor_type == 's1s2')

    dataset = args.dataset
    data_dir = os.path.join("data", dataset, "data")

    bands_mean = {}
    bands_std = {}
    train_dataGen = None
    val_dataGen = None
    test_dataGen = None

    print(f"Using {dataset} dataset")
    if dataset == 'sen12ms':
        bands_mean = {
            's1_mean': [-11.76858, -18.294598],
            's2_mean': [
                1226.4215, 1137.3799, 1139.6792, 1350.9973, 1932.9058,
                2211.1584, 2154.9846, 2409.1128, 2001.8622, 1356.0801
            ]
        }
        bands_std = {
            's1_std': [4.525339, 4.3586307],
            's2_std': [
                741.6254, 740.883, 960.1045, 946.76056, 985.52747, 1082.4341,
                1057.7628, 1136.1942, 1132.7898, 991.48016
            ]
        }
    elif dataset == 'bigearthnet':
        # THE S2 BAND STATISTICS WERE PROVIDED BY THE BIGEARTHNET TEAM
        # Source: https://git.tu-berlin.de/rsim/bigearthnet-models-tf/-/blob/master/BigEarthNet.py
        bands_mean = {
            's1_mean': [-12.619993, -19.290445],
            's2_mean': [
                340.76769064, 429.9430203, 614.21682446, 590.23569706,
                950.68368468, 1792.46290469, 2075.46795189, 2218.94553375,
                2266.46036911, 2246.0605464, 1594.42694882, 1009.32729131
            ]
        }
        bands_std = {
            's1_std': [5.115911, 5.464428],
            's2_std': [
                554.81258967, 572.41639287, 582.87945694, 675.88746967,
                729.89827633, 1096.01480586, 1273.45393088, 1365.45589904,
                1356.13789355, 1302.3292881, 1079.19066363, 818.86747235
            ]
        }
    else:
        raise NameError(f"unknown dataset: {dataset}")

    # load datasets
    imgTransform = transforms.Compose(
        [ToTensor(), Normalize(bands_mean, bands_std)])
    if dataset == 'sen12ms':
        train_dataGen = SEN12MS(data_dir,
                                args.label_split_dir,
                                imgTransform=imgTransform,
                                label_type=label_type,
                                threshold=args.threshold,
                                subset="train",
                                use_s1=use_s1,
                                use_s2=use_s2,
                                use_RGB=args.use_RGB,
                                IGBP_s=args.simple_scheme,
                                data_size=args.data_size,
                                sensor_type=args.sensor_type,
                                use_fusion=args.use_fusion)

        val_dataGen = SEN12MS(data_dir,
                              args.label_split_dir,
                              imgTransform=imgTransform,
                              label_type=label_type,
                              threshold=args.threshold,
                              subset="val",
                              use_s1=use_s1,
                              use_s2=use_s2,
                              use_RGB=args.use_RGB,
                              IGBP_s=args.simple_scheme,
                              data_size=args.data_size,
                              sensor_type=args.sensor_type,
                              use_fusion=args.use_fusion)

        if args.eval:
            test_dataGen = SEN12MS(data_dir,
                                   args.label_split_dir,
                                   imgTransform=imgTransform,
                                   label_type=label_type,
                                   threshold=args.threshold,
                                   subset="test",
                                   use_s1=use_s1,
                                   use_s2=use_s2,
                                   use_RGB=args.use_RGB,
                                   IGBP_s=args.simple_scheme,
                                   sensor_type=args.sensor_type,
                                   use_fusion=args.use_fusion)
    else:
        # Assume bigearthnet
        train_dataGen = BigEarthNet(data_dir,
                                    args.label_split_dir,
                                    imgTransform=imgTransform,
                                    label_type=label_type,
                                    threshold=args.threshold,
                                    subset="train",
                                    use_s1=use_s1,
                                    use_s2=use_s2,
                                    use_RGB=args.use_RGB,
                                    CLC_s=args.simple_scheme,
                                    data_size=args.data_size,
                                    sensor_type=args.sensor_type,
                                    use_fusion=args.use_fusion)

        val_dataGen = BigEarthNet(data_dir,
                                  args.label_split_dir,
                                  imgTransform=imgTransform,
                                  label_type=label_type,
                                  threshold=args.threshold,
                                  subset="val",
                                  use_s1=use_s1,
                                  use_s2=use_s2,
                                  use_RGB=args.use_RGB,
                                  CLC_s=args.simple_scheme,
                                  data_size=args.data_size,
                                  sensor_type=args.sensor_type,
                                  use_fusion=args.use_fusion)

        if args.eval:
            test_dataGen = BigEarthNet(data_dir,
                                       args.label_split_dir,
                                       imgTransform=imgTransform,
                                       label_type=label_type,
                                       threshold=args.threshold,
                                       subset="test",
                                       use_s1=use_s1,
                                       use_s2=use_s2,
                                       use_RGB=args.use_RGB,
                                       CLC_s=args.simple_scheme,
                                       sensor_type=args.sensor_type,
                                       use_fusion=args.use_fusion)

    # number of input channels
    n_inputs = train_dataGen.n_inputs
    print('input channels =', n_inputs)
    wandb.config.update({"input_channels": n_inputs})

    # set up dataloaders
    train_data_loader = DataLoader(train_dataGen,
                                   batch_size=args.batch_size,
                                   num_workers=args.num_workers,
                                   shuffle=True,
                                   pin_memory=True)
    val_data_loader = DataLoader(val_dataGen,
                                 batch_size=args.batch_size,
                                 num_workers=args.num_workers,
                                 shuffle=False,
                                 pin_memory=True)

    if args.eval:
        test_data_loader = DataLoader(test_dataGen,
                                      batch_size=args.batch_size,
                                      num_workers=args.num_workers,
                                      shuffle=False,
                                      pin_memory=True)

    # -------------------------------- ML setup
    # cuda
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True

    # define number of classes
    if dataset == 'sen12ms':
        if args.simple_scheme:
            numCls = 10
            ORG_LABELS = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
        else:
            numCls = 17
            ORG_LABELS = [
                '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
                '13', '14', '15', '16', '17'
            ]
    else:
        if args.simple_scheme:
            numCls = 19
            ORG_LABELS = [
                '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
                '13', '14', '15', '16', '17', '18', '19'
            ]
        else:
            numCls = 43
            ORG_LABELS = [
                '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
                '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
                '23', '24', '25', '26', '27', '28', '29', '30', '31', '32',
                '33', '34', '35', '36', '37', '38', '39', '40', '41', '42',
                '43'
            ]

    print('num_class: ', numCls)
    wandb.config.update({"n_class": numCls})

    # define model
    if args.model == 'VGG16':
        model = VGG16(n_inputs, numCls)
    elif args.model == 'VGG19':
        model = VGG19(n_inputs, numCls)
    elif args.model == 'Supervised':
        model = ResNet50(n_inputs, numCls)
    elif args.model == 'Supervised_1x1':
        model = ResNet50_1x1(n_inputs, numCls)
    elif args.model == 'ResNet101':
        model = ResNet101(n_inputs, numCls)
    elif args.model == 'ResNet152':
        model = ResNet152(n_inputs, numCls)
    elif args.model == 'DenseNet121':
        model = DenseNet121(n_inputs, numCls)
    elif args.model == 'DenseNet161':
        model = DenseNet161(n_inputs, numCls)
    elif args.model == 'DenseNet169':
        model = DenseNet169(n_inputs, numCls)
    elif args.model == 'DenseNet201':
        model = DenseNet201(n_inputs, numCls)
    # finetune moco pre-trained model
    elif args.model.startswith("Moco"):
        pt_path = os.path.join(args.pt_dir, f"{args.pt_name}.pth")
        print(pt_path)
        assert os.path.exists(pt_path)
        if args.model == 'Moco':
            print("transfer backbone weights but no conv 1x1 input module")
            model = Moco(torch.load(pt_path), n_inputs, numCls)
        elif args.model == 'Moco_1x1':
            print("transfer backbone weights and input module weights")
            model = Moco_1x1(torch.load(pt_path), n_inputs, numCls)
        elif args.model == 'Moco_1x1RND':
            print(
                "transfer backbone weights but initialize input module random with random weights"
            )
            model = Moco_1x1(torch.load(pt_path), n_inputs, numCls)
        else:  # any other Moco variant is not supported at present
            raise NameError("no model")
    else:
        raise NameError("no model")

    print(model)
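    # Illustrative sketch only (not part of the original script): the Moco /
    # Moco_1x1 wrappers above are project-specific, but a MoCo-v2-style
    # checkpoint is usually transferred into a plain backbone roughly like
    # this -- keep the query-encoder weights and drop its MLP head (key names
    # such as 'module.encoder_q.' are assumptions, not confirmed here):
    #
    #   _ckpt = torch.load(pt_path, map_location='cpu')
    #   _state = {k.replace('module.encoder_q.', ''): v
    #             for k, v in _ckpt['state_dict'].items()
    #             if k.startswith('module.encoder_q.')
    #             and not k.startswith('module.encoder_q.fc')}
    #   _backbone = torchvision.models.resnet50(num_classes=numCls)
    #   _backbone.load_state_dict(_state, strict=False)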

    # move model to GPU if is available
    if use_cuda:
        model = model.cuda()

    # define loss function
    if label_type == 'multi_label':
        lossfunc = torch.nn.BCEWithLogitsLoss()
    else:
        lossfunc = torch.nn.CrossEntropyLoss()
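    # Illustrative sketch only (not in the original script): the two losses
    # expect different target formats -- BCEWithLogitsLoss takes a float
    # multi-hot vector per sample, CrossEntropyLoss an integer class index.
    # The underscore-prefixed names below are throwaway examples:
    #
    #   _logits = torch.randn(2, numCls)
    #   _multi_hot = torch.zeros(2, numCls)
    #   _multi_hot[:, :2] = 1.0
    #   torch.nn.BCEWithLogitsLoss()(_logits, _multi_hot)           # multi_label
    #   torch.nn.CrossEntropyLoss()(_logits, torch.tensor([0, 1]))  # single_label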

    # set up optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.decay)

    best_acc = 0
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            checkpoint_nm = os.path.basename(args.resume)
            sv_name = checkpoint_nm.split('_')[0] + '_' + checkpoint_nm.split(
                '_')[1]
            print('saving file name is ', sv_name)

            if checkpoint['epoch'] > start_epoch:
                start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_prec']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # set up tensorboard logging
    # train_writer = SummaryWriter(os.path.join(logs_dir, 'runs', sv_name, 'training'))
    # val_writer = SummaryWriter(os.path.join(logs_dir, 'runs', sv_name, 'val'))


# ----------------------------- executing Train/Val.
# train network
# wandb.watch(model, log="all")

    scheduler = None
    if args.use_lr_step:
        # Ex: if the initial lr is 0.0001, step_size is 25, and gamma is 0.1, the lr is multiplied by 0.1 every 25 epochs:
        # 0.0001 - first 25 epochs
        # 0.00001 - 25 to 50 epochs
        # 0.000001 - 50 to 75 epochs
        # 0.0000001 - 75 to 100 epochs
        # https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.lr_step_size,
                                              gamma=args.lr_step_gamma)
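        # Illustrative sketch only: how StepLR evolves the lr over the epochs
        # described in the comment above (the underscore names are throwaway):
        #
        #   _opt = optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-4)
        #   _sched = optim.lr_scheduler.StepLR(_opt, step_size=25, gamma=0.1)
        #   for _e in range(100):
        #       _opt.step()
        #       _sched.step()
        #       # epochs 0-24 -> 1e-4, 25-49 -> 1e-5, 50-74 -> 1e-6, 75-99 -> 1e-7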

    for epoch in range(start_epoch, args.epochs):
        if args.use_lr_step:
            print('Epoch {}/{} lr: {}'.format(epoch, args.epochs - 1,
                                              optimizer.param_groups[0]['lr']))
        else:
            print('Epoch {}/{}'.format(epoch, args.epochs - 1))
        print('-' * 25)

        train(train_data_loader, model, optimizer, lossfunc, label_type, epoch,
              use_cuda)
        micro_f1 = val(val_data_loader, model, optimizer, label_type, epoch,
                       use_cuda)

        if args.use_lr_step:
            # step the scheduler once per epoch, after the optimizer updates
            # (stepping it before training would decay the lr one epoch early)
            scheduler.step()

        is_best_acc = micro_f1 > best_acc
        best_acc = max(best_acc, micro_f1)

        save_checkpoint(
            {
                'epoch': epoch,
                'arch': args.model,
                # key names match the resume logic above ('state_dict' / 'optimizer')
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_prec': best_acc
            }, is_best_acc, sv_name)

        wandb.log({'epoch': epoch, 'micro_f1': micro_f1})

    print("=============")
    print("done training")
    print("=============")

    if args.eval:
        eval(test_data_loader, model, label_type, numCls, use_cuda, ORG_LABELS)
Beispiel #26
0
                        out=n_classes, dropout=args.dropout)
elif args.encoder == 'resnet34':
    encoder = models.resnet34()
    model = GDVM_ResNet(encoder, latent=args.size_latent,
                        out=n_classes, dropout=args.dropout)

n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)

print(model)
print('Total number of params in model: {}'.format(n_params))
model.cuda()

optimizer = optim.Adam(model.parameters(), lr=args.lr)
poses = [0, 1, 2, 3, 4, 5, 6, 7, 8]
eval_thresholds = np.arange(0.05, 1, 0.05)
tsfm = ToTensor()

oname = 'gdvm_' + args.criterion + args.aus + '_encoder_' + args.encoder + '_alpha_' + \
    str(args.alpha) + '_beta_' + \
    str(args.beta) + '_latent_' + \
    str(args.size_latent) + '_lr_' + str(args.lr)

print(oname)

scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.1, patience=10, verbose=True, threshold=0.01)
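# Note (descriptive, not in the original snippet): ReduceLROnPlateau multiplies
# the lr by `factor` once the monitored metric has stopped improving by more
# than `threshold` for `patience` consecutive epochs; it must be stepped with
# that metric, e.g. scheduler.step(val_loss), where val_loss stands in for
# whatever validation quantity the (not shown) training loop tracks.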
logger = Logger('./logs/post_final/'+oname+'/')

''' For a quick test, reduce the training/test subjects and poses to a minimum '''
(t_subs_tr, t_subs_te) = (['F001', 'F002', 'F003'], ['F007']
                          ) if args.quick_test else (None, None)
Beispiel #27
0
def main():
    """Run training."""
    config = yaml.safe_load(open("config.yml"))
    #training hyperparameters
    num_epochs = config['num_epochs']
    learning_rate = config['learning_rate']
    batch_size = config['batch_size']
    valid_period = config['valid_period']
    #data hyperparameters
    #extract samples (input sequences with length = stride_len)
    #for the RNN from the given full trajectory
    window_len = config['window_len']
    stride_len = config['stride_len']
    n_times = config['n_times']
    x_train, y_train, x_dev, y_dev, x_test, y_test = read_data(
        window_len, stride_len, n_times)
    print('Dataset:', x_train.shape, y_train.shape, x_dev.shape, y_dev.shape,
          x_test.shape, y_test.shape)
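    # Illustrative sketch only (read_data is project-specific): extracting
    # fixed-length windows from a full trajectory usually looks roughly like
    # this, where _traj, _win and _stride are throwaway example names:
    #
    #   _traj = np.arange(10)          # dummy 1-D trajectory
    #   _win, _stride = 4, 2
    #   _samples = [_traj[i:i + _win]
    #               for i in range(0, len(_traj) - _win + 1, _stride)]
    #   # -> [0..3], [2..5], [4..7], [6..9]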
    #create dataset
    transformed_control_dataset_train = ControlDataset(
        x_train, y_train, transform=transforms.Compose([ToTensor()]))
    transformed_control_dataset_dev = ControlDataset(
        x_dev, y_dev, transform=transforms.Compose([ToTensor()]))
    #transformed_control_dataset_test = ControlDataset(x_test, y_test,
    #transform=transforms.Compose([ToTensor()]))
    # create batch
    data_train = DataLoader(transformed_control_dataset_train,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=1,
                            drop_last=True)
    data_dev = DataLoader(transformed_control_dataset_dev,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=1,
                          drop_last=True)
    #data_test = DataLoader(transformed_control_dataset_test, batch_size=1,
    #shuffle=False, num_workers=1, drop_last=True)

    # Device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('Device:', device)

    # save model
    save_model_name = config['model_name']
    print('Save Model name: ', save_model_name)
    model_save_path = '../checkpoints/' + save_model_name  # + '.pt' (not '.pth')

    #test/train
    mode = int(sys.argv[1])

    if mode == 0:
        #train mode
        # Model preparation
        model = SeRNN_FWXX(batch_size, device)
        print('Model: ', model)

        model.to(device)
        pytorch_total_params = sum(p.numel() for p in model.parameters())
        print('# of params: ', pytorch_total_params)

        # multiple GPUs
        if torch.cuda.device_count() > 1:
            print("Model uploaded. Number of GPUs: ",
                  torch.cuda.device_count())
            #model = nn.DataParallel(model)

        #set the training loss and optimizer
        criterion = nn.MSELoss()
        optimizer = optim.RMSprop(model.parameters(), lr=learning_rate)
        train_model(model, device, data_train, data_dev, x_dev, y_dev,
                    optimizer, criterion, num_epochs, model_save_path,
                    window_len, stride_len, valid_period)
    else:
        # test mode
        # upload saved model
        print('Saved Model evaluation with test set')
        model = torch.jit.load(model_save_path)
        test_model(model, device, x_test, y_test, 'True', window_len,
                   stride_len)
Beispiel #28
0
def main():

    args = get_arguments()

    # configuration
    CONFIG = Dict(yaml.safe_load(open(args.config)))
    """ DataLoader """
    test_data = PartAffordanceDataset(CONFIG.test_data,
                                      config=CONFIG,
                                      transform=transforms.Compose([
                                          CenterCrop(CONFIG),
                                          ToTensor(),
                                          Normalize()
                                      ]))

    test_loader = DataLoader(test_data,
                             batch_size=4,
                             shuffle=True,
                             num_workers=1)

    test_iter = iter(test_loader)

    model = models.vgg16_bn(pretrained=False)
    model.classifier[6] = nn.Linear(in_features=4096,
                                    out_features=7,
                                    bias=True)
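    # vgg16_bn's stock classifier ends in Linear(4096, 1000); the line above
    # swaps it for a 7-way head matching the 7 binary labels evaluated below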
    model.load_state_dict(
        torch.load('./result/best_accuracy_model.prm',
                   map_location=lambda storage, loc: storage))
    model.to(args.device)

    while True:
        sample = next(test_iter)  # Python 3: use next() rather than .next()
        image, label = sample['image'], sample['label']

        # show images
        show_img(torchvision.utils.make_grid(image))

        # print labels
        print('True labels')
        print(label)

        with torch.no_grad():
            image = image.to(args.device)
            label = label.to(args.device)

            h = model(image)
            h = torch.sigmoid(h)
            h[h > 0.5] = 1
            h[h <= 0.5] = 0

            total_num = 7 * len(label)
            acc_num = torch.sum(h == label)

            accuracy = float(acc_num) / total_num
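            # this is per-label (element-wise) accuracy over the 7 binary
            # outputs, not exact-match accuracy per image; worked example with
            # throwaway numbers: a batch of 4 images gives total_num = 28, and
            # if 25 of those 28 predicted bits match, accuracy = 25 / 28 ≈ 0.89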

        print('\nPredicted labels')
        print(h)

        print('\naccuracy\t{:.3f}'.format(accuracy))

        print('\nIf you want to look at more images, input \"c\"')
        s = input()
        if s == 'c':
            continue
        else:
            break
Beispiel #29
0
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
train_tfLogger = TFLogger(osp.join(args.logs_dir, 'train'))
eval_tfLogger = TFLogger(osp.join(args.logs_dir, 'eval'))

# t = [ToTensor()]
# train_data = DatasetSyn(args.train_data, "train", transform=Compose(t), sample_shape=args.sample_shape)
# train_data.reset_Sample()
# train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
#                           num_workers=args.num_workers)

# eval_data = DatasetSyn(args.eval_data, "eval", transform=Compose(t), sample_shape=args.sample_shape)
# eval_data.reset_Sample()
# eval_loader = DataLoader(eval_data, batch_size=args.batch_size, shuffle=True,
#                           num_workers=args.num_workers)

t = [ToTensor()]
train_data = DatasetReal(REAL_DATA_PATH,
                         mode="train",
                         transform=Compose(t),
                         sample_shape=args.sample_shape)
train_data.reset_Sample()
train_loader = DataLoader(train_data,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=args.num_workers)

eval_data = DatasetReal(REAL_DATA_PATH,
                        mode="eval",
                        transform=Compose(t),
                        sample_shape=args.sample_shape)
eval_data.reset_Sample()
Beispiel #30
0
def train(args):
    yolo_dataset = YoloDataset(DATASET,
                               "faces.csv",
                               input_size=208,
                               transform=transforms.Compose([ToTensor()]))
    dataset_indices = set(range(len(yolo_dataset)))
    # random.sample needs a sequence (sampling from a set was removed in Python 3.11)
    test_indices = random.sample(sorted(dataset_indices),
                                 int(args.test_size * len(yolo_dataset)))
    train_indices = list(dataset_indices - set(test_indices))
    trainset = Subset(yolo_dataset, train_indices)
    testset = Subset(yolo_dataset, test_indices)  # hold out the test indices, not the train ones
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=4)
    validloader = torch.utils.data.DataLoader(testset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=4)
    dataloaders = {TRAIN: trainloader, VALID: validloader}
    model = MyResnet()
    if CUDA:
        model.cuda()

    optimizer = optim.Adam(model.conv_addition.parameters(), lr=args.lr)
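    # note: only the parameters of the added head (model.conv_addition) are
    # handed to Adam, so the rest of MyResnet -- presumably a pretrained
    # backbone -- is left unchanged during training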
    saver = CheckpointSaver(args.save_dir_name, max_checkpoints=3)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)
    # writer = SummaryWriter(os.path.join(args.save_dir_name, "log"))
    train_losses, test_losses = [], []
    for epoch in range(args.epochs):
        for phase in dataloaders:
            if phase == TRAIN:
                model.train()
            else:
                model.eval()

            epoch_avg = AVG()
            logger.info(f"-----------------{phase.upper()}-----------------")
            for i, data in enumerate(dataloaders[phase]):
                img, y = data['img'], data['y']
                if CUDA:
                    img = img.cuda()
                    y = y.cuda()
                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == TRAIN):
                    pred = model(img)
                    y_ = pred.permute((0, 2, 3, 1))
                    y = y.permute((0, 2, 3, 1))
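                    # both tensors are now NHWC, i.e. (batch, grid_y, grid_x, channels);
                    # presumably loss_function expects the per-cell YOLO outputs
                    # along the last dimension (loss_function is defined elsewhere)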
                    loss = loss_function(y_, y)
                    epoch_avg.add(loss.item())
                    # backward + optimize only if in training phase
                    if phase == TRAIN:
                        loss.backward()
                        optimizer.step()
                logger.info(f"Epoch: {epoch}, batch: {i}, loss {loss.item()}")
            logger.info(f"Epoch: {epoch}, average loss: {epoch_avg}")
            if phase == TRAIN:
                train_losses.append(str(epoch_avg))
            else:
                test_losses.append(str(epoch_avg))
            # writer.add_scalar(f'{phase}_data/average_loss', str(epoch_avg), epoch)
        # step the LR scheduler once per epoch, after the training updates
        scheduler.step()
        if epoch % 20 == 0:
            saver.save(model, optimizer, epoch)

    with open(os.path.join(args.save_dir_name, "losses.txt"), 'w') as file:
        file.write(str(train_losses) + '\n')
        file.write(str(test_losses) + '\n')