Example #1
    def __init__(self, mode, path):
        '''Load the encoder model in the requested
           mode (pass one of these strings):
           1. fc0  -> drop the last fc layer, use the previous fc layer
           2. fc1  -> use the last fc layer
           3. conv -> drop both fc layers, use the last conv layer'''

        #path = '/deep/group/aihc-bootcamp-spring2020/cxr_fewer_samples/experiments/jingbo/resnet18_mocov2_20200617-021146_SLURM1534372/'
        #path += "checkpoint_0003.pth.tar"
        checkpoint = torch.load(path, map_location="cpu")
        # Strip the 'module.' prefix left behind by DistributedDataParallel.
        state_dict = dict((key[7:], value)
                          for (key, value) in checkpoint['state_dict'].items())
        model = builder.MoCo(
            models.__dict__['resnet18'],
            # Parameters from the MoCo checkpoint - different from the defaults!
            K=49152,
            mlp=True,
            pretrained=False)
        model.load_state_dict(state_dict)
        if mode == 'fc1':
            self.model = model.encoder_q
        elif mode == 'fc0':
            self.model = torch.nn.Sequential(
                *list(model.encoder_q.children())[:-1], torch.nn.Flatten())
        else:  # 'conv'
            self.model = torch.nn.Sequential(
                *list(model.encoder_q.children())[:-2], torch.nn.Flatten())
        self.model.eval()
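
For reference, a minimal standalone sketch (plain torchvision ResNet-18 with random weights, no MoCo checkpoint; names are illustrative) of what the fc0 and conv slicing modes above produce:

import torch
import torchvision.models as models

resnet = models.resnet18()
layers = list(resnet.children())
# ResNet-18's top-level children end with avgpool and fc, so [:-1]
# keeps everything through avgpool and [:-2] stops after the last
# conv stage (layer4).
backbone_fc0 = torch.nn.Sequential(*layers[:-1], torch.nn.Flatten())
backbone_conv = torch.nn.Sequential(*layers[:-2], torch.nn.Flatten())

x = torch.randn(2, 3, 224, 224)
print(backbone_fc0(x).shape)   # torch.Size([2, 512])
print(backbone_conv(x).shape)  # torch.Size([2, 25088]) = 512 * 7 * 7
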
Example #2

    # (C, H, W) probabilities -> (H, W, C), then threshold into a binary mask.
    output_yp = output_yp.transpose(1, 2, 0)
    output_ypf = np.where(output_yp > 0.5, 1.0, 0.0)
    visualize_assist2(image=imagex,
                      original_mask=maskp,
                      predict_prob=output_yp,
                      predict_mask=output_ypf)


DEVICE = "cuda"
SAVE_INTERVAL = 1
root = r'E:\Users\huang\Desktop\wen\MRP\MRP'
experiment = 'ss-test'
save_root = os.path.join(root, 'results', experiment)
public_save_root = os.path.join(root, 'results')
pretrained_model_dict = torch.load(r"checkpoint_moco_expb4_0007.pth.tar")
model = moco_builder.MoCo(PetNet_V2, K=1024)
model.load_state_dict(pretrained_model_dict['state_dict'])
unet = smp.Unet(
    encoder_name="resnet34",
    encoder_weights=None,
    classes=2,
    activation=None,
)
# Transplant the MoCo key-encoder backbone into the U-Net's encoder slot.
unet.encoder = model.encoder_k.encoder
unet = unet.to("cuda")

preproc_fn = smp.encoders.get_preprocessing_fn("resnet34")
train_dataset = SegDataset(r"E:\liver2\liver2\train-150",
                           r"E:\liver2\liver2\train\masks",
                           augmentation=pet_augmentation_valid(),
                           preprocessing=get_preprocessing(preproc_fn))
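
A quick way to sanity-check the encoder transplant above is a dummy forward pass (a hedged sketch; it assumes PetNet_V2's encoder exposes the multi-scale feature interface that smp's Unet decoder expects):

with torch.no_grad():
    dummy = torch.randn(1, 3, 256, 256).to("cuda")
    logits = unet(dummy)
    print(logits.shape)  # expected: torch.Size([1, 2, 256, 256])
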
Example #3
        return x


data_dir = r"D:\PetImages"
train_dir = data_dir
preproc_fn = smp.encoders.get_preprocessing_fn("resnet34")
train_dataset = ArvnDataset_Pet_Constrastive(train_dir, ["Cat", "Dog"],
                                             pet_augmentation(),
                                             get_preprocessing(preproc_fn))
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=64,
                                               pin_memory=True,
                                               shuffle=False,
                                               drop_last=True)
model_base_encoder = PetNet_V2()
model = moco_builder.MoCo(PetNet_V2, K=1024).to("cuda")
lr = 1e-3
loss = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(),
                            lr,
                            momentum=0.9,
                            weight_decay=1e-4)
epochs = 10
for epoch in range(epochs):
    adjust_learning_rate(optimizer, epoch, lr, epochs)
    train(train_dataloader, model, loss, optimizer, epoch)
    save_checkpoint(
        {
            'epoch': epoch + 1,
            'arch': "resnet34",
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        },
        is_best=False,
        filename='checkpoint_{:04d}.pth.tar'.format(epoch))
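
The adjust_learning_rate helper is not shown in this example; a minimal sketch in the spirit of MoCo v2's cosine schedule, matching the call signature above (an assumption, not the author's actual helper):

import math

def adjust_learning_rate(optimizer, epoch, base_lr, total_epochs):
    # Cosine decay from base_lr down to 0 over the whole run.
    lr = base_lr * 0.5 * (1.0 + math.cos(math.pi * epoch / total_epochs))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
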
Example #4

def main_worker(gpu, ngpus_per_node, args):
    args.gpu = gpu
    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
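            # Example: with 4 GPUs per node, the process for gpu 2 on the
            # second node (args.rank == 1 before this line) gets global
            # rank 1 * 4 + 2 = 6.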
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    model = builder.MoCo(hr_net, config, args.moco_dim, args.moco_k,
                         args.moco_m, args.moco_t, args.mlp)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we would normally divide the batch
            # size ourselves based on the total number of GPUs (left
            # disabled in this variant):
            #args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(
                (args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu], find_unused_parameters=True)
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(
                model, find_unused_parameters=True)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # comment out the following line for debugging
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                0.001,
                                momentum=0.9,
                                weight_decay=0.1)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    augmentation = [
        transforms.Resize((16, 16)),
        transforms.ToTensor(),
        normalize,
    ]

    transform = transforms.Compose(augmentation)

    # query_encoder = model.module.encoder_q
    # key_encoder = model.module.encoder_k

    dataset_root = '/root'
    probe_path = os.path.join(dataset_root,
                              'tinyface/Testing_Set/probe_img_ID_pairs.mat')
    probe_image_root = os.path.join(dataset_root, 'tinyface/Testing_Set/Probe')

    gallery_path = os.path.join(
        dataset_root, 'tinyface/Testing_Set/gallery_match_img_ID_pairs.mat')
    gallery_image_root = os.path.join(dataset_root,
                                      'tinyface/Testing_Set/Gallery_Match')
    gallery_distractor_root = os.path.join(
        dataset_root, 'tinyface/Testing_Set/Gallery_Distractor')

    probe_image_pathes = gen_probe_pathes(probe_path, probe_image_root)
    gallery_image_pathes = gen_gallery_pathes(gallery_path, gallery_image_root)
    gallery_distractor_image_pathes = list_images(gallery_distractor_root)

    # Switch to eval mode so BatchNorm statistics are not updated during
    # feature extraction.
    model.eval()

    probe_features = []
    for probe_image_path in tqdm(probe_image_pathes):
        img = Image.open(probe_image_path)
        img = transform(img).unsqueeze(0).cuda()
        probe_feature = model.module.encode_query(img)
        probe_features.append(
            probe_feature.squeeze(0).detach().cpu().numpy().tolist())
    probe_features = np.asarray(probe_features, dtype=np.float32)
    savemat('features/probe.mat', {'probe_feature_map': probe_features})

    gallery_features = []
    for gallery_image_path in tqdm(gallery_image_pathes):
        img = Image.open(gallery_image_path)
        img = transform(img).unsqueeze(0).cuda()
        gallery_feature = model.module.encode_key(img)
        gallery_features.append(
            gallery_feature.squeeze(0).detach().cpu().numpy().tolist())
    gallery_features = np.asarray(gallery_features, dtype=np.float32)
    savemat('features/gallery.mat', {'gallery_feature_map': gallery_features})
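
The saved .mat files can then be compared offline; a hedged sketch (cosine_rank is hypothetical, not part of this example) of ranking gallery features against each probe:

import numpy as np

def cosine_rank(probe_features, gallery_features):
    # L2-normalize, then a dot product gives cosine similarity.
    p = probe_features / np.linalg.norm(probe_features, axis=1, keepdims=True)
    g = gallery_features / np.linalg.norm(gallery_features, axis=1, keepdims=True)
    sims = p @ g.T                    # (num_probes, num_gallery)
    return np.argsort(-sims, axis=1)  # best match first
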
Example #5
def main_worker(gpu, ngpus_per_node, args):
    args.gpu = gpu
    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)

    model = builder.MoCo(hr_net, config, args.moco_dim, args.moco_k,
                         args.moco_m, args.moco_t, args.mlp)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(
                (args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu], find_unused_parameters=True)
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(
                model, find_unused_parameters=True)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # comment out the following line for debugging
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if args.aug_plus:
        # MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
        augmentation = [
            transforms.Resize((16, 16)),
            transforms.RandomApply(
                [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)],  # not strengthened
                p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([loader.GaussianBlur([.1, 2.])], p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(0.4),
            transforms.ToTensor(),
            normalize
        ]
    else:
        # MoCo v1's aug: the same as InstDisc https://arxiv.org/abs/1805.01978
        augmentation = [
            transforms.Resize((16, 16)),
            transforms.RandomGrayscale(p=0.2),
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(0.4),
            transforms.ToTensor(),
            normalize,
        ]

    train_dataset = datasets.ImageFolder(
        traindir, loader.TwoCropsTransform(transforms.Compose(augmentation)))
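
    # For reference, MoCo's TwoCropsTransform (moco/loader.py) simply applies
    # the composed augmentation twice and returns a [query, key] pair:
    #
    #     class TwoCropsTransform:
    #         def __init__(self, base_transform):
    #             self.base_transform = base_transform
    #         def __call__(self, x):
    #             return [self.base_transform(x), self.base_transform(x)]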

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler,
                                               drop_last=True)

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # Save a checkpoint every 10th epoch, and only from the master
        # process when running multiprocessing-distributed.
        if (not args.multiprocessing_distributed
                or args.rank % ngpus_per_node == 0) and epoch % 10 == 0:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                },
                is_best=False,
                filename='checkpoint_{:04d}.pth.tar'.format(epoch))
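
For context, the train helper invoked above follows MoCo's usual pattern: the model's forward returns (logits, labels) with the positive key at index 0 of every row, so CrossEntropyLoss realizes the InfoNCE objective. A minimal sketch of one step (assuming MoCo's reference forward signature; images holds the two crops produced by TwoCropsTransform):

images, _ = next(iter(train_loader))
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)

# Contrast the query view against the key view plus the negatives queue.
output, target = model(im_q=images[0], im_k=images[1])
loss = criterion(output, target)

optimizer.zero_grad()
loss.backward()
optimizer.step()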