Ejemplo n.º 1
0
    def __getitem__(self, index):
        """Return the (image, label) pair at *index*.

        Applies the stochastic augmentation pipeline in training mode and
        the deterministic center-crop pipeline otherwise.
        """
        if self.is_train:
            img, target = self.train_img[index], self.train_label[index]
        else:
            img, target = self.test_img[index], self.test_label[index]

        # Replicate grayscale arrays to three channels before PIL conversion.
        if len(img.shape) == 2:
            img = np.stack([img] * 3, 2)
        img = Image.fromarray(img, mode="RGB")
        img = transforms.Resize((256, 256), Image.BILINEAR)(img)

        if self.is_train:
            # Random crop + horizontal flip as training-time augmentation.
            img = transforms.RandomCrop(INPUT_SIZE)(img)
            img = transforms.RandomHorizontalFlip()(img)
        else:
            # Deterministic crop for evaluation.
            img = transforms.CenterCrop(INPUT_SIZE)(img)

        img = transforms.ToTensor()(img)
        img = transforms.Normalize([0.485, 0.456, 0.406],
                                   [0.229, 0.224, 0.225])(img)
        return img, target
Ejemplo n.º 2
0
def load_data(args):
    """Build train and validation DataLoaders for the GIANA dataset.

    Images are converted to tensors and normalized with dataset-specific
    channel statistics; no geometric co-transformations are applied.
    """
    normalize = t.Normalize(mean=[0.445, 0.287, 0.190],
                            std=[0.31, 0.225, 0.168])
    im_transform = t.Compose([t.ToTensor(), normalize])

    # Plug co-transformations (e.g. random rotation or random flip) in here:
    # co_transformer = cot.Compose([cot.RandomRotate(45)])

    dsetTrain = GIANA(args.imgdir,
                      args.gtdir,
                      input_size=(args.input_width, args.input_height),
                      train=True,
                      transform=im_transform,
                      co_transform=None,
                      target_transform=t.ToLabel())
    dsetVal = GIANA(args.imgdir,
                    args.gtdir,
                    train=False,
                    transform=im_transform,
                    co_transform=None,
                    target_transform=t.ToLabel())

    train_data_loader = data.DataLoader(dsetTrain,
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        num_workers=args.num_workers)
    val_data_loader = data.DataLoader(dsetVal,
                                      batch_size=args.batch_size,
                                      shuffle=False,
                                      num_workers=args.num_workers)
    return train_data_loader, val_data_loader
Ejemplo n.º 3
0
    def __init__(self, cms, margin_size, margin_time):
        """Store configuration and build the (identical) train/test
        resize-then-to-tensor transform pipelines."""
        self.IMAGE_SCALE = 20
        # Total pixel count of the square input image.
        self.IMAGE_SIZE = self.IMAGE_SCALE * self.IMAGE_SCALE

        def _resize_to_tensor():
            # Train and test share the same deterministic pipeline.
            return transforms.Compose([
                transforms.Resize((self.IMAGE_SCALE, self.IMAGE_SCALE)),
                transforms.ToTensor(),
            ])

        self.train_transform = _resize_to_tensor()
        self.test_transform = _resize_to_tensor()

        self.CMS = cms
        self.MARGIN_SIZE = margin_size
        self.MARGIN_TIME = margin_time
def get_transform():
    """Compose the default pipeline: to-tensor followed by the
    Faster R-CNN resizer."""
    # Use a distinct local name so the torchvision ``transforms`` module
    # is not shadowed by a list.
    steps = [custom_T.ToTensor(), custom_T.FasterRCNNResizer()]
    return custom_T.Compose(steps)
Ejemplo n.º 5
0
def get_transform(train):
    """Build the detection transform pipeline.

    Always converts the PIL image to a PyTorch tensor; when *train* is
    truthy, adds a 50% random horizontal flip (image and ground truth)
    for data augmentation.
    """
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)
def get_transform(train=False):
    """Return the custom transform pipeline.

    Training adds random horizontal flip and random crop before the
    tensor conversion and Faster R-CNN resize steps.
    """
    steps = []
    if train:
        steps += [custom_T.RandomHorizontalFlip(), custom_T.RandomCrop()]
    steps += [custom_T.ToTensor(), custom_T.FasterRCNNResizer()]
    return custom_T.Compose(steps)
Ejemplo n.º 7
0
    def __init__(self, root=None, dataloader=default_loader):
        """Training dataset: reads the image list from TRAIN_DATASET under
        *root* and prepares two augmentation pipelines that differ only in
        resize scale (the larger scale makes crops cover smaller regions).
        """

        def _augmentation(resize):
            # NOTE(review): RandomErasing takes ``probability``/``sh`` here,
            # so ``transforms`` is presumably a project-local module rather
            # than torchvision — confirm before swapping implementations.
            return transforms.Compose([
                transforms.RandomRotation(30),
                transforms.Resize(resize),
                transforms.RandomCrop(INPUT_SIZE),
                transforms.RandomHorizontalFlip(),
                transforms.ColorJitter(brightness=(0.9, 1.1),
                                       contrast=(0.9, 1.1),
                                       saturation=(0.9, 1.1)),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                     std=(0.229, 0.224, 0.225)),
                transforms.RandomErasing(probability=0.5, sh=0.05),
            ])

        self.transform1 = _augmentation([256, 256])
        # Augmentation variant 2: larger resize, so crops focus on smaller regions.
        self.transform2 = _augmentation([336, 336])
        self.dataloader = dataloader

        self.root = root
        with open(os.path.join(self.root, TRAIN_DATASET), 'r') as fid:
            self.imglist = fid.readlines()

        # Each line is "<image_path> <label>"; only labels are materialized here.
        labels = []
        for line in self.imglist:
            image_path, label = line.strip().split()
            labels.append(int(label))
        self.labels = torch.LongTensor(np.array(labels))
Ejemplo n.º 8
0
def visualization(x_train, y_train, x_val, y_val, x_test, y_test, nb_class,
                  ckpt, opt):
    """Compute backbone embeddings for the test set.

    Loads the saved backbone weights from *ckpt*, runs a single forward
    pass over the (un-augmented) test set, and returns ``(embeds, labels)``
    as vertically stacked numpy arrays (labels shaped ``(N, 1)``).
    """
    # No augmentations are used for linear evaluation.
    transform_lineval = transforms.Compose([transforms.ToTensor()])

    train_set_lineval = UCR2018(data=x_train,
                                targets=y_train,
                                transform=transform_lineval)
    val_set_lineval = UCR2018(data=x_val,
                              targets=y_val,
                              transform=transform_lineval)
    test_set_lineval = UCR2018(data=x_test,
                               targets=y_test,
                               transform=transform_lineval)

    # Only the test loader is consumed below; train/val loaders are kept
    # for interface parity with the sibling evaluation helpers.
    train_loader_lineval = torch.utils.data.DataLoader(train_set_lineval,
                                                       batch_size=128,
                                                       shuffle=True)
    val_loader_lineval = torch.utils.data.DataLoader(val_set_lineval,
                                                     batch_size=128,
                                                     shuffle=False)
    test_loader_lineval = torch.utils.data.DataLoader(test_set_lineval,
                                                      batch_size=128,
                                                      shuffle=False)
    signal_length = x_train.shape[1]

    # Load the saved backbone into a freshly constructed model.
    backbone_lineval = SimConv4().cuda()  # defining a raw backbone model
    checkpoint = torch.load(ckpt, map_location='cpu')
    backbone_lineval.load_state_dict(checkpoint)

    print('Visualization')
    backbone_lineval.eval()
    # BUG FIX: the original looped ``opt.epochs_test`` times, recomputing
    # identical embeddings on each pass (the model is frozen in eval mode)
    # and raising NameError on return when ``opt.epochs_test`` was 0.  It
    # also re-vstacked the accumulator per batch (quadratic).  One pass
    # with list accumulation is equivalent and linear.
    embed_chunks, label_chunks = [], []
    for data, target in test_loader_lineval:
        data = data.cuda()
        target = target.cuda().view(-1, 1)
        output = backbone_lineval(data).detach()
        embed_chunks.append(output.cpu().numpy())
        label_chunks.append(target.cpu().numpy())

    embeds = np.vstack(embed_chunks) if embed_chunks else None
    labels = np.vstack(label_chunks) if label_chunks else None
    return embeds, labels
Ejemplo n.º 9
0
def pretrain_InterSampleRel(x_train, y_train, opt):
    """Pre-train a SimConv4 backbone with inter-sample relational reasoning.

    Builds the augmentation pipeline selected by ``opt.aug_type``, trains on
    ``MultiUCR2018``, and checkpoints the backbone both before and after
    training.  Returns ``(acc_max, epoch_max)`` from the training loop.
    """
    K = opt.K
    batch_size = opt.batch_size  # the paper uses 128
    tot_epochs = opt.epochs      # the paper uses 400
    feature_size = opt.feature_size
    ckpt_dir = opt.ckpt_dir

    prob = 0.2  # per-transform application probability
    raw = transforms.Raw()
    cutout = transforms.Cutout(sigma=0.1, p=prob)
    jitter = transforms.Jitter(sigma=0.2, p=prob)
    scaling = transforms.Scaling(sigma=0.4, p=prob)
    magnitude_warp = transforms.MagnitudeWrap(sigma=0.3, knot=4, p=prob)
    time_warp = transforms.TimeWarp(sigma=0.2, knot=8, p=prob)
    window_slice = transforms.WindowSlice(reduce_ratio=0.8, p=prob)
    window_warp = transforms.WindowWarp(window_ratio=0.3, scales=(0.5, 2), p=prob)

    # Named augmentation groups selectable through opt.aug_type.
    transforms_list = {
        'jitter': [jitter],
        'cutout': [cutout],
        'scaling': [scaling],
        'magnitude_warp': [magnitude_warp],
        'time_warp': [time_warp],
        'window_slice': [window_slice],
        'window_warp': [window_warp],
        'G0': [jitter, magnitude_warp, window_slice],
        'G1': [jitter, time_warp, window_slice],
        'G2': [jitter, time_warp, window_slice, window_warp, cutout],
        'none': [raw],
    }

    transforms_targets = [item
                          for name in opt.aug_type
                          for item in transforms_list[name]]
    train_transform = transforms.Compose(transforms_targets + [transforms.ToTensor()])

    backbone = SimConv4().cuda()
    model = RelationalReasoning(backbone, feature_size).cuda()

    train_set = MultiUCR2018(data=x_train, targets=y_train, K=K,
                             transform=train_transform)
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)
    # Snapshot the untrained backbone, train, then snapshot the final weights.
    torch.save(model.backbone.state_dict(), '{}/backbone_init.tar'.format(ckpt_dir))
    acc_max, epoch_max = model.train(tot_epochs=tot_epochs,
                                     train_loader=train_loader, opt=opt)
    torch.save(model.backbone.state_dict(), '{}/backbone_last.tar'.format(ckpt_dir))

    return acc_max, epoch_max
Ejemplo n.º 10
0
def eval(args, model):
    """Run *model* on every ``.bmp`` image in ``args.imgdir`` and save each
    output image to ``args.savedir`` as ``<name>_<model>.bmp``.

    Note: the name shadows the ``eval`` builtin; kept unchanged for
    caller compatibility.
    """
    if not os.path.exists(args.savedir):
        os.makedirs(args.savedir)

    listImgFiles = [
        k.split('/')[-1].split('.')[0]
        for k in glob.glob(os.path.join(args.imgdir, '*.bmp'))
    ]
    for currFile in tqdm(listImgFiles):
        img = Image.open(os.path.join(args.imgdir, currFile + '.bmp'))
        img = t.ToTensor()(img)
        if args.cuda:
            # BUG FIX: ``Tensor.cuda()`` returns a new tensor on the GPU;
            # the original discarded the result, so the input never
            # actually moved off the CPU.
            img = img.cuda()
        output = model(Variable(img, volatile=True).unsqueeze(0))
        output = t.ToPILImage()(output[0].cpu().data)
        newfilename = os.path.join(args.savedir,
                                   currFile + '_' + args.model + '.bmp')
        output.save(newfilename, 'BMP')
Ejemplo n.º 11
0
def get_dataloader():
    """Build the distributed Cityscapes training loader and its sampler."""
    # TODO(xwd): Adaptive normalization by some large image.
    # E.g. in medical image processing, WSI images are very large and
    # differ from ordinary images.

    # ImageNet statistics scaled to the [0, 255] pixel range.
    value_scale = 255
    mean = [m * value_scale for m in [0.485, 0.456, 0.406]]
    std = [s * value_scale for s in [0.229, 0.224, 0.225]]

    train_transform = transform.Compose([
        transform.RandomScale([cfg['scale_min'], cfg['scale_max']]),
        transform.RandomRotate([cfg['rotate_min'], cfg['rotate_max']],
                               padding=mean,
                               ignore_label=cfg['ignore_label']),
        transform.RandomGaussianBlur(),
        transform.RandomHorizontallyFlip(),
        transform.RandomCrop([cfg['train_h'], cfg['train_w']],
                             crop_type='rand',
                             padding=mean,
                             ignore_label=cfg['ignore_label']),
        transform.ToTensor(),
        transform.Normalize(mean=mean, std=std),
    ])

    train_data = cityscapes.Cityscapes(cfg['data_path'],
                                       split='train',
                                       transform=train_transform)

    # Each GPU loads a disjoint shard of the dataset to avoid redundant data.
    train_sampler = DistributedSampler(train_data)

    per_gpu_batch = cfg['batch_size'] // cfg['world_size']
    train_loader = DataLoader(train_data,
                              batch_size=per_gpu_batch,
                              shuffle=(train_sampler is None),
                              num_workers=4,
                              pin_memory=True,
                              sampler=train_sampler,
                              drop_last=True)

    return train_loader, train_sampler
def get_transform(train=False, yolo=False, aug=None):
    """Build the custom detection transform pipeline.

    Parameters
    ----------
    train : bool
        Add random horizontal flip and random crop augmentation.
    yolo : bool
        Prepend the pad-to-square and resize steps required for YOLO input.
    aug : str or None
        Optional extra augmentation: 'dirty_camera_lens' or 'gan'.

    Raises
    ------
    ValueError
        If *aug* is not one of the supported values.  (The original used
        ``assert``, which is silently stripped under ``python -O``.)
    """
    if aug not in ('dirty_camera_lens', 'gan', None):
        raise ValueError("Aug parameter not valid")

    # Distinct local name so the list does not shadow a transforms module.
    steps = []

    if yolo:
        steps.append(custom_T.PadToSquare())
        steps.append(custom_T.Resize(img_size=None))

    if train:
        steps.append(custom_T.RandomHorizontalFlip())
        steps.append(custom_T.RandomCrop())

    if aug == 'dirty_camera_lens':
        print("Augmentation: Dirty Camera Lens")
        steps.append(custom_T.DirtyCameraLens())

    steps.append(custom_T.ToTensor())

    return custom_T.Compose(steps)
Ejemplo n.º 13
0
    def __init__(self, root=None, dataloader=default_loader):
        """Evaluation dataset: eagerly loads every image listed in
        EVAL_DATASET under *root* together with its integer label."""
        self.transform = transforms.Compose([
            transforms.Resize([256, 256]),
            transforms.RandomCrop(INPUT_SIZE),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225)),
        ])
        self.dataloader = dataloader
        self.root = root
        self.imgs = []
        self.labels = []

        # Each line is "<image_path> <label>"; images are decoded up front,
        # trading memory for fast item access later.
        with open(os.path.join(self.root, EVAL_DATASET), 'r') as fid:
            for line in fid:
                img_path, label = line.strip().split()
                self.imgs.append(self.dataloader(img_path))
                self.labels.append(int(label))
Ejemplo n.º 14
0
def deploy(path):
    """Load a saved PaddlePaddle inference model from *path*, run it on one
    image from the dataset's 'testA' folder, and plot input vs. output."""
    assert os.path.exists(path), f'{path} not found : ('
    dataset = 'YOUR_DATASET_NAME'

    img_size = 256
    test_transform = transforms.Compose([
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])
    testA = ImageFolder(os.path.join('dataset', dataset, 'testA'), test_transform)
    with fluid.dygraph.guard():
        testA_loader = DataLoader(testA, batch_size=1, shuffle=False)
        real_A, _ = next(iter(testA_loader))
        in_np = real_A.numpy()

    # Load the serialized inference program on CPU.
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    program, feed_vars, fetch_vars = fluid.io.load_inference_model(path, exe)

    # Single-image inference.
    fetch, = exe.run(program, feed={feed_vars[0]: in_np}, fetch_list=fetch_vars)

    def img_postprocess(img):
        """Undo the [-1, 1] normalization, drop the batch axis, move to
        HWC layout, and reverse the channel order (BGR to RGB)."""
        assert isinstance(img, np.ndarray), type(img)
        img = img * 0.5 + 0.5
        img = img.squeeze(0).transpose((1, 2, 0))
        return img[:, :, ::-1]

    in_img = img_postprocess(in_np)
    out_img = img_postprocess(fetch)
    for pos, title, image in ((121, 'real A', in_img), (122, 'A to B', out_img)):
        plt.subplot(pos)
        plt.title(title)
        plt.imshow(image)
    plt.show()
Ejemplo n.º 15
0
def main():
    """Evaluate a saved AFENet snapshot on the VOC2012 test split, writing
    gray and color prediction maps via the project's ``test`` helper."""
    net = AFENet(classes=21, pretrained_model_path=None).cuda()
    snapshot_path = os.path.join(args['model_save_path'], args['snapshot'])
    net.load_state_dict(torch.load(snapshot_path))

    # ImageNet statistics scaled to the [0, 255] pixel range.
    value_scale = 255
    mean = [m * value_scale for m in [0.485, 0.456, 0.406]]
    std = [s * value_scale for s in [0.229, 0.224, 0.225]]

    test_transform = transform.Compose([transform.ToTensor()])

    test_data = voc2012.VOC2012(split='test',
                                data_root=args['dataset_root'],
                                data_list=args['test_list'],
                                transform=test_transform)
    test_loader = DataLoader(test_data, batch_size=1, shuffle=False,
                             num_workers=1)

    gray_folder = os.path.join(args['test_result_save_path'])
    # gray_folder = os.path.join(args['test_result_save_path'], 'gray')
    color_folder = os.path.join(args['test_result_save_path'], 'color')

    colors = np.loadtxt(args['colors_path']).astype('uint8')

    test(test_loader, test_data.data_list, net, 21, mean, std, 512, 480, 480,
         args['scales'], gray_folder, color_folder, colors)
Ejemplo n.º 16
0
def main(args=None):
    """Train (or evaluate) a video re-identification model.

    Seeds torch, selects GPU/CPU, builds train/query/gallery video loaders,
    constructs the model with three losses (label-smoothed cross-entropy,
    triplet, regularization), then runs the epoch loop with periodic
    evaluation and checkpointing.  Despite the default, *args* is required:
    attribute access fails immediately on ``None``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    # torch.cuda.set_device(0)
    use_gpu = torch.cuda.is_available()

    if args.use_cpu: use_gpu = False

    # Mirror stdout into a run-specific log file (train vs. eval).
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, args.log_train))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, args.log_test))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    # Tensor-level training transform: normalize + random erasing.
    transform_train = T.Compose([
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        T.RandomErasing(),
    ])

    # Second, image-level transform (resize + random 2D translation);
    # presumably applied before transform_train inside VideoDataset —
    # confirm against the VideoDataset implementation.
    transform_train2 = T.Compose([
        T.Resize((args.height, args.width)),
        T.Random2DTranslation(args.height, args.width),
    ])

    # Deterministic test-time preprocessing.
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = False

    # Training: random frame sampling with identity-balanced batches.
    trainloader = DataLoader(
        VideoDataset(dataset.train,
                     data_name=args.dataset,
                     seq_len=args.seq_len,
                     sample='random',
                     transform=transform_train,
                     transform2=transform_train2,
                     type="train"),
        sampler=RandomIdentitySampler(dataset.train,
                                      num_instances=args.num_instances),
        batch_size=args.train_batch,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=True,
    )

    # Query/gallery: 'dense' sampling for evaluation.
    queryloader = DataLoader(
        VideoDataset(dataset.query,
                     data_name=args.dataset,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test,
                     type="test"),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     data_name=args.dataset,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test,
                     type="test"),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing models: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              final_dim=args.feat_dim)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Classification, metric-learning, and regularization losses.
    crossEntropyLoss = CrossEntropyLabelSmooth(
        num_classes=dataset.num_train_pids, use_gpu=use_gpu)
    tripletLoss = TripletLoss(margin=args.margin)
    regularLoss = RegularLoss(use_gpu=use_gpu)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    scheduler = WarmupMultiStepLR(optimizer, args.stepsize, args.gamma,
                                  args.warmup_factor, args.warmup_items,
                                  args.warmup_method)
    start_epoch = args.start_epoch

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        atest(model, queryloader, galleryloader, use_gpu)
        return

    start_time = time.time()
    best_rank1 = -np.inf
    for epoch in range(start_epoch, args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))

        train(model, crossEntropyLoss, tripletLoss, regularLoss, optimizer,
              trainloader, use_gpu)

        # if args.stepsize > 0:
        scheduler.step()

        # NOTE(review): evaluation/checkpointing only kicks in after epoch
        # 200 (hard-coded); shorter runs never save a checkpoint — confirm
        # this is intended.
        if (epoch + 1) >= 200 and (epoch + 1) % args.eval_step == 0:
            print("==> Test")
            rank1 = atest(model, queryloader, galleryloader, use_gpu)
            is_best = rank1 > best_rank1
            if is_best: best_rank1 = rank1

            # Unwrap DataParallel before saving so keys have no 'module.' prefix.
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()
            save_checkpoint({
                'state_dict': state_dict,
            }, is_best,
                            osp.join(
                                args.save_dir,
                                args.model_name + str(epoch + 1) + '.pth.tar'))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Ejemplo n.º 17
0
def main(args=None):
    """Evaluation entry point for a video re-identification model.

    Builds query/gallery loaders, restores weights from ``args.model_path``,
    and runs ``atest`` when ``args.evaluate`` is set; otherwise the function
    returns after setup without evaluating.  Despite the default, *args* is
    required — attribute access fails immediately on ``None``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    # torch.cuda.set_device(0)
    use_gpu = torch.cuda.is_available()

    if args.use_cpu: use_gpu = False

    # Mirror stdout into a run-specific log file (train vs. eval).
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, args.log_train))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, args.log_test))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_dataset(name=args.dataset)

    # Deterministic test-time preprocessing (resize + ImageNet normalization).
    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = False

    # Query/gallery loaders use 'dense' frame sampling for evaluation.
    queryloader = DataLoader(
        VideoDataset(dataset.query,
                     data_name=args.dataset,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test,
                     type="test"),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    galleryloader = DataLoader(
        VideoDataset(dataset.gallery,
                     data_name=args.dataset,
                     seq_len=args.seq_len,
                     sample='dense',
                     transform=transform_test,
                     type="test"),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=pin_memory,
        drop_last=False,
    )

    print("Initializing models: {}".format(args.arch))
    model = models.init_model(name=args.arch,
                              num_classes=dataset.num_train_pids,
                              final_dim=args.feat_dim)
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Restore the trained weights before (optionally) wrapping in DataParallel.
    checkpoint = torch.load(args.model_path)
    model.load_state_dict(checkpoint['state_dict'])

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        atest(model, queryloader, galleryloader, use_gpu)
        return
Ejemplo n.º 18
0
def main():
    """Train AFENet on PASCAL VOC 2012 semantic segmentation (21 classes).

    Builds train/val pipelines, optionally resumes model and optimizer
    state from a snapshot, applies a 10x learning rate to the newly added
    head modules, and runs the train/validate epoch loop.
    """
    net = AFENet(classes=21,
                 pretrained_model_path=args['pretrained_model_path']).cuda()
    # Pretrained backbone layers vs. newly introduced modules; the latter
    # are given a 10x learning rate below.
    net_ori = [net.layer0, net.layer1, net.layer2, net.layer3, net.layer4]
    net_new = [
        net.ppm, net.cls, net.aux, net.ppm_reduce, net.aff1, net.aff2, net.aff3
    ]

    # ImageNet statistics scaled to the [0, 255] pixel range.
    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    # Training: random scale/flip/crop augmentation; label 255 is ignored.
    train_transform = transform.Compose([
        transform.RandScale([0.75, 2.0]),
        transform.RandomHorizontalFlip(),
        transform.Crop([480, 480],
                       crop_type='rand',
                       padding=mean,
                       ignore_label=255),
        transform.ToTensor(),
        transform.Normalize(mean=mean, std=std)
    ])

    train_data = voc2012.VOC2012(split='train',
                                 data_root=args['dataset_root'],
                                 data_list=args['train_list'],
                                 transform=train_transform)
    train_loader = DataLoader(train_data,
                              batch_size=args['train_batch_size'],
                              shuffle=True,
                              num_workers=8,
                              drop_last=True)

    # Validation uses a deterministic center crop.
    val_transform = transform.Compose([
        transform.Crop([480, 480],
                       crop_type='center',
                       padding=mean,
                       ignore_label=255),
        transform.ToTensor(),
        transform.Normalize(mean=mean, std=std)
    ])
    val_data = voc2012.VOC2012(split='val',
                               data_root=args['dataset_root'],
                               data_list=args['val_list'],
                               transform=val_transform)

    val_loader = DataLoader(val_data,
                            batch_size=args['val_batch_size'],
                            shuffle=False,
                            num_workers=8)

    if len(args['snapshot']) == 0:
        # Fresh run: start at epoch 1 with an empty best record.
        curr_epoch = 1
        args['best_record'] = {
            'epoch': 0,
            'val_loss': 1e10,
            'acc': 0,
            'acc_cls': 0,
            'mean_iu': 0,
            'fwavacc': 0
        }
    else:
        # Resume: metrics are parsed from underscore-separated fields of
        # the snapshot filename — presumably
        # "epoch_<n>_loss_<v>_acc_<v>_acccls_<v>_meaniu_<v>_fwavacc_<v>";
        # confirm against the checkpoint naming convention.
        print('training resumes from ' + args['snapshot'])
        net.load_state_dict(
            torch.load(os.path.join(args['model_save_path'],
                                    args['snapshot'])))
        split_snapshot = args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        args['best_record'] = {
            'epoch': int(split_snapshot[1]),
            'val_loss': float(split_snapshot[3]),
            'acc': float(split_snapshot[5]),
            'acc_cls': float(split_snapshot[7]),
            'mean_iu': float(split_snapshot[9]),
            'fwavacc': float(split_snapshot[11])
        }
    params_list = []
    for module in net_ori:
        params_list.append(dict(params=module.parameters(), lr=args['lr']))
    for module in net_new:
        # Newly added modules train with a 10x learning rate.
        params_list.append(dict(params=module.parameters(),
                                lr=args['lr'] * 10))
    args['index_split'] = 5

    criterion = torch.nn.CrossEntropyLoss(ignore_index=255)

    optimizer = torch.optim.SGD(params_list,
                                lr=args['lr'],
                                momentum=args['momentum'],
                                weight_decay=args['weight_decay'])
    if len(args['snapshot']) > 0:
        # Restore optimizer state saved alongside the model snapshot.
        optimizer.load_state_dict(
            torch.load(
                os.path.join(args['model_save_path'],
                             'opt_' + args['snapshot'])))

    check_makedirs(args['model_save_path'])

    all_iter = args['epoch_num'] * len(train_loader)
    for epoch in range(curr_epoch, args['epoch_num'] + 1):
        train(train_loader, net, optimizer, epoch, all_iter)
        validate(val_loader, net, criterion, optimizer, epoch)
Ejemplo n.º 19
0
def get_transform(train=False):
    """To-tensor pipeline, plus a 50% random horizontal flip when training."""
    steps = [transforms.ToTensor()]
    if train:
        steps.append(transforms.RandomHorizontalFlip(0.5))
    return transforms.Compose(steps)
Ejemplo n.º 20
0
def get_transform(train):
    """Convert the PIL image to a tensor; training adds a 50% random
    horizontal flip."""
    pipeline = [T.ToTensor()]
    if train:
        pipeline.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(pipeline)
Ejemplo n.º 21
0
    def build_model(self):
        """Construct datasets, loaders, networks, losses and optimizers.

        Builds train/test ImageFolders for domains A and B, two ResNet
        generators (A-to-B and B-to-A), global and local discriminators per
        domain, L1/MSE/BCE losses, and one Adam optimizer each for the
        generator and discriminator parameter groups (PaddlePaddle fluid).
        """
        # Pad proportionally to img_size (30 px at the reference size of 256)
        # so the subsequent random crop acts as augmentation.
        pad = int(30 * self.img_size // 256)
        train_transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Resize((self.img_size + pad, self.img_size + pad)),
            transforms.RandomCrop(self.img_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])
        # Test pipeline: deterministic resize only.
        test_transform = transforms.Compose([
            transforms.Resize((self.img_size, self.img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])

        self.trainA = ImageFolder(
            os.path.join('dataset', self.dataset, 'trainA'), train_transform)
        self.trainB = ImageFolder(
            os.path.join('dataset', self.dataset, 'trainB'), train_transform)
        self.testA = ImageFolder(
            os.path.join('dataset', self.dataset, 'testA'), test_transform)
        self.testB = ImageFolder(
            os.path.join('dataset', self.dataset, 'testB'), test_transform)
        self.trainA_loader = DataLoader(self.trainA,
                                        batch_size=self.batch_size,
                                        shuffle=True)
        self.trainB_loader = DataLoader(self.trainB,
                                        batch_size=self.batch_size,
                                        shuffle=True)
        self.testA_loader = DataLoader(self.testA, batch_size=1, shuffle=False)
        self.testB_loader = DataLoader(self.testB, batch_size=1, shuffle=False)
        """ Define Generator, Discriminator """
        self.genA2B = ResnetGenerator(input_nc=3,
                                      output_nc=3,
                                      ngf=self.ch,
                                      n_blocks=self.n_res,
                                      img_size=self.img_size,
                                      light=self.light)
        self.genB2A = ResnetGenerator(input_nc=3,
                                      output_nc=3,
                                      ngf=self.ch,
                                      n_blocks=self.n_res,
                                      img_size=self.img_size,
                                      light=self.light)
        # Per domain: a deeper (7-layer) global and shallower (5-layer)
        # local discriminator.
        self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disGB = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
        self.disLB = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
        """ Define Loss """
        self.L1_loss = loss.L1Loss()
        self.MSE_loss = loss.MSELoss()
        self.BCE_loss = loss.BCEWithLogitsLoss()
        """ Trainer """
        def get_params(block):
            # Collect trainable params, excluding instance-norm params and
            # weight_u / weight_v (presumably spectral-norm auxiliaries —
            # confirm against the generator/discriminator definitions).
            out = []
            for name, param in block.named_parameters():
                if 'instancenorm' in name or 'weight_u' in name or 'weight_v' in name:
                    continue
                out.append(param)
            return out

        genA2B_parameters = get_params(self.genA2B)
        genB2A_parameters = get_params(self.genB2A)
        disGA_parameters = get_params(self.disGA)
        disGB_parameters = get_params(self.disGB)
        disLA_parameters = get_params(self.disLA)
        disLB_parameters = get_params(self.disLB)
        # One optimizer over both generators; one over all four discriminators.
        G_parameters = genA2B_parameters + genB2A_parameters
        D_parameters = disGA_parameters + disGB_parameters + disLA_parameters + disLB_parameters
        self.G_optim = fluid.optimizer.Adam(
            parameter_list=G_parameters,
            learning_rate=self.lr,
            beta1=0.5,
            beta2=0.999,
            regularization=fluid.regularizer.L2Decay(self.weight_decay))
        self.D_optim = fluid.optimizer.Adam(
            parameter_list=D_parameters,
            learning_rate=self.lr,
            beta1=0.5,
            beta2=0.999,
            regularization=fluid.regularizer.L2Decay(self.weight_decay))
        """ Define Rho clipper to constraint the value of rho in AdaILN and ILN"""
Ejemplo n.º 22
0
def evaluation(x_train,
               y_train,
               x_val,
               y_val,
               x_test,
               y_test,
               nb_class,
               ckpt,
               opt,
               ckpt_tosave=None):
    """Linear evaluation of a frozen backbone checkpoint.

    Loads the SimConv4 backbone weights from ``ckpt``, freezes them, and
    trains a single linear classifier on top. The best epoch is chosen by
    validation accuracy; the test accuracy measured at that epoch is what
    gets returned.

    Args:
        x_train, y_train: training samples and labels.
        x_val, y_val: validation split.
        x_test, y_test: test split.
        nb_class: number of target classes.
        ckpt: path of the saved backbone ``state_dict``.
        opt: option namespace; uses ``feature_size``, ``learning_rate_test``,
            ``patience_test``, ``epochs_test`` and ``model_name``.
        ckpt_tosave: optional path to re-save the loaded backbone weights.

    Returns:
        (test_acc, best_epoch): test accuracy at the best validation epoch
        and the index of that epoch.
    """
    # no augmentations used for linear evaluation
    transform_lineval = transforms.Compose([transforms.ToTensor()])

    train_set_lineval = UCR2018(data=x_train,
                                targets=y_train,
                                transform=transform_lineval)
    val_set_lineval = UCR2018(data=x_val,
                              targets=y_val,
                              transform=transform_lineval)
    test_set_lineval = UCR2018(data=x_test,
                               targets=y_test,
                               transform=transform_lineval)

    train_loader_lineval = torch.utils.data.DataLoader(train_set_lineval,
                                                       batch_size=128,
                                                       shuffle=True)
    val_loader_lineval = torch.utils.data.DataLoader(val_set_lineval,
                                                     batch_size=128,
                                                     shuffle=False)
    test_loader_lineval = torch.utils.data.DataLoader(test_set_lineval,
                                                      batch_size=128,
                                                      shuffle=False)
    signal_length = x_train.shape[1]  # kept for the OS_CNN alternative below

    # loading the saved backbone
    backbone_lineval = SimConv4().cuda()  # defining a raw backbone model
    # backbone_lineval = OS_CNN(signal_length).cuda()  # defining a raw backbone model

    # opt.feature_size output features from the backbone, nb_class classes
    linear_layer = torch.nn.Linear(opt.feature_size, nb_class).cuda()
    # linear_layer = torch.nn.Linear(backbone_lineval.rep_dim, nb_class).cuda()

    checkpoint = torch.load(ckpt, map_location='cpu')
    backbone_lineval.load_state_dict(checkpoint)
    if ckpt_tosave:
        torch.save(backbone_lineval.state_dict(), ckpt_tosave)

    # Only the linear head is optimized; the backbone stays frozen.
    optimizer = torch.optim.Adam(linear_layer.parameters(),
                                 lr=opt.learning_rate_test)
    CE = torch.nn.CrossEntropyLoss()

    early_stopping = EarlyStopping(opt.patience_test, verbose=True)
    best_acc = 0
    best_epoch = 0
    # Fix: initialize test_acc so the per-epoch log line (and the final
    # return) cannot raise NameError when the validation accuracy never
    # exceeds the initial best_acc.
    test_acc = 0.0

    print('Linear evaluation')
    for epoch in range(opt.epochs_test):
        linear_layer.train()
        backbone_lineval.eval()  # backbone in eval mode; no grads flow to it

        acc_trains = list()
        for i, (data, target) in enumerate(train_loader_lineval):
            optimizer.zero_grad()
            data = data.cuda()
            target = target.cuda()

            # detach(): gradients only reach the linear classifier.
            output = backbone_lineval(data).detach()
            output = linear_layer(output)
            loss = CE(output, target)
            loss.backward()
            optimizer.step()
            # estimate the accuracy
            prediction = output.argmax(-1)
            correct = prediction.eq(target.view_as(prediction)).sum()
            accuracy = (100.0 * correct / len(target))
            acc_trains.append(accuracy.item())

        print('[Train-{}][{}] loss: {:.5f}; \t Acc: {:.2f}%' \
              .format(epoch + 1, opt.model_name, loss.item(), sum(acc_trains) / len(acc_trains)))

        acc_vals = list()
        acc_tests = list()
        linear_layer.eval()
        with torch.no_grad():
            for i, (data, target) in enumerate(val_loader_lineval):
                data = data.cuda()
                target = target.cuda()

                output = backbone_lineval(data).detach()
                output = linear_layer(output)
                # estimate the accuracy
                prediction = output.argmax(-1)
                correct = prediction.eq(target.view_as(prediction)).sum()
                accuracy = (100.0 * correct / len(target))
                acc_vals.append(accuracy.item())

            val_acc = sum(acc_vals) / len(acc_vals)
            # Run the test pass only when validation improves.
            if val_acc > best_acc:
                best_acc = val_acc
                best_epoch = epoch
                for i, (data, target) in enumerate(test_loader_lineval):
                    data = data.cuda()
                    target = target.cuda()

                    output = backbone_lineval(data).detach()
                    output = linear_layer(output)
                    # estimate the accuracy
                    prediction = output.argmax(-1)
                    correct = prediction.eq(target.view_as(prediction)).sum()
                    accuracy = (100.0 * correct / len(target))
                    acc_tests.append(accuracy.item())

                test_acc = sum(acc_tests) / len(acc_tests)

        print('[Test-{}] Val ACC:{:.2f}%, Best Test ACC.: {:.2f}% in Epoch {}'.
              format(epoch, val_acc, test_acc, best_epoch))
        early_stopping(val_acc, None)
        if early_stopping.early_stop:
            print("Early stopping")
            break

    return test_acc, best_epoch
Ejemplo n.º 23
0
# ---------------------------------
# Dataset preparation
# ---------------------------------
print('Loading dataset...')

# Pre-crop resize edge chosen from the final crop size (default 256).
cache_size = {448: 256 * 2, 352: 402}.get(args.image_size, 256)

# ImageNet channel statistics used for normalization.
_MEAN = [0.485, 0.456, 0.406]
_STD = [0.229, 0.224, 0.225]

# Train: oversized resize, then random crop + horizontal flip.
transform_train = transforms.Compose([
    transforms.Resize((cache_size, cache_size)),
    transforms.RandomCrop(args.image_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(_MEAN, _STD),
])

# Test: deterministic center crop of the same resized image.
transform_test = transforms.Compose([
    transforms.Resize((cache_size, cache_size)),
    transforms.CenterCrop(args.image_size),
    transforms.ToTensor(),
    transforms.Normalize(_MEAN, _STD),
])


print("Dataset Initializing...")
trainset = SRDataset.SRDataset(max_person=args.max_person,
                               image_dir=args.images_root,
                               images_list=args.train_file_pre + '_images.txt',
                               bboxes_list=args.train_file_pre + '_bbox.json',
                               relations_list=args.train_file_pre + '_relation.json',
                               image_size=args.image_size,
                               input_transform=transform_train)
Ejemplo n.º 24
0
def main():
    """Train (or evaluate) a vehicle re-identification model.

    Builds dataset/loaders, initializes the model and losses (cross-entropy
    + triplet), optionally restores pretrained/resumed weights, then runs
    the train/eval loop while checkpointing and tracking the best rank-1
    score. All configuration comes from the module-level `args`.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    # Redirect stdout to a persistent log file (separate train/test logs).
    if not args.evaluate:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    else:
        sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print("Initializing dataset {}".format(args.dataset))
    dataset = data_manager.init_imgreid_dataset(
        root=args.root, name=args.dataset,
    )

    # Train-time augmentation: random translation crop + horizontal flip,
    # normalized with ImageNet statistics.
    transform_train = T.Compose([
        T.Random2DTranslation(args.height, args.width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    transform_test = T.Compose([
        T.Resize((args.height, args.width)),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    pin_memory = True if use_gpu else False

    # Identity-balanced sampling: each batch carries `num_instances` images
    # per vehicle id, as required by the triplet loss.
    trainloader = DataLoader(
        ImageDataset(dataset.train, transform=transform_train),
        sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances),
        batch_size=args.train_batch, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=True,
    )

    queryloader = DataLoader(
        ImageDataset(dataset.test_3000_query, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    galleryloader = DataLoader(
        ImageDataset(dataset.test_3000_gallery, transform=transform_test),
        batch_size=args.test_batch, shuffle=False, num_workers=args.workers,
        pin_memory=pin_memory, drop_last=False,
    )

    print("Initializing model: {}".format(args.arch))
    model = models.init_model(name=args.arch, num_classes=dataset.train_vehicle_nums, loss={'xent', 'htri'})
    print("Model size: {:.3f} M".format(sum(p.numel() for p in model.parameters()) / 1000000.0))
    modelid = 0
    if args.upload:
        # Optionally register this run's hyper-parameters with a remote tracker.
        modelid = upload_data.updatemodel(args.arch, args.lr, args.stepsize, args.gamma, args.loss, args.dataset,
                                          args.height, args.width, args.seq_len, args.train_batch, args.other)

    # criterion_xent = CrossEntropyLabelSmooth(num_classes=dataset.train_vehicle_nums, use_gpu=use_gpu)
    criterion_xent = nn.CrossEntropyLoss()
    criterion_htri = TripletLoss(margin=args.margin)

    optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.stepsize, gamma=args.gamma)
    start_epoch = args.start_epoch

    if args.load_weights:
        # load pretrained weights but ignore layers that don't match in size
        print("Loading pretrained weights from '{}'".format(args.load_weights))
        checkpoint = torch.load(args.load_weights)
        pretrain_dict = checkpoint['state_dict']
        model_dict = model.state_dict()
        pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
        model_dict.update(pretrain_dict)
        model.load_state_dict(model_dict)

    if args.resume:
        # Full resume: restores weights and the epoch counter.
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']
        rank1 = checkpoint['rank1']
        print("Loaded checkpoint from '{}'".format(args.resume))
        print("- start_epoch: {}\n- rank1: {}".format(start_epoch, rank1))

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    if args.evaluate:
        print("Evaluate only")
        test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
        return

    start_time = time.time()
    train_time = 0
    best_rank1 = -np.inf
    best_epoch = 0
    print("==> Start training")

    for epoch in range(start_epoch, args.max_epoch):
        start_train_time = time.time()
        train(modelid, epoch, model, criterion_xent, criterion_htri, optimizer, trainloader, use_gpu)
        train_time += round(time.time() - start_train_time)

        scheduler.step()

        # Evaluate every `eval_step` epochs once past `start_eval`, and
        # always on the final epoch.
        if (epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            cmc, mAP = test(model, queryloader, galleryloader, use_gpu)
            rank1 = cmc[0]
            is_best = rank1 > best_rank1

            if is_best:
                best_rank1 = rank1
                best_epoch = epoch + 1

            # Unwrap DataParallel before saving so checkpoints stay portable.
            if use_gpu:
                state_dict = model.module.state_dict()
            else:
                state_dict = model.state_dict()

            save_checkpoint({
                'state_dict': state_dict,
                'rank1': rank1,
                'epoch': epoch,
            }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))
            if args.upload:
                upload_data.updatetest(modelid, epoch + 1, mAP.item(), cmc[0].item(), cmc[4].item(), cmc[9].item(),
                                       cmc[19].item())
                upload_data.updaterank(modelid, best_rank1.item())

    print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
Ejemplo n.º 25
0
def main():
    """Train and evaluate a meta-learning model on Omniglot.

    Initializes arguments, seed and model, builds episodic train/test
    loaders, trains, then plots accuracy/loss curves, a confusion matrix of
    the test predictions, and a per-episode accuracy scatter plot.
    """

    # Initialize arguments
    args = init_arguments()

    # Initialize random
    init_seed(args.seed)

    # Initialize the model
    model, parameters, losses, accuracy_dict, accuracies, start_epoch = init_model(
        args)

    # Images are downscaled to IMAGE_SCALE x IMAGE_SCALE before the model.
    IMAGE_SCALE = 20

    transform = transforms.Compose(
        [transforms.Resize((IMAGE_SCALE, IMAGE_SCALE)),
         transforms.ToTensor()])

    omniglot_loader = loader.OmniglotLoader('data/omniglot',
                                            classify=False,
                                            partition=0.8,
                                            classes=True)
    train_loader = torch.utils.data.DataLoader(
        OMNIGLOT('data/omniglot',
                 train=True,
                 transform=transform,
                 download=True,
                 omniglot_loader=omniglot_loader,
                 batch_size=parameters.episode_length),
        batch_size=parameters.batch_size,
        shuffle=True)

    test_loader = torch.utils.data.DataLoader(OMNIGLOT(
        'data/omniglot',
        train=False,
        transform=transform,
        download=False,
        omniglot_loader=omniglot_loader,
        batch_size=parameters.episode_length),
                                              batch_size=parameters.batch_size,
                                              shuffle=False)

    # Per-sample losses are needed downstream, so reduction is disabled.
    # Fix: `reduce=False` has been deprecated in PyTorch for a long time;
    # reduction='none' is the equivalent, supported spelling.
    criterion = nn.CrossEntropyLoss(reduction='none')
    optimizer = optim.Adam(model.parameters())

    init_logging()
    accuracy_dict, losses, accuracies = train_model(model, parameters,
                                                    train_loader, criterion,
                                                    optimizer, args,
                                                    accuracy_dict, losses,
                                                    accuracies, start_epoch)
    loss_plot.plot([accuracies], ["Training Accuracy Percentage"],
                   "training_stats", args.name + "/", "Percentage")
    loss_plot.plot([losses], ["Training Loss"], "training_loss",
                   args.name + "/", "Average Loss")
    predictions, labels = evaluate(model, test_loader, criterion, parameters,
                                   args, 10)
    matrix_plot.plot_matrix(predictions,
                            labels,
                            args.name + "/",
                            title="matrix_plot_test")
    scatterplot.plot(accuracy_dict,
                     args.name + "/",
                     parameters.batch_size,
                     title="Prediction Accuracy after Testing")
Ejemplo n.º 26
0
import matplotlib.pyplot as plt
from tools import prediction
from utils.metrics import Evaluator
# Global experiment setup: seeded RNGs + train/val transform pipelines.
args = get_args()
rng = np.random.RandomState(seed=args.seed)

torch.manual_seed(seed=args.seed)

# NOTE(review): Normalize appears before ToTensor here, which assumes the
# custom `trans` transforms operate jointly on image+mask pre-tensor data —
# confirm against the `trans` module's implementation.
transform_train = trans.Compose([
    trans.RandomHorizontalFlip(),
    #trans.FixScale((args.crop_size,args.crop_size)),
    trans.RandomScale((0.5, 2.0)),
    #trans.FixScale(args.crop_size),
    trans.RandomCrop(args.crop_size),
    trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    trans.ToTensor(),
])

# Validation: deterministic fixed-scale + center crop, same normalization.
transform_val = trans.Compose([
    #trans.FixScale((args.crop_size,args.crop_size)),
    trans.FixScale(args.crop_size),
    trans.CenterCrop(args.crop_size),
    trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    trans.ToTensor(),
])
if (args.aug == True):
    voc_train = VOCSegmentation(root='./data',
                                set_name='train',
                                transform=transform_train)
else:
    voc_train = VOCSegmentation(root='./data',
Ejemplo n.º 27
0
def get_data(data_dir,
             source,
             target,
             source_train_path,
             target_train_path,
             source_extension,
             target_extension,
             height,
             width,
             batch_size,
             re=0,
             workers=8):
    """Build the domain-adaptation dataset and its four data loaders.

    Returns:
        (dataset, source_num_classes, source_train_loader,
         target_train_loader, query_loader, gallery_loader)
    """
    dataset = DA(data_dir, source, target, source_train_path,
                 target_train_path, source_extension, target_extension)

    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    source_num_classes = dataset.num_source_train_ids

    # Training: random rectangular crop + flip + random erasing (strength `re`).
    train_transformer = T.Compose([
        T.RandomSizedRectCrop(height, width),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(EPSILON=re),
    ])
    # Evaluation: deterministic resize (interpolation=3 is PIL bicubic).
    test_transformer = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        normalizer,
    ])

    def _train_loader(samples, images_dir, sub_path):
        # Training loaders deliberately run with num_workers=0 and
        # pin_memory=False, and drop the last incomplete batch.
        return DataLoader(Preprocessor(samples,
                                       root=osp.join(images_dir, sub_path),
                                       transform=train_transformer),
                          batch_size=batch_size,
                          num_workers=0,
                          shuffle=True,
                          pin_memory=False,
                          drop_last=True)

    def _eval_loader(samples, sub_path):
        # Query/gallery loaders keep order and use the requested workers.
        return DataLoader(Preprocessor(samples,
                                       root=osp.join(dataset.target_images_dir,
                                                     sub_path),
                                       transform=test_transformer),
                          batch_size=batch_size,
                          num_workers=workers,
                          shuffle=False,
                          pin_memory=True)

    source_train_loader = _train_loader(dataset.source_train,
                                        dataset.source_images_dir,
                                        dataset.source_train_path)
    target_train_loader = _train_loader(dataset.target_train,
                                        dataset.target_images_dir,
                                        dataset.target_train_path)
    query_loader = _eval_loader(dataset.query, dataset.query_path)
    gallery_loader = _eval_loader(dataset.gallery, dataset.gallery_path)

    return dataset, source_num_classes, source_train_loader, target_train_loader, query_loader, gallery_loader
Ejemplo n.º 28
0
def semisupervised_train(x_train, y_train, x_val, y_val, x_test, y_test, opt):
    """Semi-supervised training of a SimConv4 backbone on time series.

    Alternates, per epoch, (1) supervised training of backbone + linear
    classifier on a labeled subset (fraction ``opt.label_ratio``) and
    (2) self-supervised training on all data via a piece-relation pretext
    task (``CutPiece*``): pairs of cut pieces are classified by ``cls_head``.
    Best epoch is selected by validation accuracy of the linear classifier;
    returns ``(test_acc, best_epoch)``.
    """
    # construct data loader
    # Those are the transformations used in the paper
    prob = 0.2  # Transform Probability
    cutout = transforms_ts.Cutout(sigma=0.1, p=prob)
    jitter = transforms_ts.Jitter(sigma=0.2, p=prob)  # CIFAR10
    scaling = transforms_ts.Scaling(sigma=0.4, p=prob)
    magnitude_warp = transforms_ts.MagnitudeWrap(sigma=0.3, knot=4, p=prob)
    time_warp = transforms_ts.TimeWarp(sigma=0.2, knot=8, p=prob)
    window_slice = transforms_ts.WindowSlice(reduce_ratio=0.8, p=prob)
    window_warp = transforms_ts.WindowWarp(window_ratio=0.3,
                                           scales=(0.5, 2),
                                           p=prob)

    # Named augmentation sets selectable via opt.aug_type ('G0'-'G2' are
    # preset combinations; 'none' disables augmentation).
    transforms_list = {
        'jitter': [jitter],
        'cutout': [cutout],
        'scaling': [scaling],
        'magnitude_warp': [magnitude_warp],
        'time_warp': [time_warp],
        'window_slice': [window_slice],
        'window_warp': [window_warp],
        'G0': [jitter, magnitude_warp, window_slice],
        'G1': [jitter, time_warp, window_slice],
        'G2': [jitter, time_warp, window_slice, window_warp, cutout],
        'none': []
    }

    transforms_targets = list()
    for name in opt.aug_type:
        for item in transforms_list[name]:
            transforms_targets.append(item)

    # Pretext task: classify the relation between cut pieces; the number of
    # pieces (2..8) fixes the pretext class count `temp_class`.
    # NOTE(review): if opt.class_type matches none of these suffixes,
    # `cut_piece`/`temp_class` stay unbound and the code below raises —
    # confirm opt validation happens upstream.
    if '2C' in opt.class_type:
        cut_piece = transforms.CutPiece2C(sigma=opt.piece_size)
        temp_class = 2
    elif '3C' in opt.class_type:
        cut_piece = transforms.CutPiece3C(sigma=opt.piece_size)
        temp_class = 3
    elif '4C' in opt.class_type:
        cut_piece = transforms.CutPiece4C(sigma=opt.piece_size)
        temp_class = 4
    elif '5C' in opt.class_type:
        cut_piece = transforms.CutPiece5C(sigma=opt.piece_size)
        temp_class = 5
    elif '6C' in opt.class_type:
        cut_piece = transforms.CutPiece6C(sigma=opt.piece_size)
        temp_class = 6
    elif '7C' in opt.class_type:
        cut_piece = transforms.CutPiece7C(sigma=opt.piece_size)
        temp_class = 7
    elif '8C' in opt.class_type:
        cut_piece = transforms.CutPiece8C(sigma=opt.piece_size)
        temp_class = 8

    tensor_transform = transforms.ToTensor()
    train_transform_peice = transforms.Compose(transforms_targets)

    train_transform = transforms_ts.Compose(transforms_targets +
                                            [transforms_ts.ToTensor()])
    transform_eval = transforms.Compose([transforms.ToTensor()])

    train_set_labeled = UCR2018(data=x_train,
                                targets=y_train,
                                transform=train_transform)
    train_set_unlabeled = MultiUCR2018_Intra(
        data=x_train,
        targets=y_train,
        K=opt.K,
        transform=train_transform_peice,
        transform_cut=cut_piece,
        totensor_transform=tensor_transform)

    val_set = UCR2018(data=x_val, targets=y_val, transform=transform_eval)
    test_set = UCR2018(data=x_test, targets=y_test, transform=transform_eval)

    # Only a random `label_ratio` fraction of the training set is treated
    # as labeled.
    train_dataset_size = len(train_set_labeled)
    partial_size = int(opt.label_ratio * train_dataset_size)
    train_ids = list(range(train_dataset_size))
    np.random.shuffle(train_ids)
    train_sampler = SubsetRandomSampler(train_ids[:partial_size])

    trainloader_label = torch.utils.data.DataLoader(train_set_labeled,
                                                    batch_size=128,
                                                    sampler=train_sampler)
    trainloader_unlabel = torch.utils.data.DataLoader(train_set_unlabeled,
                                                      batch_size=128)

    val_loader_lineval = torch.utils.data.DataLoader(val_set,
                                                     batch_size=128,
                                                     shuffle=False)
    test_loader_lineval = torch.utils.data.DataLoader(test_set,
                                                      batch_size=128,
                                                      shuffle=False)

    # loading the saved backbone
    backbone = SimConv4().cuda()  # defining a raw backbone model

    # 64 are the number of output features in the backbone, and 10 the number of classes
    linear_layer = torch.nn.Linear(opt.feature_size, opt.nb_class).cuda()

    # linear_layer = torch.nn.Sequential(
    #     torch.nn.Linear(opt.feature_size, 256),
    #     torch.nn.BatchNorm1d(256),
    #     torch.nn.LeakyReLU(),
    #     torch.nn.Linear(256, nb_class),
    #     torch.nn.Softmax(),
    # ).cuda()

    # Pretext head takes the concatenated features of both pieces.
    # NOTE(review): Softmax feeding CrossEntropyLoss applies log-softmax on
    # already-normalized probabilities — training runs, but gradients are
    # damped; consider emitting raw logits instead (verify before changing).
    cls_head = torch.nn.Sequential(
        torch.nn.Linear(opt.feature_size * 2, 256),
        torch.nn.BatchNorm1d(256),
        torch.nn.LeakyReLU(),
        torch.nn.Linear(256, temp_class),
        torch.nn.Softmax(),
    ).cuda()

    # One optimizer over backbone + both heads.
    optimizer = torch.optim.Adam([{
        'params': backbone.parameters()
    }, {
        'params': linear_layer.parameters()
    }, {
        'params': cls_head.parameters()
    }],
                                 lr=opt.learning_rate)

    CE = torch.nn.CrossEntropyLoss()

    early_stopping = EarlyStopping(
        opt.patience,
        verbose=True,
        checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))

    torch.save(backbone.state_dict(),
               '{}/backbone_init.tar'.format(opt.ckpt_dir))

    best_acc = 0
    best_epoch = 0
    acc_epoch_cls = 0

    print('Semi-supervised Train')
    for epoch in range(opt.epochs):
        backbone.train()
        linear_layer.train()
        cls_head.train()

        acc_epoch = 0
        acc_epoch_cls = 0
        loss_epoch_label = 0
        loss_epoch_unlabel = 0
        loss = 0

        # Phase 1: supervised pass over the labeled subset.
        for i, data_labeled in enumerate(trainloader_label):
            optimizer.zero_grad()

            # labeled sample
            (x, target) = data_labeled
            x = x.cuda()
            target = target.cuda()
            output = backbone(x)
            output = linear_layer(output)
            loss_label = CE(output, target)
            loss_epoch_label += loss_label.item()

            loss_label.backward()
            optimizer.step()

            # estimate the accuracy
            prediction = output.argmax(-1)
            correct = prediction.eq(target.view_as(prediction)).sum()
            accuracy = (100.0 * correct / len(target))
            acc_epoch += accuracy.item()

        # Phase 2: self-supervised pass over all data (piece relation task).
        for i, data_unlabeled in enumerate(trainloader_unlabel):
            optimizer.zero_grad()

            # unlabeled time piece
            (x_piece1, x_piece2, target_temp, _) = data_unlabeled
            x_piece1 = torch.cat(x_piece1, 0).cuda()
            x_piece2 = torch.cat(x_piece2, 0).cuda()
            target_temp = torch.cat(target_temp, 0).cuda()
            features_cut0 = backbone(x_piece1)
            features_cut1 = backbone(x_piece2)
            features_cls = torch.cat([features_cut0, features_cut1], 1)
            c_output = cls_head(features_cls)
            correct_cls, length_cls = run_test(c_output, target_temp)
            loss_unlabel = CE(c_output, target_temp)
            loss_unlabel.backward()
            optimizer.step()

            loss_epoch_unlabel += loss_unlabel.item()

            accuracy_cls = 100. * correct_cls / length_cls
            acc_epoch_cls += accuracy_cls.item()

        acc_epoch /= len(trainloader_label)
        acc_epoch_cls /= len(trainloader_unlabel)
        loss_epoch_label /= len(trainloader_label)
        loss_epoch_unlabel /= len(trainloader_unlabel)

        print('[Train-{}][{}] loss_label: {:.5f}; \tloss_unlabel: {:.5f}; \t Acc label: {:.2f}% \t Acc unlabel: {:.2f}%' \
              .format(epoch + 1, opt.model_name, loss_epoch_label, loss_epoch_unlabel, acc_epoch, acc_epoch_cls))

        acc_vals = list()
        acc_tests = list()
        backbone.eval()
        linear_layer.eval()
        with torch.no_grad():
            for i, (x, target) in enumerate(val_loader_lineval):
                x = x.cuda()
                target = target.cuda()

                output = backbone(x).detach()
                output = linear_layer(output)
                # estimate the accuracy
                prediction = output.argmax(-1)
                correct = prediction.eq(target.view_as(prediction)).sum()
                accuracy = (100.0 * correct / len(target))
                acc_vals.append(accuracy.item())

            val_acc = sum(acc_vals) / len(acc_vals)
            # ">=" guarantees the test pass runs at least on the first epoch,
            # so test_acc is always bound before it is printed/returned.
            if val_acc >= best_acc:
                best_acc = val_acc
                best_epoch = epoch
                for i, (x, target) in enumerate(test_loader_lineval):
                    x = x.cuda()
                    target = target.cuda()

                    output = backbone(x).detach()
                    output = linear_layer(output)
                    # estimate the accuracy
                    prediction = output.argmax(-1)
                    correct = prediction.eq(target.view_as(prediction)).sum()
                    accuracy = (100.0 * correct / len(target))
                    acc_tests.append(accuracy.item())

                test_acc = sum(acc_tests) / len(acc_tests)

        print('[Test-{}] Val ACC:{:.2f}%, Best Test ACC.: {:.2f}% in Epoch {}'.
              format(epoch, val_acc, test_acc, best_epoch))
        early_stopping(val_acc, backbone)
        if early_stopping.early_stop:
            print("Early stopping")
            break
    torch.save(backbone.state_dict(),
               '{}/backbone_last.tar'.format(opt.ckpt_dir))

    return test_acc, best_epoch
Ejemplo n.º 29
0
    # DataLoader extras: worker/pinned-memory options only when CUDA is used.
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    ### PARAMETERS ###

    # LSTM & Q Learning
    IMAGE_SCALE = 28  # images are resized to IMAGE_SCALE x IMAGE_SCALE
    IMAGE_SIZE = IMAGE_SCALE*IMAGE_SCALE
    HIDDEN_LAYERS = 1
    HIDDEN_NODES = 200
    OUTPUT_CLASSES = args.class_vector_size
    ##################

    # Identical resize + to-tensor pipelines for the train and test splits.
    train_transform = transforms.Compose([
        transforms.Resize((IMAGE_SCALE, IMAGE_SCALE)),
        transforms.ToTensor()
    ])
    test_transform = transforms.Compose([
        transforms.Resize((IMAGE_SCALE, IMAGE_SCALE)),
        transforms.ToTensor()
    ])

    print("Loading trainingsets...")
    omniglot_loader = loader.OmniglotLoader('data/omniglot', classify=False, partition=0.8, classes=True)
    train_loader = torch.utils.data.DataLoader(
        OMNIGLOT('data/omniglot', train=True, transform=train_transform, download=True, omniglot_loader=omniglot_loader, batch_size=args.episode_size),
        batch_size=args.mini_batch_size, shuffle=True, **kwargs)
    print("Loading testset...")
    test_loader = torch.utils.data.DataLoader(
        OMNIGLOT('data/omniglot', train=False, transform=test_transform, omniglot_loader=omniglot_loader, batch_size=args.episode_size),
        batch_size=args.mini_batch_size, shuffle=True, **kwargs)
Ejemplo n.º 30
0
def _batch_accuracy(output, target):
    """Return the top-1 accuracy (in percent, as a float) for one batch."""
    prediction = output.argmax(-1)
    correct = prediction.eq(target.view_as(prediction)).sum()
    return (100.0 * correct / len(target)).item()


def _eval_accuracy(backbone, linear_layer, loader):
    """Mean per-batch accuracy of ``linear_layer(backbone(x))`` over ``loader``.

    The caller is responsible for putting the modules in eval mode and for
    wrapping the call in ``torch.no_grad()``.
    """
    accs = []
    for data, target in loader:
        data = data.cuda()
        target = target.cuda()
        output = linear_layer(backbone(data).detach())
        accs.append(_batch_accuracy(output, target))
    return sum(accs) / len(accs)


def supervised_train2(x_train, y_train, x_val, y_val, x_test, y_test, nb_class,
                      opt):
    """Jointly train a SimConv4 backbone and a linear head on UCR2018 data.

    Augmentations chosen via ``opt.aug_type`` are applied to the training
    split only; validation and test use a bare ToTensor pipeline.  The test
    set is evaluated only at epochs where validation accuracy reaches a new
    best, and training may stop early on a validation-accuracy plateau.

    Args:
        x_train, y_train: training series and labels.
        x_val, y_val: validation series and labels.
        x_test, y_test: test series and labels.
        nb_class: number of output classes for the linear head.
        opt: config namespace (aug_type, feature_size, learning_rate,
            patience, ckpt_dir, epochs, model_name, ...).

    Returns:
        (test_acc, best_epoch): test accuracy measured at the best-validation
        epoch, and that epoch's 0-based index.
    """
    # Those are the transformations used in the paper.
    prob = 0.2  # per-transform application probability
    cutout = transforms_ts.Cutout(sigma=0.1, p=prob)
    jitter = transforms_ts.Jitter(sigma=0.2, p=prob)  # CIFAR10
    scaling = transforms_ts.Scaling(sigma=0.4, p=prob)
    magnitude_warp = transforms_ts.MagnitudeWrap(sigma=0.3, knot=4, p=prob)
    time_warp = transforms_ts.TimeWarp(sigma=0.2, knot=8, p=prob)
    window_slice = transforms_ts.WindowSlice(reduce_ratio=0.8, p=prob)
    window_warp = transforms_ts.WindowWarp(window_ratio=0.3,
                                           scales=(0.5, 2),
                                           p=prob)

    # Named augmentation menus; 'G0'..'G2' are the grouped policies.
    transforms_list = {
        'jitter': [jitter],
        'cutout': [cutout],
        'scaling': [scaling],
        'magnitude_warp': [magnitude_warp],
        'time_warp': [time_warp],
        'window_slice': [window_slice],
        'window_warp': [window_warp],
        'G0': [jitter, magnitude_warp, window_slice],
        'G1': [jitter, time_warp, window_slice],
        'G2': [jitter, time_warp, window_slice, window_warp, cutout],
        'none': []
    }

    # Flatten the selected menus into one augmentation sequence.
    transforms_targets = [item for name in opt.aug_type
                          for item in transforms_list[name]]

    train_transform = transforms_ts.Compose(transforms_targets +
                                            [transforms_ts.ToTensor()])
    # NOTE(review): this uses torchvision `transforms`, not `transforms_ts`,
    # for the eval pipeline — looks intentional only if both ToTensor classes
    # accept the same array input; confirm.
    transform_lineval = transforms.Compose([transforms.ToTensor()])

    train_set_lineval = UCR2018(data=x_train,
                                targets=y_train,
                                transform=train_transform)
    val_set_lineval = UCR2018(data=x_val,
                              targets=y_val,
                              transform=transform_lineval)
    test_set_lineval = UCR2018(data=x_test,
                               targets=y_test,
                               transform=transform_lineval)

    train_loader_lineval = torch.utils.data.DataLoader(train_set_lineval,
                                                       batch_size=128,
                                                       shuffle=True)
    val_loader_lineval = torch.utils.data.DataLoader(val_set_lineval,
                                                     batch_size=128,
                                                     shuffle=False)
    test_loader_lineval = torch.utils.data.DataLoader(test_set_lineval,
                                                      batch_size=128,
                                                      shuffle=False)

    backbone_lineval = SimConv4().cuda()  # raw (untrained) backbone model

    # Linear classification head: opt.feature_size backbone features
    # mapped to nb_class logits.
    linear_layer = torch.nn.Linear(opt.feature_size, nb_class).cuda()
    # Backbone and head are optimized jointly (fully supervised).
    optimizer = torch.optim.Adam([{
        'params': backbone_lineval.parameters()
    }, {
        'params': linear_layer.parameters()
    }],
                                 lr=opt.learning_rate)

    CE = torch.nn.CrossEntropyLoss()

    # Stops training and checkpoints the best backbone on a val plateau.
    early_stopping = EarlyStopping(
        opt.patience,
        verbose=True,
        checkpoint_pth='{}/backbone_best.tar'.format(opt.ckpt_dir))

    # Snapshot the initial weights for later comparison.
    torch.save(backbone_lineval.state_dict(),
               '{}/backbone_init.tar'.format(opt.ckpt_dir))

    best_acc = 0
    best_epoch = 0
    # Initialized up front so the final print/return never hits a NameError
    # (e.g. when opt.epochs == 0).
    test_acc = 0.0

    print('Supervised Train')
    for epoch in range(opt.epochs):
        backbone_lineval.train()
        linear_layer.train()

        acc_trains = list()
        for data, target in train_loader_lineval:
            optimizer.zero_grad()
            data = data.cuda()
            target = target.cuda()

            output = linear_layer(backbone_lineval(data))
            loss = CE(output, target)
            loss.backward()
            optimizer.step()
            # Accuracy of the pre-step forward pass on this batch.
            acc_trains.append(_batch_accuracy(output, target))

        print('[Train-{}][{}] loss: {:.5f}; \t Acc: {:.2f}%' \
              .format(epoch + 1, opt.model_name, loss.item(), sum(acc_trains) / len(acc_trains)))

        backbone_lineval.eval()
        linear_layer.eval()
        with torch.no_grad():
            val_acc = _eval_accuracy(backbone_lineval, linear_layer,
                                     val_loader_lineval)
            if val_acc >= best_acc:
                best_acc = val_acc
                best_epoch = epoch
                # Only pay for a full test pass when validation improved.
                test_acc = _eval_accuracy(backbone_lineval, linear_layer,
                                          test_loader_lineval)

        # epoch + 1 keeps the numbering consistent with the Train print;
        # best_epoch stays 0-based, matching the returned value.
        print('[Test-{}] Val ACC:{:.2f}%, Best Test ACC.: {:.2f}% in Epoch {}'.
              format(epoch + 1, val_acc, test_acc, best_epoch))
        early_stopping(val_acc, backbone_lineval)
        if early_stopping.early_stop:
            print("Early stopping")
            break
    torch.save(backbone_lineval.state_dict(),
               '{}/backbone_last.tar'.format(opt.ckpt_dir))

    return test_acc, best_epoch