예제 #1
0
    def _get_data_loader(self, config, names=None):
        """Build the train/val DataLoader pair over the training directory.

        File names are split according to ``config.train_percent`` unless an
        explicit ``(train_names, val_names)`` pair is supplied via *names*.
        Raises ValueError when either split ends up empty.
        """
        transformations = self.__transformations
        if names is not None:
            train_names, val_names = names
        else:
            train_names, val_names = self._split_data(
                self.__train_dir, config.train_percent)

        train_folder = ImageFolder(
            self.__train_dir, train_names, transform=transformations['train'])
        val_folder = ImageFolder(
            self.__train_dir, val_names, transform=transformations['val'])
        if not (len(train_folder) and len(val_folder)):
            raise ValueError(
                'One of the image folders contains zero data, train: %s, val: %s' %
                (len(train_folder), len(val_folder)))

        # No custom sampler is used here; kept for parity with other loaders.
        sampler = None
        train_loader = torch.utils.data.DataLoader(
            train_folder, batch_size=config.batch_size, shuffle=True,
            sampler=sampler, num_workers=config.workers, pin_memory=True)
        val_loader = torch.utils.data.DataLoader(
            val_folder, batch_size=config.batch_size, shuffle=True,
            num_workers=config.workers, pin_memory=True)

        return train_loader, val_loader
예제 #2
0
    def _get_data_loader(self, config, names=None):
        """Build the train/val DataLoader pair, optionally weighting samples.

        File names are split according to ``config.train_percent`` unless an
        explicit ``(train_names, val_names)`` pair is supplied via *names*.
        Raises ValueError when either split ends up empty.
        """
        transformations = self.__transform_func()
        if names is None:
            train_names, val_names = self._split_data(self.__train_dir, config.train_percent)
        else:
            train_names, val_names = names

        loader = self.__get_loader(config)

        train_folder = ImageFolder(self.__label_file, self.__train_dir, train_names,
                                   transform=transformations['train'], loader=loader)
        val_folder = ImageFolder(self.__label_file, self.__train_dir, val_names,
                                 transform=transformations['val'], loader=loader)
        if not len(train_folder) or not len(val_folder):
            # BUG FIX: the old ``raise ValueError, msg`` form is Python 2 only
            # and a SyntaxError under Python 3.
            raise ValueError('One of the image folders contains zero data, train: %s, val: %s' %
                             (len(train_folder), len(val_folder)))

        sampler = None
        if config.weigh_sample:
            sampler = WeightedRandomSampler(train_folder.weights, len(train_folder), replacement=True)

        # BUG FIX: DataLoader raises ValueError when shuffle=True is combined
        # with a custom sampler, so shuffle only when no sampler is in use.
        train_loader = torch.utils.data.DataLoader(train_folder, batch_size=config.batch_size,
                                                   shuffle=sampler is None, sampler=sampler,
                                                   num_workers=config.workers, pin_memory=True)
        val_loader = torch.utils.data.DataLoader(val_folder, batch_size=config.batch_size, shuffle=True,
                                                 num_workers=config.workers, pin_memory=True)

        return train_loader, val_loader
예제 #3
0
def get_loader(root,
               split,
               batch_size,
               scale_size,
               num_workers=2,
               shuffle=True):
    """Create a DataLoader over ``<root>/splits/<split>``.

    CelebA images are center-cropped to 160 before scaling; other datasets
    are only scaled.  The per-sample shape is attached to the loader as
    ``data_loader.shape`` for downstream model construction.
    """
    dataset_name = os.path.basename(root)
    image_root = os.path.join(root, 'splits', split)

    steps = []
    if dataset_name in ['CelebA']:
        steps.append(transforms.CenterCrop(160))
    steps.extend([
        transforms.Scale(scale_size),
        transforms.ToTensor(),
        #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    dataset = ImageFolder(root=image_root, transform=transforms.Compose(steps))

    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=shuffle,
                                              num_workers=int(num_workers))
    # Expose the (C, H, W) shape of one sample on the loader object.
    data_loader.shape = [int(num) for num in dataset[0][0].size()]

    return data_loader
예제 #4
0
    def __write_submission(self, res, thresholds, output_file=None, detailed_output_file=None):
        """Write final result into csv"""
        def sort_arg(filename):
            # Sort 'base_123.ext'-style names by (base, numeric suffix).
            base, number = filename.split('_')
            return base, int(number[:-4])

        classes = ImageFolder(self.__label_file, self.__train_dir).classes
        mapping = dict(enumerate(classes))
        logger.info(str(mapping))

        output_file = output_file or self.__output_file.format(self.__cur_fold)
        detailed_output_file = detailed_output_file or self.__detailed_output_file.format(self.__cur_fold)

        with open(output_file, 'w') as wf, open(detailed_output_file, 'w') as dwf:
            # First line of the detailed file records the per-class thresholds.
            dwf.write(json.dumps({mapping[i]: v for i, v in enumerate(thresholds)}) + '\n')
            wf.write('image_name,tags\n')
            dwf.write('image_name,probs\n')
            for file_name, probs in sorted(res, key=lambda x: sort_arg(x[0])):
                name = file_name[:-4]
                labels = []
                detailed_enc = []
                for i, prob in enumerate(probs):
                    if prob > thresholds[i]:
                        labels.append(mapping[i])
                    detailed_enc.append('%s:%.5f' % (mapping[i], prob))
                wf.write(','.join([name, ' '.join(labels)]) + '\n')
                dwf.write(','.join([name, ' '.join(detailed_enc)]) + '\n')
예제 #5
0
def test_model(args):
    """Evaluate a DLA model (optionally resumed from a checkpoint) on the
    validation split, using 10-crop evaluation when ``args.crop_10`` is set."""
    # create model
    model = dla.__dict__[args.arch](pretrained=args.pretrained,
                                    pool_size=args.crop_size // 32)
    model = torch.nn.DataParallel(model)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {} prec {:.03f}) "
                  .format(args.resume, checkpoint['epoch'], best_prec1))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    data = dataset.get_data(args.data_name)
    if data is None:
        data = dataset.load_dataset_info(args.data, data_name=args.data_name)
    if data is None:
        # BUG FIX: the message must be formatted before constructing the
        # exception; previously the args were passed as extra exception args
        # and the '{}' placeholders were never filled in.
        raise ValueError('{} is not pre-defined in dataset.py and info.json '
                         'does not exist in {}'.format(args.data_name, args.data))
    # Data loading code
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=data.mean, std=data.std)

    # 10-crop evaluation crops inside validate_10, so skip CenterCrop here.
    if args.crop_10:
        t = transforms.Compose([
            transforms.Resize(args.scale_size),
            transforms.ToTensor(),
            normalize])
    else:
        t = transforms.Compose([
            transforms.Resize(args.scale_size),
            transforms.CenterCrop(args.crop_size),
            transforms.ToTensor(),
            normalize])
    val_loader = torch.utils.data.DataLoader(
        ImageFolder(valdir, t, out_name=args.crop_10),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()

    if args.cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    if args.crop_10:
        validate_10(args, val_loader, model,
                    '{}_i_{}_c_10.txt'.format(args.arch, args.start_epoch))
    else:
        validate(args, val_loader, model, criterion)
예제 #6
0
def get_groundtruth(permitted):
    """Return the ground-truth label matrix for the permitted training images,
    ordered by image name.

    BUG FIX: in Python 3 ``map`` returns an iterator, which has no ``.sort``
    method — the old ``truth.sort()`` raised AttributeError.  Use ``sorted``
    on the mapped pairs instead.
    """
    folder = ImageFolder(LABEL_FILE, TRAIN_FOLDER_TIF, set(permitted))
    # Strip the file extension and convert each label tensor to a numpy array,
    # then sort by the bare image name.
    truth = sorted((name.split('.')[0], label.numpy())
                   for name, label in folder.imgs)

    names, data = zip(*truth)
    data = np.array(data)

    return data
def init_data_loader(config, num_processes=4, path_feature=None):
    """Create the training DataLoader described by *config*, filling in
    ``config["num_labels"]`` from class_mapping.json or the directory layout."""
    mapping_path = os.path.join(config["batches_dir"], "class_mapping.json")
    if os.path.exists(mapping_path):
        import json
        with open(mapping_path, "r") as f:
            config["num_labels"] = len(json.loads(f.read()))
    else:
        # The number of labels is the number of dirs in batches_dir
        config["num_labels"] = sum(
            1 for p in os.listdir(config["batches_dir"])
            if os.path.isdir(os.path.join(config["batches_dir"], p)))

    # init dataset
    logging.info("Initializing data loader, this might take a while.....")
    all_transforms = _init_transforms(config["img_h"], config["img_w"],
                                      config["data_augmentation"])

    if path_feature is None:
        train_dataset = ImageFolder(config["batches_dir"],
                                    transform=all_transforms)
    else:
        train_dataset = ImageFolderWithFeature(config["batches_dir"],
                                               path_feature,
                                               transform=all_transforms)

    # Class-balanced sampling is forced by a triplet-loss margin > 0 or can
    # be requested explicitly in the batch-sampling params.
    class_balanced_sampling = (
        config["tri_loss_params"]["margin"] > 0
        or config["batch_sampling_params"]["class_balanced"])
    if class_balanced_sampling:
        logging.info("Using class_balanced sampling strategy.")

    # With a batch_sampler, DataLoader requires batch_size=None/shuffle=False.
    if class_balanced_sampling:
        batch_size = None
        shuffle = False
        sampler = TripletSampler(config["batch_sampling_params"], train_dataset)
    else:
        batch_size = config["batch_size"]
        shuffle = True
        sampler = None
    data_loader = torch.utils.data.DataLoader(train_dataset,
                                              shuffle=shuffle,
                                              batch_size=batch_size,
                                              num_workers=num_processes,
                                              batch_sampler=sampler)

    # log training set info
    count = len(train_dataset)
    iterations_per_epoch = len(data_loader)
    logging.info(
        "[TRAINING SET INFO] number of example: %s, number of labels: %s, "
        "iterations_per_epoch: %s" %
        (count, config["num_labels"], iterations_per_epoch))
    return data_loader
def store_delf_feature(cfg):
    """Extract DeLF features (with trained PCA applied) for every index image
    and pickle the collected features to ``cfg.delf_saved/index.delf``."""
    # logging
    current_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    log_save_path = os.path.join(cfg.log_root, cfg.stage)
    os.makedirs(log_save_path, exist_ok=True)
    # Touch the per-run log file.
    f = open(log_save_path+'/{}-train-{}.log'.format(current_time, cfg.stage), mode='w', encoding='utf-8')
    f.close()
    logging.basicConfig(format='%(asctime)s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S',
                        level=logging.INFO,
                        filename=log_save_path+'/{}-train.log'.format(current_time))

    print('loading dataset...')
    dataset = ImageFolder(root=cfg.index_img, transform=transforms.ToTensor())
    dataloader = data.DataLoader(dataset=dataset, batch_size=1, shuffle=True, num_workers=2)
    print('dataset load done.')
    # model construct
    print('model construct...')
    model = get_delf_feature(cfg.kp_path)
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()
    print('model load done.')

    # pca trained params.  FIX: close the h5 file once the arrays are copied.
    with h5py.File(os.path.join(cfg.pca_saved, 'pca.h5'), 'r') as h5file:
        pca_mean = copy.deepcopy(h5file['.']['pca_mean'].value)
        pca_var = copy.deepcopy(h5file['.']['pca_vars'].value)
        pca_matrix = copy.deepcopy(h5file['.']['pca_matrix'].value)
    # delf_pca = DelfPCA(pca_n_components=cfg.pca_dims, whitening=True, pca_saved_path=cfg.pca_saved)

    delf_features = []
    print('delf attention feature extracting...')
    pbar = tqdm(dataloader)
    for index, (inputs, _, filename) in enumerate(pbar):
        if torch.cuda.is_available():
            inputs = inputs.cuda()
        try:
            delf_feature = getDelfFeatureFromMultiScale(stage='delf', inputs=inputs, model=model, filename=filename, \
                                                pca_matrix=pca_matrix, pca_mean = pca_mean, pca_var=pca_var, pca_dims=cfg.pca_dims, rf=cfg.delf_rf, \
                                                stride=cfg.delf_stride, padding=cfg.delf_padding,topk=cfg.topk, scales=cfg.scales, \
                                                iou_thresh=cfg.iou_thres, attn_thres=cfg.atte_thres)
        except Exception as e:
            print(e)
            delf_feature = None
        # FIX: identity comparison with None instead of ``!= None``.
        if delf_feature is not None:
            delf_features.append(delf_feature)
        msg = "image name: {}".format(filename[0])
        pbar.set_description(desc=msg)
        logging.info(msg)
    print('delf features get done.')
    with open(os.path.join(cfg.delf_saved, 'index.delf'), 'wb') as delf_file:
        pickle.dump(delf_features, delf_file, protocol=2)
    print('saved DeLF feature at {}'.format(os.path.join(cfg.delf_saved, 'index.delf')))
예제 #9
0
파일: main.py 프로젝트: sayano-lee/mmc_ocr
def filter():
    """Cluster ImageNet-normalized features extracted from the ICDAR patches."""
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])
    dataset = ImageFolder(root=args.icdar_patches, transform=transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=True)

    model = Extractor()
    # Freeze the extractor — it is used for inference only.
    for params in model.parameters():
        params.requires_grad = False
    acc = clustering(loader=loader, model=model)
def get_loader(root,
               split,
               labels_per_class,
               batch_size,
               scale_size,
               num_workers=2,
               shuffle=True):
    """Create a DataLoader over ``<root>/splits/<split>``.

    For the train split with *labels_per_class* set, a label-limiting sampler
    is used; otherwise a plain (optionally shuffled) loader is returned.  The
    per-sample shape is attached as ``data_loader.shape``.
    """
    image_root = os.path.join(root, 'splits', split)

    dataset = ImageFolder(
        root=image_root,
        transform=transforms.Compose([
            transforms.CenterCrop(160),
            transforms.Scale(scale_size),
            transforms.ToTensor(),
            #transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
            #transforms.Normalize((0.51, 0.42, 0.37), (0.27, 0.27, 0.27))
        ]))  ## TODO add target_transform=onehot(n_labels)??

    # FIX: ``is not None`` instead of ``!= None``; the train-without-sampler
    # branch was identical to the else branch, so the two are merged.
    if split == 'train' and labels_per_class is not None:
        # BUG FIX: DataLoader forbids shuffle=True with a custom sampler —
        # shuffling is the sampler's responsibility here.
        # NOTE(review): ImageFolder normally has no ``train_labels`` attr;
        # confirm this custom dataset provides it.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            num_workers=int(num_workers),
            sampler=get_sampler(dataset.train_labels.numpy(),
                                labels_per_class))
    else:
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=batch_size,
                                                  shuffle=shuffle,
                                                  num_workers=int(num_workers))

    # Expose the (C, H, W) shape of one sample on the loader object.
    data_loader.shape = [int(num) for num in dataset[0][0].size()]

    return data_loader
예제 #11
0
def main_worker(args):
    """Evaluate a pretrained encoder against the pretrained label embeddings
    on the 'test' split of ``args.data``."""
    use_cuda = torch.cuda.is_available()
    args.device = torch.device('cuda:{}'.format(args.gpu) if use_cuda else 'cpu')

    # A pretrained encoder is mandatory for evaluation.
    if not args.pretrained:
        print('=> Pretrained model not specified.')
        return
    print('=> using pre-trained model')
    model = init_encoder_model(args.embed_size, args.pretrained)
    model = model.to(args.device)

    # Frozen label-embedding lookup table.
    e_weights = torch.FloatTensor(train_label_emb())
    label_model = nn.Embedding.from_pretrained(e_weights).to(args.device)

    # Data loading code
    print("Initializing Datasets and Dataloaders...")

    transformer = transforms.Compose([
        transforms.Resize(args.input_size),
        transforms.CenterCrop(args.input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    # Create training and validation datasets
    image_datasets = ImageFolder(os.path.join(args.data, 'test'), transformer)
    # Create training and validation dataloaders
    dataloaders = torch.utils.data.DataLoader(image_datasets,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=4)

    print('Evaluating')
    validate(dataloaders, model, label_model, args)
    print('Finished')
예제 #12
0
파일: svm.py 프로젝트: sayano-lee/mmc_ocr
def train(loader, model, clf):
    """Fit classifier *clf* on features extracted by *model* from *loader*,
    then evaluate it on the ICDAR-2015 test patches.

    NOTE(review): most sklearn estimators' ``fit`` re-fits from scratch each
    call, so only the last successful batch would matter — confirm *clf*
    supports incremental fitting (``partial_fit``-style semantics).
    """
    print("============>training<============")
    # NOTE(review): ``transform=transforms`` passes the transforms *module*
    # itself rather than a transform callable — verify this is intended.
    test_dataset = ImageFolder(root="./data/icdar2015/test_patches",
                               transform=transforms)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=20,
                                              shuffle=True)
    pbar = tqdm(total=len(loader))
    for cnt, data in enumerate(loader):
        pbar.update(1)
        # Batch layout: image tensor, annotation/label, file names.
        im, ann, im_fns = data[0], data[1], data[2]
        im = im.cuda()
        feat = model(im)
        x = feat.cpu().numpy()
        y = ann.numpy()
        try:
            clf.fit(x, y)
        except ValueError:
            # e.g. a batch containing only a single class — skip it.
            continue

    pbar.close()
    print("\n")
    print("============>testing<============")
    test(test_loader, clf)
예제 #13
0
def train_pca(cfg):
    """Collect DeLF descriptors over all index images and fit the PCA.

    Extraction failures on individual images are logged and skipped.
    """

    # logging
    current_time = datetime.datetime.strftime(datetime.datetime.now(),
                                              '%Y-%m-%d %H:%M:%S')
    log_save_path = os.path.join(cfg.log_root, cfg.stage)
    os.makedirs(log_save_path, exist_ok=True)
    # Touch the per-run log file.
    f = open(log_save_path + '/{}-train-pca.log'.format(current_time),
             mode='w',
             encoding='utf-8')
    f.close()
    logging.basicConfig(format='%(asctime)s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S',
                        level=logging.INFO,
                        filename=log_save_path +
                        '/{}-train.log'.format(current_time))

    print('loading dataset...')
    dataset = ImageFolder(root=cfg.index_img, transform=transforms.ToTensor())
    dataloader = data.DataLoader(dataset=dataset,
                                 batch_size=1,
                                 shuffle=True,
                                 num_workers=2)
    print('dataset load done.')
    # model construct
    print('model construct...')
    model = get_delf_feature(cfg.kp_path)
    if torch.cuda.is_available():
        model = model.cuda()
    model.eval()
    print('model load done.')

    delf_pca = DelfPCA(pca_n_components=cfg.pca_dims,
                       pca_saved_path=cfg.pca_saved)

    feature_maps = []
    print('delf attention feature extracting...')
    features_num = 0
    pbar = tqdm(dataloader)
    for (inputs, _, filename) in pbar:
        if torch.cuda.is_available():
            inputs = inputs.cuda()
        try:
            output = getDelfFeatureFromMultiScale(stage='pca', inputs=inputs, model=model, filename=filename, \
                                                pca_matrix=None, pca_var=None, pca_dims=cfg.pca_dims, rf=cfg.delf_rf, \
                                                stride=cfg.delf_stride, padding=cfg.delf_padding,topk=cfg.topk, scales=cfg.scales, \
                                                iou_thresh=cfg.iou_thres, attn_thres=cfg.atte_thres)
        except Exception as e:
            # BUG FIX: skip this image on failure — ``output`` would
            # otherwise be unbound (NameError) or stale from a prior image.
            print(e)
            continue

        descriptor_np_list = output['descriptor_np_list']
        # Turn the (N, dim) ndarray into a list of per-keypoint vectors.
        pca_feature = [
            descriptor_np_list[i, :]
            for i in range(descriptor_np_list.shape[0])
        ]
        feature_maps.extend(pca_feature)

        curr_fea_nums = descriptor_np_list.shape[0]
        features_num += curr_fea_nums
        msg = "curr feature nums: {}".format(features_num)
        pbar.set_description(desc=msg)
        logging.info(filename[0] + "-feature nums: {}".format(curr_fea_nums))

    print('features get done. Total feature nums: {}'.format(features_num))
    print('start train pca...')
    delf_pca(feature_maps)
    print('pca trained done. saved at {}'.format(cfg.pca_saved + 'pca.h5'))
예제 #14
0
    def extract(self, input_path, output_path):
        '''Extract features from single images without batch processing.

        Iterates the images under *input_path* one at a time (batch_size=1),
        skipping any that are mis-shaped, larger than 1200x1200, or smaller
        than 112 on a side.  In 'pca' mode the DeLF descriptors are
        accumulated and used to train the PCA; in 'delf' mode keypoint
        locations are drawn onto each image and written to disk.
        '''
        assert self.mode.lower() in ['pca', 'delf']
        batch_timer = AverageMeter()
        data_timer = AverageMeter()
        since = time.time()

        # dataloader.
        dataset = ImageFolder(
            root = input_path,
            transform = transforms.ToTensor())
        self.dataloader = torch.utils.data.DataLoader(
            dataset = dataset,
            batch_size = 1,
            shuffle = True,
            num_workers = 0)
        feature_maps = []
        if self.mode.lower() in ['pca']:
            for batch_idx, (inputs, _, filename) in enumerate(self.dataloader):
                # Skip anything that is not a 4-D (N, C, H, W) tensor.
                if not (len(inputs.size()) == 4):
                    if __DEBUG__:
                        # BUG FIX: was ``input.size()`` (the builtin), which
                        # raised AttributeError instead of printing the size.
                        print('wrong input dimenstion! ({},{})'.format(filename, inputs.size()))
                    continue
                # Image size upper limit.
                if not (inputs.size(2)*inputs.size(3) <= 1200*1200):
                    if __DEBUG__:
                        print('passed: image size too large! ({},{})'.format(filename, inputs.size()))
                    continue
                if not (inputs.size(2) >= 112 and inputs.size(3) >= 112):
                    if __DEBUG__:
                        print('passed: image size too small! ({},{})'.format(filename, inputs.size()))
                    continue

                data_timer.update(time.time() - since)
                # prepare inputs
                if __is_cuda__():
                    inputs = __cuda__(inputs)
                inputs = __to_var__(inputs)

                # get delf feature only for pca calculation.
                pca_feature = self.__extract_delf_feature__(inputs.data, filename, mode='pca')
                if pca_feature is not None:
                    feature_maps.extend(pca_feature)

                batch_timer.update(time.time() - since)
                since = time.time()

                # progress
                log_msg = ('\n[Extract][Processing:({batch}/{size})] ' +
                           'eta: (data:{data:.3f}s),(batch:{bt:.3f}s),(total:{tt:})').format(
                    batch=batch_idx + 1,
                    size=len(self.dataloader),
                    data=data_timer.val,
                    bt=batch_timer.val,
                    tt=0)  # bar.elapsed_td
                print(log_msg)
                print('\nnumber of selected features so far: {}'.format(len(feature_maps)))
                if len(feature_maps) >= 10000000:        # UPPER LIMIT.
                    break

                # Free GPU cache periodically.
                if batch_idx % 10 == 0:
                    torch.cuda.empty_cache()
                    if __DEBUG__:
                        print('GPU Memory flushed !!!!!!!!!')

            # train PCA.
            self.pca(feature_maps)

        else:
            assert self.mode.lower() in ['delf']
            feature_maps = []
            for batch_idx, (inputs, labels, filename) in enumerate(self.dataloader):
                # Same size gating as the 'pca' branch.
                if not (len(inputs.size()) == 4):
                    if __DEBUG__:
                        # BUG FIX: was ``input.size()`` (the builtin).
                        print('wrong input dimenstion! ({},{})'.format(filename, inputs.size()))
                    continue
                if not (inputs.size(2)*inputs.size(3) <= 1200*1200):
                    if __DEBUG__:
                        print('passed: image size too large! ({},{})'.format(filename, inputs.size()))
                    continue
                if not (inputs.size(2) >= 112 and inputs.size(3) >= 112):
                    if __DEBUG__:
                        print('passed: image size too small! ({},{})'.format(filename, inputs.size()))
                    continue

                data_timer.update(time.time() - since)
                # prepare inputs
                source_input = inputs
                if __is_cuda__():
                    inputs = __cuda__(inputs)
                inputs = __to_var__(inputs)

                # get delf everything (score, feature, etc.)
                delf_feature = self.__extract_delf_feature__(inputs.data, filename, mode='delf')

                # log.
                batch_timer.update(time.time() - since)
                since = time.time()
                log_msg = ('\n[Extract][Processing:({batch}/{size})] ' +
                           'eta: (data:{data:.3f}s),(batch:{bt:.3f}s),(total:{tt:})').format(
                    batch=batch_idx + 1,
                    size=len(self.dataloader),
                    data=data_timer.val,
                    bt=batch_timer.val,
                    tt=0)  # bar.elapsed_td
                print(log_msg)

                # Free GPU cache periodically.
                if batch_idx % 10 == 0:
                    torch.cuda.empty_cache()
                    if __DEBUG__:
                        print('GPU Memory flushed !!!!!!!!!')

                # TODO(review): hard-coded local paths; make these configurable.
                save_path = "/media/liesmars/SSD1/vearch/plugin/src/streetView/DeLF-pytorch/extract/output"
                img_path = "/media/liesmars/SSD1/vearch/plugin/src/streetView/DeLF-pytorch/extract/testdata/_9oMPkzl60LSzvxIlG2FZA(22.279587,114.167117)"
                # BUG FIX: guard against a failed extraction (None) instead of
                # the unconditional ``if True:`` that dereferenced it.
                if delf_feature is not None:
                    imgname = delf_feature["filename"][0]
                    img = cv2.imread(os.path.join(img_path, imgname))
                    feature_loc = delf_feature["location_np_list"]
                    # Draw each detected keypoint as a small red dot.
                    for loc in feature_loc:
                        img = cv2.circle(img, (int(loc[0]), int(loc[1])), 2, (0, 0, 230), -1)
                    cv2.imwrite(os.path.join(save_path, imgname), img)
                    print("save")
예제 #15
0
    def __init__(self, params):
        """Set up an AC-GAN trainer: seeding, dataset/loader, generator and
        discriminator networks, losses, tensor placeholders and optimizers.

        *params* is the parsed command-line namespace (dataset, dataroot,
        batchSize, imageSize, nz/ngf/ndf, num_classes, lr, beta1, cuda, ...).
        """
        self.params = params
        # specify the gpu id if using only 1 gpu
        if params.ngpu == 1:
            os.environ['CUDA_VISIBLE_DEVICES'] = str(params.gpu_id)
        # Output directory may already exist; ignore that case.
        try:
            os.makedirs(params.outf)
        except OSError:
            pass
        # Seed all RNGs (python, torch CPU, torch CUDA) for reproducibility.
        if params.manualSeed is None:
            params.manualSeed = random.randint(1, 10000)
        print("Random Seed: ", params.manualSeed)
        random.seed(params.manualSeed)
        torch.manual_seed(params.manualSeed)
        if params.cuda:
            torch.cuda.manual_seed_all(params.manualSeed)

        cudnn.benchmark = True

        if torch.cuda.is_available() and not params.cuda:
            print(
                "WARNING: You have a CUDA device, so you should probably run with --cuda"
            )

        # dataset
        if params.dataset == 'imagenet':
            # folder dataset, restricted to class indices 10..20
            self.dataset = ImageFolder(
                root=params.dataroot,
                transform=transforms.Compose([
                    transforms.Scale(params.imageSize),
                    transforms.CenterCrop(params.imageSize),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ]),
                classes_idx=(10, 20))
        elif params.dataset == 'cifar10':
            self.dataset = dset.CIFAR10(root=params.dataroot,
                                        download=True,
                                        transform=transforms.Compose([
                                            transforms.Scale(params.imageSize),
                                            transforms.ToTensor(),
                                            transforms.Normalize(
                                                (0.5, 0.5, 0.5),
                                                (0.5, 0.5, 0.5)),
                                        ]))
        else:
            raise NotImplementedError("No such dataset {}".format(
                params.dataset))

        assert self.dataset
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=params.batchSize,
            shuffle=True,
            num_workers=int(params.workers))

        # some hyper parameters
        self.ngpu = int(params.ngpu)
        self.nz = int(params.nz)
        self.ngf = int(params.ngf)
        self.ndf = int(params.ndf)
        self.num_classes = int(params.num_classes)
        # RGB images.
        self.nc = 3

        # Define the generator and initialize the weights
        if params.dataset == 'imagenet':
            self.netG = _netG(self.ngpu, self.nz)
        else:
            self.netG = _netG_CIFAR10(self.ngpu, self.nz)
        self.netG.apply(weights_init)
        if params.netG != '':
            self.netG.load_state_dict(torch.load(params.netG))
        print(self.netG)
        # Define the discriminator and initialize the weights
        if params.dataset == 'imagenet':
            self.netD = _netD(self.ngpu, self.num_classes)
        else:
            self.netD = _netD_CIFAR10(self.ngpu, self.num_classes)
        self.netD.apply(weights_init)
        if params.netD != '':
            self.netD.load_state_dict(torch.load(params.netD))
        print(self.netD)
        # loss functions: adversarial (real/fake) and auxiliary (class) heads
        self.dis_criterion = nn.BCELoss()
        self.aux_criterion = nn.NLLLoss()

        # tensor placeholders
        self.input = torch.FloatTensor(params.batchSize, 3, params.imageSize,
                                       params.imageSize)
        self.noise = torch.FloatTensor(params.batchSize, self.nz, 1, 1)
        # Fixed noise batch reused for evaluation snapshots.
        self.eval_noise = torch.FloatTensor(params.batchSize, self.nz, 1,
                                            1).normal_(0, 1)
        self.dis_label = torch.FloatTensor(params.batchSize)
        self.aux_label = torch.LongTensor(params.batchSize)
        self.real_label = 1
        self.fake_label = 0

        # if using cuda
        if params.cuda:
            self.netD.cuda()
            self.netG.cuda()
            self.dis_criterion.cuda()
            self.aux_criterion.cuda()
            self.input, self.dis_label, self.aux_label = self.input.cuda(
            ), self.dis_label.cuda(), self.aux_label.cuda()
            self.noise, self.eval_noise = self.noise.cuda(
            ), self.eval_noise.cuda()

        # define variables (legacy autograd Variable wrappers)
        self.input = Variable(self.input)
        self.noise = Variable(self.noise)
        self.eval_noise = Variable(self.eval_noise)
        self.dis_label = Variable(self.dis_label)
        self.aux_label = Variable(self.aux_label)
        # noise for evaluation: embed a one-hot class code into the first
        # num_classes entries of each fixed noise vector.
        self.eval_noise_ = np.random.normal(0, 1, (params.batchSize, self.nz))
        self.eval_label = np.random.randint(0, self.num_classes,
                                            params.batchSize)
        self.eval_onehot = np.zeros((params.batchSize, self.num_classes))
        self.eval_onehot[np.arange(params.batchSize), self.eval_label] = 1
        self.eval_noise_[np.arange(params.batchSize), :self.
                         num_classes] = self.eval_onehot[np.arange(
                             params.batchSize)]
        self.eval_noise_ = (torch.from_numpy(self.eval_noise_))
        self.eval_noise.data.copy_(
            self.eval_noise_.view(params.batchSize, self.nz, 1, 1))
        # setup optimizer
        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=params.lr,
                                     betas=(params.beta1, 0.999))
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=params.lr,
                                     betas=(params.beta1, 0.999))
예제 #16
0
def main_worker(args):
    """Build model/data/optimizer and run the train-validate loop.

    Checkpoints the best top-1 accuracy (global ``best_acc1``). Relies on
    module-level helpers defined elsewhere in this file:
    init_encoder_model, train_label_emb, train, validate,
    adjust_learning_rate, save_checkpoint.
    """
    global best_acc1

    args.device = torch.device(
        'cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu')

    # Both branches build the same model; args.pretrained only controls
    # whether init_encoder_model loads pretrained weights.
    if args.pretrained:
        print('=> using pre-trained model')
    else:
        print('=> creating model')
    model = init_encoder_model(args.embed_size, args.pretrained)
    model = model.to(args.device)

    # Frozen embedding table holding precomputed label vectors.
    e_weights = torch.FloatTensor(train_label_emb())
    label_model = nn.Embedding.from_pretrained(e_weights)
    label_model = label_model.to(args.device)

    optimizer = optim.SGD(model.parameters(),
                          args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(args.device)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading: train gets random augmentation; val a deterministic
    # resize + center crop. ImageNet normalization statistics.
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.RandomResizedCrop(args.input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val':
        transforms.Compose([
            transforms.Resize(args.input_size),
            transforms.CenterCrop(args.input_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }

    print("Initializing Datasets and Dataloaders...")

    # Create training and validation datasets
    image_datasets = {
        x: ImageFolder(os.path.join(args.data, x), data_transforms[x])
        for x in ['train', 'val']
    }
    # Create training and validation dataloaders
    dataloaders_dict = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=args.batch_size,
                                       shuffle=True,
                                       num_workers=4)
        for x in ['train', 'val']
    }

    if args.evaluate:
        # BUG FIX: pass label_model, matching the validate() call in the
        # epoch loop below; the old call omitted it, shifting criterion and
        # args into the wrong parameters.
        validate(dataloaders_dict['val'], model, label_model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(dataloaders_dict['train'], model, label_model, criterion,
              optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(dataloaders_dict['val'], model, label_model, criterion,
                        args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
예제 #17
0
############# DataLoader ######################
# Build the dataset for the selected source; both pipelines resize, center
# crop, and normalize each channel to [-1, 1].
if opt.dataset == 'lsun':
    dataset = dset.LSUN(db_path=opt.dataroot,
                        classes=['bedroom_train'],
                        transform=transforms.Compose([
                            # Resize replaces the deprecated transforms.Scale,
                            # matching the CelebA branch below.
                            transforms.Resize(opt.imageSize),
                            transforms.CenterCrop(opt.imageSize),
                            transforms.ToTensor(),
                            transforms.Normalize((0.5, 0.5, 0.5),
                                                 (0.5, 0.5, 0.5)),
                        ]))
elif opt.dataset == 'CelebA':
    dataset = ImageFolder(root=opt.dataroot,
                          transform=transforms.Compose([
                              transforms.Resize(opt.imageSize),
                              transforms.CenterCrop(opt.imageSize),
                              transforms.ToTensor(),
                              transforms.Normalize((0.5, 0.5, 0.5),
                                                   (0.5, 0.5, 0.5)),
                          ]))
assert dataset
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

# Network hyperparameters from the CLI options.
nz = int(opt.nz)    # latent vector size
ngf = int(opt.ngf)  # generator feature-map width
ndf = int(opt.ndf)  # discriminator feature-map width
nc = int(opt.nc)    # image channel count

예제 #18
0
args = parser.parse_args()

# Dataset root lives next to the script, named after the dataset.
data_dir = os.path.join('.', args.dataset)

print(args.dataset)

# No normalization here on purpose: this script measures the raw
# per-channel mean/std of the data.
train_transform = transforms.Compose([transforms.ToTensor()])

if args.dataset == "cifar10":
    train_set = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=True, transform=train_transform)
    # NOTE(review): ``train_data`` is the legacy torchvision attribute
    # (renamed to ``data`` in later releases) — confirm the pinned version
    # before upgrading torchvision.
    print(type(train_set.train_data))
    print(train_set.train_data.shape)
    # Idiomatic indexing instead of calling __getitem__ directly.
    print(train_set[0][0].size())
    print(train_set.train_data.mean(axis=(0, 1, 2))/255)
    print(train_set.train_data.std(axis=(0, 1, 2))/255)
    # Cross-check the stats against the exported image-folder copy.
    train_set = ImageFolder("cifar10/images/train", transform=train_transform)
    image_tensor = torch.stack([x for x, y in train_set])
    image_tensor = image_tensor.numpy()
    print(image_tensor.shape)
    print(train_set[0][0].size())
    print(image_tensor.mean(axis=(0, 2, 3)))
    print(image_tensor.std(axis=(0, 2, 3)))

elif args.dataset == "cifar100":
    train_set = torchvision.datasets.CIFAR100(root=data_dir, train=True, download=True, transform=train_transform)
    print(train_set.train_data.shape)
    print(np.mean(train_set.train_data, axis=(0, 1, 2))/255)
    print(np.std(train_set.train_data, axis=(0, 1, 2))/255)

elif args.dataset == "mnist":
    train_set = torchvision.datasets.MNIST(root=data_dir, train=True, download=True, transform=train_transform)
예제 #19
0
    'val': transforms.Compose(transform_val_list),
    'satellite': transforms.Compose(transform_satellite_list)}

# Suffix appended to model names when training on the full split.
train_all = '_all' if opt.train_all else ''

# One ImageFolder per view. The satellite view has its own transform; the
# street/drone/google views share the train transform. The google folder
# contains empty subfolders, so it uses the overwritten ImageFolder class.
image_datasets = {}
image_datasets['satellite'] = datasets.ImageFolder(
    os.path.join(data_dir, 'satellite'), data_transforms['satellite'])
image_datasets['street'] = datasets.ImageFolder(
    os.path.join(data_dir, 'street'), data_transforms['train'])
image_datasets['drone'] = datasets.ImageFolder(
    os.path.join(data_dir, 'drone'), data_transforms['train'])
image_datasets['google'] = ImageFolder(
    os.path.join(data_dir, 'google'), data_transforms['train'])

# 8 workers may work faster.
dataloaders = {
    view: torch.utils.data.DataLoader(image_datasets[view],
                                      batch_size=opt.batchsize,
                                      shuffle=True,
                                      num_workers=2,
                                      pin_memory=True)
    for view in ['satellite', 'street', 'drone', 'google']
}
dataset_sizes = {view: len(image_datasets[view])
                 for view in ['satellite', 'street', 'drone', 'google']}
class_names = image_datasets['street'].classes
print(dataset_sizes)
use_gpu = torch.cuda.is_available()

######################################################################
# Training the model
# ------------------
#
# Now, let's write a general function to train a model. Here, we will
예제 #20
0
    transforms.Scale(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

data_dir = 'hymenoptera_data'
train_path = os.path.join(data_dir, 'train')
val_path = os.path.join(data_dir, 'val')

# Standard training loader over the augmented train split.
train_dataset = datasets.ImageFolder(train_path, transform=transform_train)
train_loader = DataLoader(train_dataset,
                          batch_size=args.batch_size,
                          shuffle=True)

# A second view of the train split (custom ImageFolder) used for Hessian
# and influence computations.
train_infl = ImageFolder(train_path, transform=transform_train)
loader_hess = DataLoader(train_infl,
                         batch_size=args.batch_size,
                         shuffle=True,
                         num_workers=10)
# Influence loader walks samples one at a time, in a fixed order.
loader_infl = DataLoader(train_infl, batch_size=1, shuffle=False)

# Validation split, also one sample at a time.
val_dataset = ImageFolder(val_path, transform=transform_val)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

# Timestamp used to tag output files for this run.
file_time = time.strftime('%Y-%m-%d-%H:%M:%S')
예제 #21
0
# Seed every RNG involved so runs are reproducible.
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

# Pin a single visible GPU when running with exactly one.
if opt.ngpu == 1:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_id)
if opt.cuda:
    torch.cuda.manual_seed_all(opt.manualSeed)

cudnn.benchmark = True

# dataset: images converted to tensors and normalized per channel to [-1, 1]
image_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
dataset = ImageFolder(root=opt.dataroot, transform=image_transform)

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=opt.batchSize,
    shuffle=True,
    num_workers=int(opt.workers),
)

# model, loss and optimizer
netVAE = VAE()
criterion = CustomLoss(3e-6)
optimizer = optim.Adam(netVAE.parameters(), lr=0.0001, betas=(0.5, 0.999))

if opt.cuda:
    netVAE.cuda()
예제 #22
0
def make_dir(dir):
    """Create directory *dir*, silently ignoring filesystem errors.

    Most commonly the directory already exists (FileExistsError).
    NOTE: the parameter name shadows the ``dir`` builtin; kept as-is for
    backward compatibility with keyword callers.
    """
    try:
        os.mkdir(dir)
    except OSError:
        # Narrowed from a blanket ``except Exception: pass`` so that
        # genuine programming errors (e.g. TypeError) are not swallowed.
        pass


import numpy as np

if __name__ == '__main__':
    # Script entry point: iterate an image folder and save per-image
    # landmark outputs under config.save.
    data_root = config.dataset
    landmark_save_root = config.save
    # Path templates: the first '{}' is filled with a label/class directory,
    # the second with a filename.
    landmark_save_dir = landmark_save_root + '/{}'
    landmark_save_path = landmark_save_dir + '/{}'

    # No transform is applied; collate_fn is responsible for batching.
    # NOTE(review): collate_fn appears to yield iterables of
    # (image, label, fname) triples — confirm against its definition.
    dataset = ImageFolder(root=data_root)
    dataloader = data.DataLoader(dataset,
                                 batch_size=4,
                                 shuffle=False,
                                 num_workers=0,
                                 collate_fn=collate_fn,
                                 pin_memory=False)
    print('data loaded')
    print(len(dataset))

    make_dir(landmark_save_root)
    maked_label = {}
    for i, it in enumerate(dataloader):
        for image, label, fname in it:
            # Normalize the output extension to .png.
            # NOTE(review): the loop body appears truncated in this chunk —
            # the computed fname is never used in the visible code.
            fname = fname.split('.')[0] + '.png'
def get_dataset(args):
    """Return a (train_dataset, val_dataset) pair for args.dataset/args.split.

    Supported datasets: "cityscape" (train or val, with tracked-object
    bounding boxes), "ucf101" (val only) and "vimeo" (train and val).
    Whichever split is not requested/available is returned as None.
    Raises Exception for an unknown dataset name.

    NOTE(review): clip/coordinate files are hard-coded absolute paths on a
    specific machine — verify before running elsewhere.
    """
    ### explicitly set flip = True #######
    if args.dataset == "cityscape":
        # if 'Det' in args.frame_disc_model or 'Det' in args.video_disc_model or args.frame_det_disc or args.video_det_disc:
        clip_file = "/data/linz/proj/Dataset/Cityscape/load_files/int_{}_len_3_max_{}bb_area_3000_extra_panet_lsclip.pkl".format(
            int(args.interval), int(args.num_track_per_img))
        # if not args.track_gen and args.split == 'val':
        # 	clip_file = "/data/linz/proj/Dataset/Cityscape/load_files/int_{}_len_3_extra_lsclip.pkl".format(int(args.interval))
        obj_coord_file = "/data/linz/proj/Dataset/Cityscape/obj_coords/int_{}_len_3_extra_512x1024_max_{}bb_area_3000_panet_lsclip.pkl".format(
            int(args.interval), int(args.num_track_per_img))
        # Longer clip lists override the default when extrapolating more
        # than one frame.
        if args.syn_type == 'extra' and args.vid_length != 1:
            clip_file = "/data/linz/proj/Dataset/Cityscape/load_files/int_{}_len_{}_extra_lsclip.pkl".format(
                int(args.interval), args.vid_length + 2)
        if args.effec_flow:
            clip_file = "/data/linz/proj/Dataset/Cityscape/load_files/effec_flow_int_{}_len_3_extra_lsclip.pkl".format(
                int(args.interval))
        import pickle
        # Load only the split that was requested; the other stays unbound.
        with open(clip_file, 'rb') as f:
            load_f = pickle.load(f)
            if args.split == 'train':
                clips_train_file = load_f['train']
            elif args.split == 'val':
                clips_val_file = load_f['val']
        with open(obj_coord_file, 'rb') as f:
            load_f = pickle.load(f)
            if args.split == 'train':
                coords_train_file = load_f['train']
            if args.split == 'val':
                coords_val_file = load_f['val']
            # else:
            # 	coords_val_file = None

        crop_size = (args.input_h, args.input_w)
        if args.split == 'train':
            # train
            # Two transform pipelines (image and label map), applied in sync
            # by the dataset class.
            tfs = []
            tfs.append(
                transforms.Compose(
                    [  #transforms.Resize(re_size, interpolation=Image.BILINEAR),
                        transforms.RandomCrop(crop_size)
                    ]))
            # tfs.append(transforms.Compose([		transforms.Resize((150, 300), interpolation=Image.NEAREST),
            # 											transforms.RandomCrop((128, 256))	]))
            tfs.append(
                transforms.Compose(
                    [  #transforms.Resize((150, 300), interpolation=Image.NEAREST),
                        transforms.RandomCrop(crop_size)
                    ]))

            train_dataset = ImageFolder(args,
                                        clips_train_file,
                                        transform=tfs,
                                        bboxes=coords_train_file)
        else:
            train_dataset = None

        if args.split == 'val':
            # val — no random cropping, evaluation is deterministic.
            tfs = []
            tfs.append(
                transforms.Compose(
                    [  #transforms.Resize(crop_size, interpolation=Image.BILINEAR)
                    ]))
            tfs.append(
                transforms.Compose(
                    [  #transforms.Resize((128, 256), interpolation=Image.NEAREST)
                    ]))

            val_dataset = ImageFolder(args,
                                      clips_val_file,
                                      transform=tfs,
                                      bboxes=coords_val_file)
        else:
            val_dataset = None
    elif args.dataset == "ucf101":
        # UCF101 is evaluation-only here: no train split is produced.
        clip_file = "/data/linz/proj/Dataset/CyclicGen-master/UCF101_test_root_clip.pkl"
        with open(clip_file, 'rb') as f:
            import pickle
            load_f = pickle.load(f)
            clips_val_file = load_f['test']
        re_size = (256, 256)
        crop_size = (256, 256)
        train_dataset = None
        # val
        tfs = []
        tfs.append(
            transforms.Compose(
                [transforms.Resize(crop_size, interpolation=Image.BILINEAR)]))
        tfs.append(
            transforms.Compose(
                [transforms.Resize((256, 256), interpolation=Image.NEAREST)]))

        val_dataset = ImageFolder(args, clips_val_file, transform=tfs)
        # val_dataset = ImageFolder(args, clips_val_file,transform=None)
    elif args.dataset == 'vimeo':
        # Vimeo triplet lists are plain text, one clip path per line.
        clip_train_file = '/data/linz/proj/Dataset/vimeo_triplet/tri_trainlist.txt'
        clip_val_file = '/data/linz/proj/Dataset/vimeo_triplet/tri_testlist.txt'
        clips_file = {'train': [], 'val': []}
        with open(clip_train_file, 'r') as f:
            for line in f:
                line = line.strip()
                # Stop at the first blank/short line (end-of-list marker).
                if len(line) < 4:
                    break
                clips_file['train'].append(line)
        with open(clip_val_file, 'r') as f:
            for line in f:
                line = line.strip()
                if len(line) < 4:
                    break
                clips_file['val'].append(line)

        # crop_size = (128, 224)
        # train — both pipelines are currently identity transforms.
        tfs = []
        tfs.append(
            transforms.Compose(
                [  #transforms.Resize(re_size, interpolation=Image.BILINEAR),
                    # transforms.RandomCrop(crop_size)
                ]))
        # tfs.append(transforms.Compose([		transforms.Resize((150, 300), interpolation=Image.NEAREST),
        # 											transforms.RandomCrop((128, 256))	]))
        tfs.append(
            transforms.Compose(
                [  #transforms.Resize((150, 300), interpolation=Image.NEAREST),
                    # transforms.RandomCrop(crop_size)
                ]))

        train_dataset = ImageFolder(args, clips_file['train'], transform=tfs)

        # val
        tfs = []
        tfs.append(
            transforms.Compose(
                [  #transforms.Resize(crop_size, interpolation=Image.BILINEAR)
                ]))
        tfs.append(
            transforms.Compose(
                [  #transforms.Resize((128, 256), interpolation=Image.NEAREST)
                ]))

        val_dataset = ImageFolder(args, clips_file['val'], transform=tfs)

    else:
        raise Exception('Invalid dataset %s' % args.dataset)

    return train_dataset, val_dataset
예제 #24
0
def execute(execution_path, writer):
    """Run training and/or logit extraction for one experiment.

    Builds dataset-specific transforms, restricts the data to a random
    subset of "normal" classes, splits the train folder into per-class
    train/val samplers, then optionally trains (args.train) and optionally
    extracts logits (args.extract_logits) with the best checkpoint.

    Returns (best_val_acc1, best_train_acc1).
    """
    print(cudnn.benchmark)
    print(cudnn.deterministic)

    # Per-dataset class counts, default paths and normalization statistics.
    if args.dataset == "mnist":
        args.number_of_dataset_classes = 10
        dataset_path = args.dataset_dir if args.dataset_dir else "../datasets/mnist/images"
        normalize = transforms.Normalize(mean=[0.1307], std=[0.3081])
        train_transform = transforms.Compose(
            [transforms.ToTensor(), normalize])
        inference_transform = transforms.Compose([transforms.ToTensor(), normalize])
    elif args.dataset == "cifar10":
        args.number_of_dataset_classes = 10
        dataset_path = args.dataset_dir if args.dataset_dir else "../datasets/cifar10/images"
        normalize = transforms.Normalize(mean=[0.491, 0.482, 0.446], std=[0.247, 0.243, 0.261])
        train_transform = transforms.Compose(
            [transforms.RandomCrop(32, padding=4),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(), normalize])
        inference_transform = transforms.Compose([transforms.ToTensor(), normalize])
    elif args.dataset == "cifar100":
        args.number_of_dataset_classes = 100
        dataset_path = args.dataset_dir if args.dataset_dir else "../datasets/cifar100/images"
        normalize = transforms.Normalize(mean=[0.507, 0.486, 0.440], std=[0.267, 0.256, 0.276])
        train_transform = transforms.Compose(
            [transforms.RandomCrop(32, padding=4),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(), normalize])
        inference_transform = transforms.Compose([transforms.ToTensor(), normalize])
    else:
        args.number_of_dataset_classes = 1000
        dataset_path = args.dataset_dir if args.dataset_dir else "../datasets/imagenet2012/images"
        if args.arch.startswith('inception'):
            size = (299, 299)
        else:
            size = (224, 256)
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        # RandomResizedCrop/Resize replace the deprecated
        # RandomSizedCrop/Scale transforms.
        train_transform = transforms.Compose(
            [transforms.RandomResizedCrop(size[0]),  # 224 , 299
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(), normalize])
        inference_transform = transforms.Compose(
            [transforms.Resize(size[1]),  # 256
             transforms.CenterCrop(size[0]),  # 224 , 299
             transforms.ToTensor(), normalize])

    # Randomly pick the subset of classes treated as "normal" (in-set).
    args.normal_classes = sorted(random.sample(range(0, args.number_of_dataset_classes), args.number_of_model_classes))
    print("NORMAL CLASSES:\t", args.normal_classes)

    train_path = os.path.join(dataset_path, 'train')
    test_path = os.path.join(dataset_path, 'val')

    # Creating sets... Both views share the train folder; only transforms
    # differ. target_transform remaps original labels to 0..K-1.
    train_set = ImageFolder(train_path, transform=train_transform, selected_classes=args.normal_classes,
                            target_transform=args.normal_classes.index)
    val_set = ImageFolder(train_path, transform=inference_transform, selected_classes=args.normal_classes,
                          target_transform=args.normal_classes.index)

    # Preparing train and validation samplers...
    # NOTE(review): each pass over train_set loads every image; two passes
    # are made here purely to count/split labels — potentially slow.
    total_examples = {}
    for index in range(len(train_set)):
        _, label = train_set[index]
        if label not in total_examples:
            total_examples[label] = 1
        else:
            total_examples[label] += 1
    # Assign the first train_set_split fraction of each class to train,
    # the remainder to val.
    train_indexes = []
    val_indexes = []
    train_indexes_count = {}
    val_indexes_count = {}
    indexes_count = {}
    for index in range(len(train_set)):
        _, label = train_set[index]
        if label not in indexes_count:
            indexes_count[label] = 1
            train_indexes.append(index)
            train_indexes_count[label] = 1
            val_indexes_count[label] = 0
        else:
            indexes_count[label] += 1
            if indexes_count[label] <= int(total_examples[label] * args.train_set_split):
                train_indexes.append(index)
                train_indexes_count[label] += 1
            else:
                val_indexes.append(index)
                val_indexes_count[label] += 1
    print("TRAIN SET INDEXES TOTALS:", train_indexes_count)
    print("VALID SET INDEXES TOTALS:", val_indexes_count)
    train_sampler = SubsetRandomSampler(train_indexes)
    val_sampler = SubsetRandomSampler(val_indexes)

    # Create loaders...
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, num_workers=args.workers,
                                               pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, num_workers=args.workers,
                                             pin_memory=True, sampler=val_sampler)

    print("\n$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
    print("TRAINSET LOADER SIZE: ====>>>> ", len(train_loader.sampler))
    print("VALIDSET LOADER SIZE: ====>>>> ", len(val_loader.sampler))
    print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")

    # Dataset created...
    print("\nDATASET:", args.dataset)

    # create model
    model = create_model()
    print("\nMODEL:", model)

    best_train_acc1 = 0
    best_val_acc1 = 0

    if args.train:
        ###################
        # Training...
        ###################

        # define loss function (criterion)...
        criterion = nn.CrossEntropyLoss().cuda()

        # define optimizer...
        optimizer = torch.optim.SGD(model.parameters(), lr=args.original_learning_rate, momentum=args.momentum,
                                    weight_decay=args.weight_decay, nesterov=True)

        # define scheduler...
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=10, factor=0.2, verbose=True)

        print("\n################ TRAINING ################")
        best_model_file_path = os.path.join(execution_path, 'best_model.pth.tar')
        best_train_acc1, best_val_acc1 = train_val(train_loader, val_loader, model, criterion, optimizer,
                                                   scheduler, args.epochs, writer, best_model_file_path)

        # save to json file
        writer.export_scalars_to_json(os.path.join(execution_path, 'log.json'))

    if args.extract_logits:
        ######################
        # Extracting logits...
        ######################

        # Train dataset uses val transform to extract...
        train_set = ImageFolder(train_path, transform=inference_transform, selected_classes=args.normal_classes)
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, num_workers=args.workers,
                                                   pin_memory=True, sampler=train_sampler)

        val_set = ImageFolder(train_path, transform=inference_transform, selected_classes=args.normal_classes)
        val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, num_workers=args.workers,
                                                 pin_memory=True, sampler=val_sampler)

        # The held-out test folder uses all classes (open-set evaluation).
        test_set = ImageFolder(test_path, transform=inference_transform)
        test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, num_workers=args.workers,
                                                  pin_memory=True, shuffle=True)

        print("\n################ EXTRACTING LOGITS ################")
        best_model_file_path = os.path.join(execution_path, 'best_model.pth.tar')
        extract_logits_from_file(best_model_file_path, model, args.number_of_model_classes, execution_path,
                                 train_loader, val_loader, test_loader, "best_model")

    return best_val_acc1, best_train_acc1
예제 #25
0
            <h1>Results</h1>
            {}
            '''.format(full_filename, " ".join(html_imgs))
        return html
    else:
        html = '''
            <!doctype html>
            <title>Upload new File</title>
            <h1>Upload new File</h1>
            <form method=post enctype=multipart/form-data>
            <input type=file name=file>
            <input type=submit value=Upload>
            </form>
            '''
        return html

if __name__ == '__main__':

    # Index the local Flickr image folder.
    data = ImageFolder('./Flickr', transform=TRANSFORM)

    # Extra DataLoader options only help when running on CUDA.
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if CUDA else {}
    data_loader = torch.utils.data.DataLoader(data,
                                              batch_size=BATCH_SIZE,
                                              **loader_kwargs)

    # Build the search index, reusing cached embeddings when available.
    search_engine = SearchEngine(data,
                                 cuda=CUDA,
                                 threshold=THRESHOLD,
                                 save_directory=SAVE_DIRECTORY,
                                 transform=TRANSFORM)
    search_engine.fit(data_loader=data_loader,
                      load_embeddings=True,
                      verbose=True)

    # Serve the upload/search web UI.
    app.debug = False
    app.run(host=os.getenv('LISTEN', '0.0.0.0'),
            port=int(os.getenv('PORT', '80')))
예제 #26
0
파일: svm.py 프로젝트: sayano-lee/mmc_ocr
        pbar.update(1)
        im, ann, im_fns = data[0], data[1], data[2]
        im = im.cuda()
        feat = model(im)
        x = feat.cpu().numpy()
        y = ann.numpy()

        precision = precision + clf.score(x, y)
    pbar.close()

    print("\nAverage Precision is {}".format(precision / len(loader)))


if __name__ == '__main__':

    # Cropped text patches from ICDAR 2015.
    icdar_patches = "./data/icdar2015/patches"

    dataset = ImageFolder(root=icdar_patches, transform=transforms)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=20,
                                         shuffle=True)

    # Deep-feature extractor on GPU, frozen — used for inference only.
    model = Extractor().cuda()
    for weight in model.parameters():
        weight.requires_grad = False

    # vanilla svm
    clf = svm.SVC(kernel="rbf", gamma=10)

    train(loader=loader, model=model, clf=clf)
예제 #27
0
# Let cuDNN autotune kernels for fixed-size inputs.
cudnn.benchmark = True

# Warn when a GPU is present but the run was not started with --cuda.
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, "
          "so you should probably run with --cuda")
# dataset
if opt.dataset == 'imagenet':
    # folder dataset
    dataset = ImageFolder(root=opt.dataroot,
                          transform=transforms.Compose([
                              transforms.Scale(opt.imageSize),
                              transforms.CenterCrop(opt.imageSize),
                              transforms.ToTensor(),
                              transforms.Normalize((0.5, 0.5, 0.5),
                                                   (0.5, 0.5, 0.5)),
                          ]),
                          classes_idx=(10, 20))
elif opt.dataset == 'AWA':
    # folder dataset
    dataset = dset.ImageFolder(root=opt.dataroot,
                               transform=transforms.Compose([
                                   transforms.Scale(opt.imageSize),
                                   transforms.CenterCrop(opt.imageSize),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
elif opt.dataset == 'CUB':
예제 #28
0
    def extract(self, input_path, output_path):
        '''Extract features from every image under *input_path*, one at a time.

        mode 'pca':  accumulate per-image DeLF features and fit PCA on them.
        mode 'delf': compute full DeLF outputs and pickle them to *output_path*.
        '''
        assert self.mode.lower() in ['pca', 'delf']
        batch_timer = AverageMeter()
        data_timer = AverageMeter()
        since = time.time()

        def _usable(inputs, filename):
            # Shared size sanity checks (previously duplicated in both
            # branches): reject malformed inputs, images larger than
            # 1200x1200 pixels, or smaller than 112 on a side.
            if not (len(inputs.size()) == 4):
                if __DEBUG__:
                    # BUG FIX: was ``input.size()`` — the ``input`` builtin
                    # has no .size(), so the debug print itself crashed.
                    print('wrong input dimension! ({},{})'.format(filename, inputs.size()))
                return False
            if not (inputs.size(2) * inputs.size(3) <= 1200 * 1200):
                if __DEBUG__:
                    print('passed: image size too large! ({},{})'.format(filename, inputs.size()))
                return False
            if not (inputs.size(2) >= 112 and inputs.size(3) >= 112):
                if __DEBUG__:
                    print('passed: image size too small! ({},{})'.format(filename, inputs.size()))
                return False
            return True

        # dataloader: one image per batch, no worker processes.
        dataset = ImageFolder(
            root = input_path,
            transform = transforms.ToTensor())
        self.dataloader = torch.utils.data.DataLoader(
            dataset = dataset,
            batch_size = 1,
            shuffle = True,
            num_workers = 0)
        feature_maps = []
        if self.mode.lower() in ['pca']:
            bar = Bar('[{}]{}'.format(self.mode.upper(), self.title), max=len(self.dataloader))
            for batch_idx, (inputs, _, filename) in enumerate(self.dataloader):
                if not _usable(inputs, filename):
                    continue

                data_timer.update(time.time() - since)
                # prepare inputs
                if __is_cuda__():
                    inputs = __cuda__(inputs)
                inputs = __to_var__(inputs)

                # get delf feature only for pca calculation.
                pca_feature = self.__extract_delf_feature__(inputs.data, filename, mode='pca')
                if pca_feature is not None:
                    feature_maps.extend(pca_feature)

                batch_timer.update(time.time() - since)
                since = time.time()

                # progress
                log_msg  = ('\n[Extract][Processing:({batch}/{size})] '+ \
                            'eta: (data:{data:.3f}s),(batch:{bt:.3f}s),(total:{tt:})') \
                .format(
                    batch=batch_idx + 1,
                    size=len(self.dataloader),
                    data=data_timer.val,
                    bt=batch_timer.val,
                    tt=bar.elapsed_td)
                print(log_msg)
                bar.next()
                print('\nnumber of selected features so far: {}'.format(len(feature_maps)))
                if len(feature_maps) >= 10000000:        # UPPER LIMIT.
                    break

                # free GPU cache every 10 batches.
                if batch_idx % 10 == 0:
                    torch.cuda.empty_cache()
                    if __DEBUG__:
                        print('GPU Memory flushed !!!!!!!!!')

            # train PCA.
            self.pca(feature_maps)

        else:
            bar = Bar('[{}]{}'.format(self.mode.upper(), self.title), max=len(self.dataloader))
            assert self.mode.lower() in ['delf']
            feature_maps = []
            for batch_idx, (inputs, labels, filename) in enumerate(self.dataloader):
                if not _usable(inputs, filename):
                    continue

                data_timer.update(time.time() - since)
                # prepare inputs
                if __is_cuda__():
                    inputs = __cuda__(inputs)
                inputs = __to_var__(inputs)

                # get delf everything (score, feature, etc.)
                delf_feature = self.__extract_delf_feature__(inputs.data, filename, mode='delf')
                if delf_feature is not None:
                    feature_maps.append(delf_feature)

                # log.
                batch_timer.update(time.time() - since)
                since = time.time()
                log_msg  = ('\n[Extract][Processing:({batch}/{size})] '+ \
                            'eta: (data:{data:.3f}s),(batch:{bt:.3f}s),(total:{tt:})') \
                .format(
                    batch=batch_idx + 1,
                    size=len(self.dataloader),
                    data=data_timer.val,
                    bt=batch_timer.val,
                    tt=bar.elapsed_td)
                print(log_msg)
                bar.next()

                # free GPU cache every 10 batches.
                if batch_idx % 10 == 0:
                    torch.cuda.empty_cache()
                    if __DEBUG__:
                        print('GPU Memory flushed !!!!!!!!!')

            # use pickle to save DeLF features.
            self.__save_delf_features_to_file__(feature_maps, output_path)
예제 #29
0
    'val': transforms.Compose(transform_val_list),
    'satellite': transforms.Compose(transform_satellite_list)
}

# Suffix appended downstream when training on the full (un-split) data.
train_all = '_all' if opt.train_all else ''

# One dataset per view. Note: the 'drone' view deliberately reads the
# 'drone3' directory, and 'google' uses the custom ImageFolder rather
# than torchvision's datasets.ImageFolder.
image_datasets = {
    'satellite': datasets.ImageFolder(os.path.join(data_dir, 'satellite'),
                                      data_transforms['satellite']),
    'street': datasets.ImageFolder(os.path.join(data_dir, 'street'),
                                   data_transforms['train']),
    'drone': datasets.ImageFolder(os.path.join(data_dir, 'drone3'),
                                  data_transforms['train']),
    'google': ImageFolder(os.path.join(data_dir, 'google'),
                          data_transforms['train']),
}

# Build a shuffled loader and record the dataset size for every view.
dataloaders = {}
dataset_sizes = {}
for view in ['satellite', 'street', 'drone', 'google']:
    dataloaders[view] = torch.utils.data.DataLoader(
        image_datasets[view],
        batch_size=opt.batchsize,
        shuffle=True,
        num_workers=2,
        pin_memory=True)  # 8 workers may work faster
    dataset_sizes[view] = len(image_datasets[view])

# Class labels are shared across views; take them from the street view.
class_names = image_datasets['street'].classes
예제 #30
0
    # (review) Fragment: builds the evaluation transform pipeline and the
    # train/val (or test) datasets. The enclosing function starts above this
    # view and the final statement continues past it.
    testtransformList.append(transforms.Resize(transResize))
    testtransformList.append(transforms.CenterCrop(transCrop))
    testtransformList.append(transforms.ToTensor())
    testtransformList.append(normalize)
    testtransformSequence = transforms.Compose(testtransformList)

    # Confusion matrix accumulator, row-normalized.
    confusion_meter = tnt.meter.ConfusionMeter(args.n_class, normalized=True)

    if args.use_gpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(args.device)
        torch.cuda.manual_seed(args.random_seed)
        # DataLoader extras only meaningful with CUDA.
        kwargs = {'num_workers': 4, 'pin_memory': True}

    if args.is_train:

        train_dataset = ImageFolder(os.path.join(args.data_dir, 'train'),
                                    traintransformSequence)
        # NOTE(review): the validation set reuses the *training* transform
        # pipeline even though testtransformSequence was just built — confirm
        # this is intentional (train-time augmentation on validation data).
        val_dataset = ImageFolder(os.path.join(args.data_dir, 'valid'),
                                  traintransformSequence)
        # train_dataset, val_dataset = get_MNIST_train_val_dataset('./data/MNIST')
        # NOTE(review): 'kwargs' is only assigned inside the args.use_gpu
        # branch above; on a CPU-only run this **kwargs splat raises
        # NameError. Initialize kwargs = {} before the GPU branch to fix.
        train_loader, val_loader = get_train_val_loader(
            train_dataset,
            val_dataset,
            val_split=args.val_split,
            random_split=args.random_split,
            batch_size=args.batch_size,
            **kwargs)
        args.num_class = train_loader.dataset.num_class
        args.num_channels = train_loader.dataset.num_channels

    else:
        test_dataset = ImageFolder(os.path.join(args.data_dir, 'test'),