Example #1
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        # self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'
        self.dir_C = os.path.join(opt.dataroot, 'C/' + opt.phase)
        self.dir_E = os.path.join(opt.dataroot, 'E/' + opt.phase)
        self.dir_M = os.path.join(opt.dataroot, 'M/' + opt.phase)
        self.dir_B = os.path.join(opt.dataroot, 'B/' + opt.phase)  # create a path '/path/to/data/trainB'

        # self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))   # load images from '/path/to/data/trainA'
        self.C_paths = sorted(make_dataset(self.dir_C, opt.max_dataset_size))
        self.E_paths = sorted(make_dataset(self.dir_E, opt.max_dataset_size))
        self.M_paths = sorted(make_dataset(self.dir_M, opt.max_dataset_size))
        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
        # self.A_size = len(self.A_paths)  # get the size of dataset A
        self.C_size = len(self.C_paths)
        self.E_size = len(self.E_paths)
        self.M_size = len(self.M_paths)
        self.B_size = len(self.B_paths)  # get the size of dataset B
        btoA = self.opt.direction == 'BtoA'
        input_nc = self.opt.output_nc if btoA else self.opt.input_nc  # get the number of channels of input image
        output_nc = self.opt.input_nc if btoA else self.opt.output_nc  # get the number of channels of output image
        # self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
        self.transform_C = get_transform(self.opt, grayscale=False)
        self.transform_E = get_transform(self.opt, grayscale=True)
        self.transform_M = get_transform(self.opt, grayscale=True)
        self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
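For illustration, here is a minimal __getitem__ sketch matching the setup above; the modulo indexing, the PIL loading, and the returned dictionary keys are assumptions made for this sketch, not part of the original class:

    def __getitem__(self, index):
        """Hypothetical sketch: return one transformed image from each of the C/E/M/B folders."""
        from PIL import Image  # local import so the sketch stays self-contained
        # Wrap the index into each set's range, since the four folders may differ in size.
        C_path = self.C_paths[index % self.C_size]
        E_path = self.E_paths[index % self.E_size]
        M_path = self.M_paths[index % self.M_size]
        B_path = self.B_paths[index % self.B_size]
        C = self.transform_C(Image.open(C_path).convert('RGB'))
        E = self.transform_E(Image.open(E_path).convert('L'))
        M = self.transform_M(Image.open(M_path).convert('L'))
        B = self.transform_B(Image.open(B_path).convert('RGB'))
        return {'C': C, 'E': E, 'M': M, 'B': B,
                'C_paths': C_path, 'E_paths': E_path, 'M_paths': M_path, 'B_paths': B_path}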
Example #2
    def __init__(self, root, transform=None):
        super(ilab_sup_imgfolder, self).__init__()
        self.root = root
        self.transform = transform
        self.dir_Appe = os.path.join(self.root, 'appearance')  # appearance images live under '<root>/appearance'
        self.Appe_paths = make_dataset(self.dir_Appe)
        self.Appe_size = len(self.Appe_paths)
Example #3
    def __init__(self, root, transform=None):
        super(ilab_threeswap_imgfolder, self).__init__()
        self.root = root
        self.transform = transform
        self.paths = make_dataset(self.root)
        self.C_size = len(self.paths)
        self.id_dict = {}
        self.bg_dict = {}
        self.pose_dict = {}
        self.id_cnt = 0
        self.bg_cnt = 0
        self.pose_cnt = 0
        # Build label dictionaries by parsing filenames of the form
        # 'category-id-background-pose1-pose2-...'.
        for roots, dirs, files in os.walk('/home2/ilab2M_pose/train_img_c00_10class'):
            for file in files:
                category = file.split('-')[0]  # parsed but not used below
                id = file.split('-')[1]
                background = file.split('-')[2]
                pose = file.split('-')[3] + file.split('-')[4]
                if id not in self.id_dict:
                    self.id_dict[id] = self.id_cnt
                    self.id_cnt += 1
                if background not in self.bg_dict:
                    self.bg_dict[background] = self.bg_cnt
                    self.bg_cnt += 1
                if pose not in self.pose_dict:
                    self.pose_dict[pose] = self.pose_cnt
                    self.pose_cnt += 1
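A small hypothetical helper, shown only to illustrate how the dictionaries built above turn such a filename into integer labels; the method name and return value are not part of the original class:

    def labels_from_filename(self, file):
        """Hypothetical sketch: map one filename to (identity, background, pose) indices."""
        parts = file.split('-')
        return self.id_dict[parts[1]], self.bg_dict[parts[2]], self.pose_dict[parts[3] + parts[4]]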
Example #4
    def __init__(self, root, transform=None):
        super(ilab_threeswap_imgfolder, self).__init__()
        self.root = root
        self.transform = transform
        self.idf_dict = {}
        self.exp_dict = {}
        self.posef_dict = {}
        self.idf_cnt = 0
        self.exp_cnt = 0
        self.posef_cnt = 0
        # Build label dictionaries from underscore-separated filenames:
        # field 0 -> identity, field 1 -> pose, field 2 (before the extension) -> expression.
        for roots, dirs, files in os.walk('/home2/RaFD/train/data/'):
            for file in files:
                idf = file.split('_')[0]
                expression = file.split('_')[2].split('.')[0]
                posef = file.split('_')[1]
                if idf not in self.idf_dict:
                    self.idf_dict[idf] = self.idf_cnt
                    self.idf_cnt += 1
                if expression not in self.exp_dict:
                    self.exp_dict[expression] = self.exp_cnt
                    self.exp_cnt += 1
                if posef not in self.posef_dict:
                    self.posef_dict[posef] = self.posef_cnt
                    self.posef_cnt += 1
        print(root)
        self.paths = make_dataset(self.root)
        # Dump the resolved image paths to a file for debugging.
        file = open('debug.txt', 'w')
        file.write(str(self.paths))
        file.close()
        self.C_size = len(self.paths) - 1
        print(self.C_size)
Example #5
    def __init__(self, mode='Train'):
        super(SiameseDataset, self).__init__()
        self.mode = mode
        self.train_dir = './data/photo_parse_train/'
        self.test_dir = './data/photo_parse_test/'
        self.cari_dir = './data/caricature_parse/'

        self.train_path = make_dataset(self.train_dir)
        self.train_paths = sorted(self.train_path)
        self.train_dict = {}
        for img_name in self.train_paths:
            person_name = img_name.split('/')[-1]
            person_name = person_name[:-11]
            if person_name not in self.train_dict.keys():
                self.train_dict[person_name] = [img_name]
            else:
                self.train_dict[person_name].append(img_name)
        self.train_size = len(self.train_path)

        self.test_path = make_dataset(self.test_dir)
        self.test_paths = sorted(self.test_path)
        self.test_size = len(self.test_path)

        self.cari_path = make_dataset(self.cari_dir)
        self.cari_paths = sorted(self.cari_path)
        self.cari_dict = {}
        for img_name in self.cari_paths:
            person_name = img_name.split('/')[-1]
            person_name = person_name[:-11]
            if person_name not in self.cari_dict.keys():
                self.cari_dict[person_name] = [img_name]
            else:
                self.cari_dict[person_name].append(img_name)
        self.cari_size = len(self.cari_path)

        transform_list = []
        transform_list += [transforms.ToTensor()]
        self.transform = transforms.Compose(transform_list)
        self.target_transform = self.transform
        if self.mode == 'Val':
            self.train_size = self.test_size
            self.train_path = self.test_path
            self.train_paths = self.test_paths
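As a usage illustration, here is a hypothetical positive-pair sampler built on top of train_dict (person name mapped to that person's image paths); the method name, the use of random.choice, and the returned pair format are assumptions for this sketch, not part of SiameseDataset:

    def sample_positive_pair(self, person_name):
        """Hypothetical sketch: draw two images of the same person from train_dict."""
        import random          # local imports so the sketch stays self-contained
        from PIL import Image
        paths = self.train_dict[person_name]
        # The two draws may coincide when a person has only one image.
        path_a, path_b = random.choice(paths), random.choice(paths)
        img_a = self.transform(Image.open(path_a))
        img_b = self.transform(Image.open(path_b))
        return img_a, img_b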
Example #6
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))  # get image paths
        assert(self.opt.load_size >= self.opt.crop_size)   # crop_size should be smaller than the size of loaded image
        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
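For context, datasets initialized this way usually store each A/B pair side by side in a single image file and split it when an item is fetched. A hedged sketch of that split step follows; the variable names are chosen for illustration and the resize/crop/transform pipeline is omitted:

    def __getitem__(self, index):
        """Hypothetical sketch: load one AB image and split it into its A and B halves."""
        from PIL import Image  # local import so the sketch stays self-contained
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        w, h = AB.size
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h))   # left half: input image
        B = AB.crop((w2, 0, w, h))   # right half: target image
        return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}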
Example #7
    def __init__(self, args):

        self.args = args
        self.data_path = args.data_path

        self.dir_AB = os.path.join(args.data_path, args.mode)  # directory holding the images for the current split (args.mode)
        self.AB_paths = sorted(make_dataset(self.dir_AB))

        assert (args.resize_or_crop == 'resize_and_crop')

        transform_list = [
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]

        self.transform = transforms.Compose(transform_list)
Example #8
    def __init__(self, root, transform=None):
        super(ilab_unsup_imgfolder, self).__init__()
        self.root = root
        self.transform = transform
        self.paths = make_dataset(self.root)
        self.C_size = len(self.paths)
Example #9
    def __init__(self, dataroot, csv, batch_size=40):
        self.batchSize = batch_size
        self.root = dataroot
        self.frame = pd.read_csv(csv, header=None)
        self.image_paths = sorted(make_dataset(self.root))
        self.dataset_size = len(self.image_paths)
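Finally, a minimal iteration sketch over a loader set up like this one, yielding fixed-size slices of image_paths; the method name and the choice to drop a trailing partial batch are illustrative assumptions:

    def batches(self):
        """Hypothetical sketch: yield successive batches of image paths, batchSize at a time."""
        for start in range(0, self.dataset_size - self.batchSize + 1, self.batchSize):
            yield self.image_paths[start:start + self.batchSize]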