Example #1
    def __init__(self, args, data_subset, transform=None, sample_inds=None):
        BaseDataset.__init__(self, args, data_subset)
        if transform is None:
            transform = BasicImagenetTransform(self.size, data_subset)
        datasets.ImageFolder.__init__(self, os.path.join(self.args.imagenet_data_path, data_subset), transform)
        if sample_inds is not None:
            self.samples = [self.samples[ii] for ii in sorted(sample_inds)]
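A minimal usage sketch, assuming the class is called `ImagenetDataset` and that `args` carries `imagenet_data_path` plus whatever fields `BaseDataset` expects; the class name and the contents of `args` are assumptions, not part of the snippet:

    from argparse import Namespace

    # Hypothetical usage; `ImagenetDataset` and the exact fields of `args`
    # are assumptions, not part of the snippet above.
    args = Namespace(imagenet_data_path='/data/imagenet')
    dataset = ImagenetDataset(args, 'val', sample_inds=range(100))
    print(len(dataset.samples))  # 100 -- only the first 100 'val' images kept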
Example #2
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
        input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.transform = get_transform(opt, grayscale=(input_nc == 1))
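A sketch of the companion `__getitem__` such a single-direction dataset typically pairs with this constructor; assumes `from PIL import Image` is in scope:

    # Sketch of a matching __getitem__; loads one image, applies the
    # transform built in the constructor, and returns it with its path.
    def __getitem__(self, index):
        A_path = self.A_paths[index]
        A_img = Image.open(A_path).convert('RGB')
        A = self.transform(A_img)
        return {'A': A, 'A_paths': A_path}

    def __len__(self):
        return len(self.A_paths)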
Example #3
    def __init__(self, train_file):
        BaseDataset.__init__(self, train_file)
        # original documents used for training
        self.train_documents = []
        # vectorizers to apply
        self.vectorizer = TfidfVectorizer(min_df=1, ngram_range=(1, 1))
        # score functions for feature_selector

        # names of the extra features
        self.extra_features_names = ['0'] * len(self.EXTRA_FEATURES)
        # extra feature values; left as None because the number of instances
        # is not known in advance
        self.extra_features_values = None

        self._load()
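`_load` is not shown; a self-contained sketch of what fitting the TF-IDF vectorizer configured above produces, on illustrative data:

    # Standalone sketch with illustrative data; only scikit-learn's real
    # TfidfVectorizer API is used.
    from sklearn.feature_extraction.text import TfidfVectorizer

    docs = ['first training document', 'second training document']
    vectorizer = TfidfVectorizer(min_df=1, ngram_range=(1, 1))
    X = vectorizer.fit_transform(docs)  # sparse (n_docs, n_terms) matrix
    print(X.shape)                      # (2, 4)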
Example #4
    def __init__(self, opt):
        BaseDataset.__init__(self, opt)

        self.image_list = util.get_file_list(
            os.path.join(self.opt.data_dir, 'crop'))

        self.landmark_dict = self.load_landmark_dict()

        self.transforms_input = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5141, 0.4074, 0.3588],
                                 std=[1.0, 1.0, 1.0])
        ])

        self.transforms_gt = transforms.ToTensor()
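With `std` fixed at 1.0, the Normalize step only subtracts the per-channel mean. A hedged sketch of a matching `__getitem__`; the 'crop' to 'gt' path substitution and the basename landmark key are assumptions about the data layout:

    # Hypothetical __getitem__; the crop->gt path substitution and the
    # basename landmark key are assumptions, not taken from the snippet.
    def __getitem__(self, index):
        path = self.image_list[index]
        img = Image.open(path).convert('RGB')
        gt = Image.open(path.replace('crop', 'gt')).convert('RGB')
        return {'input': self.transforms_input(img),
                'gt': self.transforms_gt(gt),
                'landmarks': self.landmark_dict[os.path.basename(path)]}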
Example #5
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)

        self.dir_A = os.path.join(opt.dataroot, 'trainA')  # create a path '/path/to/data/trainA'
        self.dir_B = os.path.join(opt.dataroot, 'trainB')  # create a path '/path/to/data/trainB'

        if os.path.exists(self.dir_A) and os.path.exists(self.dir_B):
            self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
            self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
        else:
            self.A_paths, self.B_paths = [], []  # fail the size check below with a clear message instead of an AttributeError
        self.A_size = len(self.A_paths)  # get the size of dataset A
        self.B_size = len(self.B_paths)  # get the size of dataset B

        assert len(self.A_paths) == 1 and len(self.B_paths) == 1,\
            "SingleImageDataset class should be used with one image in each domain"
        A_img = Image.open(self.A_paths[0]).convert('RGB')
        B_img = Image.open(self.B_paths[0]).convert('RGB')
        print("Image sizes %s and %s" % (str(A_img.size), str(B_img.size)))

        self.A_img = A_img
        self.B_img = B_img

        # In single-image translation, we augment the data loader by applying
        # random scaling, but keep the amount of scaling the same within a
        # minibatch. To do this, we precompute the random scaling values and
        # repeat each of them |batch_size| times.
        A_zoom = 1 / self.opt.random_scale_max
        zoom_levels_A = np.random.uniform(A_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2))
        self.zoom_levels_A = np.reshape(np.tile(zoom_levels_A, (1, opt.batch_size, 1)), [-1, 2])

        B_zoom = 1 / self.opt.random_scale_max
        zoom_levels_B = np.random.uniform(B_zoom, 1.0, size=(len(self) // opt.batch_size + 1, 1, 2))
        self.zoom_levels_B = np.reshape(np.tile(zoom_levels_B, (1, opt.batch_size, 1)), [-1, 2])

        # While the crop locations are randomized, the negative samples should
        # not come from the same location. To ensure this, we precompute the
        # crop locations without repetition.
        self.patch_indices_A = list(range(len(self)))
        random.shuffle(self.patch_indices_A)
        self.patch_indices_B = list(range(len(self)))
        random.shuffle(self.patch_indices_B)
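A hedged sketch of how `__getitem__` might consume the precomputed values, so that consecutive indices within a minibatch share one scale per domain; the resize logic is an assumption, and only the attributes set above are taken from the snippet:

    # Hypothetical __getitem__ fragment; Image.BICUBIC and the rounding are
    # assumptions -- only zoom_levels_A / patch_indices_A come from above.
    def __getitem__(self, index):
        scale_w, scale_h = self.zoom_levels_A[index]
        w, h = self.A_img.size
        A = self.A_img.resize((int(w * scale_w), int(h * scale_h)), Image.BICUBIC)
        # a crop at self.patch_indices_A[index] would follow here
        return {'A': A}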
Example #6
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        A few things can be done here.
        - save the options (have been done in BaseDataset)
        - get image paths and meta information of the dataset.
        - define the image transformation.
        """
        # save the option and dataset root
        BaseDataset.__init__(self, opt)
        # get the image paths of your dataset; you can call
        # sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the
        # image paths under the directory self.root
        self.image_paths = []
        # define the default transform function. You can use
        # <base_dataset.get_transform>, or define your own custom transform.
        self.transform = get_transform(opt)
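A minimal concrete version of the template, following the inline suggestion to use make_dataset; the class name FlatFolderDataset is an assumption:

    # Hedged sketch: the template above made concrete. FlatFolderDataset is a
    # hypothetical name; make_dataset and get_transform come from the template.
    class FlatFolderDataset(BaseDataset):
        def __init__(self, opt):
            BaseDataset.__init__(self, opt)
            self.image_paths = sorted(make_dataset(self.root, opt.max_dataset_size))
            self.transform = get_transform(opt)

        def __len__(self):
            return len(self.image_paths)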
Example #7
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'
        self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')  # create a path '/path/to/data/trainB'

        if opt.phase == "test" and not os.path.exists(self.dir_A) \
           and os.path.exists(os.path.join(opt.dataroot, "valA")):
            self.dir_A = os.path.join(opt.dataroot, "valA")
            self.dir_B = os.path.join(opt.dataroot, "valB")

        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
        self.A_size = len(self.A_paths)  # get the size of dataset A
        self.B_size = len(self.B_paths)  # get the size of dataset B
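Because domains A and B may hold different numbers of images, unaligned datasets usually pair them by wrapping one index with a modulo (training code often randomizes the B index as well); a hedged sketch:

    # Hypothetical __getitem__ for unaligned pairs; the modulo wrap-around
    # keeps both indices in range when A_size != B_size.
    def __getitem__(self, index):
        A_path = self.A_paths[index % self.A_size]
        B_path = self.B_paths[index % self.B_size]
        return {'A_paths': A_path, 'B_paths': B_path}

    def __len__(self):
        return max(self.A_size, self.B_size)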
Example #8
    def __init__(self, opt, training):
        BaseDataset.__init__(self, opt, training)
        self.dirA = opt.dirA
        self.dirB = opt.dirB
        self.pathsA = file_utils.load_paths(self.dirA)
        self.pathsB = file_utils.load_paths(self.dirB)
Example #9
    def __init__(self, configuration):
        BaseDataset.__init__(self, configuration)
Example #10
    def __init__(self, opt, training):
        BaseDataset.__init__(self, opt, training)
        self.dir = opt.dir
        self.paths = file_utils.load_paths(self.dir)
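Examples #8 through #10 only wire up the input paths; at minimum a subclass still needs `__len__`. A hedged sketch for Example #10's attributes:

    # Minimal sketch; assumes the only state is self.paths from the
    # constructor above.
    def __len__(self):
        return len(self.paths)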