def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.data_root = params['data_root']
        self.im_shape = params['im_shape']
        self.split = params['split']
        self.mean_file = params['mean_file']

        # Get the list of image indexes.
        list_file = '../prepare_data/AFEW-VA/crop/{}_data.txt'.format(
            self.split)
        self.indexlist = [line.split(' ')[0] for line in open(list_file)]
        # Read the labels .mat file and assign to X
        mat_contents = scipy.io.loadmat(
            '../prepare_data/AFEW-VA/crop/{}_labels.mat'.format(self.split))
        self.X = np.zeros(mat_contents['{}_labels'.format(self.split)].shape,
                          dtype=np.float16)
        self.X[:, :] = mat_contents['{}_labels'.format(self.split)]

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer(self.mean_file)

        print("BatchLoader initialized with {} images".format(
            len(self.indexlist)))
Example 2
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.number_classes = params['number_classes']
        self.img_ext = params['img_ext']
        self.dataset_root = params['dataset_root']
        self.im_shape = params['im_shape']

        # get list of image indexes.
        list_file = params['split'] + '.csv'
        # Each line of the CSV is "<image index>,<label>"; read the file once
        # and split it into parallel index/label lists.
        with open(osp.join(self.dataset_root, list_file)) as f:
            rows = [line.rstrip('\n').split(',') for line in f]
        self.indexlist = [row[0] for row in rows]
        self.labellist = [row[1] for row in rows]

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print("BatchLoader initialized with {} images".format(
            len(self.indexlist)))
class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']
        self.im_mean = params['im_mean']
        # get list of image indexes.
        list_file = params['split'] + '.txt'
        self.indexlist = [line.rstrip('\n') for line in open(
            osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()
        self.transformer.set_mean(self.im_mean)

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        # Load an image
        index = self.indexlist[self._cur]  # Get the image index
        image_file_name = index + '.jpg'
        im = np.asarray(Image.open(
            osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize
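        # Note: scipy.misc.imresize was deprecated in SciPy 1.0 and removed in
        # 1.3; newer code would use PIL's Image.resize or skimage instead.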

        # do a simple horizontal flip as data augmentation
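        # np.random.choice(2)*2-1 yields -1 or +1; a column stride of -1
        # mirrors the image left-right, while +1 leaves it unchanged.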
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]

        # Load and prepare ground truth
        multilabel = np.zeros(20).astype(np.float32)
        anns = load_pascal_annotation(index, self.pascal_root)
        for label in anns['gt_classes']:
            # in the multilabel problem we don't care how MANY instances
            # there are of each class. Only if they are present.
            # The "-1" is b/c we are not interested in the background
            # class.
            multilabel[label - 1] = 1

        self._cur += 1
        return self.transformer.preprocess(im), multilabel
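
The docstring above mentions an asynchronous data layer that preloads batches. A minimal sketch of how such a consumer could drive this BatchLoader (the thread/queue wiring below is illustrative, not taken from any particular repo):

import threading
from queue import Queue

import numpy as np

def prefetch_batches(loader, batch_size, out_queue):
    """Keep out_queue topped up with (images, labels) batches."""
    while True:
        ims, labels = [], []
        for _ in range(batch_size):
            im, multilabel = loader.load_next_image()
            ims.append(im)
            labels.append(multilabel)
        out_queue.put((np.stack(ims), np.stack(labels)))

# batch_queue = Queue(maxsize=4)  # bounded, so the producer cannot run ahead
# worker = threading.Thread(target=prefetch_batches,
#                           args=(loader, 8, batch_queue), daemon=True)
# worker.start()
# images, labels = batch_queue.get()  # train on one batch while the next loads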
Example 4
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        # get list of image indexes
        self.indexlist = [
            line.rstrip('\n') for line in open(params['data'])
        ]

        self._cur = 0  # current image
        # this class does some simple data-manipulation
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))
Example 5
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.yelp_picture_root = params['yelp_picture_root']
        self.yelp_csv_root = params['yelp_csv_root']
        self.im_shape = params['im_shape']
        self.split = params['split']

        # get list of image indexes.
        if self.split in ["train", "validation"]:
            list_csv = self.yelp_csv_root + self.split + '_photo_to_biz_ids2.csv'
        else:
            list_csv = self.yelp_csv_root + self.split + '_photo_to_biz.csv'
        self.image_key = []
        with open(list_csv) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                self.image_key.append(row)

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.image_key))

        if self.split in ["train", "validation", "poster"]:
            attributes_csv = osp.join(self.yelp_csv_root, self.split + '2.csv')
            self.attributes_dict = {}

            if self.split == "poster":
                attributes_csv = osp.join(self.yelp_picture_root,
                                          self.split + '.csv')

            with open(attributes_csv) as csv_file:
                reader = csv.DictReader(csv_file)
                for row in reader:
                    attr_string = row["labels"]
                    self.attributes_dict[row["business_id"]] = [
                        int(label) for label in row["labels"].split()
                    ]

        if self.split == 'validation':
            lmdb_dir = self.yelp_picture_root + 'val_lmdb'
        else:
            lmdb_dir = self.yelp_picture_root + self.split + '_lmdb'
        lmdb_env = lmdb.open(lmdb_dir)
        self.lmdb_txn = lmdb_env.begin()
        self.lmdb_cursor = self.lmdb_txn.cursor()
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']
        # get list of image indexes.
        list_file = params['split'] + '.txt'
        self.indexlist = [line.rstrip('\n') for line in open(
            osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))
Example 7
def createPatch(x, y, label):
    # Relies on module-level globals: `im` (the source image),
    # `half_image_size`, and the helpers `savePatch` and `bgrMean`.
    print(x, y)
    patch = im[x - half_image_size:x + half_image_size,
               y - half_image_size:y + half_image_size]

    # Per-channel (B, G, R) means before preprocessing...
    print(np.mean(patch[:, :, 0]), np.mean(patch[:, :, 1]),
          np.mean(patch[:, :, 2]))
    st = SimpleTransformer()
    patch = st.preprocess(patch)  # no mean subtraction
    savePatch(patch, x, y, label)
    # ...and after, as a sanity check.
    print(np.mean(patch[:, :, 0]), np.mean(patch[:, :, 1]),
          np.mean(patch[:, :, 2]))
    bgrMean(np.mean(patch[:, :, 0]), np.mean(patch[:, :, 1]),
            np.mean(patch[:, :, 2]))

    return patch.tobytes()
Example 8
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.dataRoot = params['dataRoot']
        self.im_shape = params['im_shape']
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        # load cifar10 images into memory
        # 32 x 32 x 3 channel images
        imageLen = self.im_shape[0] * self.im_shape[1] * 3
        self.data = np.empty(shape=[0, imageLen])

        self.labelDense = []
        for i in range(1, 6):
            data = self.LoadCifar10(self.dataRoot + '/data_batch_%d' % i)
            self.data = np.concatenate((self.data, data['data']), axis=0)
            self.labelDense += data['labels']
        self.dataCount = self.data.shape[0]
        self.labelOneHot = self.DenseToOneHot(np.array(self.labelDense), 10)
        # Use a list so shuffle() can reorder it in place at epoch boundaries.
        self.indexList = list(range(self.dataCount))
Example 9
    def __init__(self, params, lexicon, channel):

        self.im_shape = params['im_shape']
        self.data_folder = params['data_folder']
        self.lexicon = lexicon
        self.channel = channel

        with open(osp.join(self.data_folder, params['split'] + '.txt')) as f:
            self.indexlist = [line.rstrip('\n').split(",") for line in f]

        self._cur = 0
        self.transformer = SimpleTransformer(channel)

        print("BatchLoader initialized with {} images".format(
            len(self.indexlist)))
class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']
        # get list of image indexes.
        list_file = params['split'] + '.txt'
        self.indexlist = [line.rstrip('\n') for line in open(
            osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        # Load an image
        index = self.indexlist[self._cur]  # Get the image index
        image_file_name = index + '.jpg'
        im = np.asarray(Image.open(
            osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]

        # Load and prepare ground truth
        multilabel = np.zeros(20).astype(np.float32)
        anns = load_pascal_annotation(index, self.pascal_root)
        for label in anns['gt_classes']:
            # in the multilabel problem we don't care how MANY instances
            # there are of each class. Only if they are present.
            # The "-1" is b/c we are not interested in the background
            # class.
            multilabel[label - 1] = 1

        self._cur += 1
        return self.transformer.preprocess(im), multilabel
Example 11
class BatchLoader(object):
    """
    This class abstracts away the loading of images.
    """
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.root_path = params['root_path']
        # get list of image indexes
        self.indexlist = [
            line.rstrip('\n') for line in open(params['data'])
        ]

        self._cur = 0  # current image
        # this class does some simple data-manipulation
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))

    def load_next_image(self):
        """
        Load the next image in a batch
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        # Load images
        index_line = self.indexlist[self._cur]  # Get the image index

        indexs = index_line.split()

        images = []

        #for index in indexs[:-1]:
        im = np.asarray(Image.open(self.root_path + indexs[0]))

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2) * 2 - 1
        im = im[:, ::flip, :]

        images.append(self.transformer.preprocess(im))

        # Load and prepare ground truth
        #label = np.zeros(self.gt_classes).astype(np.float32)
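        # indexs[-1] appears to hold a 1-based class id; shift to 0-based here.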
        label = int(indexs[-1]) - 1

        self._cur += 1
        return images, label
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.pascal_root = params['pascal_root']
        self.im_shape = params['im_shape']
        # get list of image indexes.
        list_file = params['split'] + '.txt'
        self.indexlist = [line.rstrip('\n') for line in open(
            osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        # e.g. '/home/zf/caffe/multible_channel_identification/data/VIPeR'
        self.viper_root = params['viper_root']
        self.im_shape_h = params['im_shape'][0]
        self.im_shape_w = params['im_shape'][1]
        # get list of image indexes.
        list_file_a = params['split'][0] + '.txt'  # split: cam_a or cam_b
        list_file_b = params['split'][1] + '.txt'
        self.indexlist_a = [
            line.rstrip('\n\r')
            for line in open(osp.join(params['viper_root'][0], list_file_a))
        ]  # indexes 0000 through 0631
        self.indexlist_b = [
            line.rstrip('\n\r')
            for line in open(osp.join(params['viper_root'][1], list_file_b))
        ]
        self.i_a = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print("BatchLoader initialized with {} images".format(
            len(self.indexlist_a)))  # 632 images
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.yelp_picture_root = params['yelp_picture_root']
        self.yelp_csv_root = params['yelp_csv_root']
        self.im_shape = params['im_shape']
        self.split = params['split']

        # get list of image indexes.
        if self.split in ["train", "validation"]:
            list_csv = self.yelp_csv_root + self.split + '_photo_to_biz_ids2.csv'
        else:
            list_csv = self.yelp_csv_root + self.split + '_photo_to_biz.csv'
        self.image_key = []
        with open(list_csv) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                self.image_key.append(row)

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print("BatchLoader initialized with {} images".format(
            len(self.image_key)))

        if self.split in ["train", "validation", "poster"]:
            attributes_csv = osp.join(self.yelp_csv_root, self.split + '2.csv')
            self.attributes_dict = {}

            if self.split == "poster":
                attributes_csv = osp.join(self.yelp_picture_root,
                                          self.split + '.csv')

            with open(attributes_csv) as csv_file:
                reader = csv.DictReader(csv_file)
                for row in reader:
                    attr_string = row["labels"]
                    self.attributes_dict[row["business_id"]] = [
                        int(label) for label in row["labels"].split()
                    ]

        if self.split == 'validation':
            lmdb_dir = self.yelp_picture_root + 'val_lmdb'
        else:
            lmdb_dir = self.yelp_picture_root + self.split + '_lmdb'
        lmdb_env = lmdb.open(lmdb_dir)
        self.lmdb_txn = lmdb_env.begin()
        self.lmdb_cursor = self.lmdb_txn.cursor()
class BatchLoader(object):
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.data_root = params['data_root']
        self.im_shape = params['im_shape']
        self.split = params['split']
        self.mean_file = params['mean_file']

        # Get the list of image indexes.
        list_file = '../prepare_data/AFEW-VA/crop/{}_data.txt'.format(
            self.split)
        self.indexlist = [line.split(' ')[0] for line in open(list_file)]
        # Read the labels .mat file and assign to X
        mat_contents = scipy.io.loadmat(
            '../prepare_data/AFEW-VA/crop/{}_labels.mat'.format(self.split))
        self.X = np.zeros(mat_contents['{}_labels'.format(self.split)].shape,
                          dtype=np.float16)
        self.X[:, :] = mat_contents['{}_labels'.format(self.split)]

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer(self.mean_file)

        print("BatchLoader initialized with {} images".format(
            len(self.indexlist)))

    def load_next_image(self):
        if self._cur == len(self.indexlist):
            self._cur = 0
            # shuffle(self.indexlist)

        # Load an image
        image_file_name = self.indexlist[self._cur]  # Get the image index
        im = np.asarray(Image.open(osp.join(self.data_root, image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2) * 2 - 1
        im = im[:, ::flip, :]

        multilabel = np.asarray(self.X[self._cur], np.float16)

        self._cur += 1
        return self.transformer.preprocess(im), multilabel
Example 16
class BatchLoader(object):
    def __init__(self, params, lexicon, channel):

        self.im_shape = params['im_shape']
        self.data_folder = params['data_folder']
        self.lexicon = lexicon
        self.channel = channel

        with open(osp.join(self.data_folder, params['split'] + '.txt')) as f:
            self.indexlist = [line.rstrip('\n').split(",") for line in f]

        self._cur = 0
        self.transformer = SimpleTransformer(channel)

        print("BatchLoader initialized with {} images".format(
            len(self.indexlist)))

    def load_next_image(self):

        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        img_annotations = [
            self.indexlist[self._cur][i]
            for i in range(1, len(self.indexlist[self._cur]))
        ]
        img_path = self.indexlist[self._cur][0]

        im = scipy.misc.imread(img_path, flatten=(self.channel == 1))
        im = scipy.misc.imresize(im, self.im_shape)

        # Allocate outside the try so the return below cannot hit a NameError.
        multilabel = np.zeros(self.lexicon.getNumLabel())
        try:
            for a in img_annotations:
                multilabel[int(a)] = 1.0
        except (ValueError, IndexError):
            print("load error!!", self.indexlist[self._cur])

        self._cur += 1
        return self.transformer.preprocess(im), multilabel
Example 17
    def __init__(self, params):
        self._image_dir = params['image_dir']
        self._gt_dir = params['gt_dir']

        self._file_list = params['file_list']
        self._image_ext = params['image_ext']

        self._nclasses = params['nclasses']
        assert self._nclasses >= 1

        self._batch_size = params['batch_size']
        assert self._batch_size >= 1

        self._bgr_mean = np.array(params['bgr_mean'])

        # If 'im_size' is not supplied, _im_size stays None and the batch size is forced to 1
        if 'im_size' in params:
            self._im_size = np.array(params['im_size'])
        else:
            self._im_size = None
            self._batch_size = 1

        if 'mirror' in params:
            self._mirror = params['mirror'] == 1
        else:
            self._mirror = False

        if 'random_seed' in params:
            rd.seed(params['random_seed'])

        # Read the file list
        fid = open(self._file_list, 'r')
        self._list = [f.strip() for f in fid]
        rd.shuffle(self._list)

        self._transformer = SimpleTransformer(mean=self._bgr_mean)
        self._idx = 0
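
For reference, a hypothetical params dict for this loader; the keys are exactly the ones the __init__ above reads, while every value is made up for illustration:

params = {
    'image_dir': 'data/images',          # illustrative paths
    'gt_dir': 'data/ground_truth',
    'file_list': 'data/train.txt',
    'image_ext': '.jpg',
    'nclasses': 21,
    'batch_size': 4,
    'bgr_mean': [104.0, 117.0, 123.0],
    'im_size': [321, 321],  # optional; when absent, batch_size is forced to 1
    'mirror': 1,            # optional; 1 enables random horizontal flips
    'random_seed': 42,      # optional
}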
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.data_root = params['data_root']
        self.im_shape = params['im_shape']
        self.n_classes = params['n_classes']
        # get list of image indexes.
        list_file = params['split'] + '.json'

        # indexlist holds every photo_id of the Yelp dataset split.
        with open(osp.join(self.data_root, list_file)) as f:
            self.indexlist = [d['photo_id'] for d in json.load(f)]
        self._cur = 0  # current image

        # Get the corresponding attributes for the image file
        # self.attrs = self.load_yelp_attributes(self.data_root, self.n_classes)
        self.load_yelp_attributes(self.data_root, self.n_classes)

        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))
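
From the comprehension above, the JSON split file is assumed to be an array of objects that each carry a 'photo_id' key; an illustrative (invented) fragment:

# train.json -- values invented for illustration; records may carry more keys
# [{"photo_id": "204149"}, {"photo_id": "32719"}, {"photo_id": "87342"}]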
Example 19
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.anno_list = params['anno_list']
        self.im_shape = params['im_shape']
        self.attribute_list_path = params['attribute_list_path']
        self.memimages = bool(params.get('memimages', 0))
        self.img_transform = params.get('img_transform', 'resize')
        self.ynorm = bool(params.get('ynorm', 0))   # y := y / ||y||_1

        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer(mean=[104, 117, 123])
        self.dictlist = []  # Annotation objects (image + labels) will be stored as dict here

        # Load Attributes ----------------------------------------------------------------------------------------------
        self.attr_id_to_name, self.attr_id_to_idx = load_attributes(self.attribute_list_path)
        self.idx_to_attr_id = {v: k for k, v in self.attr_id_to_idx.items()}
        self.attr_id_list = list(self.attr_id_to_idx.keys())
        self.n_attr = len(self.attr_id_list)

        # Load Data ----------------------------------------------------------------------------------------------------
        # Store the list of annotation files as indexlist
        self.indexlist = [osp.join(DS_ROOT, line.rstrip('\n')) for line in open(self.anno_list)]

        if self.memimages:
            print "Loading images into memory"
        print "Loading {} annotations".format(len(self.indexlist))

        # Store each image-label object as a dict
        # But, do not store the images. Only store the image file path
        self.dictlist = [json.load(open(filename)) for filename in self.indexlist]
        shuffle(self.dictlist)
        self._cur = 0  # current image

        # Add additional information to each dict
        for idx, this_anno in enumerate(self.dictlist):
            # Prepare the multilabel
            # Get the list of attributes this corresponds to
            attr_set = set(this_anno['labels'])
            multilabel = labels_to_vec(attr_set, self.attr_id_to_idx)
            if self.ynorm and np.sum(multilabel) > 0:
                multilabel /= np.sum(multilabel)
            assert np.sum(multilabel) > 0, 'Failed: np.sum(multilabel) > 0'
            this_anno['label_vec'] = multilabel

            this_anno['image_path'] = osp.join(DS_ROOT, this_anno['image_path'])
            # Images can sometimes be huge (>5mb), which makes loading data extremely slow
            # So, resize and stash them to enable quick loading
            image_resized_path = this_anno['image_path'].replace('/images/', '/images_250/')
            if os.path.exists(image_resized_path):
                this_anno['image_path'] = image_resized_path

            # To make training even faster, load the images into memory before it begins
            if self.memimages:
                im = imread(this_anno['image_path'])
                if len(im.shape) == 2:
                    # This is a grayscale image
                    im = np.asarray(Image.open(this_anno['image_path']).convert('RGB'))
                elif len(im.shape) == 3 and im.shape[2] == 4:
                    # CMYK Image
                    im = np.asarray(Image.open(this_anno['image_path']).convert('RGB'))

                if self.img_transform == 'resize':
                    # Resize the image to the required shape
                    im = scipy.misc.imresize(im, self.im_shape)

                this_anno['im'] = im

                if idx % 100 == 0:
                    sys.stdout.write("processing %d/%d (%.2f%% done)   \r" % (
                    idx, len(self.dictlist), idx * 100.0 / len(self.dictlist)))
                    sys.stdout.flush()

        print "BatchLoader initialized with {} images".format(len(self.indexlist))
Example 20
class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.anno_list = params['anno_list']
        self.im_shape = params['im_shape']
        self.attribute_list_path = params['attribute_list_path']
        self.memimages = bool(params.get('memimages', 0))
        self.img_transform = params.get('img_transform', 'resize')
        self.ynorm = bool(params.get('ynorm', 0))   # y := y / ||y||_1

        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer(mean=[104, 117, 123])
        self.dictlist = []  # Annotation objects (image + labels) will be stored as dict here

        # Load Attributes ----------------------------------------------------------------------------------------------
        self.attr_id_to_name, self.attr_id_to_idx = load_attributes(self.attribute_list_path)
        self.idx_to_attr_id = {v: k for k, v in self.attr_id_to_idx.items()}
        self.attr_id_list = list(self.attr_id_to_idx.keys())
        self.n_attr = len(self.attr_id_list)

        # Load Data ----------------------------------------------------------------------------------------------------
        # Store the list of annotation files as indexlist
        self.indexlist = [osp.join(DS_ROOT, line.rstrip('\n')) for line in open(self.anno_list)]

        if self.memimages:
            print "Loading images into memory"
        print "Loading {} annotations".format(len(self.indexlist))

        # Store each image-label object as a dict
        # But, do not store the images. Only store the image file path
        self.dictlist = [json.load(open(filename)) for filename in self.indexlist]
        shuffle(self.dictlist)
        self._cur = 0  # current image

        # Add additional information to each dict
        for idx, this_anno in enumerate(self.dictlist):
            # Prepare the multilabel
            # Get the list of attributes this corresponds to
            attr_set = set(this_anno['labels'])
            multilabel = labels_to_vec(attr_set, self.attr_id_to_idx)
            if self.ynorm and np.sum(multilabel) > 0:
                multilabel /= np.sum(multilabel)
            assert np.sum(multilabel) > 0, 'Failed: np.sum(multilabel) > 0'
            this_anno['label_vec'] = multilabel

            this_anno['image_path'] = osp.join(DS_ROOT, this_anno['image_path'])
            # Images can sometimes be huge (>5mb), which makes loading data extremely slow
            # So, resize and stash them to enable quick loading
            image_resized_path = this_anno['image_path'].replace('/images/', '/images_250/')
            if os.path.exists(image_resized_path):
                this_anno['image_path'] = image_resized_path

            # To make training even faster, load the images into memory before it begins
            if self.memimages:
                im = imread(this_anno['image_path'])
                if len(im.shape) == 2:
                    # This is a grayscale image
                    im = np.asarray(Image.open(this_anno['image_path']).convert('RGB'))
                elif len(im.shape) == 3 and im.shape[2] == 4:
                    # CMYK Image
                    im = np.asarray(Image.open(this_anno['image_path']).convert('RGB'))

                if self.img_transform == 'resize':
                    # Resize the image to the required shape
                    im = scipy.misc.imresize(im, self.im_shape)

                this_anno['im'] = im

                if idx % 100 == 0:
                    sys.stdout.write("processing %d/%d (%.2f%% done)   \r" % (
                    idx, len(self.dictlist), idx * 100.0 / len(self.dictlist)))
                    sys.stdout.flush()

        print "BatchLoader initialized with {} images".format(len(self.indexlist))

    def load_next_image(self):
        """
        Load the next image in a batch.
        """

        # Sample image -------------------------------------------------------------------------------------------------
        # Did we finish an epoch?
        if self._cur == len(self.dictlist):
            self._cur = 0
            next_idx = self._cur
            shuffle(self.dictlist)
        else:
            next_idx = self._cur
            self._cur += 1

        dct = self.dictlist[next_idx]  # Get the anno

        # Load image ---------------------------------------------------------------------------------------------------
        image_path = dct['image_path']
        multilabel = dct['label_vec']

        # Load an image
        if 'im' in dct:  # Images can be preloaded before training with flag memimages
            im = dct['im']
        else:
            im = imread(image_path)
            if len(im.shape) == 2:
                # This is a grayscale image
                im = np.asarray(Image.open(image_path).convert('RGB'))
            elif len(im.shape) == 3 and im.shape[2] == 4:
                # CMYK Image
                im = np.asarray(Image.open(image_path).convert('RGB'))
        org_shape = im.shape

        # Resize/Transform image ---------------------------------------------------------------------------------------
        if self.img_transform == 'resize':
            # Resize the image to the required shape
            im = scipy.misc.imresize(im, self.im_shape)
        elif self.img_transform == 'rand_crop':
            # Take a random crop of size self.im_shape
            # im.shape = [H, W, 3]
            img_h, img_w, _ = im.shape
            crop_h, crop_w = self.im_shape

            if img_w < crop_w:
                new_w = crop_w
                new_h = int(np.round(img_h * (new_w / float(img_w))))   # Scale height to same aspect ratio
                im = scipy.misc.imresize(im, (new_h, new_w))
                img_w, img_h = new_w, new_h
                # print 'New (w, h): ', (img_w, img_h)

            if img_h < crop_h:
                new_h = crop_h
                new_w = int(np.round(img_w * (new_h / float(img_h))))
                im = scipy.misc.imresize(im, (new_h, new_w))
                img_w, img_h = new_w, new_h

            # Sample (x1, y1), i.e. the top-left corner of the crop window.
            # randint's high is exclusive; +1 keeps this valid when the image
            # exactly matches the crop size.
            x1 = np.random.randint(low=0, high=(img_h - crop_h + 1))
            y1 = np.random.randint(low=0, high=(img_w - crop_w + 1))
            # Crop a window given this point
            x2 = x1 + crop_h
            y2 = y1 + crop_w
            im = im[x1:x2, y1:y2, :]

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]

        transformed_im = self.transformer.preprocess(im)

        return transformed_im, multilabel
Example 21
class BatchLoader(object):
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.yelp_picture_root = params['yelp_picture_root']
        self.yelp_csv_root = params['yelp_csv_root']
        self.im_shape = params['im_shape']
        self.split = params['split']

        # get list of image indexes.
        if self.split in ["train", "validation"]:
            list_csv = self.yelp_csv_root + self.split + '_photo_to_biz_ids2.csv'
        else:
            list_csv = self.yelp_csv_root + self.split + '_photo_to_biz.csv'
        self.image_key = []
        with open(list_csv) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                self.image_key.append(row)

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.image_key))

        if self.split in ["train", "validation", "poster"]:
            attributes_csv = osp.join(self.yelp_csv_root, self.split + '2.csv')
            self.attributes_dict = {}

            if self.split == "poster":
                attributes_csv = osp.join(self.yelp_picture_root,
                                          self.split + '.csv')

            with open(attributes_csv) as csv_file:
                reader = csv.DictReader(csv_file)
                for row in reader:
                    attr_string = row["labels"]
                    self.attributes_dict[row["business_id"]] = [
                        int(label) for label in row["labels"].split()
                    ]

        if self.split == 'validation':
            lmdb_dir = self.yelp_picture_root + 'val_lmdb'
        else:
            lmdb_dir = self.yelp_picture_root + self.split + '_lmdb'
        lmdb_env = lmdb.open(lmdb_dir)
        self.lmdb_txn = lmdb_env.begin()
        self.lmdb_cursor = self.lmdb_txn.cursor()

    def load_next_image(self):
        """
        Load the next image in a batch.
	"""
        # Did we finish an epoch?
        if self._cur == len(self.image_key):
            self._cur = 0
        if not self.lmdb_cursor.next():
            self.lmdb_cursor = self.lmdb_txn.cursor()
            self.lmdb_cursor.next()

        business_id = self.image_key[self._cur]["business_id"]
        photo_id = self.image_key[self._cur]["photo_id"]  # Get the image index

        # Load and prepare ground truth
        multilabel = np.zeros(9).astype(np.float32)
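        # (the Yelp photo classification task defines 9 binary business
        # attributes, hence the length-9 vector above)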
        if self.split in ["train", "validation", "poster"]:
            anns = self.load_yelp_attributes(business_id)
            for label in anns:
                # convert label information to a 1/0 array.
                multilabel[label] = 1

        self._cur += 1
        datum = caffe.proto.caffe_pb2.Datum()
        key, value = self.lmdb_cursor.item()

        datum.ParseFromString(value)
        label = datum.label
        # TODO(prad): Add a check for test as well.
        if self.split not in ('test', 'poster') and str(label) != business_id:
            print("Houston, we have a problem." + str(label) + ":" +
                  str(business_id))

        data = caffe.io.datum_to_array(datum)
        im = scipy.misc.imresize(data, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2) * 2 - 1
        im = im[:, ::flip, :]
        # The datum is already BGR, and the transformer below performs its own
        # BGR swap, so convert back to RGB here to avoid a double conversion.
        im = im[:, :, ::-1]

        return self.transformer.preprocess(im), multilabel, photo_id, label

    def load_yelp_attributes(self, business_id, mapping=None):
        return self.attributes_dict[business_id]
Example 22
class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.dataRoot = params['dataRoot']
        self.im_shape = params['im_shape']
        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        # load cifar10 images into memory
        # 32 x 32 x 3 channel images
        imageLen = self.im_shape[0] * self.im_shape[1] * 3
        self.data = np.empty(shape=[0, imageLen])

        self.labelDense = []
        for i in range(1, 6):
            data = self.LoadCifar10(self.dataRoot + '/data_batch_%d' % i)
            self.data = np.concatenate((self.data, data['data']), axis=0)
            self.labelDense += data['labels']
        self.dataCount = self.data.shape[0]
        self.labelOneHot = self.DenseToOneHot(np.array(self.labelDense), 10)
        # Use a list so shuffle() can reorder it in place at epoch boundaries.
        self.indexList = list(range(self.dataCount))

        # print 'Batchloader initialized with %d images' % (self.dataCount)
        # print self.indexList

    # label_dense : m x 1
    # output : [m x num_classes] one hot
    def DenseToOneHot(self, labels_dense, num_classes):
        """Convert class labels from scalars to one-hot vectors."""
        labelOneHot = np.zeros((labels_dense.shape[0], num_classes))
        labelOneHot[np.arange(labels_dense.shape[0]), np.int8(labels_dense)] = 1
        return np.int8(labelOneHot)
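
    # A minimal usage sketch (illustrative values):
    #   DenseToOneHot(np.array([0, 2, 1]), num_classes=3)
    #   -> [[1, 0, 0],
    #       [0, 0, 1],
    #       [0, 1, 0]]   (dtype int8)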

    def LoadCifar10(self, filename):
        import pickle
        with open(filename, 'rb') as fo:
            # CIFAR-10 batches are Python 2 pickles; encoding='latin1' keeps
            # the dict keys as str ('data', 'labels').
            batch = pickle.load(fo, encoding='latin1')
        return batch

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == self.dataCount:

            self._cur = 0
            shuffle(self.indexList)

        index = self.indexList[self._cur]  # Get the image index
        # Rows are stored channel-first (C x H x W); reshape, then move the
        # channel axis last to get an H x W x C image.
        im = self.data[index, :].reshape(
            self.im_shape[2], self.im_shape[0],
            self.im_shape[1]).transpose([1, 2, 0])

        # plt.ioff()
        # plt.imshow(im)
        # plt.title(self.labelDense[index])
        # plt.show()

        # im = scipy.misc.imresize(im, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        # flip = np.random.choice(2)*2-1
        # im = im[:, ::flip, :]

        # Load and prepare ground truth
        # multilabel = np.zeros(20).astype(np.float32)

        self._cur += 1
        # return self.transformer.preprocess(im), self.labelOneHot[index,:]

        return self.transformer.preprocess(im), self.labelDense[index]
Example 23
class BatchLoader(object):
    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.anno_list = params['anno_list']
        self.im_shape = params['im_shape']
        self.label_shortlist_path = params['label_shortlist']
        self.num_labels = params.get('nlabels', NUM_LABELS)
        self.memimages = bool(params.get('memimages', 0))
        self.img_transform = params.get('img_transform', 'resize')
        self.ynorm = bool(params.get('ynorm', 0))  # y := y / ||y||_1
        self.wloss = bool(params.get('wloss', 0))
        self.user_prefs = params.get('user_prefs', None)
        self.scale_user_pref = bool(params.get('scale_user_pref', 0))

        # Possible options:
        # 'uniform' (Default) : Sample uniformly
        # 'weighted': Sample a weight uniformly (usually between 1-5), then sample an example from one of these.
        self.sampling = params.get('sampling', 'uniform')

        if self.label_shortlist_path is not None:
            self.attr_id_to_idx = dict()
            self.attr_id_to_weight = dict()
            with open(self.label_shortlist_path, 'r') as f:
                f.readline()  # Skip header line
                for line in f:
                    idx, attr_id, count, weight = line.strip().split('\t')
                    idx = int(idx)
                    count = int(count)
                    weight = float(weight)
                    self.attr_id_to_idx[attr_id] = idx
                    self.attr_id_to_weight[attr_id] = weight
        else:
            assert False, "Not Supported"
            # self.attr_id_to_idx = load_attr_to_idx()

        self.attr_id_list = list(self.attr_id_to_idx.keys())
        self.n_attr = len(self.attr_id_list)

        self.user_mat = None
        if self.user_prefs is not None:
            '''
            This is a file of the format:
            <attribute_id>  <attribute_name>    <score_1>   <score_2> ... <score_U>
            where U = # of users and score_u indicates how important this
            attribute is to user u.
            '''
            with open(self.user_prefs) as uf:
                uf.readline()  # Skip header line
                pref_dct = {}  # Store mapping: attr_id -> [..., score_i, ...]
                for line in uf:
                    if line.strip() == '':
                        continue
                    tokens = line.strip().split('\t')
                    attr_id = tokens[0]
                    attr_name = tokens[1]
                    scores = [float(s) for s in tokens[2:]]
                    if attr_id in self.attr_id_to_idx:
                        pref_dct[attr_id] = scores

                # Check n_users is consistent
                n_users = len(next(iter(pref_dct.values())))
                assert all([n_users == len(x) for x in pref_dct.values()
                            ]), Counter([len(x) for x in pref_dct.values()])

                # Manually fill-in safe
                pref_dct[SAFE_ATTR_ID] = np.ones(n_users) * SAFE_WEIGHT
                # Make sure we have preferences for all attributes that we need
                assert all([
                    pref_attr_id in self.attr_id_to_idx
                    for pref_attr_id in pref_dct.keys()
                ])

                # Represent as a matrix: A x U
                # Where col_j represents attribute preferences for user j
                n_attr = len(self.attr_id_to_idx)
                self.user_mat = np.zeros((n_attr, n_users))
                for attr_id, idx in self.attr_id_to_idx.items():
                    attr_scores = pref_dct[attr_id]
                    self.user_mat[idx] = attr_scores

            print('User preferences: ')
            print(self.user_mat)
            print('user_mat.shape = ', self.user_mat.shape)

            # Normalize user_mat
            if self.scale_user_pref:
                self.user_mat -= 2.5  # Assuming mean of scores = 2.5, so scale to [-2.5, 2.5]
                self.user_mat /= 2.5  # Scale to [-1, 1]

        # Store the list of annotation files as indexlist
        self.indexlist = [line.rstrip('\n') for line in open(self.anno_list)]

        if self.memimages:
            print "Loading images into memory"
        print "Loading {} annotations".format(len(self.indexlist))

        # Store each image-label object as a dict
        # But, do not store the images. Only store the image file path
        self.dictlist = [json.load(open(aidx)) for aidx in self.indexlist]
        shuffle(self.dictlist)

        # Create a weight vector
        self.idx_to_attr_id = {v: k for k, v in self.attr_id_to_idx.items()}
        self.idx_to_weight = np.ones(68)
        if self.wloss:
            for idx in sorted(self.idx_to_attr_id.keys()):
                attr_id = self.idx_to_attr_id[idx]
                self.idx_to_weight[idx] = self.attr_id_to_weight[attr_id]

        print('Class weights: ')
        print(self.idx_to_weight)

        if self.sampling == 'weighted':
            '''
            1. Create a mapping of WEIGHT (int) -> attribute_idx
               weight of example = max(weight of attribute i in example)
            2. When sampling next image:
              a. Sample weight ~ [1, 2, 3, 4, 5]
              b. Sample an example corresponding to this weight
            Maintain a dict:
                {
                    1: [3, 10, 4, ...],
                    2: [45, 11, 90, ...],
                    ...
                }
            and pop an idx from the list when asked for next image
            '''
            self.weight_to_idx_list = get_w2idx(self.dictlist,
                                                self.attr_id_to_weight)
            # Maintain a copy of this, because it will mutate in each iteration (pop() to consume)
            self.org_weight_to_idx_list = copy.deepcopy(
                self.weight_to_idx_list)
        elif self.sampling == 'class_weighted':
            '''
            1. Create a mapping of LABEL (attr_id) -> DICT_IDX
            2. When sampling next image:
              a. Sample class ~ [attr_1, attr_2, ..., attr_L]
              b. Sample an example corresponding to this label
            Maintain a dict:
                {
                    attr_1: [3, 10, 4, ...],
                    attr_2: [45, 11, 90, ...],
                    ...
                }
            and pop an idx from the list when asked for next image
            '''
            self.class_to_idx_list = get_class2idx(self.dictlist)
            # Maintain a copy of this, because it will mutate in each iteration (pop() to consume)
            self.org_class_to_idx_list = copy.deepcopy(self.class_to_idx_list)
        else:
            self._cur = 0  # current image
            self.weight_to_idx_list = None
            self.class_to_idx_list = None

        # Add to each dict the label vector
        for idx, this_anno in enumerate(self.dictlist):
            # Prepare the multilabel
            # Get the list of attributes this corresponds to
            if 'labels' in this_anno:
                attr_set = set(this_anno['labels'])
            else:
                this_attr_list = []
                for categ_id, attr_id_list in this_anno['attributes'].items():
                    this_attr_list += attr_id_list
                attr_set = set(this_attr_list)
            multilabel = attribute_set_to_vec(self.attr_id_to_idx,
                                              attr_set,
                                              is_safe=this_anno['safe'])
            if self.ynorm and np.sum(multilabel) > 0:
                multilabel /= np.sum(multilabel)
            assert np.sum(multilabel) > 0, 'Failed: np.sum(multilabel) > 0'
            this_anno['label_vec'] = multilabel

            image_path = this_anno['image_path']
            image_resized_path = image_path.replace('/images_chunks/',
                                                    '/images_chunks_resized/')
            if os.path.exists(image_resized_path):
                this_anno['image_path'] = image_resized_path

            if self.memimages:
                im = imread(this_anno['image_path'])
                if len(im.shape) == 2:
                    # This is a grayscale image
                    im = np.asarray(
                        Image.open(this_anno['image_path']).convert('RGB'))
                elif len(im.shape) == 3 and im.shape[2] == 4:
                    # CMYK Image
                    im = np.asarray(
                        Image.open(this_anno['image_path']).convert('RGB'))

                if self.img_transform == 'resize':
                    # Resize the image to the required shape
                    im = scipy.misc.imresize(im, self.im_shape)

                this_anno['im'] = im

                if idx % 100 == 0:
                    sys.stdout.write("processing %d/%d (%.2f%% done)   \r" %
                                     (idx, len(self.dictlist),
                                      idx * 100.0 / len(self.dictlist)))
                    sys.stdout.flush()

        print('multilabel.shape = ', multilabel.shape)
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer(mean=[104, 117, 123])

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))

    def get_weights(self):
        return self.idx_to_weight

    def get_user_prefs(self):
        return self.user_mat.copy()

    def get_attr_id_list(self):
        return self.attr_id_list

    def load_next_image(self):
        """
        Load the next image in a batch.
        """

        # Sample image -------------------------------------------------------------------------------------------------
        # Choose which idx in dctlist to read
        # The next block should fill this in
        if self.sampling == 'weighted':
            # 1. Sample a weight
            this_w = np.random.choice(list(self.weight_to_idx_list.keys()))
            # 2.a. Is an image available for this weight. If not,
            if len(self.weight_to_idx_list[this_w]) == 0:
                # Copy from the original mapping
                self.weight_to_idx_list = copy.deepcopy(
                    self.org_weight_to_idx_list)
                # Shuffle indices
                for w in sorted(self.weight_to_idx_list.keys()):
                    shuffle(self.weight_to_idx_list[w])
            # 2.b. Get the next index
            next_idx = self.weight_to_idx_list[this_w].pop()
        elif self.sampling == 'class_weighted':
            # 1. Sample a label
            this_attr_id = np.random.choice(list(self.class_to_idx_list.keys()))
            # 2a. Is there a training example available for this weight? If not,
            if len(self.class_to_idx_list[this_attr_id]) == 0:
                # Copy from original mapping
                self.class_to_idx_list = copy.deepcopy(
                    self.org_class_to_idx_list)
                # Shuffle them
                for ai in self.class_to_idx_list:
                    shuffle(self.class_to_idx_list[ai])
            # 2b. Get next index
            next_idx = self.class_to_idx_list[this_attr_id].pop()
        else:
            # Did we finish an epoch?
            if self._cur == len(self.dictlist):
                self._cur = 0
                next_idx = self._cur
                shuffle(self.dictlist)
            else:
                next_idx = self._cur
                self._cur += 1

        dct = self.dictlist[next_idx]  # Get the anno

        # Load image ---------------------------------------------------------------------------------------------------
        image_path = dct['image_path']

        multilabel = dct['label_vec']
        assert multilabel.shape[0] == self.num_labels, \
            'multilabel.shape[0] ({}) != self.num_labels ({})'.format(
                multilabel.shape[0], self.num_labels)

        # Load an image
        if 'im' in dct:
            im = dct['im']
        else:
            im = imread(image_path)
            if len(im.shape) == 2:
                # This is a grayscale image
                im = np.asarray(Image.open(image_path).convert('RGB'))
            elif len(im.shape) == 3 and im.shape[2] == 4:
                # CMYK Image
                im = np.asarray(Image.open(image_path).convert('RGB'))
        org_shape = im.shape

        # Resize/Transform image ---------------------------------------------------------------------------------------
        if self.img_transform == 'resize':
            # Resize the image to the required shape
            im = scipy.misc.imresize(im, self.im_shape)
        elif self.img_transform == 'rand_crop':
            # Take a random crop of size self.im_shape
            # im.shape = [H, W, 3]
            img_h, img_w, _ = im.shape
            crop_h, crop_w = self.im_shape

            # print 'Processing file: ', image_path
            # print 'Old (w, h): ', (img_w, img_h)

            if img_w < crop_w:
                new_w = crop_w
                # Scale height to keep the same aspect ratio
                new_h = int(np.round(img_h * (new_w / float(img_w))))
                im = scipy.misc.imresize(im, (new_h, new_w))
                img_w, img_h = new_w, new_h
                # print 'New (w, h): ', (img_w, img_h)

            if img_h < crop_h:
                new_h = crop_h
                new_w = int(np.round(img_w * (new_h / float(img_h))))
                im = scipy.misc.imresize(im, (new_h, new_w))
                img_w, img_h = new_w, new_h
                # print 'New (w, h): ', (img_w, img_h)

            # Sample (x1, y1), i.e. the top-left corner of the crop window.
            # randint's high is exclusive; +1 keeps this valid when the image
            # exactly matches the crop size.
            x1 = np.random.randint(low=0, high=(img_h - crop_h + 1))
            y1 = np.random.randint(low=0, high=(img_w - crop_w + 1))
            # Crop a window given this point
            x2 = x1 + crop_h
            y2 = y1 + crop_w
            im = im[x1:x2, y1:y2, :]

            # print '(x1, y1) = ', (x1, x2)
            # print 'Cropped (w, h): ', (x2-x1, y2-y1)
            # print 'im.shape = ', im.shape

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2) * 2 - 1
        im = im[:, ::flip, :]

        transformed_im = self.transformer.preprocess(im)

        return transformed_im, multilabel
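
The 'weighted' sampling scheme documented in the __init__ above relies on a get_w2idx helper that this snippet does not show. Below is a minimal sketch consistent with the docstring (weight of an example = max weight over its attributes); the 'labels' key and the rounding into integer buckets are assumptions:

from collections import defaultdict

def get_w2idx(dictlist, attr_id_to_weight):
    """Hypothetical reconstruction: map integer weight -> indices into dictlist."""
    w2idx = defaultdict(list)
    for idx, anno in enumerate(dictlist):
        weights = [attr_id_to_weight[a] for a in anno.get('labels', [])
                   if a in attr_id_to_weight]
        if weights:
            w2idx[int(round(max(weights)))].append(idx)
    return dict(w2idx)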
Example 24
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.anno_list = params['anno_list']
        self.im_shape = params['im_shape']
        self.label_shortlist_path = params['label_shortlist']
        self.num_labels = params.get('nlabels', NUM_LABELS)
        self.memimages = bool(params.get('memimages', 0))
        self.img_transform = params.get('img_transform', 'resize')
        self.ynorm = bool(params.get('ynorm', 0))  # y := y / ||y||_1
        self.wloss = bool(params.get('wloss', 0))
        self.user_prefs = params.get('user_prefs', None)
        self.scale_user_pref = bool(params.get('scale_user_pref', 0))

        # Possible options:
        # 'uniform' (Default) : Sample uniformly
        # 'weighted': Sample a weight uniformly (usually between 1-5), then sample an example from one of these.
        self.sampling = params.get('sampling', 'uniform')

        if self.label_shortlist_path is not None:
            self.attr_id_to_idx = dict()
            self.attr_id_to_weight = dict()
            with open(self.label_shortlist_path, 'r') as f:
                f.readline()  # Skip header line
                for line in f:
                    idx, attr_id, count, weight = line.strip().split('\t')
                    idx = int(idx)
                    count = int(count)
                    weight = float(weight)
                    self.attr_id_to_idx[attr_id] = idx
                    self.attr_id_to_weight[attr_id] = weight
        else:
            assert False, "Not Supported"
            # self.attr_id_to_idx = load_attr_to_idx()

        self.attr_id_list = list(self.attr_id_to_idx.keys())
        self.n_attr = len(self.attr_id_list)

        self.user_mat = None
        if self.user_prefs is not None:
            '''
            This is a file of the format:
            <attribute_id>  <attribute_name>    <score_1>   <score_2> ... <score_U>
            where U = # of users and score_i indicates how important this
            attribute is to user i
            '''
            with open(self.user_prefs) as uf:
                uf.readline()  # Skip header line
                pref_dct = dict()  # Store mapping: attr_id -> [..., score_i, ...]
                for line in uf:
                    if line.strip() == '':
                        continue
                    tokens = line.strip().split('\t')
                    attr_id = tokens[0]
                    attr_name = tokens[1]
                    scores = [float(s) for s in tokens[2:]]
                    if attr_id in self.attr_id_to_idx:
                        pref_dct[attr_id] = scores

                # Check n_users is consistent
                n_users = len(pref_dct.values()[0])
                assert all([n_users == len(x) for x in pref_dct.values()
                            ]), Counter([len(x) for x in pref_dct.values()])

                # Manually fill in the 'safe' attribute
                pref_dct[SAFE_ATTR_ID] = np.ones(n_users) * SAFE_WEIGHT
                # Make sure we have preferences for all attributes that we need
                assert all([
                    pref_attr_id in self.attr_id_to_idx
                    for pref_attr_id in pref_dct.keys()
                ])

                # Represent as a matrix: A x U
                # Where col_j represents attribute preferences for user j
                n_attr = len(self.attr_id_to_idx)
                self.user_mat = np.zeros((n_attr, n_users))
                for attr_id, idx in self.attr_id_to_idx.iteritems():
                    attr_scores = pref_dct[attr_id]
                    self.user_mat[idx] = attr_scores

            print 'User preferences: '
            print self.user_mat
            print 'user_mat.shape = ', self.user_mat.shape

            # Normalize user_mat
            if self.scale_user_pref:
                self.user_mat -= 2.5  # Assuming mean of scores = 2.5, so shift to [-2.5, 2.5]
                self.user_mat /= 2.5  # Scale to [-1, 1]
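                # e.g., assuming raw scores in [0, 5]: a score of 4.0 maps to
                # (4.0 - 2.5) / 2.5 = 0.6, and a score of 1.0 maps to -0.6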

        # Store the list of annotation files as indexlist
        self.indexlist = [line.rstrip('\n') for line in open(self.anno_list)]

        if self.memimages:
            print "Loading images into memory"
        print "Loading {} annotations".format(len(self.indexlist))

        # Store each image-label object as a dict
        # But, do not store the images. Only store the image file path
        self.dictlist = [json.load(open(aidx)) for aidx in self.indexlist]
        shuffle(self.dictlist)

        # Create a weight vector
        self.idx_to_attr_id = {
            v: k
            for k, v in self.attr_id_to_idx.iteritems()
        }
        self.idx_to_weight = np.ones(68)  # NOTE: hardcoded to 68 attribute slots
        if self.wloss:
            for idx in sorted(self.idx_to_attr_id.keys()):
                attr_id = self.idx_to_attr_id[idx]
                self.idx_to_weight[idx] = self.attr_id_to_weight[attr_id]

        print 'Class weights: '
        print self.idx_to_weight

        if self.sampling == 'weighted':
            '''
            1. Create a mapping of WEIGHT (int) -> attribute_idx
               weight of example = max(weight of attribute i in example)
            2. When sampling next image:
              a. Sample weight ~ [1, 2, 3, 4, 5]
              b. Sample an example corresponding to this weight
            Maintain a dict:
                {
                    1: [3, 10, 4, ...],
                    2: [45, 11, 90, ...],
                    ...
                }
            and pop an idx from the list when asked for next image
            '''
            self.weight_to_idx_list = get_w2idx(self.dictlist,
                                                self.attr_id_to_weight)
            # Maintain a copy of this, because it will mutate in each iteration (pop() to consume)
            self.org_weight_to_idx_list = copy.deepcopy(
                self.weight_to_idx_list)
        elif self.sampling == 'class_weighted':
            '''
            1. Create a mapping of LABEL (attr_id) -> DICT_IDX
            2. When sampling next image:
              a. Sample class ~ [attr_1, attr_2, ..., attr_L]
              b. Sample an example corresponding to this label
            Maintain a dict:
                {
                    attr_1: [3, 10, 4, ...],
                    attr_2: [45, 11, 90, ...],
                    ...
                }
            and pop an idx from the list when asked for next image
            '''
            self.class_to_idx_list = get_class2idx(self.dictlist)
            # Maintain a copy of this, because it will mutate in each iteration (pop() to consume)
            self.org_class_to_idx_list = copy.deepcopy(self.class_to_idx_list)
        else:
            self._cur = 0  # current image
            self.weight_to_idx_list = None
            self.class_to_idx_list = None

        # Add to each dict the label vector
        for idx, this_anno in enumerate(self.dictlist):
            # Prepare the multilabel
            # Get the list of attributes this corresponds to
            if 'labels' in this_anno:
                attr_set = set(this_anno['labels'])
            else:
                this_attr_list = []
                for categ_id, attr_id_list in this_anno['attributes'].iteritems():
                    this_attr_list += attr_id_list
                attr_set = set(this_attr_list)
            multilabel = attribute_set_to_vec(self.attr_id_to_idx,
                                              attr_set,
                                              is_safe=this_anno['safe'])
            if self.ynorm and np.sum(multilabel) > 0:
                multilabel /= np.sum(multilabel)
            assert np.sum(multilabel) > 0, 'Failed: np.sum(multilabel) > 0'
            this_anno['label_vec'] = multilabel

            image_path = this_anno['image_path']
            image_resized_path = image_path.replace('/images_chunks/',
                                                    '/images_chunks_resized/')
            if os.path.exists(image_resized_path):
                this_anno['image_path'] = image_resized_path

            if self.memimages:
                im = imread(this_anno['image_path'])
                if len(im.shape) == 2:
                    # This is a grayscale image
                    im = np.asarray(
                        Image.open(this_anno['image_path']).convert('RGB'))
                elif len(im.shape) == 3 and im.shape[2] == 4:
                    # 4-channel (e.g. RGBA or CMYK) image
                    im = np.asarray(
                        Image.open(this_anno['image_path']).convert('RGB'))

                if self.img_transform == 'resize':
                    # Resize the image to the required shape
                    im = scipy.misc.imresize(im, self.im_shape)

                this_anno['im'] = im

                if idx % 100 == 0:
                    sys.stdout.write("processing %d/%d (%.2f%% done)   \r" %
                                     (idx, len(self.dictlist),
                                      idx * 100.0 / len(self.dictlist)))
                    sys.stdout.flush()

        print 'multilabel.shape = ', multilabel.shape
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer(mean=[104, 117, 123])

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))
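
# A hedged sketch of the get_w2idx helper the example above relies on (its
# definition is not shown there): map each integer weight to the list of
# dictlist indices whose strongest attribute carries that weight, as the
# 'weighted' sampling docstring describes. The 'labels' key and the rounding
# to an integer weight are assumptions.
from collections import defaultdict

def get_w2idx(dictlist, attr_id_to_weight):
    weight_to_idx_list = defaultdict(list)
    for idx, anno in enumerate(dictlist):
        attr_ids = [a for a in anno.get('labels', [])
                    if a in attr_id_to_weight]
        if not attr_ids:
            continue
        # weight of example = max(weight of attribute i in example)
        w = int(round(max(attr_id_to_weight[a] for a in attr_ids)))
        weight_to_idx_list[w].append(idx)
    return dict(weight_to_idx_list)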
class BatchLoader(object):
    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.viper_root = params['viper_root']  # e.g. '/home/zf/caffe/multible_channel_identification/data/VIPeR'
        self.im_shape_h = params['im_shape'][0]
        self.im_shape_w = params['im_shape'][1]
        # get list of image indexes.
        list_file_a = params['split'][0] + '.txt'  # split: 'cam_a' or 'cam_b'
        list_file_b = params['split'][1] + '.txt'
        self.indexlist_a = [
            line.rstrip('\n\r')
            for line in open(osp.join(params['viper_root'][0], list_file_a))
        ]  # indices 0000 to 0631
        self.indexlist_b = [
            line.rstrip('\n\r')
            for line in open(osp.join(params['viper_root'][1], list_file_b))
        ]
        self.i_a = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist_a))  # 632 images

    def load_next_three_images(self):
        """
        Load the next image triplet: an image from cam_a, the matching image
        from cam_b, and an image at a different index from cam_a.
        """
        # Did we finish an epoch?
        if self.i_a == len(self.indexlist_a):
            self.i_a = 0
            shuffle(self.indexlist_a)

        # Load three images
        index_a = self.indexlist_a[self.i_a]  # Get the image index of cam_a
        i_b = 0
        while self.indexlist_b[i_b] != index_a:
            i_b += 1
            if i_b == len(self.indexlist_b):
                print "there is an error in the data: no match for " + index_a
                return
        index_b = self.indexlist_b[i_b]  # Get the image index of cam_b
        # Sample a different index from cam_a as the third image
        i_c = random.randint(0, len(self.indexlist_a) - 1)
        while i_c == self.i_a:
            i_c = random.randint(0, len(self.indexlist_a) - 1)
        index_c = self.indexlist_a[i_c]
        image_file_a_name = index_a + '.bmp'
        image_file_b_name = index_b + '.bmp'
        image_file_c_name = index_c + '.bmp'

        im = np.zeros((3, 230, 80, 3))  # three H x W x C images; assumes im_shape == (230, 80)
        im[0] = np.asarray(
            Image.open(osp.join(self.viper_root[0], image_file_a_name)))
        im[1] = np.asarray(
            Image.open(osp.join(self.viper_root[1], image_file_b_name)))
        im[2] = np.asarray(
            Image.open(osp.join(self.viper_root[0], image_file_c_name)))

        pic = np.zeros((3, 3, 230, 80))  # (image, C, H, W), Caffe layout
        pic[0] = self.transformer.preprocess(im[0])
        pic[1] = self.transformer.preprocess(im[1])
        pic[2] = self.transformer.preprocess(im[2])
        return pic
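
# A minimal usage sketch for the loader above (paths and values are
# hypothetical). Note that viper_root and split are two-element lists, one
# entry per camera, matching how they are indexed in __init__.
params = {
    'batch_size': 4,
    'viper_root': ['data/VIPeR/cam_a', 'data/VIPeR/cam_b'],
    'im_shape': [230, 80],
    'split': ['cam_a', 'cam_b'],
}
loader = BatchLoader(params, None)
pic = loader.load_next_three_images()  # (3, 3, 230, 80): three C x H x W images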
Example n. 26
class BatchLoader():
    """
    Class definition for loading a batch of data. Use this class with a worker
    thread
    """
    def __init__(self, params):
        self._image_dir = params['image_dir']
        self._gt_dir = params['gt_dir']

        self._file_list = params['file_list']
        self._image_ext = params['image_ext']

        self._nclasses = params['nclasses']
        assert self._nclasses >= 1

        self._batch_size = params['batch_size']
        assert self._batch_size >= 1

        self._bgr_mean = np.array(params['bgr_mean'])

        # If 'im_size' is not given, assume variable-sized inputs and force
        # the batch size to 1
        if 'im_size' in params:
            self._im_size = np.array(params['im_size'])
        else:
            self._im_size = None
            self._batch_size = 1

        if 'mirror' in params:
            self._mirror = params['mirror'] == 1
        else:
            self._mirror = False

        if 'random_seed' in params:
            rd.seed(params['random_seed'])

        # Read the file list
        fid = open(self._file_list, 'r')
        self._list = [f.strip() for f in fid]
        rd.shuffle(self._list)

        self._transformer = SimpleTransformer(mean=self._bgr_mean)
        self._idx = 0

    def load_one_data_point(self, fname):
        """
        Load a single data point and preprocess it.

        Parameters
        ----------
        fname: the file for which we need to load the data

        Returns
        -------
        im: Processed Image
        gt: Processed Semantic Labeling
        """
        im_name = os.path.join(self._image_dir,
                               '{}.{}'.format(fname, self._image_ext))
        gt_name = os.path.join(self._gt_dir, '{}.txt'.format(fname))

        im = skimage.io.imread(im_name)
        gt = np.loadtxt(gt_name, delimiter=' ')

        [h, w, _] = im.shape

        assert h > self._im_size[0]
        assert w > self._im_size[1]


        h_start_idx = rd.randint(0, h - self._im_size[0])
        h_end_idx = h_start_idx + self._im_size[0]

        w_start_idx = rd.randint(0, w - self._im_size[1])
        w_end_idx = w_start_idx + self._im_size[1]

        final_im = im[h_start_idx:h_end_idx, w_start_idx:w_end_idx, :]

        final_gt = gt[h_start_idx:h_end_idx, w_start_idx:w_end_idx]

        if self._mirror and rd.randint(0, 1) == 1:
            final_im = final_im[:, ::-1, :]
            final_gt = final_gt[:, ::-1]

        final_im = self._transformer.preprocess(final_im)
        return final_im, final_gt

    def get_fname(self):
        """
        Randomly select next file to process.

        Get the next file in the list to load into the CNN. If the list is
        exhausted, it is reshuffled. This ensures that every file is visited
        once per epoch, in a random rather than fixed order.

        Returns
        -------
        fname: a randomly selected filename
        """
        if self._idx >= len(self._list):
            rd.shuffle(self._list)
            self._idx = 0
        fname = self._list[self._idx]
        self._idx += 1
        return fname

    def next_batch(self):
        """
        Get the next batch to process

        Returns
        -------
        ims: numpy array of the images
        gts: numpy array of labels
        """
        if self._batch_size == 1:
            fname = self.get_fname()
            (ims, gts) = self.load_one_data_point(fname)
        else:
            ims = np.zeros((self._batch_size, 3, self._im_size[0],
                            self._im_size[1]))
            gts = np.zeros((self._batch_size, 1, self._im_size[0],
                            self._im_size[1]))
            for i in range(self._batch_size):
                fname = self.get_fname()
                (im, gt) = self.load_one_data_point(fname)
                ims[i, :] = im
                gts[i, :] = gt

        return ims, gts
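
# A minimal usage sketch under assumed paths and values (all hypothetical):
params = {
    'image_dir': 'data/images',
    'gt_dir': 'data/labels',
    'file_list': 'data/train.txt',
    'image_ext': 'png',
    'nclasses': 21,
    'batch_size': 8,
    'bgr_mean': [104.0, 117.0, 123.0],
    'im_size': [321, 321],
    'mirror': 1,
    'random_seed': 42,
}
loader = BatchLoader(params)
ims, gts = loader.next_batch()  # ims: (8, 3, 321, 321), gts: (8, 1, 321, 321)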
class BatchLoader(object):

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.yelp_picture_root = params['yelp_picture_root']
        self.yelp_csv_root = params['yelp_csv_root']
        self.im_shape = params['im_shape']
        self.split = params['split']

        # get list of image indexes.
        if self.split in ["train", "validation"]:
       	    list_csv = self.yelp_csv_root + self.split + '_photo_to_biz_ids2.csv'
	else:
       	    list_csv = self.yelp_csv_root + self.split + '_photo_to_biz.csv'
        self.image_key = []
        with open(list_csv) as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                self.image_key.append(row)

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.image_key))

	if self.split in ["train", "validation", "poster"]:
	    attributes_csv = osp.join(self.yelp_csv_root, self.split + '2.csv')
            self.attributes_dict = {}

	    if self.split == "poster":
		attributes_csv = osp.join(self.yelp_picture_root, self.split + '.csv')

            with open(attributes_csv) as csv_file:
                reader = csv.DictReader(csv_file)
                for row in reader:
                    attr_string = row["labels"]
                    self.attributes_dict[row["business_id"]] = [int(label) for label in row["labels"].split()]

	if self.split == 'validation':
	    lmdb_dir = self.yelp_picture_root + 'val_lmdb'
	else:
	    lmdb_dir = self.yelp_picture_root + self.split + '_lmdb'
	lmdb_env = lmdb.open(lmdb_dir)
	self.lmdb_txn = lmdb_env.begin()
	self.lmdb_cursor = self.lmdb_txn.cursor()
	

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.image_key):
            self._cur = 0
        if not self.lmdb_cursor.next():
            # Wrap the LMDB cursor around when it reaches the end
            self.lmdb_cursor = self.lmdb_txn.cursor()
            self.lmdb_cursor.next()

        business_id = self.image_key[self._cur]["business_id"]
        photo_id = self.image_key[self._cur]["photo_id"]  # Get the image index

        # Load and prepare ground truth
        multilabel = np.zeros(9).astype(np.float32)
        if self.split in ["train", "validation","poster"]:
            anns = self.load_yelp_attributes(business_id)
            for label in anns:
                # convert label information to a 1/0 array.
                multilabel[label] = 1

        self._cur += 1
        datum = caffe.proto.caffe_pb2.Datum()
        key, value = self.lmdb_cursor.item()

        datum.ParseFromString(value)
        label = datum.label
        # TODO(prad): Add a check for test as well.
        if self.split != 'test' and self.split != 'poster' and str(label) != business_id:
            print "Houston, we have a problem: " + str(label) + " vs " + str(business_id)

        data = caffe.io.datum_to_array(datum)
        im = scipy.misc.imresize(data, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2) * 2 - 1
        im = im[:, ::flip, :]
        # The datum is already BGR, but the transformer below re-applies the
        # RGB -> BGR swap, so convert back to RGB first
        im = im[:, :, ::-1]

        return self.transformer.preprocess(im), multilabel, photo_id, label


    def load_yelp_attributes(self, business_id, mapping=None):
        return self.attributes_dict[business_id]
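
# For reference, a minimal sketch of walking an LMDB of Caffe Datum records,
# the same pattern load_next_image above builds on (the path is hypothetical):
import lmdb
import caffe

env = lmdb.open('data/train_lmdb', readonly=True)
with env.begin() as txn:
    for key, value in txn.cursor():
        datum = caffe.proto.caffe_pb2.Datum()
        datum.ParseFromString(value)
        arr = caffe.io.datum_to_array(datum)  # C x H x W numpy array
        label = datum.label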
class BatchLoader(object):

    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.data_root = params['data_root']
        self.im_shape = params['im_shape']
        self.n_classes = params['n_classes']
        # get list of image indexes.
        list_file = params['split'] + '.json'

        # indexlist is a list of all the photo_ids of the Yelp dataset
        self.indexlist = [d['photo_id'] for d in json.load(open(osp.join(self.data_root, list_file)))]
        self._cur = 0  # current image

        # Load the business attributes and the photo -> business mapping
        self.load_yelp_attributes(self.data_root, self.n_classes)

        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print "BatchLoader initialized with {} images".format(
            len(self.indexlist))

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            shuffle(self.indexlist)

        #print "Loading image #{}".format(self._cur)

        # Load an image
        index = self.indexlist[self._cur]  # Get the image index
        image_file_name = index + '.jpg'
        im = np.asarray(Image.open(
            osp.join(self.data_root, 'images', image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2)*2-1
        im = im[:, ::flip, :]

        # Load and prepare ground truth
        multilabel = np.zeros(self.n_classes).astype(np.float32)

        # retrieve the attributes of the business to which this photo belongs
        attrs = self.get_business_attributes(index)
        # Sort the attributes by key so the feature vector is always in the
        # same order
        attrs_list = sorted(attrs.iteritems())

        for i, (label, value) in enumerate(attrs_list):
            # In the multilabel problem we don't care how MANY instances
            # there are of each class, only whether they are present
            multilabel[i] = value

        self._cur += 1
        return self.transformer.preprocess(im), multilabel


    def load_yelp_attributes(self, data_root, n_classes):
        """
        This code is borrowed from Ross Girshick's FAST-RCNN code
        (https://github.com/rbgirshick/fast-rcnn).

        See publication for further details: (http://arxiv.org/abs/1504.08083).

        Thanks Ross!

        """
        classes = ("Accepts Credit Cards", "Alcohol",
        "Ambience_casual", "Ambience_classy", "Ambience_divey", "Ambience_hipster",
        "Ambience_intimate", "Ambience_romantic", "Ambience_touristy", "Ambience_trendy",
        "Ambience_upscale", "Attire_casual", "Attire_dressy", "Attire_formal", "Caters",
        "Delivery", "Dietary_Restrictions_dairy-free", "Dietary_Restrictions_gluten-free",
        "Dietary_Restrictions_halal", "Dietary_Restrictions_kosher", "Dietary_Restrictions_soy-free",
        "Dietary_Restrictions_vegan", "Dietary_Restrictions_vegetarian", "Drive-Thru",
        "Good_For_breakfast", "Good_For_brunch", "Good_For_dessert", "Good_For_dinner",
        "Good_For_latenight", "Good_For_lunch", "Good For Dancing", "Good For Groups",
        "Good for Kids", "Happy Hour", "Has TV", 'Music_background_music',
        'Music_dj', 'Music_jukebox', 'Music_karaoke', 'Music_live', 'Music_video',
        "Noise_Level_average", "Noise_Level_loud", "Noise_Level_quiet", "Noise_Level_very_loud",
        "Outdoor Seating",
        "Parking_garage", "Parking_lot", "Parking_street", "Parking_valet", "Parking_validated",
        "Price_Range_1", "Price_Range_2", "Price_Range_3", "Price_Range_4",
        'Smoking_outdoor', 'Smoking_yes', 'Smoking_no',
        "Take-out",
        "Takes Reservations",
        "Waiter Service",)
        class_to_ind = dict(zip(classes, xrange(n_classes)))

        attr_filename = osp.join(data_root, "business_attributes.json")

        # Load the list of business attributes
        self.business_attributes = json.load(open(attr_filename))

        gt_classes = np.zeros((n_classes), dtype=np.int32)  # unused here
        self.photo_2_business = json.load(
            open(osp.join(data_root, "photo_id_to_business_id.json")))

        return self.business_attributes


    def get_business_attributes(self, index):
        """
        Get the attributes JSON for the image with name `index`
        """
        # Find the business that this photo belongs to via the
        # photo_2_business mapping
        business = [b for b in self.photo_2_business if b["photo_id"] == index][0]

        # Given that business, use its business_id to find the attributes of
        # the business
        attrs = [a for a in self.business_attributes if a["id"] == business["business_id"]][0]

        attributes = dict(attrs)

        # remove the id from the list of attributes
        attributes.pop("id")

        return attributes
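
# The two list scans above are O(N) per image; a hedged alternative sketch
# that precomputes dict lookups once (field names follow the JSON used above,
# but the helper itself is hypothetical):
def build_lookup_tables(photo_2_business, business_attributes):
    photo_to_biz = {p["photo_id"]: p["business_id"] for p in photo_2_business}
    biz_to_attrs = {a["id"]: a for a in business_attributes}
    return photo_to_biz, biz_to_attrs

# Usage inside get_business_attributes:
#     attributes = dict(biz_to_attrs[photo_to_biz[index]])
#     attributes.pop("id")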
Example n. 29
class BatchLoader(object):
    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """
    def __init__(self, params, result):
        self.result = result
        self.batch_size = params['batch_size']
        self.number_classes = params['number_classes']
        self.img_ext = params['img_ext']
        self.dataset_root = params['dataset_root']
        self.im_shape = params['im_shape']

        # get list of image indexes.
        list_file = params['split'] + '.csv'
        self.indexlist = [
            line.rstrip('\n').split(',')[0]
            for line in open(osp.join(self.dataset_root, list_file))
        ]
        self.labellist = [
            line.rstrip('\n').split(',')[1]
            for line in open(osp.join(self.dataset_root, list_file))
        ]

        self._cur = 0  # current image
        # this class does some simple data-manipulations
        self.transformer = SimpleTransformer()

        print("BatchLoader initialized with {} images".format(
            len(self.indexlist)))

    def load_next_image(self):
        """
        Load the next image in a batch.
        """
        # Did we finish an epoch?
        if self._cur == len(self.indexlist):
            self._cur = 0
            # Re-seed with the same value before each shuffle so that
            # indexlist and labellist are permuted identically and stay aligned
            randnum = random.randint(0, 100)
            random.seed(randnum)
            shuffle(self.indexlist)
            random.seed(randnum)
            shuffle(self.labellist)

        # Load an image
        index = self.indexlist[self._cur]  # Get the image index
        image_file_name = index + self.img_ext
        im = np.asarray(
            Image.open(osp.join(self.dataset_root, 'images', image_file_name)))
        im = scipy.misc.imresize(im, self.im_shape)  # resize

        # do a simple horizontal flip as data augmentation
        flip = np.random.choice(2) * 2 - 1
        im = im[:, ::flip, :]

        # Load and prepare ground truth
        multilabel = np.zeros(self.number_classes).astype(np.float32)
        for label in self.labellist[self._cur].split(' '):
            multilabel[int(label)] = 1

        self._cur += 1
        return self.transformer.preprocess(im), multilabel
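
# An equivalent way to keep the two lists aligned while shuffling (a sketch;
# the re-seeding idiom above achieves the same effect):
import random

def shuffle_in_unison(indexlist, labellist):
    paired = list(zip(indexlist, labellist))
    random.shuffle(paired)
    idxs, labels = zip(*paired)
    return list(idxs), list(labels)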