Example #1
def write_images_to_lmdb(img_dir, db_name, labels):
    for root, dirs, files in os.walk(img_dir, topdown = False):
        if root != img_dir:
            continue
        # multiply by 2 to make the map_size comfortably large enough
        map_size = 2*IMAGE_WIDTH * IMAGE_HEIGHT *3*len(files)
        env = lmdb.Environment(db_name, map_size=map_size)
        txn = env.begin(write=True,buffers=True)

        for idx, name in enumerate(files):
            #print(os.path.join(img_dir, name))
            img = cv2.imread(os.path.join(img_dir, name), cv2.IMREAD_COLOR)
            img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
            #print(img.shape)
            #print(img)
            img=img.transpose(2,0,1)
            y = labels[name]
            #print(name)
            #print(y)
            datum = array_to_datum(img,y)
            #print(datum)
            str_id = '{:08}'.format(idx)
            txn.put(str_id.encode('ascii'), datum.SerializeToString())   

            if idx % 1000 == 1:
                print("transforming" + str(idx) + "th image to sb")
    txn.commit()
    env.close()
    print " ".join(["Writing to", db_name, "done!"])
Example #2
def createDB(name, contenido, funcion):
    db = lmdb.Environment(name, map_size=int(1e12))
    tx = db.begin(write=True)

    for label, imagen in enumerate(contenido):
        im = Image.open(imagen)
        punt = im.fp  # keep the underlying file handle so it can be closed later
        im = im.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
        if 'cat' in os.path.split(imagen)[1]:
            y = 0
        else:
            y = 1
        x = np.array(im.getdata()).reshape(im.size[1], im.size[0], 3)
        datum = array_to_datum(np.transpose(x, (2, 0, 1)), y)

        if funcion(label, SCORE):
            print(label)
            tx.put('{:08}'.format(label).encode('ascii'), datum.SerializeToString())

        if not punt.closed:
            punt.close()

        if (label + 1) % 2500 == 0:
            tx.commit()
            tx = db.begin(write=True)
            print('------- commit -------')

    tx.commit()
    db.close()
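The commit-every-2500 pattern above keeps write transactions small. A minimal sketch of the same idea as a reusable helper (names are illustrative):

import lmdb

def put_in_batches(env, items, commit_every=2500):
    # items: iterable of (bytes key, bytes value) pairs
    tx = env.begin(write=True)
    for i, (key, value) in enumerate(items):
        tx.put(key, value)
        if (i + 1) % commit_every == 0:
            tx.commit()                  # flush the current batch
            tx = env.begin(write=True)   # start a fresh transaction
    tx.commit()                          # commit the final partial batch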
Example #3
def create_lmdb(db_name, labels):
    map_size = SEQUENCE_LENGTH * ALPHA_LENGTH * 27 * len(
        labels)  # 27x is just for safety
    env = lmdb.Environment(db_name, map_size=map_size)
    txn = env.begin(write=True, buffers=True)

    X_copy = np.ndarray((3, SEQUENCE_LENGTH, ALPHA_LENGTH))

    for i, label in enumerate(labels):
        print('writing img-{}'.format(i))
        X = mp.imread(IMG_BASE_NAME.format(i)) * 255  # mp: matplotlib.image; PNG values are in [0, 1]
        X_rev = np.transpose(X)
        X_copy[0, :, :] = X_rev
        X_copy[1, :, :] = X_rev
        X_copy[2, :, :] = X_rev

        if label < 5:
            class_label = 0
        else:
            class_label = 1

        datum = array_to_datum(X_copy.astype(np.uint8), class_label)
        str_id = '{:08}'.format(i)
        txn.put(str_id.encode('ascii'), datum.SerializeToString())

    txn.commit()
    env.close()
    print('Done creating {}!'.format(db_name))
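Filling the three channel planes one by one works, but the same replication can be written in a single step with np.repeat; a small equivalent sketch:

import numpy as np

X_rev = np.random.rand(32, 16)  # stand-in for the transposed grayscale image
X_copy = np.repeat(X_rev[np.newaxis, :, :], 3, axis=0)
assert X_copy.shape == (3, 32, 16)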
Example #4
 def write_images_to_db(self, db_name, images, callback=lambda a: 0):
     '''
     Writes images to a lmdb database inside this model's folder. db_name is the name of the
     database. If a database by that name already exists, it will be overwritten. images is a
     list of 2-element tuples. The first element of each tuple should be a path to an image, and
     the second element should be the label of that image.
     '''
     random.shuffle(images)  #Shuffle input data to improve training.
     p = os.path.join(self.get_folder(), db_name)
     s.call(['rm', '-r', p])
     map_size = 256 * 256 * 3 * 2 * len(images)
     env = lmdb.Environment(p, map_size=map_size)
     write_to = env.begin(write=True, buffers=True)
     i = 0
     num_images = len(images)
     update_interval = int(num_images / 100 + 1)
     for image in images:
         try:
             resize_image(image[0])
             input = np.transpose(
                 mp.imread('/tmp/resized.jpg'),
                 (2, 0, 1))  #Caffe wants CxHxW, not the standard HxWxC.
             datum = array_to_datum(input, image[1])
             write_to.put('{:08}'.format(i).encode('ascii'),
                          datum.SerializeToString())
             i += 1
         except Exception:
             pass  # skip images that fail to load or resize
         if (i % update_interval == 0):
             callback([(i / num_images, '')])
     write_to.commit()
     env.close()
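The map_size above hard-codes 256*256*3 bytes per image with a 2x margin. A hedged sketch of estimating it from the input files instead (images is the same list of (path, label) tuples; the 10x decode factor is a rough assumption, since raw pixels are larger than compressed files):

import os

def estimate_map_size(images, decode_factor=10, slack=2.0):
    # Sum the on-disk sizes, scale up for decoding, then add head-room.
    total_bytes = sum(os.path.getsize(path) for path, _label in images)
    return int(total_bytes * decode_factor * slack)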
Example #5
def create_seqdb(db_path, root_dir, seqs, rels, resize=None):
    # create a lmdb of sequences stacked at the channel dimensions
    # therefore each blob is (SEQ_LEN*3) X H X W

    # shuffle data
    seqs, rels = np.array(seqs), np.array(rels)
    sff = np.random.permutation(seqs.shape[0])
    seqs, rels = seqs[sff], rels[sff]
    np.savez("{}_REL.npz".format(db_path), gt_rels=rels)

    in_db = lmdb.open(db_path, map_size=int(1e12))
    with in_db.begin(write=True) as in_txn:
        for in_idx, seq in enumerate(seqs):
            im_seq = None
            for img_fname in seq:
                im = Image.open(osp.join(root_dir, img_fname))
                im = im.convert('RGB') if im.mode == 'L' else im
                im = np.array(im.resize(
                    resize, Image.BILINEAR)) if resize else np.array(im)
                im = im[:, :, ::-1]  # RGB -> BGR (caffe convention)
                im = im.transpose((2, 0, 1))  # HxWxC -> CxHxW
                im_seq = im if im_seq is None else np.append(
                    im_seq, im, axis=0)

            im_dat = array_to_datum(im_seq, label=0)
            in_txn.put('{:0>10d}'.format(in_idx).encode('ascii'),
                       im_dat.SerializeToString())
            if in_idx % 1000 == 0:
                print("{}/{} images saved..".format(in_idx, len(seqs)))

    in_db.close()
    print("Dataset created at {} with {} datums".format(db_path, len(seqs)))
    return db_path
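Since each blob stores the whole sequence stacked along the channel axis, reading an entry back reduces to one reshape. A minimal sketch (seq_len is whatever length create_seqdb was given):

import numpy as np
from caffe.io import datum_to_array
from caffe.proto import caffe_pb2

def value_to_frames(value, seq_len):
    # Decode one lmdb value written by create_seqdb and split the
    # (seq_len*3, H, W) blob back into seq_len BGR frames.
    datum = caffe_pb2.Datum()
    datum.ParseFromString(value)
    blob = datum_to_array(datum)
    return blob.reshape(seq_len, 3, blob.shape[1], blob.shape[2])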
Example #6
def np2lmdb(data, lmdb_path, labels=None, shape=None):
    import matplotlib

    matplotlib.use("Agg")
    from caffe import io

    records = []
    assert isinstance(data, np.ndarray)
    data = data.astype(np.float64)  # np.float was removed in NumPy 1.24
    if labels is not None:
        assert len(data) == len(labels)
        labels = np.asarray(labels, dtype=np.int64)

    for i, each in enumerate(data):
        try:
            if shape is None:
                each.resize(each.shape[0], 1, 1)
            else:
                each.resize(shape)
            one = io.array_to_datum(each)
            if labels is not None:
                one.label = labels[i]
            records.append(one.SerializeToString())
        except Exception:
            print(each.shape)
            raise

    # The original snippet is truncated here; writing the serialized
    # records out is the evident intent, reconstructed below.
    env = lmdb.open(lmdb_path, map_size=int(1e12))
    with env.begin(write=True) as txn:
        for i, record in enumerate(records):
            txn.put('{:08}'.format(i).encode('ascii'), record)
    env.close()
Example #7
def pair_gen(datum_1, datum_2):
    mat_1 = io.datum_to_array(datum_1)
    mat_2 = io.datum_to_array(datum_2)
    mat_3 = np.concatenate([mat_1, mat_2], axis=0)
    datum_3 = io.array_to_datum(mat_3)
    if datum_1.label == datum_2.label:
        datum_3.label = 1
    else:
        datum_3.label = 0
    return datum_3
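pair_gen builds a Siamese-style pair datum whose label is 1 when the two inputs share a class. A minimal usage sketch (both lmdb names are hypothetical, and pair_gen expects caffe.io to be in scope as io):

import lmdb
from caffe.proto import caffe_pb2

def _parse(value):
    d = caffe_pb2.Datum()
    d.ParseFromString(value)
    return d

src = lmdb.open('single_lmdb', readonly=True)      # hypothetical source db
dst = lmdb.open('pair_lmdb', map_size=int(1e12))   # hypothetical pair db
with src.begin() as rtxn, dst.begin(write=True) as wtxn:
    pair = pair_gen(_parse(rtxn.get(b'00000000')),
                    _parse(rtxn.get(b'00000001')))
    wtxn.put(b'00000000', pair.SerializeToString())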
Example #8
def resize_lmdb(src_db, dest_db, width, height, length):
    '''
    resize_lmdb: create a new lmdb from an existing lmdb, resizing each image
      src_db:  source lmdb name
      dest_db: new lmdb name
      width:   new image width
      height:  new image height
      length:  limit of item count to process
    '''
    print('[start] resize lmdb [{}]->[{}]'.format(src_db, dest_db))
    src_env = lmdb.open(src_db, readonly=True)
    map_size = 100000000 * length  # rough buffer size: ~100 MB per item
    dest_env = lmdb.Environment(dest_db, map_size)
    dest_txn = dest_env.begin(write=True, buffers=True)
    with src_env.begin() as src_txn:
        cursor = src_txn.cursor()
        count = 0
        before, after = None, None
        for key, value in cursor:
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            x = datum_to_array(datum)  # (c,h,w)
            y = datum.label

            if datum.channels == CH_GRAY:
                img_array = x.transpose((1, 2, 0))  # (c,h,w)->(h,w,c)
                img_array = img_array.reshape(
                    img_array.shape[0], img_array.shape[1])  # (h,w,1)->(h,w)
                img = Image.fromarray(np.uint8(img_array))
                img = img.resize((width, height))
                img_array = np.asarray(img)
                img_array = img_array.reshape(height, width,
                                              1)  # (h,w)->(h,w,1)
            elif datum.channels == CH_RGB:
                img_array = x.transpose((1, 2, 0))  # (c,h,w)->(h,w,c)
                img = Image.fromarray(np.uint8(img_array))
                img = img.resize((width, height))
                img_array = np.asarray(img)
            else:
                continue  # skip items with unexpected channel counts
            img_array = img_array.transpose((2, 0, 1))  # (h,w,c) -> (c,h,w)
            out_datum = array_to_datum(img_array, y)
            dest_txn.put(key, out_datum.SerializeToString())  # cursor keys are already bytes
            if count == 0:
                before = x.shape
                after = img_array.shape
                #print ( '{0}: x.shape:{1} label:{2} -> x.shape{3} label:{4}'.format(key,x.shape,y,img.shape,y))
            count += 1
            if count >= length:
                break
        print('[finished] total count {}. shape {} -> {}'.format(
            count, before, after))
    dest_txn.commit()
    dest_env.close()
    src_env.close()
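CH_GRAY and CH_RGB are module-level channel-count constants the function relies on; the values below are assumptions, followed by a usage sketch:

CH_GRAY = 1  # assumed value
CH_RGB = 3   # assumed value

# Shrink the first 10000 items of an existing lmdb to 64x64.
resize_lmdb('train_lmdb', 'train_lmdb_64', 64, 64, 10000)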
Example #9
def _create_lmdb_(lmdb_filename, X, Y):
    num = len(X)
    itemsize = np.dtype(X.dtype).itemsize
    # set a reasonable upper limit for database size
    map_size = 10240 * 1024 + X.size * itemsize * 2
    print('save {} instances...'.format(num))
    env = lmdb.open(lmdb_filename, map_size=map_size)
    for i, (x, y) in enumerate(zip(X, Y)):
        datum = array_to_datum(x, y)
        str_id = '{:08}'.format(i)
        # a short-lived write transaction per item (simple but slow; see below)
        with env.begin(write=True) as txn:
            txn.put(str_id.encode('ascii'), datum.SerializeToString())
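Opening a fresh write transaction per item is correct but slow; a sketch of the same function with a single transaction covering the whole array:

import lmdb
import numpy as np
from caffe.io import array_to_datum

def _create_lmdb_fast(lmdb_filename, X, Y):
    itemsize = np.dtype(X.dtype).itemsize
    map_size = 10240 * 1024 + X.size * itemsize * 2
    env = lmdb.open(lmdb_filename, map_size=map_size)
    # One transaction amortizes the commit cost over all items.
    with env.begin(write=True) as txn:
        for i, (x, y) in enumerate(zip(X, Y)):
            datum = array_to_datum(x, y)
            txn.put('{:08}'.format(i).encode('ascii'),
                    datum.SerializeToString())
    env.close()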
Example #11
def process_single_img(lmdb_txn, item_id, im, ptype):
    """convert single image according to different normalization methods"""
    if ptype == SIMPLE_NORM:
        # note: cv2.resize takes dsize as (width, height)
        im = cv2.resize(im, (config.img_width, config.img_height))
        datum = array_to_datum(im)
        keystr = '{:0>8d}'.format(item_id)
        lmdb_txn.put(keystr.encode('ascii'), datum.SerializeToString())
    elif ptype == SQUARE_NORM:
        rim = np.transpose(im, axes=(1, 0, 2))  # swap H and W
        im = cv2.resize(im, (config.img_width, config.img_height))
        rim = cv2.resize(rim, (config.img_width, config.img_height))

        # save original image
        datum = array_to_datum(im)
        keystr = '{:0>8d}'.format(item_id)
        lmdb_txn.put(keystr.encode('ascii'), datum.SerializeToString())

        # save rotated image under a '1'-prefixed key
        datum = array_to_datum(rim)
        keystr = '1{:0>7d}'.format(item_id)
        lmdb_txn.put(keystr.encode('ascii'), datum.SerializeToString())
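A minimal driver sketch for process_single_img (the output path and the images list are assumptions; config, SIMPLE_NORM and SQUARE_NORM come from the surrounding module):

import lmdb

env = lmdb.open('norm_lmdb', map_size=int(1e12))  # hypothetical output path
with env.begin(write=True) as lmdb_txn:
    for item_id, im in enumerate(images):         # images: list of HxWxC arrays
        process_single_img(lmdb_txn, item_id, im, SIMPLE_NORM)
env.close()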
Example #12
def createDB(db, funcion):
    global label

    label = 0

    for imagenes in getImagesAndClases():
        #for imagenes in getImagesFile():
        tx = db.begin(write=True)
        punt = None

        for imagen in imagenes:
            try:
                label += 1
                if funcion(label, SCORE):
                    im = Image.open(imagen)
                    punt = im.fp  # keep the file handle so it can be closed
                    im = im.resize((IMAGE_WIDTH, IMAGE_HEIGHT))

                    if im.mode != 'RGB':
                        im = im.convert('RGB')

                    x = np.array(im.getdata()).reshape(im.size[1], im.size[0],
                                                       3)
                    # 'clase' is assumed to be provided by the surrounding
                    # module (e.g. via getImagesAndClases()); it is not
                    # defined in this snippet.
                    datum = array_to_datum(np.transpose(x, (2, 0, 1)), clase)

                    print(label, imagen)
                    tx.put('{:08}'.format(label).encode('ascii'),
                           datum.SerializeToString())

                if (label + 1) % COMMIT == 0:
                    tx.commit()
                    tx = db.begin(write=True)
                    print('------- commit -------')

            except IOError as ioe:
                print('Image:', imagen)
                print(ioe)
            except Exception:
                pass  # skip any other per-image failure
            finally:
                if punt is not None and not punt.closed:
                    punt.close()

        tx.commit()
Example #13
def write_images_to_db(db_folder, db_name, images):
    random.shuffle(images) #Shuffle input data to improve training.
    p = path.join(db_folder, db_name)
    s.call(['rm', '-r', p])
    map_size = 256*256*3*2*len(images)
    env = lmdb.Environment(p, map_size = map_size)
    write_to = env.begin(write=True, buffers=True)
    for i, image in enumerate(images):
        resize_image(image[0])
        input = np.transpose(mp.imread('/tmp/resized.jpg'), (2, 0, 1)) #Caffe wants CxHxW, not the standard HxWxC.
        datum = array_to_datum(input, image[1])
        print('Finished image', i+1, 'which had label', image[1])
        write_to.put('{:08}'.format(i).encode('ascii'), datum.SerializeToString())
    write_to.commit()
    env.close()
Example #14
def write_to_lmdb(image_list, db_path, h5_root):

    map_size = 150000000000  # 150 GB
    env = lmdb.Environment(db_path, map_size=map_size)
    with env.begin(write=True, buffers=True) as txn:
        for idx, image in enumerate(image_list):
            X = get_caffe_data.load_h5_file(os.path.join(h5_root, image[0]))
            X = X[np.newaxis, :, :]
            y = int(image[1])
            datum = array_to_datum(X, y)
            str_id = '{:08}'.format(idx)
            txn.put(str_id.encode('ascii'), datum.SerializeToString())

    env.close()
    print " ".join(["Writing to", db_path, "done!"])
Example #15
def write_images_to_lmdb(img_dir, db_name):
    for root, dirs, files in os.walk(img_dir, topdown=False):
        if root != img_dir:
            continue
        map_size = 300 * 300 * 3 * 5 * len(files)
        env = lmdb.Environment(db_name, map_size=map_size)
        txn = env.begin(write=True, buffers=True)
        for idx, name in enumerate(files):
            X = cv2.imread(os.path.join(root, name))
            y = 1
            datum = array_to_datum(X, y)
            str_id = '{:08}'.format(idx)
            txn.put(str_id.encode('ascii'), datum.SerializeToString())
    txn.commit()
    env.close()
    print " ".join(["Writing to", db_name, "done!"])
Example #16
def imgs2lmdb(imgs, lmdb_path, is_verbose=False):
    import matplotlib
    matplotlib.use('Agg')
    from caffe import io
    values = []
    for img in imgs:
        if img[0].ndim == 2:
            channels = 1
        else:
            channels = img[0].shape[2]
        height = img[0].shape[0]
        width = img[0].shape[1]

        img[0].resize(channels, height, width)
        datum = io.array_to_datum(img[0])
        datum.label = int(img[1])
        values.append(datum.SerializeToString())
    if len(values) != 0:
        dump_to_lmdb(values, lmdb_path, is_verbose, random_order=True)
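dump_to_lmdb is not shown in this snippet; a hedged reconstruction of what such a helper might look like, inferred purely from the call site:

import random
import lmdb

def dump_to_lmdb(values, lmdb_path, is_verbose=False, random_order=False):
    # Hypothetical helper: write a list of serialized Datum strings
    # under zero-padded sequential keys, optionally shuffling first.
    if random_order:
        values = list(values)
        random.shuffle(values)
    env = lmdb.open(lmdb_path, map_size=int(1e12))
    with env.begin(write=True) as txn:
        for i, value in enumerate(values):
            txn.put('{:08}'.format(i).encode('ascii'), value)
    env.close()
    if is_verbose:
        print('wrote {} records to {}'.format(len(values), lmdb_path))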
Example #17
def write_images_to_lmdb(img_dir, db_name, label):
    for root, dirs, files in os.walk(img_dir, topdown=False):
        if root != img_dir:
            continue
        map_size = 400 * 400 * 3 * 5 * len(files)
        env = lmdb.Environment(db_name, map_size=map_size)
        txn = env.begin(write=True, buffers=True)
        max_key = env.stat()["entries"]
        for idx, name in enumerate(files):
            X = cv2.imread(os.path.join(root, name))
            height, width, ch = X.shape  # OpenCV arrays are HxWxC
            X = cv2.resize(X, (256, 256))
            y = label
            datum = array_to_datum(X, y)
            str_id = '{:08}'.format(max_key + idx)
            txn.put(str_id.encode('ascii'), datum.SerializeToString())
    txn.commit()
    env.close()
    print " ".join(["Writing to", db_name, "done!"])
Example #18
def write_images_to_lmdb(img_dir, db_name):

    for root, dirs, files in os.walk(img_dir, topdown=False):

        if root != img_dir:
            continue

        map_size = 64*64*3*2*len(files)
        env = lmdb.Environment(db_name, map_size=map_size)
        txn = env.begin(write=True, buffers=True)
        
        for idx, name in enumerate(files):
            X = mp.imread(os.path.join(root, name))
            y = 1
            datum = array_to_datum(X,y)
            str_id = '{:08}'.format(idx)
            # The encode is only essential in Python 3
            txn.put(str_id.encode('ascii'), datum.SerializeToString())
        
    txn.commit()
    env.close()
    print " ".join(["Writing to", db_name, "done!"])
Example #19
def create_lmdb_from_array(dest_db, imgs, labels):
    '''
  # multi-channel lmdb
  # imgs : list of img arrays
  # labels : list of label ids
  # imgs[0].shape = (channel, height, width)
  # len(imgs) = item count
  # len(labels) = item count
  '''
    #print ('[start] creating lmdb. ch:{}, w:{}, h:{}, item count:{}'.format(imgs[0].shape[0],imgs[0].shape[1],imgs[0].shape[2],len(imgs)))
    map_size = 100000000 * len(imgs)  # rough buffer size: ~100 MB per item
    env = lmdb.Environment(dest_db, map_size)
    txn = env.begin(write=True, buffers=True)
    count = 0
    for idx, img in enumerate(imgs):
        str_id = '{:08}'.format(idx)
        clsid = labels[idx]
        #print ( '  {0} shape:{1}, class:{2}'.format(str_id, img.shape , clsid))
        datum = array_to_datum(img, clsid)
        txn.put(str_id.encode('ascii'), datum.SerializeToString())
        count += 1
    txn.commit()
    env.close()
    print('[finished] {} items have written to \'{}\''.format(count, dest_db))
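A usage sketch with synthetic CxHxW arrays (shapes and labels are illustrative):

import numpy as np

imgs = [np.zeros((3, 32, 32), dtype=np.uint8),   # two synthetic 3x32x32 images
        np.full((3, 32, 32), 255, dtype=np.uint8)]
labels = [0, 1]
create_lmdb_from_array('toy_lmdb', imgs, labels)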
Example #20
def build_db(files, db_fn, with_images=True, with_labels=False,
             trf_func=np.array, pad=0):
    # Where from
    pvc_im_path = "~/data/VOCdevkit/VOC2012/JPEGImages/"
    pvc_sg_path = "~/data/VOCdevkit/VOC2012/SegmentationClass/"
    sds_sg_path = "~/data/SDS/cls"
    
    d = dict( pvc_im = pvc_im_path,
              pvc_sg = pvc_sg_path,
              sds_sg = sds_sg_path)
    # Don't know how to clear lmdb using the api, so remove the folder instead
    shutil.rmtree(db_fn, ignore_errors=True)

    # Start
    env = lmdb.Environment(db_fn,map_size=10**12)

    for k,fn in d.items():
        d[k] = os.path.expanduser(fn)
    
    files = sorted(list(files))

    for i in range(len(files)):
        file_fn = files[i]
        print(file_fn,i,"/",len(files))

        if with_images:
            im = Image.open(os.path.join(d["pvc_im"], file_fn + ".jpg"))
            #(366,500,3) -> (3,366,500)
            im = trf_func(im)[:, :, ::-1].transpose(2, 0, 1)
            data = im

        if with_labels:
            sg_fn = os.path.join(d["pvc_sg"],file_fn + ".png")
            if  os.path.isfile(sg_fn):
                sg = Image.open(sg_fn)
                #(366,500) -> (1,366,500)
                sg = trf_func(sg)[:,:,np.newaxis].transpose(2,0,1)
            else:
                sg_fn = os.path.join(d["sds_sg"],file_fn + ".mat")
            
                if os.path.isfile(sg_fn):
                    data = loadmat(sg_fn)
                    sg = Image.fromarray(data["GTcls"][0,0][1])
                    sg = trf_func(sg)[:,:,np.newaxis].transpose(2,0,1)

            if with_images:
                print("Check images here.")
                raise NotImplementedError
                data = np.dstack( (np.array(im)[:,:,::-1],np.array(sg))).transpose(2,0,1)
            else:
                data = sg

        key = str(i).zfill(8) + '_' + file_fn.replace(".png",".jpg")
        datum = array_to_datum(data)
        with env.begin(write=True) as txn:
            txn.put(key.encode('ascii'), datum.SerializeToString())
    
    for j in range(pad):
        # start pad keys after the last image key so none are overwritten
        key = str(len(files) + j).zfill(8) + '_' + "pad"

        print(j,"/",pad)

        # Should be able to put in empty images, but I'm afraid
        # that would raise errors, so do this for now.
        if with_images:
            data = np.zeros( (3,384,384),dtype=np.uint8)
        if with_labels:
            data = np.zeros((1, 384, 384), dtype=np.uint8)
            data[...] = ignore_label  # ignore_label is assumed to be a module-level constant

        datum = array_to_datum(data)
        with env.begin(write=True) as txn:
            txn.put(key.encode('ascii'), datum.SerializeToString())
Example #21
import argparse
import os

import lmdb
import numpy as np

import caffe
from caffe.io import array_to_datum
# Dataset (used below) is assumed to come from the surrounding project.

parser = argparse.ArgumentParser()
parser.add_argument('--dir', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
args = parser.parse_args()

if __name__ == '__main__':
    files = os.listdir(args.dir)
    new_db = lmdb.open(args.output, map_size=3 * 10**9)

    for f in files:
        print("processing: " + f)
        data = Dataset([args.dir + '/' + f])
        X_hit, X_info, y = data.get_data(crop=True)
        with new_db.begin(write=True) as txn:
            # txn is a Transaction object
            N = X_hit.shape[0]
            for i in range(N):
                img_i = X_hit[i, :, :, :]
                img_i = img_i.transpose((2, 0, 1))
                el = np.hstack([img_i.flatten(), X_info[i, :].flatten()])
                el = np.reshape(el, (1, 1, -1))
                datum = array_to_datum(el, int(y[i, 1]))
                str_id = '{}_ID{:08}'.format(f, i)

                # The encode is only essential in Python 3
                txn.put(str_id.encode('ascii'), datum.SerializeToString())
    new_db.close()
Example #22
    def export_to_lmdb(self, lmdb_foldername, ages_as_means=True, map_size=-1, splitters=None, apply_normalizers=False):
        """
        Exports the current dataset to LMDB format.
        If the LMDB already exists, it will append to its content.
        :param lmdb_foldername: filename LMDB.
        :param ages_as_means: save the age_range in mean format.
        :param map_size: size of map of the LMDB database. If set to -1, it will attempt to calculate a map_size that
        fits this dataset. Remember however, that it won't allow to expand the LMDB database with new data and you'll
        need to create a new one in case you want to.
        :param splitters: a set of splitters to split the dataset into multiple lmdbs. The lmdb divided by each splitter
        will be stored with the splitter's name prepended to the lmdb name. This is useful if you want to extract a
        chunk of the dataset as a test or validation lmdbs.
        :param apply_normalizers: boolean flag to apply normalizers when the image is put into the dataset manually.
        """
        iteration = 0
        txn_index = 0

        if splitters is None:
            splitters = []

        keys = self.get_keys(shuffle=True)
        count = len(keys)
        # builds a format string like "{:0>5}_dbuild_{}": zero-padded index + image id
        datum_id_format = "{}:0>{}{}_dbuild_{}".format("{", len(str(count)), "}", "{}")

        if map_size == -1:
            #map_size = self.get_dataset_size() + count * 30000
            map_size = int(1e12)  # lmdb expects an integer map_size

        print("Map size is {} MBytes".format(round(map_size/1000/1000, 2)))

        if splitters:
            environments = [lmdb.Environment(lmdb_foldername + "_" + splitter.get_name(), map_size=map_size) for splitter in splitters]
        else:
            environments = [lmdb.Environment(lmdb_foldername, map_size=map_size)]

        txns = [env.begin(write=True, buffers=True) for env in environments]
        txn = env = None

        put_txns = {}

        for txn in txns:
            put_txns[txn] = 0

        for key in keys:
            iteration += 1

            for split_id in range(len(splitters)):

                splitter = splitters[split_id]

                if splitter.decide(iteration):
                    txn = txns[split_id]
                    env = environments[split_id]
                    txn_index = split_id
                    break

            if txn is None or env is None:
                txn = txns[0]
                env = environments[0]
                txn_index = 0

            image = self.get_image(key)
            image.load_from_uri()
            age_range = image.get_metadata()[0]

            if ages_as_means:
                label = age_range.get_mean()
            else:
                label = age_range.get_range()[0]

            image_blob = image.get_blob()
            if apply_normalizers:
                for normalizer in self.normalizers:
                    image_blob = normalizer.apply(image_blob)

            #HxWxC to CxHxW in caffe
            image_blob = np.transpose(image_blob, (2,0,1))

            # Datum is the element map in LMDB. We associate image with label here.
            datum = array_to_datum(image_blob, label)

            # Now we encode the image id in ascii format inside the lmdb container that corresponds to this input.

            txn.put(datum_id_format.format(iteration, image.get_id()).encode("ascii"), datum.SerializeToString())
            put_txns[txn] += 1

            # write batch
            if put_txns[txn] % LMDB_BATCH_SIZE == 0:
                txn.commit()
                old_txn_count = put_txns[txn]
                del put_txns[txn]
                txn = env.begin(write=True, buffers=True)  # match the initial transaction settings
                txns[txn_index] = txn
                put_txns[txn] = old_txn_count
                print("[{}%] Stored batch of {} image in LMDB".format(round(iteration/count * 100, 2),
                                                                      LMDB_BATCH_SIZE))

        # There could be a last batch in each txn that has not been committed yet.
        [txn.commit() or print("[{}%] Stored batch of {} image in LMDB".format(round(iteration/len(keys) * 100, 2),
                               count % LMDB_BATCH_SIZE))
         for txn, count in put_txns.items() if count % LMDB_BATCH_SIZE != 0]

        [env.close() for env in environments]