def pair_gen(datum_1, datum_2):
    """Stack two datums along the channel axis into one paired datum.

    The result's label is 1 when both inputs carry the same label
    (a "same" pair) and 0 otherwise.
    """
    arr_a = io.datum_to_array(datum_1)
    arr_b = io.datum_to_array(datum_2)
    stacked = np.concatenate([arr_a, arr_b], axis=0)
    paired = io.array_to_datum(stacked)
    # Binary similarity label: 1 = same class, 0 = different classes.
    paired.label = 1 if datum_1.label == datum_2.label else 0
    return paired
def read_images_from_lmdb(db_name, visualize):
    """Load every Datum from the LMDB at *db_name* into parallel lists.

    Returns (X, y, idxs): image arrays, integer labels, and running
    indices. When *visualize* is true, shows the first nine images in a
    3x3 matplotlib grid before returning.
    """
    X, y, idxs = [], [], []
    env = lmdb.open(db_name, readonly=True)
    try:
        # Read inside a transaction context; the original never closed
        # the environment (resource leak) and used Py2-only prints.
        with env.begin() as txn:
            cursor = txn.cursor()
            for idx, (key, value) in enumerate(cursor):
                datum = caffe_pb2.Datum()
                datum.ParseFromString(value)
                X.append(np.array(datum_to_array(datum)))
                y.append(datum.label)
                idxs.append(idx)
    finally:
        env.close()

    if visualize:
        print("Visualizing a few images...")
        for i in range(9):
            plt.subplot(3, 3, i + 1)
            plt.imshow(X[i])
            plt.title(y[i])
            plt.axis('off')
        plt.show()

    print(" ".join(["Reading from", db_name, "done!"]))
    return X, y, idxs
def read_lmdb(db_file, n = 1000):
    """Read up to *n* (image, label) records from an LMDB.

    Returns two float32 numpy arrays: the stacked images and the labels.
    """
    assert os.path.isdir(db_file), "dbfile does not exist"

    env = lmdb.open(db_file)
    txn = env.begin()
    cursor = txn.cursor()
    datum = caffe_pb2.Datum()

    # Preallocate for min(total entries, n) records.
    total = min(int(env.stat()['entries']), n)
    images = [None] * total
    labels = [None] * total

    for pos, (key, value) in enumerate(cursor):
        if pos == n:
            break
        datum.ParseFromString(value)
        labels[pos] = datum.label
        images[pos] = datum_to_array(datum)

    images = np.array(images, dtype=np.float32)
    labels = np.array(labels, dtype=np.float32)
    env.close()
    return images, labels
Exemple #4
0
def read_images_from_lmdb(db_name, visualize, seq_len):
    """Load a sequence-image LMDB plus its ground-truth relation labels.

    Each stored datum packs *seq_len* frames along the channel axis
    (3 channels per frame). Returns (X, y, idxs). When *visualize* is
    true, shows each frame of the first nine samples titled with the
    relation labels loaded from "<db_name>_REL.npz".
    """
    X, y, idxs = [], [], []
    env = lmdb.open(db_name, readonly=True)
    try:
        with env.begin() as txn:
            cursor = txn.cursor()
            for idx, (key, value) in enumerate(cursor):
                datum = caffe_pb2.Datum()
                datum.ParseFromString(value)
                X.append(np.array(datum_to_array(datum)))
                y.append(datum.label)
                idxs.append(idx)
    finally:
        env.close()  # original leaked the environment handle

    # Ground-truth relation labels stored alongside the LMDB.
    rels = np.load("{}_REL.npz".format(db_name))["gt_rels"]

    if visualize:
        print("Visualizing a few images...")
        for i in range(9):
            img = X[i]
            for seqidx in range(seq_len):
                # Each frame occupies 3 consecutive channels.
                im = img[seqidx * 3:seqidx * 3 + 3, :, :]
                im = im.transpose((1, 2, 0))  # CHW -> HWC
                # Reverse channel order for display — presumably BGR->RGB;
                # confirm against how the LMDB was written.
                im = im[:, :, ::-1]
                plt.subplot(1, seq_len, seqidx + 1)
                plt.imshow(im)
                plt.title(rels[i, seqidx])
                plt.axis('off')
            plt.show()
    print(" ".join(["Reading from", db_name, "done!"]))
    return X, y, idxs
Exemple #5
0
def resize_lmdb(src_db, dest_db, width, height, length):
    '''
  #resize_lmdb: create lmdb from existing lmdb with resizing
  #  src_db:  source lmdb name
  #  dest_db: new lmdb name
  #  width:   new image width
  #  height:  new image height
  #  length:  limit of item count to process
  '''
    print('[start] resize lmdb [{}]->[{}]'.format(src_db, dest_db))
    src_env = lmdb.open(src_db, readonly=True)
    map_size = 100000000 * length  #buffer size
    dest_env = lmdb.Environment(dest_db, map_size)
    dest_txn = dest_env.begin(write=True, buffers=True)
    with src_env.begin() as src_txn:
        cursor = src_txn.cursor()
        count = 0
        before, after = None, None
        for key, value in cursor:
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            x = datum_to_array(datum)  # (c,h,w)
            y = datum.label

            if datum.channels == CH_GRAY:
                img_array = x.transpose((1, 2, 0))  # (c,h,w)->(h,w,c)
                img_array = img_array.reshape(
                    img_array.shape[0], img_array.shape[1])  # (h,w,1)->(h,w)
                img = Image.fromarray(np.uint8(img_array))
                img = img.resize((width, height))
                img_array = np.asarray(img)
                img_array = img_array.reshape(height, width,
                                              1)  # (h,w)->(h,w,1)
            elif datum.channels == CH_RGB:
                img_array = x.transpose((1, 2, 0))  # (c,h,w)->(h,w,c)
                img = Image.fromarray(np.uint8(img_array))
                img = img.resize((width, height))
                img_array = np.asarray(img)
            else:
                # BUG FIX: the original fell through for any other channel
                # count, re-using the previous item's img_array (or raising
                # NameError on the very first item). Skip such records.
                print('  invalid channel count {} for key {}; skipped'.format(
                    datum.channels, key))
                continue
            img_array = img_array.transpose((2, 0, 1))  # (h,w,c) -> (c,h,w)
            out_datum = array_to_datum(img_array, y)
            # NOTE(review): assumes str keys (Python 2); a Python 3 lmdb
            # cursor yields bytes, where .encode would fail — confirm.
            dest_txn.put(key.encode('ascii'), out_datum.SerializeToString())
            if count == 0:
                # Remember first item's before/after shapes for the summary.
                before = x.shape
                after = img_array.shape
            count += 1
            if count >= length:
                break
        print('[finished] total count {}. shape {} -> {}'.format(
            count, before, after))
    dest_txn.commit()
    dest_env.close()
    src_env.close()
Exemple #6
0
def convert_db_to_arr(db_path):
    """Read every Datum in a LevelDB into numpy arrays.

    Returns (data, labels) with the data axes rolled from NCHW to NHWC.
    """
    database = leveldb.LevelDB(db_path)
    images, labels = [], []

    for _, raw in database.RangeIter():
        datum = caffe_pb2.Datum()
        datum.ParseFromString(raw)
        images.append(datum_to_array(datum))
        labels.append(datum.label)

    images = np.asarray(images)
    labels = np.asarray(labels)
    # Move the channel axis to the end: (N,C,H,W) -> (N,H,W,C).
    return np.rollaxis(images, 1, 4), labels
Exemple #7
0
def extract_features(img_label, feature_type):
    """Extract features from the POS or NEG image set and stream to csv.

    Reads every image from the matching LMDB, computes its feature vector
    with compute_feature, and appends to the csv in batches of 256 (header
    written only with the first batch).

    Raises:
        ValueError: if img_label is neither POS nor NEG.
    """
    if img_label == POS:
        lmdb_file = config.pos_images_lmdb
        feature_file = config.pos_features_csv
    elif img_label == NEG:
        lmdb_file = config.neg_images_lmdb
        feature_file = config.neg_features_csv
    else:
        # BUG FIX: previously fell through with lmdb_file unbound (NameError).
        raise ValueError('img_label must be POS or NEG, got {!r}'.format(img_label))

    feature_lst = []
    lmdb_env = lmdb.open(lmdb_file)
    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    datum = caffe_pb2.Datum()

    is_first = True
    batch_size = 256
    item_id = 0
    for key, value in lmdb_cursor:
        datum.ParseFromString(value)

        img = datum_to_array(datum)
        img = rgb2gray(img)
        feature_lst.append(compute_feature(img, feature_type))

        # Flush a full batch to csv; header only on the first write.
        if (item_id + 1) % batch_size == 0:
            data_frame = pd.DataFrame(np.array(feature_lst))
            feature_lst = []
            if is_first:
                data_frame.to_csv(feature_file)
                is_first = False
            else:
                data_frame.to_csv(feature_file, mode='a', header=False)
            print('saved {} image feature'.format(item_id + 1))

        item_id += 1

    lmdb_env.close()  # original leaked the environment handle

    # Flush the trailing partial batch, if any.
    # BUG FIX: the original tested (item_id + 1) % batch_size after the loop,
    # which wrote a spurious empty batch whenever the total was an exact
    # multiple of batch_size, and reported a count one too high.
    if item_id % batch_size != 0:
        data_frame = pd.DataFrame(np.array(feature_lst))
        if is_first:
            data_frame.to_csv(feature_file)
        else:
            data_frame.to_csv(feature_file, mode='a', header=False)

        print('saved {} image feature'.format(item_id))
        print('saved {}'.format(feature_file))
Exemple #8
0
def read_images_from_lmdb(db_name, visualize, seq_len):
    """Load an LMDB of channel-stacked frame sequences.

    Each stored datum packs seq_len frames along the channel axis
    (3 channels per frame); frames are split out, converted to HWC and
    channel-reversed. *visualize* is accepted for interface compatibility
    but unused. Returns a single numpy array of all samples.
    """
    samples = []
    env = lmdb.open(db_name, readonly=True)
    with env.begin() as txn:
        for idx, (key, value) in enumerate(txn.cursor()):
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            stacked = np.array(datum_to_array(datum))

            frames = []
            for f in range(seq_len):
                frame = stacked[3 * f:3 * f + 3, :, :]
                frame = frame.transpose((1, 2, 0))  # CHW -> HWC
                frames.append(frame[:, :, ::-1])    # reverse channel order
            samples.append(np.array(frames))
    return np.array(samples)
Exemple #9
0
def lmdb_to_images(db, dest, length):
    '''
  #lmdb_to_img: output image-files from lmdb. file path : <dest>/<class_id>/<key>.jpg 
  #  lmdb: target lmdb path
  #  dest: directory path
  #  length: limit of item count to output
  '''
    print('[start] creating images from lmdb[{}]'.format(db))
    if not os.path.isdir(dest):
        os.mkdir(dest)
        print('  directory \'{}\' created.'.format(dest))

    env = lmdb.open(db, readonly=True)
    with env.begin() as txn:
        written = 0
        for key, value in txn.cursor():
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            pixels = datum_to_array(datum)
            # One sub-directory per class label.
            label_dir = os.path.join(dest, str(datum.label))
            if not os.path.isdir(label_dir):
                os.mkdir(label_dir)

            if datum.channels == CH_GRAY:
                image = Image.fromarray(np.uint8(pixels[0]))  # (h,w)
            elif datum.channels == CH_RGB:
                # (ch,h,w) -> (h,w,ch)
                image = Image.fromarray(np.uint8(pixels.transpose((1, 2, 0))))
            else:  # unexpected channel count: stop
                print('  invalid channel')
                break
            image.save(os.path.join(label_dir, str(key) + '.jpg'))

            written += 1
            if written >= length:
                break
        print('[finished] total {} images are written in \'{}\''.format(
            written, dest))
    env.close()
def load_data_lmdb(cur,batch_size):
    """Pull batch_size flattened images and labels from an lmdb cursor.

    Wraps the cursor back to the first record when it runs out, so a
    full batch is always returned.
    """
    if cur.key == '':
        cur.first()
    import caffe.io as io
    import caffe.proto.caffe_pb2 as pb

    flat_images = []
    labels = []
    datum = pb.Datum()
    for _ in range(batch_size):
        datum.ParseFromString(cur.value())
        arr = io.datum_to_array(datum)
        # Flatten (c,h,w) into a single vector per sample.
        n_elems = np.prod((datum.channels, datum.height, datum.width))
        flat_images.append(arr.reshape(n_elems,))
        labels.append(datum.label)
        if not cur.next():
            cur.first()  # wrap around to the start
    flat_images = np.asarray(flat_images, dtype=theano.config.floatX)
    labels = np.asarray(labels, dtype='int32')
    return (flat_images, labels)
Exemple #11
0
    def loop_records(self, num_records=0, init_key=None):
        """Yield (data, label, key) tuples from the lmdb at self.fn.

        num_records=0 iterates to the end; otherwise iteration stops
        after that many records. When init_key is given, the cursor is
        positioned there first; a missing key raises ValueError.
        """
        env = lmdb.open(self.fn, readonly=True)
        datum = Datum()
        with env.begin() as txn:
            cursor = txn.cursor()
            if init_key is not None and not cursor.set_key(init_key):
                raise ValueError('key ' + init_key +
                                 ' not found in lmdb ' + self.fn + '.')

            emitted = 0
            for key, value in cursor:
                datum.ParseFromString(value)
                yield (datum_to_array(datum).squeeze(), datum.label, key)
                emitted += 1
                if num_records != 0 and emitted == num_records:
                    break
        env.close()
Exemple #12
0
def read_lmdb(db):
    '''
  #read_lmdb: 
  #  db: target lmdb path
  '''
    print('[start] read lmdb [{}]'.format(db))
    env = lmdb.open(db, readonly=True)
    with env.begin() as txn:
        total = 0
        sample_shape = None
        for key, value in txn.cursor():
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            arr = datum_to_array(datum)
            if total == 0:
                sample_shape = arr.shape  # first item's shape as a sample
            total += 1
        print('[finished] total count:{} , shape:{}'.format(total, sample_shape))
    env.close()
def compute_means(path):
    """Print per-channel means (and pair-grouped means) over an lmdb.

    Averages each record's per-channel pixel mean across all records in
    the database at *path*.
    """
    # BUG FIX: the original opened sys.argv[1], ignoring the path argument.
    dbenv = lmdb.open(path)
    channel_means_sum = None
    row_count = 0
    with dbenv.begin() as txn:
        for _, val in txn.cursor():
            datum = Datum()
            datum.ParseFromString(val)
            data = datum_to_array(datum)
            per_channel = data.reshape(datum.channels, -1).mean(axis=1)
            if channel_means_sum is None:
                channel_means_sum = per_channel
            else:
                channel_means_sum += per_channel
            row_count += 1
    dbenv.close()  # original left the environment open

    if row_count == 0:
        print("no rows found")
    else:
        channel_means = channel_means_sum / row_count
        print("channel means are : {}".format(channel_means))
        # NOTE(review): hard-coded reshape(2, -1) assumes the channel count
        # is a multiple of 2 (paired images?) — confirm against the data.
        image_means = channel_means.reshape(2, -1).mean(axis=1)
        print("image means are : {}".format(image_means))
Exemple #14
0
def read_images_from_lmdb(db_name, visualize):
    """Load all images (axes swapped via swapaxes(0, 2)) and labels.

    Returns (X, y, idxs). When *visualize* is true, displays the first
    nine images gamma-compressed for visibility.
    """
    X, y, idxs = [], [], []
    env = lmdb.open(db_name)
    try:
        txn = env.begin()
        cursor = txn.cursor()
        for idx, (key, value) in enumerate(cursor):
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            X.append(np.array(datum_to_array(datum)).swapaxes(0, 2))
            y.append(datum.label)
            idxs.append(idx)
    finally:
        env.close()  # original leaked the environment handle
    if visualize:
        print("Visualizing a few images...")
        for i in range(9):
            # BUG FIX: 1 / 8 is integer division (== 0) under Python 2,
            # collapsing every pixel to 1.0; 1.0 / 8 keeps the intended
            # gamma compression on both interpreters.
            img = X[i]**(1.0 / 8)
            plt.subplot(3, 3, i + 1)
            plt.imshow(img)
            plt.title(y[i])
            plt.axis('off')
        plt.show()
    print(" ".join(["Reading from", db_name, "done!"]))
    return X, y, idxs
Exemple #15
0
import lmdb
from caffe.io import caffe_pb2, datum_to_array
import cv2 as cv

# Walk every record in the MNIST training LMDB and display each digit
# with OpenCV, one keypress per image.
env = lmdb.open("mnist_train_lmdb")
txn = env.begin()
cur = txn.cursor()
for key, value in cur:
    print(type(key), key)
    datum = caffe_pb2.Datum()
    datum.ParseFromString(value)

    label = datum.label
    data = datum_to_array(datum)
    # BUG FIX: the original mixed Python 2 print statements with Python 3
    # print() calls and would not even parse under Python 3; all output
    # is normalized to print() calls.
    print(data.shape)
    print(datum.channels)
    image = data[0]  # first channel as a 2-D grayscale image

    print(type(label))
    cv.imshow(str(label), image)
    cv.waitKey(0)  # wait for a keypress before showing the next digit

cv.destroyAllWindows()
env.close()
Exemple #16
0
# Dump every fc7 feature vector from the validation-features LMDB to a
# text file, one sample per line as "index: value " pairs.
lmdb_path = root_Path + 'examples/_temp/val_features/'

lmdb_env = lmdb.open(lmdb_path)
numSamples = int(lmdb_env.stat()['entries'])
print('numSamples: ', numSamples)
lmdb_txn = lmdb_env.begin()
lmdb_cursor = lmdb_txn.cursor()
datum = caffe.proto.caffe_pb2.Datum()

txt = root_Path + 'examples/_temp/test_features_fc7_fromLmdb.txt'
count = 1
# BUG FIX: the output file was opened but never closed (fr.close() was
# commented out); the with-block guarantees it is flushed and closed.
with open(txt, 'w') as fr:
    for key, value in lmdb_cursor:
        datum.ParseFromString(value)
        label = datum.label
        print('count, label', (count, label))
        count += 1
        data = datum_to_array(datum)
        print(len(data))

        for i in range(0, len(data)):
            info = str(i + 1) + ': ' + str(data[i]) + ' '
            fr.write(info)
        fr.write('\n')

print('done!')


Exemple #17
0
# Fill batches of randomly-cropped, mean-subtracted images from an lmdb
# cursor until the database is exhausted.
# NOTE(review): this fragment relies on names defined earlier in the full
# script (outofentries, cursor, datum, ibatch, nbatches, input_shape,
# images_per_batch, data, mean_arr, input_labels) — partial view only.
while not outofentries:
    print "batch ",ibatch," of ",nbatches
    keys = []
    nfilled = 0

    # we do multiple crops for each image
    ngroups_this_batch = 0
    for group in range( input_shape[0]/images_per_batch ):
        cursor.next()
        (key,raw_datum) = cursor.item()
        # An empty key signals the cursor ran past the last entry.
        if key=='':
            outofentries = True
            break
        ngroups_this_batch += 1
        datum.ParseFromString(raw_datum)
        vec = datum_to_array(datum)
        keys.append(key)

        for n in range(0,images_per_batch):
            if nfilled>=input_shape[0]:
                break
            # Random crop origin within the image; input_shape[2:4] is the
            # crop (height, width) — presumably vec is (C,H,W), confirm.
            xoffset = int(np.random.rand()*(vec.shape[1]-input_shape[2]-1))
            yoffset = int(np.random.rand()*(vec.shape[2]-input_shape[3]-1))
            x1 = xoffset
            x2 = x1 + input_shape[2]
            y1 = yoffset
            y2 = y1 + input_shape[3]
            # Crop and subtract the per-pixel mean over the same window.
            data[nfilled,:,:,:] = vec[:,x1:x2,y1:y2]-mean_arr[0,:,x1:x2,y1:y2]
            input_labels[nfilled] = datum.label
            nfilled += 1
Exemple #18
0
while not outofentries:
    print "batch ",ibatch," of ",nbatches
    keys = []
    nfilled = 0

    # we do multiple crops for each image
    ngroups_this_batch = 0
    for group in range( input_shape[0]/images_per_batch ):
        cursor.next()
        (key,raw_datum) = cursor.item()
        if key=='':
            outofentries = True
            break
        ngroups_this_batch += 1
        datum.ParseFromString(raw_datum)
        vec = datum_to_array(datum)
        keys.append(key)

        for n in range(0,images_per_batch):
            if nfilled>=input_shape[0]:
                break
            if images_per_batch>1:
                xoffset = int(np.random.rand()*(vec.shape[1]-input_shape[2]-1))
                yoffset = int(np.random.rand()*(vec.shape[2]-input_shape[3]-1))
            else:
                # if only 1 image, center crop
                xoffset = int(0.5*(vec.shape[1]-input_shape[2]-1))
                yoffset = int(0.5*(vec.shape[2]-input_shape[3]-1))
            x1 = xoffset
            x2 = x1 + input_shape[2]
            y1 = yoffset