Example #1
import os

import lmdb
import numpy as np
from caffe.io import array_to_datum

import get_caffe_data  # project-local helper for loading HDF5 feature files


def write_to_lmdb(image_list, db_path, h5_root):
    # Reserve up to ~150 GB for the memory-mapped database.
    map_size = 150000000000
    env = lmdb.Environment(db_path, map_size=map_size)
    with env.begin(write=True, buffers=True) as txn:
        for idx, image in enumerate(image_list):
            # Load the per-sample HDF5 array and add a leading channel axis.
            X = get_caffe_data.load_h5_file(os.path.join(h5_root, image[0]))
            X = X[np.newaxis, :, :]
            y = int(image[1])
            # Serialize (data, label) as a Caffe Datum keyed by a zero-padded index.
            datum = array_to_datum(X, y)
            str_id = '{:08}'.format(idx)
            txn.put(str_id.encode('ascii'), datum.SerializeToString())

    env.close()
    print(" ".join(["Writing to", db_path, "done!"]))
Example #2
import os
from collections import Counter

import numpy as np

import get_caffe_data  # project-local helper for loading HDF5 feature files

# `net` (a caffe.Net), `label_root` and `h5_root` are assumed to be defined earlier.

# define preprocessing
split = 1
_, test_list = get_caffe_data.get_train_test_lists(label_root, split)

cur_start = 0
cur_finish = 10
increment = 10
frames = []

# Batches of 10 samples, each a single-channel 150x4096 feature matrix.
net.blobs['data'].reshape(10, 1, 150, 4096)

while cur_finish < len(test_list):
    cur_list = test_list[cur_start:cur_finish]
    test_data = [get_caffe_data.load_h5_file(os.path.join(h5_root, val[0])) for val in cur_list]
    test_data = np.array(test_data, dtype=np.float32)
    test_data = np.ascontiguousarray(test_data[:, np.newaxis, :, :])
    net.blobs['data'].data[:, :, :, :] = test_data
    net.forward()
    # For each sample, take the argmax over the frame axis of conv1 and keep
    # the most frequently selected frame index.
    argmax_vals = net.blobs['conv1'].data.argmax(2)
    for val in range(10):
        cur_counter = Counter(argmax_vals[val, :, 0])
        best_frame = cur_counter.most_common(1)[0][0]
        frames.append(best_frame)

    cur_start += increment
    cur_finish += increment

# Samples that do not fill a complete batch of 10 are left over here.
final_list = test_list[cur_start:]
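The loop above only covers full batches of 10, so the samples in final_list are never pushed through the network. A sketch of one way to handle that remainder under the same setup, by padding to a full batch and keeping only the real outputs; the padding logic and names introduced here are illustrative, not from the original:

# Hypothetical remainder handling: pad the leftover samples to a full batch,
# run one more forward pass, and keep results only for the real samples.
if final_list:
    tail_data = [get_caffe_data.load_h5_file(os.path.join(h5_root, val[0])) for val in final_list]
    tail_data = np.array(tail_data, dtype=np.float32)[:, np.newaxis, :, :]
    n_real = tail_data.shape[0]
    padded = np.zeros((10, 1, 150, 4096), dtype=np.float32)
    padded[:n_real] = tail_data
    net.blobs['data'].data[:, :, :, :] = np.ascontiguousarray(padded)
    net.forward()
    argmax_vals = net.blobs['conv1'].data.argmax(2)
    for val in range(n_real):
        frames.append(Counter(argmax_vals[val, :, 0]).most_common(1)[0][0])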