Example #1
 def ocr(self, img, psm=7, digits=False):
     # Configure the page segmentation mode and the optional digits-only mode.
     tesseract_raw.set_page_seg_mode(self.handle, psm)
     tesseract_raw.set_is_numeric(self.handle, digits)
     # Convert the numpy array into a PIL image (assumes `from PIL import Image`).
     img = Image.fromarray(img.astype('uint8'))
     tesseract_raw.set_image(self.handle, img)
     # Run recognition, then release the resources held by the handle.
     text = tesseract_raw.get_utf8_text(self.handle)
     tesseract_raw.cleanup(self.handle)
     return text
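
A minimal standalone sketch of the array-to-PIL conversion this method depends on; the random array is only a stand-in for a real grayscale crop.

import numpy as np
from PIL import Image

# Stand-in 8-bit grayscale crop; a real caller would pass e.g. a cv2 image region.
arr = (np.random.rand(40, 200) * 255).astype('uint8')

# set_image() above expects a PIL image, not a raw ndarray.
pil_img = Image.fromarray(arr)
print(pil_img.size, pil_img.mode)  # (200, 40) 'L'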
Example #2
    # Head annotations (x, y) from the ShanghaiTech .mat ground truth
    gt = mat["image_info"][0, 0][0, 0][0]

    # Read image
    img = plt.imread(img_path)

    # Create a zero matrix of image size
    k = np.zeros((img.shape[0], img.shape[1]))

    # Mark each annotated head location with a 1 (sparse point map),
    # skipping annotations that fall outside the image bounds
    for i in range(0, len(gt)):
        if int(gt[i][1]) < img.shape[0] and int(gt[i][0]) < img.shape[1]:
            k[int(gt[i][1]), int(gt[i][0])] = 1

    # generate density map
    k = gaussian_filter_density(k)
    image = Image.fromarray((255 * k).astype('uint8'))
    # image.show()

    # File path to save density map
    file_path = img_path.replace('.jpg', '.h5').replace('images', 'ground')

    with h5py.File(file_path, 'w') as hf:
        hf['density'] = k

# %%
path_test = "G:/pycharm/CSRnet-master/data/part_A_final/train_data/ground/IMG_107.h5"

# file_path = img_paths[-5].replace('.jpg','.h5').replace('images','ground')

file_path = path_test
print(file_path)
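
A quick sanity check on the file written above (assuming the .h5 at path_test exists): the integral of the density map should roughly match the number of annotated heads.

import h5py
import numpy as np

with h5py.File(file_path, 'r') as hf:
    density = np.asarray(hf['density'])

print(density.shape)  # same (H, W) as the source image
print(density.sum())  # approximately the annotated head count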
Example #3
import cyni
import numpy as np
from PIL import Image

cyni.initialize()
# Open the first available OpenNI device and start a 640x480 depth stream
device = cyni.getAnyDevice()
device.open()
depthStream = device.createStream("depth", fps=30, width=640, height=480)
#colorStream = device.createStream("color", fps=30, width=1280, height=960)
#colorStream = device.createStream("color", fps=30, width=640, height=480)
#device.setImageRegistrationMode("depth_to_color")
device.setDepthColorSyncEnabled(on=True)
depthStream.start()
# colorStream.start()
# colorFrame = colorStream.readFrame()
# colorFrame = colorStream.readFrame()
# colorFrame = colorStream.readFrame()
# colorFrame = colorStream.readFrame()
depthFrame = depthStream.readFrame()
# registered = cyni.registerColorImage(depthFrame.data, colorFrame.data, depthStream, colorStream)
# Image.fromarray(colorFrame.data).save("color.png")
# Image.fromarray(registered).save("registered.png")
Image.fromarray(cyni.depthMapToImage(depthFrame.data)).save("depth.png")
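
For comparison, a hand-rolled depth-to-image conversion using only numpy and PIL; the 4000 mm clipping range is an arbitrary assumption, and this is not necessarily how cyni.depthMapToImage scales values.

import numpy as np
from PIL import Image

def depth_to_uint8(depth, max_mm=4000):
    # Clip to the chosen range and rescale into 0..255 for viewing.
    d = np.clip(depth.astype(np.float32), 0, max_mm)
    return (d / max_mm * 255).astype(np.uint8)

Image.fromarray(depth_to_uint8(depthFrame.data)).save("depth_manual.png")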
Example #4
    plt.figure()
    plt.scatter(lidarxx, lidaryy, color=colors, s=0.5)
    plt.scatter(edgexx, edgeyy, color='k', s=0.5)
    plt.xlim([0, w])
    plt.ylim([h, 0])

    # Look up the 3D camera-frame coordinates of the detected edge pixels
    edgeptsind = lidardepthmapIndrec[edges]
    edgepts3d = lidar_camcoord[edgeptsind, :]
    # Project into the image plane with the intrinsics, then normalise by depth
    edgepts3dprojected = (intrinsic @ edgepts3d.T).T
    edgepts3dprojected[:, 0] = edgepts3dprojected[:, 0] / edgepts3dprojected[:, 2]
    edgepts3dprojected[:, 1] = edgepts3dprojected[:, 1] / edgepts3dprojected[:, 2]
    edgepts3dprojected = edgepts3dprojected[:, 0:3]
    plt.figure()
    plt.imshow(pil.fromarray(rgbarr))
    distances = edgepts3dprojected[:, 2]
    colors = cm.jet(1 / distances * 10)
    plt.gca().scatter(edgepts3dprojected[:, 0],
                      edgepts3dprojected[:, 1],
                      color=colors,
                      s=0.5)
    plt.xlim([0, w])
    plt.ylim([h, 0])

    from utils import *
    edges = (bsmvrec > 0) * (bsmvrec < 0.1)

    testsets = [[1610, 558, 1609, 554], [1610, 558, 1606, 558],
                [407, 618, 408, 613], [332, 587, 331, 583],
                [119, 743, 116, 734], [879, 651, 880, 645],