Example #1
import signal
import sys

from PyQt5 import QtWidgets  # assuming PyQt5; PySide2 exposes the same names


def main():
    # validate the runtime environment (project helper defined elsewhere)
    checkEvn()
    # restore default Ctrl+C handling so the GUI can be interrupted
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    app = QtWidgets.QApplication(sys.argv)

    window = Window()  # Window is the project's main window class
    # route application-wide events through the window's event filter
    app.installEventFilter(window)
    window.show()

    sys.exit(app.exec_())
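If this module is run directly, a standard entry-point guard (not shown in the original snippet) would start the application:

if __name__ == '__main__':
    main()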
Example #2
import sys

from PyQt5.QtWidgets import QApplication  # assuming PyQt5; adjust for the binding in use


class Application:
    def __init__(self):
        self.app = QApplication(sys.argv)
        self.window = Window()  # Window is the project's main window class

    def show_window(self):
        """Show the application window and enter the Qt event loop."""
        self.window.show()
        sys.exit(self.app.exec_())

    def run(self):
        """Start the application."""
        self.show_window()
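A brief usage sketch for this wrapper class, assuming the module is run as a script:

if __name__ == '__main__':
    Application().run()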
Example #3
from rosbag import Bag

# Window, get_camera_image, and get_depth_image are project helpers
# assumed to be defined elsewhere in this module.
def play_depth(bag_file: Bag, camera: str, depth: str) -> None:
    """
    Play the ZED data in a bag file.

    Args:
        bag_file: the bag file to play
        camera: the topic to use to read compressed or raw camera data
        depth: the topic to use to read 32-bit floating point depth measures

    Returns:
        None

    """
    # open windows to stream the camera and depth image data to
    camera_window = None
    depth_window = None
    # iterate over the messages
    for topic, msg, _ in bag_file.read_messages(topics=[camera, depth]):
        # if topic is depth, unwrap and send to the depth window
        if topic == depth:
            # if the depth window is not setup yet, open it
            if depth_window is None:
                # create a title for the window
                title = '{} ({})'.format(bag_file.filename, depth)
                # initialize the window
                depth_window = Window(title, msg.height, msg.width)
            # get a depth image from the data and dimensions
            img = get_depth_image(msg.data, depth_window.shape)
            # show the image on the depth window
            depth_window.show(img)
        # if topic is camera, unwrap and send to the camera window
        elif topic == camera:
            # if the camera window is not setup yet, open it
            if camera_window is None:
                # create a title for the window
                title = '{} ({})'.format(bag_file.filename, camera)
                # initialize the window
                camera_window = Window(title, msg.height, msg.width)
            # get an image from the data and dimensions
            img = get_camera_image(msg.data, camera_window.shape)
            # show the image on the camera window
            camera_window.show(img)
    # shut down any viewer windows that were opened
    if camera_window is not None:
        camera_window.close()
    if depth_window is not None:
        depth_window.close()
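A minimal usage sketch; the bag file name and topic names below are hypothetical and depend on the recording:

from rosbag import Bag

with Bag('zed_recording.bag') as bag_file:  # hypothetical file name
    play_depth(bag_file, '/zed/image_raw', '/zed/depth')  # hypothetical topics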
Example #4
import numpy as np
from rosbag import Bag

# Window, get_camera_dimensions, get_camera_image, get_depth_image, and
# segment are project helpers assumed to be defined elsewhere in this module.
def play_superpixel(bag_file: Bag, camera_info: str, camera: str, depth: str,
                    segmentation: str, downscale: int) -> None:
    """
    Play the camera data in a bag file through a superpixel algorithm.

    Args:
        bag_file: the bag file to play
        camera_info: the topic to use to read metadata about the camera
        camera: the topic to use to read compressed or raw camera data
        depth: the topic to use to read 32-bit floating point depth measures
        segmentation: the algorithm to use for segmentation
        downscale: the factor to downscale the image by before segmentation

    Returns:
        None

    """
    # extract the camera dimensions from the bag
    dims = get_camera_dimensions(bag_file, camera_info)
    # open a window to stream the data to
    window = Window('{} ({})'.format(bag_file.filename, camera), *dims)
    # no camera frame has been received yet
    camera_img = None
    # iterate over the messages
    for topic, msg, _ in bag_file.read_messages(topics=[camera, depth]):
        # if topic is camera, unwrap the camera data
        if topic == camera:
            camera_img = get_camera_image(msg.data, dims)
        # if topic is depth, unwrap and calculate the segmentation
        elif topic == depth:
            # skip depth frames that arrive before the first camera frame
            if camera_img is None:
                continue
            depth_img = get_depth_image(msg.data, dims)
            # combine the image with the depth channel (Red only)
            img = np.concatenate([camera_img, depth_img[..., 0:1]], axis=-1)
            # segment the image and get a copy of the segmented pixels
            img = segment(img, method=segmentation, downscale=downscale)
            # send the segmented image to the window
            window.show(img)
    # shut down the viewer window
    window.close()
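A usage sketch under the same assumptions; the topic names and the 'slic' method are hypothetical placeholders for whatever segment() actually accepts:

from rosbag import Bag

with Bag('zed_recording.bag') as bag_file:  # hypothetical file name
    play_superpixel(
        bag_file,
        camera_info='/zed/camera_info',  # hypothetical topics
        camera='/zed/image_raw',
        depth='/zed/depth',
        segmentation='slic',             # hypothetical method name
        downscale=2,
    )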
Example #5
import subprocess
import sys
import os

from PyQt4.Qt import QApplication

from django.conf import settings
import src.db.settings as my_settings
settings.configure(
    DATABASE_ENGINE=my_settings.DATABASE_ENGINE,
    DATABASE_NAME=my_settings.DATABASE_NAME,
    INSTALLED_APPS=my_settings.INSTALLED_APPS
)

from src.window import Window

if __name__ == '__main__':
    dirname = os.path.dirname(__file__)
    sys.path.insert(0, os.path.join(dirname, 'src', 'db'))
    manage_py = os.path.join(dirname, 'src', 'db', 'manage.py')
    # subprocess.call returns the exit code; check_call would raise instead
    if subprocess.call(['python', manage_py, 'syncdb']) != 0:
        print("Could not create the database.")
        sys.exit(-1)

    app = QApplication([])
    window = Window()
    window.show()
    sys.exit(app.exec_())

Example #6
import os

import numpy as np
import rospy
from PIL import Image
from rosbag import Bag
from skimage.transform import resize
from std_msgs.msg import String
from tqdm import tqdm

# read_rgb_map, get_camera_image, image_msg, and Window are project helpers
# assumed to be defined elsewhere in this module.
def semantic_segment(
    metadata: str,
    input_bag: Bag,
    model: 'keras.models.Model',
    predict: str,
    output_bag: Bag = None,
    output_dir: str = None,
    base: str = None,
    num_samples: int = 200,
    encoding: str = 'rgb',
) -> None:
    """
    Predict semantic segmentations for a stream of images from an input ROSbag.

    Args:
        metadata: the metadata about the semantic segmentations from the model
        input_bag: the input bag to predict targets from a topic
        model: the semantic segmentation model to use to make predictions
        predict: the topic to get a priori estimates from
        output_bag: the output bag to write the a priori estimates to
        output_dir: the output directory to write image pairs to
        base: the base-name for the prediction image topic
        num_samples: the number of image pairs to sample for output directory
        encoding: the encoding for the images to write

    Returns:
        None

    """
    # create the base endpoint for the topics
    base = '' if base is None else str(base)

    # setup the output directories
    if output_dir is not None:
        x_dir = os.path.join(output_dir, 'X', 'data')
        if not os.path.isdir(x_dir):
            os.makedirs(x_dir)
        y_dir = os.path.join(output_dir, 'y', 'data')
        if not os.path.isdir(y_dir):
            os.makedirs(y_dir)

    # read the RGB map and vectorized method from the metadata file
    rgb_map, unmap_rgb = read_rgb_map(metadata)

    # write the color map metadata to the output bag
    if output_bag is not None:
        ros_stamp = rospy.rostime.Time(input_bag.get_start_time())
        msg = String(repr(rgb_map))
        output_bag.write('{}/rgb_map'.format(base), msg, ros_stamp)

    # open a Window to play the video
    x_window = Window('img', model.input_shape[1], model.input_shape[2])
    y_window = Window('sem-seg', model.output_shape[1], model.output_shape[2])
    # create a progress bar for iterating over the messages in the bag
    total_messages = input_bag.get_message_count(topic_filters=predict)
    with tqdm(total=total_messages, unit='message') as prog:
        # iterate over the messages in this input bag
        for _, msg, time in input_bag.read_messages(topics=predict):
            # update the progress bar with a single iteration
            prog.update(1)
            # randomly subsample so roughly num_samples frames are processed
            if np.random.random() > num_samples / total_messages:
                continue
            # create a tensor from the raw pixel data
            pixels = get_camera_image(msg.data,
                                      (msg.height, msg.width))[..., :3]
            # flip the BGR image to RGB
            if encoding == 'bgr':
                pixels = pixels[..., ::-1]
            # resize the pixels to the shape of the model
            _pixels = resize(
                pixels,
                model.input_shape[1:],
                anti_aliasing=False,
                mode='symmetric',
                clip=False,
                preserve_range=True,
            ).astype('uint8')
            # pass the frame through the model
            y_pred = model.predict(_pixels[None, ...])[0]
            y_pred = np.stack(unmap_rgb(y_pred.argmax(axis=-1)), axis=-1)
            y_pred = y_pred.astype('uint8')
            # show the pixels on the windows
            x_window.show(_pixels)
            y_window.show(y_pred)
            # create an Image message and write it to the output ROSbag
            if output_bag is not None:
                msg = image_msg(y_pred, msg.header.stamp, y_pred.shape[:2],
                                'rgb8')
                output_bag.write('{}/image_raw'.format(base), msg,
                                 msg.header.stamp)
            # sample a number and write the image pair to disk
            if output_dir is not None:
                x_file = os.path.join(x_dir, '{}.png'.format(time))
                Image.fromarray(pixels).save(x_file)
                y_file = os.path.join(y_dir, '{}.png'.format(time))
                y_pred = resize(
                    y_pred,
                    pixels.shape[:2],
                    anti_aliasing=False,
                    mode='symmetric',
                    clip=False,
                    preserve_range=True,
                ).astype('uint8')
                Image.fromarray(y_pred).save(y_file)
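A usage sketch, assuming a trained Keras model; every file and topic name below is a hypothetical placeholder:

from keras.models import load_model
from rosbag import Bag

model = load_model('semantic_model.h5')  # hypothetical model file
with Bag('drive.bag') as input_bag, Bag('predictions.bag', 'w') as output_bag:
    semantic_segment(
        'rgb_map.csv',                   # hypothetical metadata file
        input_bag,
        model,
        predict='/camera/image_raw',     # hypothetical topic
        output_bag=output_bag,
        base='/predictions',
    )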