Example #1
    def pipelines():
        from os import listdir
        from os.path import isfile, join, basename, splitext
        onlyfiles = [
            join(PIPELINE_FOLDER, f) for f in listdir(PIPELINE_FOLDER)
            if isfile(join(PIPELINE_FOLDER, f))
        ]
        pipelines = []
        for f in onlyfiles:
            if not f.endswith(".ini"):
                continue
            conf = Configuration(f)

            pipeline_metadata = {
                "basename": splitext(basename(f))[0],
                "name": conf.get("name"),
                "description": conf.get("description"),
                "author": conf.get("author")
            }

            svg_file = f.replace(".ini", ".svg")
            if os.path.isfile(svg_file):
                with open(svg_file, "rb") as image_file:
                    encoded_string = base64.b64encode(
                        image_file.read()).decode('utf-8')
                    pipeline_metadata[
                        "icon"] = "data:image/svg+xml;base64," + encoded_string

            pipelines.append(pipeline_metadata)

        response = make_response(json.dumps(pipelines))
        response.headers['Content-Type'] = 'application/json'
        return response
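
The handler returns a JSON array in which the optional "icon" field is a base64-encoded SVG data URI. A minimal consumer sketch (the /pipelines URL and port are assumptions; the excerpt does not show the route registration):

    import base64
    import json
    from urllib.request import urlopen

    # Hypothetical endpoint; adjust to wherever the handler above is routed.
    with urlopen("http://localhost:8080/pipelines") as resp:
        pipeline_list = json.loads(resp.read())

    for meta in pipeline_list:
        icon = meta.get("icon")
        if icon:
            # strip the "data:image/svg+xml;base64," prefix before decoding
            svg_bytes = base64.b64decode(icon.split(",", 1)[1])
            print(meta["name"], len(svg_bytes), "bytes of SVG")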
Example #2
    def init(self):
        '''
        @description:
            initializes:
                - configurations
                - db connections
        '''
        Configuration.initialize()
        values = Configuration.values()

        self.dbconn = SQLUtils(
            host=values['mysql-db']['sms_api_config']['host'],
            port=values['mysql-db']['sms_api_config']['port'],
            database=values['mysql-db']['sms_api_config']['db'],
            user=values['mysql-db']['sms_api_config']['user'],
            password=values['mysql-db']['sms_api_config']['password'])

        self.redisconn = gredis.client.Connection(
            address=str(values['redis_servers']['sms_api']['host']),
            port=int(values['redis_servers']['sms_api']['port']))
        self.redisconn.connect()

        print('-------')
        print('redis:')
        print('port: %s' % values['redis_servers']['sms_api']['port'])
        print('host: %s' % values['redis_servers']['sms_api']['host'])
        print('\n')
        print('sql:')
        print('host: %s' % values['mysql-db']['sms_api_config']['host'])
        print('port: %s' % values['mysql-db']['sms_api_config']['port'])
        print('db: %s' % values['mysql-db']['sms_api_config']['db'])
        print('user: %s' % values['mysql-db']['sms_api_config']['user'])
        print('password: %s' % values['mysql-db']['sms_api_config']['password'])
        print('-------')
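
Each connection parameter above repeats the same nested lookup; hoisting the sub-dictionaries once keeps the calls short. A sketch against the same `values` structure:

        sql_cfg = values['mysql-db']['sms_api_config']
        redis_cfg = values['redis_servers']['sms_api']

        self.dbconn = SQLUtils(host=sql_cfg['host'], port=sql_cfg['port'],
                               database=sql_cfg['db'], user=sql_cfg['user'],
                               password=sql_cfg['password'])
        self.redisconn = gredis.client.Connection(address=str(redis_cfg['host']),
                                                  port=int(redis_cfg['port']))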
Example #3
    def __init__(self, config={}, port_num=19997):
        self.VrepEnvBase_config = Configuration(default_config)
        self.VrepEnvBase_config.update(config)

        print('Program started')
        vrep.simxFinish(-1)  # just in case, close all opened connections
        self.clientID = vrep.simxStart('127.0.0.1', port_num, True,
                                       True, 5000, 5)  # Connect to V-REP
        if self.clientID != -1:
            print('Connected to remote API server')
            vrep.simxStartSimulation(
                self.clientID, vrep.simx_opmode_oneshot_wait)
            vrep.simxSynchronous(self.clientID, True)
            # Now try to retrieve data in a blocking fashion (i.e. a service
            # call):
            res, objs = vrep.simxGetObjects(
                self.clientID, vrep.sim_handle_all, vrep.simx_opmode_blocking)
            if res == vrep.simx_return_ok:
                print('Number of objects in the scene: ', len(objs))
            else:
                print('Remote API function call returned with error code: ', res)
            print("connected through port number: {}".format(port_num))
    
            # used to connect multiple clients in synchronous mode http://www.coppeliarobotics.com/helpFiles/en/remoteApiModusOperandi.htm
            return_code, iteration = vrep.simxGetIntegerSignal(self.clientID, "iteration", vrep.simx_opmode_streaming)
            time.sleep(2)
Example #4
    def __init__(self, config={}, helper=None, py_logger=None, tb_logger=None):
        #### common setup ####
        self.config = Configuration({'NuScenesAgent_config': {}})
        self.config.update(config)

        super().__init__(config=self.config['NuScenesAgent_config'],
                         helper=helper,
                         py_logger=py_logger,
                         tb_logger=tb_logger)
        self.name = 'SceneGraphics'
        #######
        self.map_layers = [
            'road_divider',
            'lane_divider',
            'drivable_area',
            #'road_segment',
            #'road_block',
            'lane',
            #'ped_crossing',
            'walkway',
            #'stop_line',
            #'carpark_area',
            #'traffic_light'
        ]

        self.plot_list = [
            'ego', 'other_cars', 'pedestrian', 'cam', 'map_info',
            'labeled_map', 'sensing_patch', 'sensor_info'
        ]
Example #5
 def init(self):
     '''
     @description:
         initializes:
             - configurations
             - db connections
     '''
     Configuration.initialize()
     values = Configuration.values()
     self.dbconn = SQLUtils(
         host=values['mysql-db']['sms_api_config']['host'],
         port=values['mysql-db']['sms_api_config']['port'],
         database=values['mysql-db']['sms_api_config']['db'],
         user=values['mysql-db']['sms_api_config']['user'],
         password=values['mysql-db']['sms_api_config']['password'])
Example #6
File: main.py Project: gpgomes/pyTorrent
 def __init__(self):
     self.configuration = Configuration()
     self.torrentAPIHandler = TorrentAPIHandler()
     self.file_handler = FileHandler()
     self.audit = Audit(type(self).__name__)
     self.torrent_list = []
     self.utorrent_client = None
Example #7
 def __init__(self, config: Configuration, owner: str, repo: str):
     super().__init__(config)
     self.releases = self.git_release.format(owner, repo)
     self.header = {
         'Authorization': 'Basic ' + config.gh_auth(),
         "User-Agent": "PostmanRuntime/7.23.0"
     }
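
`self.releases` holds the formatted releases URL and `self.header` carries the Basic auth token. Assuming the URL points at the GitHub releases REST endpoint and that the `requests` library is available (neither is shown in the excerpt), a fetch could look like:

     import requests

     resp = requests.get(self.releases, headers=self.header)
     resp.raise_for_status()
     for release in resp.json():  # each release dict carries a "tag_name"
         print(release["tag_name"])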
Example #8
 def __init__(self):
     """
     Tworzy instancję serwera.
     """
     self.__configuration = Configuration()
     TCPServer.__init__(
         self, (self.__configuration.host, self.__configuration.port),
         ClientServerHandler)
Example #9
 def __init__(self, plist_path):
     """
     :param plist_path: plist 文件路径
     """
     super().__init__(plist_path)
     self.app_config = Configuration(self.content,
                                     conf=EnvEnum.CONFIGURATION.value)
     self.export_plist_path = f'{EnvEnum.SCRIPT_PATH.value}/plist/{self.app_config.method}.plist'
     self.icon_url = f'{EnvEnum.SCRIPT_URL.value}{self.app_config.icon_path}'
Example #10
    def __init__(self, workDirectory=""):
        """
        Pobiera konfigurację serwera oraz zapisuje katalog roboczy. Tworzy też pustą listę plikó do usunięcia.

        Args:
            workDirectory (Optional(str)): Katalog roboczy (id użytkownika, nazwa grupy)
        """
        self.__configuration = Configuration()
        self.__dir = "".join(["/", workDirectory])
        self.__usedFiles = []
Example #11
    def __init__(self, *args, **kwargs):
        super(PipelinesModel, self).__init__(*args, **kwargs)
        pipeline_folder = "resources/pipelines"
        self.pipelines = []
        onlyfiles = [
            join(pipeline_folder, f) for f in listdir(pipeline_folder)
            if isfile(join(pipeline_folder, f))
        ]
        for f in onlyfiles:
            if not f.endswith(".ini"):
                continue
            pipeline_conf = Configuration(f)

            pipeline_metadata = {
                "filename": f,
                "basename": splitext(basename(f))[0],
                "name": pipeline_conf.get("name"),
                "description": pipeline_conf.get("description"),
                "author": pipeline_conf.get("author")
            }
            self.pipelines.append(pipeline_metadata)
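
The listdir/isfile/endswith filtering above can be collapsed with glob, which matches the .ini files directly (equivalent for a flat directory):

        from glob import glob
        onlyfiles = sorted(glob(join(pipeline_folder, "*.ini")))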
Example #12
 def __init__(self, subtitle):
     self.configuration = Configuration()
     self.file_handler = FileHandler()
     self.title = subtitle[:-4]  # strip the 4-character extension
     self.subtitle = subtitle
     self.subtitle_path = self.set_subtitle_path()
     self.magnet_link = ''
     self.directory = ''
     self.video_file = ''
     self.video_path = ''
     self.ready = False
     self.done = False
Example #13
    def __init__(self, environment):
        # Logging configuration
        logging.basicConfig()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        # Application configuration
        self.config = Configuration(self.logger, environment)

        # Tools configuration
        self.kugawana_tool = KugawanaInventoryTool(self.config.slack_bot_token)
        self.sc = SlackClient(self.config.slack_app_token)
Example #14
    def __init__(self, global_conf, pipeline_file ):
        self.pipeline_conf = Configuration(pipeline_file)
        self.filters = []
        self.pipeline = self.pipeline_conf.sections()
        self.source = ImageSource()
        self.source.configure(global_conf, "source", self.pipeline_conf)
        # the input format for the first filter element
        input_format = "image"

        for pipeline_section in self.pipeline:
            print(pipeline_section)
            # ignore the common section
            if pipeline_section == "common":
                continue
            # ignore the source section
            if pipeline_section == "source":
                continue

            instance = clazz.instance_by_name(pipeline_section)
            instance.configure(global_conf, pipeline_section, self.pipeline_conf)
            # try to load an image/icon for the given filter
            python_file = inspect.getfile(instance.__class__)
            svg_file = python_file.replace(".py", ".svg")
            if os.path.isfile(svg_file):
                with open(svg_file, "rb") as image_file:
                    encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
                    instance.icon = "data:image/svg+xml;base64,"+encoded_string

            # check that the output format of the predecessor filter
            # matches the input of this filter
            meta = instance.meta()
            if not meta["input"] == input_format:
                print("Filter '{}' is unable to process input format '{}'. Expected was '{}'".format(python_file, input_format, meta["input"] ))
                print("Wrong pipeline definition. Exit")
                exit_process()

            # the output of this filter is the input of the next filter
            input_format = meta["output"]

            self.filters.append(instance)
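
The chaining check relies on each filter's meta() declaring its interface; a hypothetical filter that satisfies it could look like:

    class ThresholdFilter:
        def meta(self):
            # "output" of this filter must equal the next filter's "input"
            return {"name": "threshold", "input": "image", "output": "image"}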
Example #15
 def __init__(self, config: Configuration):
     super().__init__(config)
     credentials = ServicePrincipalCredentials(config.az_client(),
                                               config.secret(),
                                               tenant=config.az_tenant())
     self.client = ContainerServiceClient(credentials,
                                          config.az_subscription())
     self.rg = config.az_resource_group()
     self.aks = config.aks()
Example #16
    def __init__(self, pipeline_file):
        QObject.__init__(self)

        self.pipeline_conf = Configuration(pipeline_file)
        self.filters = []
        self.pipeline = self.pipeline_conf.sections()

        icon_file = pipeline_file.replace(".ini", ".png")
        if os.path.isfile(icon_file):
            self.icon_path = icon_file

        # the input format for the first filter element
        input_format = None

        for pipeline_section in self.pipeline:
            # ignore the common section
            if pipeline_section == "common":
                continue

            instance = clazz.instance_by_name(pipeline_section, pipeline_section, self.pipeline_conf)
            instance.index = len(self.filters)

            # check that the output format of the predecessor filter
            # matches the input of this filter
            meta = instance.meta()
            if input_format is not None and meta["input"] != input_format:
                print("Filter '{}' is unable to process input format '{}'. Expected was '{}'".format(pipeline_section, input_format, meta["input"]))
                print("Wrong pipeline definition. Exit")
                exit_process()

            instance.param_changed.connect(self.process)

            # the output of this filter is the input of the next filter
            input_format = meta["output"]

            self.filters.append(instance)
Example #17
    def __init__(self, environment, token, user_id, callback_id, form,
                 response_url):
        # Logging configuration
        logging.basicConfig()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        # Application configuration
        self.config = Configuration(self.logger, environment)

        # Tools configuration
        self.sc = SlackClient(self.config.slack_app_token)

        # Interactive messages informations
        self.token = token
        self.user_id = user_id
        self.callback_id = callback_id
        self.form = form
        self.response_url = response_url
Example #18
def getUDIDforEmails():

    slack_token = Configuration().slack_token
    print('--- INFO: Connect to Slack')
    slack = Slacker(slack_token)

    udids_and_emails = {}
    print('--- INFO: Call Slack API to list Users')
    response = slack.users.list()
    users = response.body['members']

    for user in users:
        if not user['deleted']:
            try:
                email = user['profile']['email']
            except KeyError:
                email = "*****@*****.**"
            udids_and_emails[email] = user['id']

    return udids_and_emails
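
Despite the name, the returned dictionary maps email → Slack user id. If UDID-keyed lookups are needed, inverting it is a one-liner:

    emails_by_udid = {udid: email for email, udid in getUDIDforEmails().items()}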
Example #19
    def __init__(self, environment, token, user_id, command, parameter,
                 response_url, trigger_id):
        # Logging configuration
        logging.basicConfig()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        # Application configuration
        self.config = Configuration(self.logger, environment)

        # Tools configuration
        self.sc = SlackClient(self.config.slack_app_token)

        # Slash command informations
        self.token = token
        self.user_id = user_id
        self.command = command
        self.parameter = parameter
        self.response_url = response_url
        self.trigger_id = trigger_id
Example #20
    def __init__(self):
        """
        Tworzy całe CA. Wczytuje konfiguracje CA z pliku. Ładuje lub (w przypadku gdy nie istnieją)
        tworzy certyfikat i klucz prywatny.

        Args:
            certificatesDir (str): Katalog, w którym będą trzymane certyfikaty użytkowników.
            keysDir (str): Katalog, w któym będą trzymane klucze publiczne użytkowników.

        Raises:
            IOError: Jeśli nie można utworzyć klucza, albo odczytać go z pliku.
        """
        self.__configuration = Configuration()
        if not self.__checkFiles():
            self.__certificate, self.__keys = self.__newCaCertificate()
        else:
            self.__certificate = self.__loadCertificateFromFile(
                self.__configuration.certificateFile)
            self.__keys = self.__loadPrivateKeyFromFile(
                self.__configuration.keysFile)
        if not self.__keys:
            raise IOError
Example #21
    def launch(self, event):
        # Manage 'challenge' from Slack to validate the lambda.
        if "challenge" in event:
            return event["challenge"]

        slack_event = event['event']

        # Ignore message from bot.
        if not "bot_id" in slack_event \
           and slack_event['type'] == 'user_change' \
           and 'XfELFP2WL9' in slack_event['user']['profile']['fields']:

            # Application configuration
            config = Configuration(self.logger, self.environment)

            # Check input token
            if event['token'] not in config.slack_event_token:
                return "403 Forbidden"

            self.logger.info(slack_event['user']['real_name'] + " gets " +
                             slack_event['user']['profile']['fields']
                             ['XfELFP2WL9']['value'] + " certification!")

            user_udid = slack_event['user']['id']
            user_level_name = re.search(
                ' \((.+?) level\)', slack_event['user']['profile']['fields']
                ['XfELFP2WL9']['value'].lower()).group(1)

            user = User.get(user_udid)
            level = Level.getByName(user_level_name)

            if user and level:
                for user_certification in user.user_certifications:
                    user_certification.passesCertification(level)

        return "200 OK"
Example #22
class VideoPipeline:
    def __init__(self, global_conf, pipeline_file ):
        self.pipeline_conf = Configuration(pipeline_file)
        self.filters = []
        self.pipeline = self.pipeline_conf.sections()
        self.source = ImageSource()
        self.source.configure(global_conf, "source", self.pipeline_conf)
        # the input format for the first filter element
        input_format = "image"

        for pipeline_section in self.pipeline:
            print(pipeline_section)
            # ignore the common section
            if pipeline_section == "common":
                continue
            # ignore the source section
            if pipeline_section == "source":
                continue

            instance = clazz.instance_by_name(pipeline_section)
            instance.configure(global_conf, pipeline_section, self.pipeline_conf)
            # try to load an image/icon for the given filter
            python_file = inspect.getfile(instance.__class__)
            svg_file = python_file.replace(".py", ".svg")
            if os.path.isfile(svg_file):
                with open(svg_file, "rb") as image_file:
                    encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
                    instance.icon = "data:image/svg+xml;base64,"+encoded_string

            # check that the output format of the predecessor filter
            # matches the input of this filter
            meta = instance.meta()
            if not meta["input"] == input_format:
                print("Filter '{}' is unable to process input format '{}'. Expected was '{}'".format(python_file, input_format, meta["input"] ))
                print("Wrong pipeline definition. Exit")
                exit_process()

            # the output of this filter is the input of the next filter
            input_format = meta["output"]

            self.filters.append(instance)


    def meta(self):
        meta_info = []
        for instance in self.filters:
            menu = self.pipeline_conf.get_boolean("menu", instance.conf_section)
            meta = instance.meta()
            meta["menu"] = menu
            meta_info.append(meta)
        return {
            "name": self.pipeline_conf.get("name"),
            "description": self.pipeline_conf.get("description"),
            "author": self.pipeline_conf.get("author"),
            "filters": meta_info
        }


    def filter_count(self):
        return len(self.filters)


    def override_source_image(self, value):
        self.source.set_image(value)


    def get_source_image(self):
        return self.source.get_image()


    def set_parameter(self, index, name, value):
        self.filters[index].set_parameter(name, value)


    @perf_tracker()
    def gcode(self, contour_3d):
        return self.filters[-1].gcode(contour_3d)

    def process(self):
        result = []
        image = self.get_source_image()
        cnt = []

        for instance in self.filters:
            start = time.process_time()
            try:
                print("Running filter: ", type(instance))
                image, cnt = instance.process(image, cnt)
                end = time.process_time()
                print(instance.meta()["name"], end - start)
                print("Contour Count:", len(cnt))
                cnt = ensure_3D_contour(cnt)
                if image is None:
                    print("unable to read image from filter: "+instance.meta()["name"])
                    break
                if len(image.shape) != 3:
                    print("Image must have 3 color channels. Filter '{}' must return RGB image for further processing".format(instance.conf_section))
                result.append({"filter": instance.conf_section, "image":image, "contour": cnt })
                print("------------------------")
            except Exception as exc:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
                print(type(instance), exc)

        return result

    def stop(self):
        for instance in self.filters:
            instance.stop()
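
A minimal driver for the class above (the configuration paths are hypothetical; `Configuration` and the filter classes are assumed importable as in the other examples):

    conf = Configuration("config/configuration.ini")  # hypothetical path
    pipeline = VideoPipeline(conf, "resources/pipelines/example.ini")
    results = pipeline.process()  # list of {"filter", "image", "contour"} dicts
    if results:
        gcode = pipeline.gcode(results[-1]["contour"])
    pipeline.stop()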
Example #23
import cv2

from utils.configuration import Configuration
import os
import numpy as np
from utils.image import image_resize

from processing.image.black_white import Filter as Filter1
from processing.image.skeletonize import Filter as Filter2

TEST_IMAGE = "./test-images/Jeannette_Logo.png"

configuration_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "config", "configuration.ini"))
conf = Configuration(configuration_dir)

filter1 = Filter1()
filter1.threshold = 128

filter2 = Filter2()

img = cv2.imread(TEST_IMAGE, cv2.IMREAD_COLOR)
img = image_resize(img, height=600)
img1, cnt = filter1.process(img, None)
img2, cnt = filter2.process(img1, None)


img = 255 - cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img1 = 255 - cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = 255 - cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# calculate the watershed distance
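
The excerpt ends at the watershed comment; one common next step the comment suggests is OpenCV's distance transform on the binarized skeleton (an assumption, not this project's code):

dist = cv2.distanceTransform((img2 > 0).astype(np.uint8), cv2.DIST_L2, 5)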
Example #24
def main(argv):

    ## Create and load client configuration
    cfg_ = Configuration()
    cfg_.load(CONFIG_FILE)
    print(cfg_)

    ## Create UnrealCV client and connect
    ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                     cfg_.m_unrealengine_port)

    ## Load sequence configuration file and properties and save description
    seq_ = Sequence()
    seq_.load(cfg_.m_sequence_filename)
    print(seq_)
    seq_.save("../data/" + seq_.m_name + "/sequence.json")

    ## Generate camera description
    cam_ = Camera()
    cam_.m_fx = cfg_.m_camera_fx
    cam_.m_fy = cfg_.m_camera_fy
    cam_.m_cx = cfg_.m_camera_cx
    cam_.m_cy = cfg_.m_camera_cy
    cam_.m_fov = cfg_.m_camera_fov
    cam_.m_depthmin = cfg_.m_camera_depthmin
    cam_.m_depthmax = cfg_.m_camera_depthmax
    print(cam_)
    cam_.save("../data/" + seq_.m_name + "/camera.json")

    ## Generate objects description
    object_list_ = ucv_client_.request("vget /objects").split(' ')

    objects_ = {}
    objects_["total_number"] = len(object_list_)
    objects_["objects"] = {}

    print("There are " + str(objects_["total_number"]) +
          " objects in this scene...")

    ### Load object instance to object class mapping
    instance_class_ = {}
    with open("../config/instance_class.json") as f:
        instance_class_ = json.load(f)

    ### Load class information
    classes_ = {}
    with open("../config/classes.json") as f:
        classes_json_ = json.load(f)

        for i_classid in classes_json_.keys():
            obj_class_ = ObjectClass()
            obj_class_.parse_json(classes_json_[i_classid])
            classes_[i_classid] = obj_class_

    ### Describe each object
    for i_objname in object_list_:

        object_ = SceneObject()

        object_.m_instance_name = i_objname
        print("Getting color for {0}".format(i_objname))
        object_color_ = Color(
            ucv_client_.request("vget /object/{0}/color".format(i_objname)))
        print("Object {0} has color {1}".format(i_objname, object_color_))
        object_.m_instance_color = object_color_

        if i_objname in instance_class_:
            object_.m_class = classes_[instance_class_[i_objname]]
        else:
            object_.m_class = classes_["none"]

        objects_["objects"][i_objname] = object_.to_json()

    with open("../data/" + seq_.m_name + "/objects.json", 'w') as f:
        json.dump(objects_, f, indent=2)

    ## Get frames
    for i in range(seq_.m_total_frames):

        print("Getting frame {0} out of {1}...".format(i, seq_.m_total_frames))

        if i < FRAME_START:
            print("Skipping frame " + str(i))
            continue

        frame_ = seq_.m_frames[i]

        frame_id_ = frame_["id"]
        frame_timestamp_ = frame_["timestamp"]
        frame_camera_ = frame_["camera"]
        frame_objects_ = frame_["objects"]

        print(frame_timestamp_)
        res_camera_ = ucv.place_camera(ucv_client_, frame_camera_)
        while (res_camera_ is None):
            print("ERROR: Trying to place camera again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            res_camera_ = ucv.place_camera(ucv_client_, frame_camera_)

        ucv.place_objects(ucv_client_, frame_objects_)

        frame_rgb_ = cli_rgb.get_rgb(ucv_client_)
        while (frame_rgb_ is None):
            print("ERROR: Trying to get RGB frame again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            frame_rgb_ = cli_rgb.get_rgb(ucv_client_)

        frame_rgb_im_ = Image.fromarray(frame_rgb_)
        frame_rgb_im_.save("../data/" + seq_.m_name + "/rgb/" + frame_id_ +
                           ".png")

        frame_mask_ = cli_sgm.get_object_mask(ucv_client_)
        while (frame_mask_ is None):
            print("ERROR: Trying to get mask frame again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            frame_mask_ = cli_sgm.get_object_mask(ucv_client_)

        frame_mask_im_ = Image.fromarray(frame_mask_)
        frame_mask_im_.save("../data/" + seq_.m_name + "/mask/" + frame_id_ +
                            ".png")

        frame_depth_ = cli_dpt.get_depth(ucv_client_)
        while (frame_depth_ is None):
            print("ERROR: Trying to get depth frame again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            frame_depth_ = cli_dpt.get_depth(ucv_client_)

        frame_depth_im_ = Image.fromarray(frame_depth_)
        cli_utils.save_16bit_png(
            frame_depth_,
            "../data/" + seq_.m_name + "/depth/" + frame_id_ + ".png",
            cam_.m_depthmin, cam_.m_depthmax)
Example #25
import os
import base64
import time
import logging

from flask import Flask, render_template, make_response, send_file, request

from utils.webgui import FlaskUI  # get the FlaskUI class

from pipeline import VideoPipeline
from grbl import GrblWriter
from utils.configuration import Configuration

configuration_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "config",
                 "configuration.ini"))
conf = Configuration(configuration_dir)

POOL_TIME = conf.get_int("image-read-ms") / 1000  # convert to Seconds
PIPELINE_FOLDER = os.path.abspath(
    os.path.join(os.path.dirname(__file__), conf.get("pipelines")))
SERIAL_PORT = conf.get("serial-port")
SERIAL_BAUD = conf.get_int("serial-baud")

grbl = GrblWriter(SERIAL_PORT, SERIAL_BAUD)

log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
ui = FlaskUI(app=app, port=8080)
Example #26
import time
import numpy as np
import torch

from config import config_dict
from datasets.generateData import generate_dataset
from net.generateNet import generate_net
import torch.optim as optim
from net.sync_batchnorm.replicate import patch_replication_callback
from torch.utils.data import DataLoader
from utils.configuration import Configuration
from utils.finalprocess import writelog
from utils.imutils import img_denorm
from utils.DenseCRF import dense_crf
from utils.test_utils import single_gpu_test
from utils.imutils import onehot

cfg = Configuration(config_dict, False)

def ClassLogSoftMax(f, category):
	exp = torch.exp(f)
	exp_norm = exp/torch.sum(exp*category, dim=1, keepdim=True)
	softmax = exp_norm*category
	logsoftmax = torch.log(exp_norm)*category
	return softmax, logsoftmax
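
ClassLogSoftMax normalizes the exponentials only over the classes flagged in `category` (a 0/1 mask broadcast against the logits). A quick sanity sketch with an all-ones mask, where it reduces to a plain softmax:

f = torch.randn(2, 5, 4, 4)        # logits: batch x classes x H x W
category = torch.ones(2, 5, 1, 1)  # all classes active -> plain softmax
softmax, logsoftmax = ClassLogSoftMax(f, category)
assert torch.allclose(softmax.sum(dim=1), torch.ones(2, 4, 4), atol=1e-5)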

def test_net():
	period = 'val'
	dataset = generate_dataset(cfg, period=period, transform='none')
	def worker_init_fn(worker_id):
		np.random.seed(1 + worker_id)
	dataloader = DataLoader(dataset, 
				batch_size=1, 
Example #27
import argparse
import importlib

from utils.save_video_ffmpeg import VideoSaver

save_directory = "play_results"
image_extension = "png"
zoom_factor = 10
framerate = 5

if __name__ == "__main__":
    # Loads configuration file
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, required=True)
    arguments = parser.parse_args()

    config_path = arguments.config

    configuration = Configuration(config_path)
    configuration.check_config()
    configuration.create_directory_structure()

    config = configuration.get_config()

    logger = Logger(config)
    search_name = config["model"]["architecture"]
    model = getattr(importlib.import_module(search_name), 'model')(config)
    model.cuda()

    datasets = {}

    dataset_splits = DatasetSplitter.generate_splits(config)
    transformations = TransformsGenerator.get_final_transforms(config)
Example #28
### How to start working with A.R.M.O.R
    1. The Helm repository is hosted here: https://yaroslavnikolaev.github.io/A.R.M.O.R./
    2. Deploy to your central cluster or to 
'''

COLLECTORS = '''
### ARMOR supports the following collectors:
<table style="width:100%">  <tr>    <th>Application</th>    <th>Armor annotation key</th>    <th>Description</th>  </tr>
'''

STORAGES = '''\n### ARMOR supports the following storages: 
- Prometheus \n'''

if __name__ == '__main__':
    '''Automatically generate Readme.md'''
    configuration = Configuration()
    factory = CollectorFactory(configuration)
    with open("./docs/README.md", "w") as readme:
        readme.write(INTRO)
        readme.write(STRUCTURE)
        readme.write(HOWTO)
        readme.write(COLLECTORS)
        # todo add description to collectors and storages use __doc__
        description = ""
        for key in sorted(factory.collectors.keys()):
            application = key.split(".")[-1]
            readme.write(
                f'''<tr>    <th>{application}</th>    <th>armor.io/{key}</th>    <th>{description}</th>  </tr>\n'''
            )
        readme.write("""</table> \n""")
        readme.write(STORAGES)
Example #29
class SceneGraphics(NuScenesAgent):
    def __init__(self, config={}, helper=None, py_logger=None, tb_logger=None):
        #### common setup ####
        self.config = Configuration({'NuScenesAgent_config': {}})
        self.config.update(config)

        super().__init__(config=self.config['NuScenesAgent_config'],
                         helper=helper,
                         py_logger=py_logger,
                         tb_logger=tb_logger)
        self.name = 'SceneGraphics'
        #######
        self.map_layers = [
            'road_divider',
            'lane_divider',
            'drivable_area',
            #'road_segment',
            #'road_block',
            'lane',
            #'ped_crossing',
            'walkway',
            #'stop_line',
            #'carpark_area',
            #'traffic_light'
        ]

        self.plot_list = [
            'ego', 'other_cars', 'pedestrian', 'cam', 'map_info',
            'labeled_map', 'sensing_patch', 'sensor_info'
        ]

    def update_all_info(self):
        pass

    def make_video_from_images(self,
                               image_dir: str = None,
                               video_save_dir: str = None,
                               video_layout=None):
        if video_layout is None:
            video_layout = {
                'figsize': (15, 15),
                'nb_rows': 6,
                'nb_cols': 6,
                'components': {
                    'birdseye': [[0, 4], [0, 6]],
                    'camera': [[4, 6], [0, 6]]
                }
            }

        img_fn_list = [
            str(p).split('/')[-1] for p in Path(image_dir).rglob('*.png')
        ]

        component_img_list = {}
        for k, v in video_layout['components'].items():
            img_list = [
                p for p in img_fn_list if k in p and 'checkpoint' not in p
            ]
            idx = np.argsort(np.array([int(p[:2]) for p in img_list]))
            img_list = np.array(img_list)[idx]
            nb_images = len(img_list)
            component_img_list[k] = img_list

        fig = plt.figure(figsize=video_layout['figsize'],
                         constrained_layout=False)
        gs = fig.add_gridspec(nrows=video_layout['nb_rows'],
                              ncols=video_layout['nb_cols'],
                              wspace=0.01)
        axes = {}
        for k, v in video_layout['components'].items():
            ax = fig.add_subplot(gs[v[0][0]:v[0][1], v[1][0]:v[1][1]])
            ax.axis('off')
            axes[k] = ax

        camera = Camera(fig)

        for i in tqdm.tqdm(range(nb_images)):
            for k, v in component_img_list.items():
                axes[k].imshow(plt.imread(os.path.join(image_dir, v[i])))
            camera.snap()

        animation = camera.animate()

        if video_save_dir is not None:
            animation.save(video_save_dir + '/video.mp4')
        return animation
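
The snap()/animate() pattern used here matches the celluloid package's Camera (an inference from the API; the import lies outside the excerpt):

    from celluloid import Camera  # assumed source of the Camera class used above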

    def plot_ego_scene(
        self,
        ego_centric=True,
        sample_token: str = None,
        instance_token: str = None,
        scene_token: str = None,
        idx: str = "",
        save_pkl_dir: str = None,
        save_img_dir: str = None,
        sensor_info=None,
        text_box=False,
        plot_list=None,
        ego_traj=None,
        ado_traj=None,
        contour=None,
        read_from_cached=False,
        paper_ready=False,
        other_images_to_be_saved=None,
        render_additional=None,
        plot_human_ego=True,
        patch_margin=30,
    ):
        '''
        ego_traj = {
           <name>: {
            'traj': <traj>,
            'color': <color>
          }
        }

        ado_traj = {
          <instance_token>: {
             "traj_dist": [[mean, cov], [], ...],
             "frame": <"local" or "global">,
             "pos": <np.ndarray or list>, # global coordinate of origin
             "quat": <np.ndarray or list> # global rotation of origin
          }
        }
        '''

        if sample_token is not None and scene_token is not None:
            raise ValueError(
                "only one of sample_token or scene_token should be provided")

        if scene_token is not None:
            sample_token = self.nusc.get('scene',
                                         scene_token)['first_sample_token']

        sample = self.nusc.get('sample', sample_token)
        if plot_list is None:
            plot_list = self.plot_list
        if sensor_info is not None:
            plot_list += ['sensing_patch']
            self.plot_list += ['sensing_patch']

        # this decides whether plotting in ego_centric or sim_ego_centric
        sim_ego_pose = None
        if ego_traj is not None:
            if 'sim_ego' in ego_traj.keys():
                sim_ego = ego_traj['sim_ego']
                if sim_ego is not None:
                    sim_ego_pose = {'translation': sim_ego['pos']}

        fig, ax, other = self.plot_agent_scene(
            ego_centric=ego_centric,
            sample_token=sample_token,
            instance_token=instance_token,
            sensor_info=sensor_info,
            text_box=text_box,
            plot_list=plot_list,
            ego_traj=ego_traj,
            read_from_cached=read_from_cached,
            paper_ready=paper_ready,
            render_additional=render_additional,
            plot_human_ego=plot_human_ego,
            patch_margin=patch_margin,
            sim_ego_pose=sim_ego_pose)

        #### plot sim ego ####
        if sim_ego_pose is not None:
            self.plot_elements(sim_ego['pos'],
                               sim_ego['yaw'],
                               'sim_ego',
                               ax,
                               animated_agent=paper_ready)

        #### plot ado traj ####
        if ado_traj is not None:
            self.plot_trajectory_distributions(ax, ado_traj)

        #### plot contour ####
        if contour is not None:
            self.plot_contour(ax, contour)

        #### save stuff ####
        if save_pkl_dir is not None:
            p = os.path.join(save_pkl_dir, idx + "_" + sample_token + ".pkl")
            with open(p, 'wb') as f:
                cloudpickle.dump(ax, f)

        if save_img_dir is not None:
            p = os.path.join(save_img_dir,
                             idx + "_" + sample_token + "_birdseye.png")
            #fig.savefig(p, dpi=300, quality=95)
            fig.savefig(p)
            if 'cam' in plot_list:
                p = os.path.join(save_img_dir,
                                 idx + "_" + sample_token + "_camera.png")
                #other['sfig'].savefig(p, dpi=300, quality=95)
                other['sfig'].savefig(p)

            if other_images_to_be_saved is not None:
                for k, v in other_images_to_be_saved.items():
                    p = os.path.join(
                        save_img_dir,
                        idx + "_" + sample_token + "_" + k + ".png")
                    if isinstance(v, np.ndarray):
                        plt.imsave(p, v)
                    elif isinstance(v, matplotlib.figure.Figure):
                        plt.savefig(p)

        return fig, ax, other

    def plot_agent_scene(
            self,
            ego_centric: bool = False,
            ego_traj=None,
            instance_token: str = None,
            sample_token: str = None,
            map_layers=None,
            sensor_info=None,
            sensing_patch=None,
            predictions=None,
            agent_state_dict=None,
            plot_list=None,
            legend=False,
            text_box=False,
            show_axis=True,
            render_ego_pose_range=False,
            paper_ready=False,
            read_from_cached=False,
            plot_agent_trajs=True,
            animated_agent=False,
            bfig=None,  # for birdseye image
            bax=None,
            sfig=None,  # for camera
            sax=None,
            render_additional=None,
            plot_human_ego=True,
            patch_margin=30,
            sim_ego_pose=None,
            save_image_ax=False):

        if paper_ready:
            legend = False
            text_box = False
            show_axis = False
            render_ego_pose_range = False
            plot_agent_trajs = False
            animated_agent = True
        #show_axis = True
        if map_layers is None:
            map_layers = self.map_layers

        if plot_list is None:
            plot_list = self.plot_list

        sample = self.nusc.get('sample', sample_token)
        scene = self.nusc.get('scene', sample['scene_token'])
        scene_log = self.nusc.get('log', scene['log_token'])
        nusc_map = NuScenesMap(dataroot=self.dataroot,
                               map_name=scene_log['location'])

        min_diff_patch = 30

        if not ego_centric:
            agent_future = self.helper.get_future_for_agent(
                instance_token,
                sample_token,
                self.na_config['pred_horizon'],
                in_agent_frame=False,
                just_xy=True)

            agent_past = self.helper.get_past_for_agent(
                instance_token,
                sample_token,
                self.na_config['obs_horizon'],
                in_agent_frame=False,
                just_xy=True)

            #### set plot patch ####
            if agent_future.shape[0] > 0:
                p = agent_future[0]
            else:
                p = agent_past[-1]

            my_patch = (
                p[0] - patch_margin,
                p[1] - patch_margin,
                p[0] + patch_margin,
                p[1] + patch_margin,
            )

            # min_patch = np.floor(agent_future.min(axis=0) - patch_margin)
            # max_patch = np.ceil(agent_future.max(axis=0) + patch_margin)
            # diff_patch = max_patch - min_patch

            # if any(diff_patch < min_diff_patch):
            #     center_patch = (min_patch + max_patch) / 2
            #     diff_patch = np.maximum(diff_patch, min_diff_patch)
            #     min_patch = center_patch - diff_patch / 2
            #     max_patch = center_patch + diff_patch / 2
            # my_patch = (min_patch[0], min_patch[1], max_patch[0], max_patch[1])

        else:
            sample_data = self.nusc.get('sample_data',
                                        sample['data']['CAM_FRONT'])
            ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])
            my_patch = (
                ego_pose['translation'][0] - patch_margin,
                ego_pose['translation'][1] - patch_margin,
                ego_pose['translation'][0] + patch_margin,
                ego_pose['translation'][1] + patch_margin,
            )

            if sim_ego_pose is not None:
                my_patch = (
                    sim_ego_pose['translation'][0] - patch_margin,
                    sim_ego_pose['translation'][1] - patch_margin,
                    sim_ego_pose['translation'][0] + patch_margin,
                    sim_ego_pose['translation'][1] + patch_margin,
                )

        #### read from saved path if present ####
        read_img = False
        if read_from_cached:
            scene_path = os.path.join(
                scene_img_dir,
                scene['name'] + "-token-" + sample['scene_token'])
            p_scene = Path(scene_img_dir)
            saved_scene_list = [
                str(f) for f in p_scene.iterdir() if f.is_dir()
            ]

            if scene_path in saved_scene_list:
                p_sample = Path(scene_path)
                for f in p_sample.iterdir():
                    if sample_token in str(f):
                        ax = cloudpickle.load(
                            open(os.path.join(scene_path, str(f)), 'rb'))
                        fig = plt.figure(figsize=(10, 10))
                        fig._axstack.add('ax', ax)
                        read_img = True
        if not read_img:
            fig, ax = nusc_map.render_map_patch(
                my_patch,
                map_layers,
                figsize=(10, 10),
                render_egoposes_range=render_ego_pose_range,
                render_legend=legend,
                fig=bfig,
                axes=bax)

            if not ego_centric:
                ax.set_title(scene['name'] + " instance_token: " +
                             instance_token + ", sample_token: " +
                             sample_token + "\n" + ", decription " +
                             scene['description'])
            else:
                ax.set_title(scene['name'] + ", sample_token: " +
                             sample_token + "\n" + ", decription " +
                             scene['description'])

        #### label map ####
        if 'labeled_map' in plot_list:
            records_within_patch = nusc_map.get_records_in_patch(
                my_patch, nusc_map.non_geometric_layers, mode='within')
            self.label_map(ax,
                           nusc_map,
                           records_within_patch['stop_line'],
                           text_box=text_box)

            #### Plot ego ####
            if 'ego' in plot_list:
                ego_pos, ego_quat = self.plot_ego(
                    ax,
                    sample,
                    ego_traj=ego_traj,
                    animated_agent=animated_agent,
                    plot_ego=plot_human_ego)

            #### Plot other agents ####
            if 'pedestrian' in plot_list or 'other_cars' in plot_list:
                road_agents_in_patch = self.plot_road_agents(
                    ax,
                    instance_token,
                    sample,
                    plot_list,
                    text_box,
                    sensor_info,
                    my_patch,
                    plot_traj=plot_agent_trajs,
                    animated_agent=animated_agent)

        ##################
        # Car to predict #
        ##################
        if not ego_centric:
            agent_pos, agent_quat = self.plot_car_to_predict(
                ax, agent_future, agent_past, instance_token, sample_token,
                text_box, predictions, agent_state_dict)

        #### plot all_info ####
        if 'map_info' in self.plot_list:
            if not ego_centric:
                self.plot_map_info(ax, agent_pos, nusc_map, text_box=text_box)
            else:
                self.plot_map_info(ax, ego_pos, nusc_map, text_box=text_box)

        #### plot sensor info ###
        if sensor_info is not None and 'sensor_info' in plot_list:
            self.plot_sensor_info(ax,
                                  sensor_info=sensor_info,
                                  text_box=text_box)

        #### render map layers on camera images ####
        if 'cam' in plot_list:
            #sfig, sax = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True, figsize=(9,16))

            layer_names = [
                'road_segment', 'lane', 'ped_crossing', 'walkway', 'stop_line',
                'carpark_area'
            ]
            layer_names = []
            #cam_names = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
            #             'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT']
            cam_names = ['CAM_FRONT']
            k = 0
            if sfig is None:
                if len(cam_names) == 6:
                    sfig, sax = plt.subplots(nrows=2, ncols=3)
                    for i in range(2):
                        for j in range(3):
                            sax[i, j].xaxis.set_visible(False)
                            sax[i, j].yaxis.set_visible(False)
                            cam_fig, cam_ax = nusc_map.render_map_in_image(
                                self.nusc,
                                sample_token,
                                layer_names=layer_names,
                                camera_channel=cam_names[k],
                                ax=sax[i, j])
                            k += 1
                elif len(cam_names) == 1:
                    cam_fig, cam_ax = plt.subplots()
                    cam_ax.xaxis.set_visible(False)
                    cam_ax.yaxis.set_visible(False)
                    nusc_map.render_map_in_image(self.nusc,
                                                 sample_token,
                                                 layer_names=layer_names,
                                                 camera_channel=cam_names[k],
                                                 ax=cam_ax)
                else:
                    raise ValueError('')

            if sfig is not None:
                sfig.tight_layout(pad=0)
                sfig.set_figheight(7)
                sfig.set_figwidth(15)

            # for car_info in road_agents_in_patch['vehicles']:
            #     instance_token = car_info['instance_token']
            #     # render annotations inside patch
            #     ann = self.helper.get_sample_annotation(instance_token, sample_token)
            #     ann_fig, ann_ax = self.nusc.render_annotation(ann['token'])
            #     if sensing_patch is not None and self.in_shapely_polygon(car_info['translation'], sensing_patch):
            #         ann_ax.set_title("Sensed")
        else:
            sfig, sax = None, None
            cam_fig, cam_ax = None, None

        #### render additional outside information ####
        if render_additional is not None:
            self.render_additional(ax, render_additional)

        if not show_axis:
            plt.axis('off')
            plt.grid('off')
            ax.grid(False)
        ax.set_aspect('equal')

        other = {
            'cam_fig': cam_fig,
            'cam_ax': cam_ax,
            'sfig': sfig,
            'sax': sax
        }
        return fig, ax, other

    def render_additional(self, ax, render_dict: dict = None):
        if 'lines' in render_dict.keys():
            # lines = [
            #     {
            #         'start': <2x1 vector>,
            #         'end': <2x1 vector>
            #         'color': <color>
            #     },
            #     {
            #          'traj': <2xn vector>,
            #          'color': <color>,
            #          'marker': <marker>
            #     }
            # ]
            for l in render_dict['lines']:
                if 'start' in l.keys():
                    ax.plot([l['start'][0], l['end'][0]],
                            [l['start'][1], l['end'][1]],
                            c=l['color'])
                elif 'traj' in l.keys():
                    ax.plot(l['traj'][:, 0],
                            l['traj'][:, 1],
                            c=l['color'],
                            linestyle=l['marker'])

        if 'scatters' in render_dict.keys():
            # scatters = [
            #     {
            #         'traj': <nx2 matrix>,
            #         'color': <color>
            #     }
            # ]
            for s in render_dict['scatters']:
                ax.scatter(s['traj'][:, 0],
                           s['traj'][:, 1],
                           color=s['color'],
                           s=30,
                           zorder=700)

        if 'text_boxes' in render_dict.keys():
            # text_boxes = [
            #     {
            #         'text_string': <str>,
            #         'pos': np.ndarray
            #     }
            # ]
            for textbox in render_dict['text_boxes']:
                self.plot_text_box(ax, textbox['text_string'], textbox['pos'])

    def in_my_patch(self, pos, my_patch):
        x_min, y_min, x_max, y_max = my_patch
        return x_min < pos[0] < x_max and y_min < pos[1] < y_max

    def plot_center_lanes(self):
        pass

    def plot_elements(self,
                      pos: np.ndarray,
                      heading: float,
                      object_type="current_car",
                      ax=None,
                      label: str = "",
                      attribute: str = "",
                      animated_agent=False):
        '''pos is the global coordinate of the object
           heading is in degrees
           object_type can be 'current_car' 'other_car', 'pedestrian'
        '''
        if object_type == 'current_car':
            obj = robot
        elif object_type == 'other_cars':
            obj = cars[0]
        elif object_type == 'ego':
            obj = cars[4]
        elif object_type == 'sim_ego':
            obj = cars[2]
        elif object_type == 'pedestrian':
            obj = ped
        else:
            raise ValueError('object type not supported')

        if object_type != 'pedestrian':
            r_img = rotate(obj, angle=90, axes=(1, 0))
            r_img = rotate(r_img, angle=-heading, axes=(1, 0))
            if object_type == 'current_car':
                oi = OffsetImage(r_img, zoom=0.02, zorder=700)
                color = 'green'
            elif object_type == 'other_cars':
                oi = OffsetImage(r_img, zoom=0.035, zorder=700)
                color = 'blue'
            elif object_type == 'ego':
                oi = OffsetImage(r_img, zoom=0.015, zorder=700)
                color = 'red'
            elif object_type == 'sim_ego':
                oi = OffsetImage(r_img, zoom=0.015, zorder=700)
                color = 'yellow'

            veh_box = AnnotationBbox(oi, (pos[0], pos[1]), frameon=False)
            veh_box.zorder = 700
            if animated_agent:
                ax.add_artist(veh_box)
            else:
                ax.scatter(pos[0],
                           pos[1],
                           marker='H',
                           color=color,
                           s=100,
                           zorder=700)
        else:
            ax.scatter(pos[0], pos[1], marker='*', color='green', s=100)

    def plot_text_box(self,
                      ax,
                      text_string: str,
                      pos: np.ndarray,
                      facecolor: str = 'wheat'):
        props = dict(boxstyle='round', facecolor=facecolor, alpha=0.5)
        ax.text(pos[0],
                pos[1],
                text_string,
                fontsize=10,
                bbox=props,
                zorder=800)

    def plot_contour(self, ax, contour):
        X = contour['X']
        Y = contour['Y']
        Z = contour['Z']
        levels = contour['levels']
        transform = contour['transform']

        X_global = X
        Y_global = Y
        # if transform is not None:
        #     Coord_local = np.concatenate([np.expand_dims(X, axis=0),
        #                                   np.expand_dims(Y, axis=0)], axis=0)
        #     coord_global = convert_local_coords_to_global(Coord_local.reshape(2, -1).T, transform['translation'], transform['rotation'])

        #     X_global = []
        #     Y_global = []
        #     for i in range(0, coord_global.shape[0], X.shape[1]):
        #         X_global.append(coord_global[i:i+X.shape[1], 0].tolist())
        #         Y_global.append(coord_global[i:i+X.shape[1], 1].tolist())
        #     X_global = np.array(X_global)
        #     Y_global = np.array(Y_global)

        cp = ax.contourf(X_global,
                         Y_global,
                         Z,
                         levels,
                         zorder=100,
                         alpha=0.5,
                         cmap='Reds',
                         linewidths=3)

    def plot_sensor_info(self,
                         ax,
                         sensor_info,
                         text_box=True,
                         plot_ado_connection_lines=False):
        #### plot sensing patch ####
        sensing_patch = sensor_info['sensing_patch']['polygon']
        polygon = matplotlib.patches.Polygon(
            np.array(list(sensing_patch.exterior.coords)),
            fill=True,
            fc='green',
            alpha=0.3,
            #edgecolor='green',
            #linestyle='--',
            linewidth=2)
        if 'sensing_patch' in self.plot_list:
            ax.add_patch(polygon)

        #### plot ego ####
        ego_info = sensor_info['ego_info']
        ego_pos = ego_info['translation'][:2]
        ego_quat = ego_info['rotation_quat']

        #### plot agents ####
        agent_info = sensor_info['agent_info']
        if plot_ado_connection_lines:
            for agent in agent_info:
                agent_pos = agent['translation'][:2]
                ax.plot([ego_pos[0], agent_pos[0]], [ego_pos[1], agent_pos[1]],
                        c='black')

        #### plot map info ####

    def plot_map_info(self, ax, agent_pos, nusc_map, text_box=True):
        closest_lane_id = nusc_map.get_closest_lane(agent_pos[0],
                                                    agent_pos[1],
                                                    radius=2)
        closest_lane_record = nusc_map.get_lane(closest_lane_id)

        closest_lane_poses = np.array(
            arcline_path_utils.discretize_lane(closest_lane_record,
                                               resolution_meters=1))

        incoming_lane_ids = nusc_map.get_incoming_lane_ids(closest_lane_id)
        incoming_lane_data = []
        for incoming_lane_id in incoming_lane_ids:
            i_record = nusc_map.get_lane(incoming_lane_id)
            i_poses = np.array(
                arcline_path_utils.discretize_lane(i_record,
                                                   resolution_meters=1))
            incoming_lane_data.append({'record': i_record, 'poses': i_poses})

        outgoing_lane_ids = nusc_map.get_outgoing_lane_ids(closest_lane_id)
        outgoing_lane_data = []
        for outgoing_lane_id in outgoing_lane_ids:
            o_record = nusc_map.get_lane(outgoing_lane_id)
            o_poses = np.array(
                arcline_path_utils.discretize_lane(o_record,
                                                   resolution_meters=1))
            outgoing_lane_data.append({'record': o_record, 'poses': o_poses})

        map_info = {
            'closest_lane': {
                'record': closest_lane_record,
                'poses': closest_lane_poses
            },
            'incoming_lanes': incoming_lane_data,
            'outgoing_lanes': outgoing_lane_data
        }

        for k, v in viewitems(map_info):
            if k == 'stop_line':
                for d in v:
                    bd = d['bounding_box']
                    center = [(bd[0] + bd[2]) / 2, (bd[1] + bd[3]) / 2]
                    if text_box:
                        self.plot_text_box(
                            ax, 'detected_' + d['record']['stop_line_type'],
                            center, 'blue')

            elif k == 'closest_lane':
                p = np.array(v['poses'])
                ax.plot(p[:, 0],
                        p[:, 1],
                        linestyle="-.",
                        linewidth=2,
                        color='yellow')

            elif k == 'incoming_lanes':
                for d in v:
                    p = np.array(d['poses'])
                    ax.plot(p[:, 0],
                            p[:, 1],
                            linestyle="-.",
                            linewidth=2,
                            color='brown')
            elif k == 'outgoing_lanes':
                for d in v:
                    p = np.array(d['poses'])
                    ax.plot(p[:, 0],
                            p[:, 1],
                            linestyle="-.",
                            linewidth=2,
                            color='white')
            else:
                raise ValueError(f'info type {k} not supported')

    def plot_car_to_predict(self,
                            ax,
                            agent_future: np.ndarray,
                            agent_past: np.ndarray,
                            instance_token: str,
                            sample_token: str,
                            text_box: bool = True,
                            predictions: dict = None,
                            agent_state_dict: dict = None):
        '''
        predictions = {
            'name': {'data': <data>, 'color': <color>, 'frame': <frame>, 'style': '.'}
        }
        '''
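        # A minimal example of the structure above (hypothetical names and
        # values; `data` is an (N, 2) array of xy points):
        # predictions = {
        #     'my_model': {'data': np.zeros((12, 2)), 'color': 'red',
        #                  'frame': 'local', 'style': '-'}
        # }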

        #### plot car ####
        ann = self.helper.get_sample_annotation(instance_token, sample_token)

        category = ann['category_name']
        if len(ann['attribute_tokens']) != 0:
            attribute = self.nusc.get('attribute',
                                      ann['attribute_tokens'][0])['name']
        else:
            attribute = ""

        agent_yaw = Quaternion(ann['rotation'])
        agent_yaw = quaternion_yaw(agent_yaw)
        agent_yaw = angle_of_rotation(agent_yaw)
        agent_yaw = np.rad2deg(agent_yaw)

        self.plot_elements([ann['translation'][0], ann['translation'][1]],
                           agent_yaw, 'current_car', ax)
        if text_box:
            self.plot_text_box(
                ax, category,
                [ann['translation'][0] + 1.2, ann['translation'][1]])
            self.plot_text_box(
                ax, attribute,
                [ann['translation'][0] + 1.2, ann['translation'][1] + 1.2])
            if agent_state_dict is not None:
                state_str = ""
                for k, v in agent_state_dict.items():
                    state_str += f"{k[0]}:{v:.2f}, "
                self.plot_text_box(
                    ax, state_str,
                    [ann['translation'][0] + 1.2, ann['translation'][1] + 3.2])

        agent_pos = [ann['translation'][0], ann['translation'][1]]

        # plot ground truth
        if len(agent_future) > 0:
            ax.scatter(agent_future[:, 0],
                       agent_future[:, 1],
                       s=20,
                       c='yellow',
                       alpha=1.0,
                       zorder=200)
        if len(agent_past) > 0:
            ax.scatter(agent_past[:, 0],
                       agent_past[:, 1],
                       s=20,
                       c='k',
                       alpha=0.5,
                       zorder=200)

        # plot predictions
        if predictions is not None:
            for k, v in viewitems(predictions):
                if v['frame'] == 'local':
                    v['data'] = convert_local_coords_to_global(
                        v['data'], np.array(agent_pos),
                        np.array(ann['rotation']))
                if 'style' not in v:
                    v['style'] = '.'
                if v['style'] == '.':
                    ax.scatter(v['data'][:, 0],
                               v['data'][:, 1],
                               s=20,
                               c=v['color'],
                               alpha=1.0,
                               zorder=2)
                elif v['style'] == '-':
                    ax.plot(v['data'][:, 0],
                            v['data'][:, 1],
                            c=v['color'],
                            alpha=1.0,
                            zorder=2)
                else:
                    raise ValueError(f"style '{v['style']}' not supported")

        return agent_pos, ann['rotation']

    def label_map(self, ax, nusc_map, map_records, text_box=True):
        #### Label map ####
        for record_token in map_records:
            bd = nusc_map.get_bounds('stop_line', record_token)
            center = [(bd[0] + bd[2]) / 2, (bd[1] + bd[3]) / 2]
            record = nusc_map.get('stop_line', record_token)
            stop_line_type = record['stop_line_type']
            if text_box:
                self.plot_text_box(ax, stop_line_type, center, 'white')

    def plot_ego(self,
                 ax,
                 sample,
                 ego_traj=None,
                 animated_agent=False,
                 plot_ego=True):
        sample_data = self.nusc.get('sample_data', sample['data']['CAM_FRONT'])
        ego_pose = self.nusc.get('ego_pose', sample_data['ego_pose_token'])

        pos = [ego_pose['translation'][0], ego_pose['translation'][1]]
        ego_yaw = Quaternion(ego_pose['rotation'])
        ego_yaw = quaternion_yaw(ego_yaw)
        ego_yaw = angle_of_rotation(ego_yaw)
        ego_yaw = np.rad2deg(ego_yaw)

        if plot_ego:
            self.plot_elements(pos,
                               ego_yaw,
                               'ego',
                               ax,
                               animated_agent=animated_agent)

        if ego_traj is not None:
            for name, traj_dict in ego_traj.items():
                ax.scatter(traj_dict['traj'][:, 0],
                           traj_dict['traj'][:, 1],
                           c=traj_dict['color'],
                           s=60,
                           zorder=80)

        return pos, ego_pose['rotation']

    def plot_trajectory_distributions(self, ax, traj_dist_dict):
        '''
        traj_dist_dict = {
            <instance_sample_token>: {"traj_dist": [[mean, cov], [mean, cov], ...],
                                      "frame": <"local" or "global">,
                                      "pos": <np.ndarray or list>,  # global coordinates of the origin
                                      "quat": <np.ndarray or list>  # global rotation of the origin
                                      }
        }
        '''
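        # A minimal example of the structure above (hypothetical token and
        # values; with frame='global', pos and quat are not used):
        # traj_dist_dict = {
        #     'instance_sample_0': {'traj_dist': [[np.zeros(2), np.eye(2) * 0.1]],
        #                           'frame': 'global', 'pos': None, 'quat': None}
        # }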
        for k, v in traj_dist_dict.items():
            traj_dist = v['traj_dist']
            for dist in traj_dist:
                mean, cov = dist
                if v['frame'] == 'local':
                    mean = convert_local_coords_to_global(
                        np.array(mean), v['pos'], v['quat'])
                x, y = np.random.multivariate_normal(mean, cov, size=100).T
                sns.kdeplot(x=x, y=y, cmap='Blues', ax=ax)

    def plot_road_agents(self,
                         ax,
                         fig,
                         sample,
                         plot_list,
                         text_box,
                         sensor_info,
                         my_patch,
                         plot_traj=False,
                         contour_func=None,
                         animated_agent=False):

        road_agents_in_patch = {'pedestrians': [], 'vehicles': []}
        for ann_token in sample['anns']:
            ann = self.nusc.get('sample_annotation', ann_token)
            category = ann['category_name']
            if len(ann['attribute_tokens']) != 0:
                attribute = self.nusc.get('attribute',
                                          ann['attribute_tokens'][0])['name']
            else:
                attribute = ""

            pos = [ann['translation'][0], ann['translation'][1]]
            instance_token = ann['instance_token']
            sample_token = sample['token']
            #### Plot other agents ####
            valid_agent = False
            if ('other_cars' in plot_list and 'vehicle' in category
                    and 'parked' not in attribute
                    and self.in_my_patch(pos, my_patch)):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)
                self.plot_elements(pos,
                                   agent_yaw,
                                   'other_cars',
                                   ax,
                                   animated_agent=animated_agent)

                car_info = {
                    'instance_token': ann['instance_token'],
                    'category': category,
                    'attribute': attribute,
                    'translation': pos,
                    'rotation_quat': ann['rotation'],
                    'rotation_deg': agent_yaw
                }
                road_agents_in_patch['vehicles'].append(car_info)

                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])
                    self.plot_text_box(ax, ann['instance_token'], [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] + 1.2
                    ])

            #### Plot pedestrians ####
            if ('pedestrian' in plot_list and 'pedestrian' in category
                    and 'stroller' not in category
                    and 'wheelchair' not in category
                    and self.in_my_patch(pos, my_patch)):
                valid_agent = True
                agent_yaw = Quaternion(ann['rotation'])
                agent_yaw = quaternion_yaw(agent_yaw)
                agent_yaw = angle_of_rotation(agent_yaw)
                agent_yaw = np.rad2deg(agent_yaw)

                self.plot_elements(pos, agent_yaw, 'pedestrian', ax)
                if text_box:
                    self.plot_text_box(
                        ax, category,
                        [ann['translation'][0] + 1.2, ann['translation'][1]])
                    self.plot_text_box(ax, attribute, [
                        ann['translation'][0] + 1.2,
                        ann['translation'][1] - 1.2
                    ])

            if valid_agent and plot_traj:
                agent_future = self.helper.get_future_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['pred_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                agent_past = self.helper.get_past_for_agent(
                    instance_token,
                    sample_token,
                    self.na_config['obs_horizon'],
                    in_agent_frame=False,
                    just_xy=True)

                if len(agent_future) > 0:
                    ax.scatter(agent_future[:, 0],
                               agent_future[:, 1],
                               s=10,
                               c='y',
                               alpha=1.0,
                               zorder=200)
                if len(agent_past) > 0:
                    ax.scatter(agent_past[:, 0],
                               agent_past[:, 1],
                               s=10,
                               c='k',
                               alpha=0.2,
                               zorder=200)

        return road_agents_in_patch
Example #30
File: main.py Project: gotian/rjohn
def print_status(event):
    """Print the login/password pair currently being processed; return None.

    Arguments:
    event -- event notifying the attack module that the information was printed

    """
    event.set()

    p_timer = threading.Timer(config["info_time"], print_status, [event])
    p_timer.daemon = True
    if config["info_time"] != 0:
        p_timer.start()
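
# A minimal sketch of kicking off the periodic status printer above
# (assumption: config["info_time"] holds the reporting interval in seconds):
#     status_event = threading.Event()
#     print_status(status_event)  # re-schedules itself every config["info_time"] seconds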

if __name__ == '__main__':
    configUtils = Configuration()
    parser = configUtils.getOpt()

    session_filename_1 = configUtils.getConfig()["session_attack"]
    session_filename_2 = configUtils.getConfig()["session_queue1"]
    session_filename_3 = configUtils.getConfig()["session_queue2"]
    session_filename_4 = configUtils.getConfig()["session_config"]

    (options, args) = parser.parse_args()
    if options.restore:
        restore_session()
        msg = _("Session recovery")
        if verbose:
            print(datetime.today().isoformat(' ') + ": " + msg)
        log.info(datetime.today().isoformat(' ') + ": " + msg)
        main()
Example #31
class VideoPipeline(QObject):
    processed = Signal(object)

    def __init__(self, pipeline_file):
        QObject.__init__(self)

        self.pipeline_conf = Configuration(pipeline_file)
        self.filters = []
        self.pipeline = self.pipeline_conf.sections()

        icon_file = pipeline_file.replace(".ini", ".png")
        self.icon_path = None
        if os.path.isfile(icon_file):
            self.icon_path = icon_file

        # the input format for the first filter element
        input_format = None

        for pipeline_section in self.pipeline:
            # ignore the common section
            if pipeline_section == "common":
                continue

            instance = clazz.instance_by_name(pipeline_section,
                                              pipeline_section,
                                              self.pipeline_conf)
            instance.index = len(self.filters)

            # check that the output format of the predecessor filter matches
            # the input format of this filter
            meta = instance.meta()
            if input_format is not None and meta["input"] != input_format:
                print("Filter '{}' is unable to process input format '{}'. Expected was '{}'".format(
                    pipeline_section, input_format, meta["input"]))
                print("Wrong pipeline definition. Exit")
                exit_process()

            instance.param_changed.connect(self.process)

            # the output of this filter is the input of the next filter
            input_format = meta["output"]

            self.filters.append(instance)


    def meta(self):
        meta_info = []
        for instance in self.filters:
            menu = self.pipeline_conf.get_boolean("menu", instance.conf_section)
            meta = instance.meta()
            meta["menu"] = menu
            meta_info.append(meta)
        return {
            "name": self.pipeline_conf.get("name"),
            "description": self.pipeline_conf.get("description"),
            "author": self.pipeline_conf.get("author"),
            "filters": meta_info
        }

    def filter(self, index):
        return self.filters[index]

    def filter_count(self):
        return len(self.filters)

    def gcode(self, contour_3d):
        return self.filters[-1].gcode(contour_3d)

    def process(self):
        result = []
        image = None
        cnt = []

        for instance in self.filters:
            try:
                image, cnt = instance.process(image, cnt)
                result.append({"filter": instance.conf_section, "image": image, "contour": cnt})
                print("------------------------")
            except Exception as exc:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
                print(type(instance), exc)

        self.processed.emit(result)
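
# A minimal usage sketch (hypothetical file name; assumes a running Qt event
# loop so the `processed` signal can be delivered):
#     pipeline = VideoPipeline("pipelines/example.ini")
#     pipeline.processed.connect(lambda result: print(len(result), "filter results"))
#     pipeline.process()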