Example #1
 def __init__(self):
     self.configuration = Configuration()
     self.torrentAPIHandler = TorrentAPIHandler()
     self.file_handler = FileHandler()
     self.audit = Audit(type(self).__name__)
     self.torrent_list = []
     self.utorrent_client = None
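Most of these snippets revolve around a project-specific Configuration class, and the classes differ from project to project: some wrap an .ini file (Examples #2, #14, #21, #24), others wrap a plain dict of defaults that is merged with caller overrides (Examples #3, #23). As rough orientation only, a minimal dict-backed sketch of the interface the dict-style examples appear to rely on (not any project's actual implementation) could look like this:

# Hypothetical sketch of a dict-backed Configuration with defaults and a
# recursive update(), mirroring how Examples #3 and #23 call it. Not taken
# from any of the projects above.
class Configuration:
    def __init__(self, defaults=None):
        self._data = dict(defaults or {})

    def update(self, overrides):
        # merge nested dicts, overwrite everything else
        def merge(base, extra):
            for key, value in (extra or {}).items():
                if isinstance(value, dict) and isinstance(base.get(key), dict):
                    merge(base[key], value)
                else:
                    base[key] = value
        merge(self._data, overrides)

    def get(self, key, default=None):
        return self._data.get(key, default)

    def __getitem__(self, key):
        return self._data[key]


config = Configuration({'debug': False, 'pred_horizon': 6})
config.update({'debug': True})
print(config['debug'])  # True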
Example #2
    def pipelines():
        from os import listdir
        from os.path import isfile, join, basename, splitext
        onlyfiles = [
            join(PIPELINE_FOLDER, f) for f in listdir(PIPELINE_FOLDER)
            if isfile(join(PIPELINE_FOLDER, f))
        ]
        pipelines = []
        for f in onlyfiles:
            if not f.endswith(".ini"):
                continue
            conf = Configuration(f)

            pipeline_metadata = {
                "basename": splitext(basename(f))[0],
                "name": conf.get("name"),
                "description": conf.get("description"),
                "author": conf.get("author")
            }

            svg_file = f.replace(".ini", ".svg")
            if os.path.isfile(svg_file):
                with open(svg_file, "rb") as image_file:
                    encoded_string = base64.b64encode(
                        image_file.read()).decode('utf-8')
                    pipeline_metadata[
                        "icon"] = "data:image/svg+xml;base64," + encoded_string

            pipelines.append(pipeline_metadata)

        response = make_response(json.dumps(pipelines))
        response.headers['Content-Type'] = 'application/json'
        return response
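For orientation, the JSON list returned by pipelines() contains one object per .ini file found in PIPELINE_FOLDER; a hypothetical element (all values invented) might look like this:

# Invented sample element of the pipelines() response; the keys mirror the
# pipeline_metadata dict built above, the values are placeholders.
sample_pipeline = {
    "basename": "edge-detect",
    "name": "Edge Detect",
    "description": "Detects edges in the input image",
    "author": "Jane Doe",
    # only present when a matching .svg icon sits next to the .ini file
    "icon": "data:image/svg+xml;base64,PHN2ZyAvPg==",
}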
Example #3
    def __init__(self, config={}, helper=None, py_logger=None, tb_logger=None):
        #### common setup ####
        self.config = Configuration({'NuScenesAgent_config': {}})
        self.config.update(config)

        super().__init__(config=self.config['NuScenesAgent_config'],
                         helper=helper,
                         py_logger=py_logger,
                         tb_logger=tb_logger)
        self.name = 'SceneGraphics'
        #######
        self.map_layers = [
            'road_divider',
            'lane_divider',
            'drivable_area',
            #'road_segment',
            #'road_block',
            'lane',
            #'ped_crossing',
            'walkway',
            #'stop_line',
            #'carpark_area',
            #'traffic_light'
        ]

        self.plot_list = [
            'ego', 'other_cars', 'pedestrian', 'cam', 'map_info',
            'labeled_map', 'sensing_patch', 'sensor_info'
        ]
Example #4
    def __init__(self, config={}, port_num=19997):
        self.VrepEnvBase_config = Configuration(default_config)
        self.VrepEnvBase_config.update(config)

        print ('Program started')
        vrep.simxFinish(-1)  # just in case, close all opened connections
        self.clientID = vrep.simxStart('127.0.0.1', port_num, True,
                                       True, 5000, 5)  # Connect to V-REP
        if self.clientID != -1:
            print ('Connected to remote API server')
            vrep.simxStartSimulation(
                self.clientID, vrep.simx_opmode_oneshot_wait)
            vrep.simxSynchronous(self.clientID, True)
            # Now try to retrieve data in a blocking fashion (i.e. a service
            # call):
            res, objs = vrep.simxGetObjects(
                self.clientID, vrep.sim_handle_all, vrep.simx_opmode_blocking)
            if res == vrep.simx_return_ok:
                print ('Number of objects in the scene: ', len(objs))
            else:
                print ('Remote API function call returned with error code: ', res)
            print("connected through port number: {}".format(port_num))
    
            # used to connect multiple clients in synchronous mode http://www.coppeliarobotics.com/helpFiles/en/remoteApiModusOperandi.htm
            return_code, iteration = vrep.simxGetIntegerSignal(self.clientID, "iteration", vrep.simx_opmode_streaming)
            time.sleep(2)
Example #5
 def __init__(self):
     """
     Creates an instance of the server.
     """
     self.__configuration = Configuration()
     TCPServer.__init__(
         self, (self.__configuration.host, self.__configuration.port),
         ClientServerHandler)
Example #6
 def __init__(self, plist_path):
     """
     :param plist_path: path to the plist file
     """
     super().__init__(plist_path)
     self.app_config = Configuration(self.content,
                                     conf=EnvEnum.CONFIGURATION.value)
     self.export_plist_path = f'{EnvEnum.SCRIPT_PATH.value}/plist/{self.app_config.method}.plist'
     self.icon_url = f'{EnvEnum.SCRIPT_URL.value}{self.app_config.icon_path}'
Example #7
    def __init__(self, workDirectory=""):
        """
        Fetches the server configuration and stores the working directory. Also creates an empty list of files to be deleted.

        Args:
            workDirectory (Optional(str)): Working directory (user id, group name)
        """
        self.__configuration = Configuration()
        self.__dir = "".join(["/", workDirectory])
        self.__usedFiles = []
Example #8
 def __init__(self, subtitle):
     self.configuration = Configuration()
     self.file_handler = FileHandler()
     self.title = subtitle[0:len(subtitle)-4]
     self.subtitle = subtitle
     self.subtitle_path = self.set_subtitle_path()
     self.magnet_link = ''
     self.directory = ''
     self.video_file = ''
     self.video_path = ''
     self.ready = False
     self.done = False
Example #9
    def __init__(self, environment):
        # Logging configuration
        logging.basicConfig()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        # Application configuration
        self.config = Configuration(self.logger, environment)

        # Tools configuration
        self.kugawana_tool = KugawanaInventoryTool(self.config.slack_bot_token)
        self.sc = SlackClient(self.config.slack_app_token)
Example #10
    def __init__(self, environment, token, user_id, callback_id, form,
                 response_url):
        # Logging configuration
        logging.basicConfig()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        # Application configuration
        self.config = Configuration(self.logger, environment)

        # Tools configuration
        self.sc = SlackClient(self.config.slack_app_token)

        # Interactive message information
        self.token = token
        self.user_id = user_id
        self.callback_id = callback_id
        self.form = form
        self.response_url = response_url
Example #11
def getUDIDforEmails():

    slack_token = Configuration().slack_token
    print('--- INFO: Connect to Slack')
    slack = Slacker(slack_token)

    udids_and_emails = {}
    print('--- INFO: Call Slack API to list Users')
    response = slack.users.list()
    users = response.body['members']

    for user in users:
        if not user['deleted']:
            try:
                email = user['profile']['email']
            except KeyError:
                email = "*****@*****.**"
            udids_and_emails[email] = user['id']

    return udids_and_emails
Example #12
    def __init__(self, environment, token, user_id, command, parameter,
                 response_url, trigger_id):
        # Logging configuration
        logging.basicConfig()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)

        # Application configuration
        self.config = Configuration(self.logger, environment)

        # Tools configuration
        self.sc = SlackClient(self.config.slack_app_token)

        # Slash command information
        self.token = token
        self.user_id = user_id
        self.command = command
        self.parameter = parameter
        self.response_url = response_url
        self.trigger_id = trigger_id
Example #13
    def __init__(self, *args, **kwargs):
        super(PipelinesModel, self).__init__(*args, **kwargs)
        pipeline_folder = "resources/pipelines"
        self.pipelines = []
        onlyfiles = [
            join(pipeline_folder, f) for f in listdir(pipeline_folder)
            if isfile(join(pipeline_folder, f))
        ]
        for f in onlyfiles:
            if not f.endswith(".ini"):
                continue
            pipeline_conf = Configuration(f)

            pipeline_metadata = {
                "filename": f,
                "basename": splitext(basename(f))[0],
                "name": pipeline_conf.get("name"),
                "description": pipeline_conf.get("description"),
                "author": pipeline_conf.get("author")
            }
            self.pipelines.append(pipeline_metadata)
Example #14
    def __init__(self, global_conf, pipeline_file ):
        self.pipeline_conf = Configuration(pipeline_file)
        self.filters = []
        self.pipeline = self.pipeline_conf.sections()
        self.source = ImageSource()
        self.source.configure(global_conf, "source", self.pipeline_conf)
        # the input format for the first filter element
        input_format = "image"

        for pipeline_section in self.pipeline:
            print(pipeline_section)
            # ignore the common section
            if pipeline_section == "common":
                continue
            # ignore the source section
            if pipeline_section == "source":
                continue

            instance = clazz.instance_by_name(pipeline_section)
            instance.configure(global_conf, pipeline_section, self.pipeline_conf)
            # try to load an image/icon for the given filter
            python_file = inspect.getfile(instance.__class__)
            svg_file = python_file.replace(".py", ".svg")
            if os.path.isfile(svg_file):
                with open(svg_file, "rb") as image_file:
                    encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
                    instance.icon = "data:image/svg+xml;base64,"+encoded_string

            # check that the output format of the predecessor filter matches the input of this
            # filter
            meta = instance.meta()
            if meta["input"] != input_format:
                print("Filter '{}' is unable to process input format '{}'. Expected was '{}'".format(python_file, input_format, meta["input"]))
                print("Wrong pipeline definition. Exit")
                exit_process()

            # the output of this filter is the input of the next filter
            input_format = meta["output"]

            self.filters.append(instance)
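Examples #14 and #17 chain filters by comparing each filter's declared "input" format with the previous filter's "output". A standalone sketch of that compatibility check, using invented format names rather than the projects' filter classes, might be:

# Hedged sketch of the format-chaining check seen in Examples #14 and #17.
def validate_chain(metas, first_input="image"):
    """Return True if every filter accepts the format produced before it."""
    current = first_input
    for meta in metas:
        if meta["input"] != current:
            print("Filter '{}' cannot process '{}', expects '{}'".format(
                meta.get("name", "?"), current, meta["input"]))
            return False
        current = meta["output"]
    return True


# hypothetical chain: an image filter followed by a contour extractor
chain = [
    {"name": "blur", "input": "image", "output": "image"},
    {"name": "contours", "input": "image", "output": "contours"},
]
print(validate_chain(chain))  # True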
Example #15
    def __init__(self):
        """
        Creates the whole CA. Loads the CA configuration from a file. Loads the certificate and
        private key, or creates them if they do not exist.

        Args:
            certificatesDir (str): Directory in which user certificates will be stored.
            keysDir (str): Directory in which user public keys will be stored.

        Raises:
            IOError: If the key cannot be created or read from the file.
        """
        self.__configuration = Configuration()
        if not self.__checkFiles():
            self.__certificate, self.__keys = self.__newCaCertificate()
        else:
            self.__certificate = self.__loadCertificateFromFile(
                self.__configuration.certificateFile)
            self.__keys = self.__loadPrivateKeyFromFile(
                self.__configuration.keysFile)
        if not self.__keys:
            raise IOError
Example #16
    def launch(self, event):
        # Manage 'challenge' from Slack to validate the lambda.
        if "challenge" in event:
            return event["challenge"]

        slack_event = event['event']

        # Ignore message from bot.
        if not "bot_id" in slack_event \
           and slack_event['type'] == 'user_change' \
           and 'XfELFP2WL9' in slack_event['user']['profile']['fields']:

            # Application configuration
            config = Configuration(self.logger, self.environment)

            # Check input token
            if event['token'] not in config.slack_event_token:
                return "403 Forbidden"

            self.logger.info(slack_event['user']['real_name'] + " gets " +
                             slack_event['user']['profile']['fields']
                             ['XfELFP2WL9']['value'] + " certification!")

            user_udid = slack_event['user']['id']
            user_level_name = re.search(
                ' \((.+?) level\)', slack_event['user']['profile']['fields']
                ['XfELFP2WL9']['value'].lower()).group(1)

            user = User.get(user_udid)
            level = Level.getByName(user_level_name)

            if user and level:
                for user_certification in user.user_certifications:
                    user_certification.passesCertification(level)

        return "200 OK"
Example #17
    def __init__(self, pipeline_file):
        QObject.__init__(self)

        self.pipeline_conf = Configuration(pipeline_file)
        self.filters = []
        self.pipeline = self.pipeline_conf.sections()

        icon_file = pipeline_file.replace(".ini", ".png")
        if os.path.isfile(icon_file):
            self.icon_path = icon_file

        # the input format for the first filter element
        input_format = None

        for pipeline_section in self.pipeline:
            # ignore the common section
            if pipeline_section == "common":
                continue

            instance = clazz.instance_by_name(pipeline_section, pipeline_section, self.pipeline_conf)
            instance.index = len(self.filters)

            # check that the output format of the predecessor filter matches the input of this
            # filter
            meta = instance.meta()
            if input_format is not None and meta["input"] != input_format:
                print("Filter '{}' is unable to process input format '{}'. Expected was '{}'".format(pipeline_section, input_format, meta["input"]))
                print("Wrong pipeline definition. Exit")
                exit_process()

            instance.param_changed.connect(self.process)

            # the output of this filter is the input of the next filter
            input_format = meta["output"]

            self.filters.append(instance)
Example #18
from utils.save_video_ffmpeg import VideoSaver

save_directory = "play_results"
image_extension = "png"
zoom_factor = 10
framerate = 5

if __name__ == "__main__":
    # Loads configuration file
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, required=True)
    arguments = parser.parse_args()

    config_path = arguments.config

    configuration = Configuration(config_path)
    configuration.check_config()
    configuration.create_directory_structure()

    config = configuration.get_config()

    logger = Logger(config)
    search_name = config["model"]["architecture"]
    model = getattr(importlib.import_module(search_name), 'model')(config)
    model.cuda()

    datasets = {}

    dataset_splits = DatasetSplitter.generate_splits(config)
    transformations = TransformsGenerator.get_final_transforms(config)
Example #19
import numpy as np

from config import config_dict
from datasets.generateData import generate_dataset
from net.generateNet import generate_net
import torch.optim as optim
from PIL import Image
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from net.sync_batchnorm.replicate import patch_replication_callback
from utils.configuration import Configuration
from utils.finalprocess import writelog
from utils.imutils import img_denorm
from net.sync_batchnorm import SynchronizedBatchNorm2d
from utils.visualization import generate_vis, max_norm
from tqdm import tqdm

cfg = Configuration(config_dict)
def train_net():
	period = 'train'
	transform = 'weak'
	dataset = generate_dataset(cfg, period=period, transform=transform)
	def worker_init_fn(worker_id):
		np.random.seed(1 + worker_id)
	dataloader = DataLoader(dataset, 
				batch_size=cfg.TRAIN_BATCHES, 
				shuffle=cfg.TRAIN_SHUFFLE, 
				num_workers=cfg.DATA_WORKERS,
				pin_memory=True,
				drop_last=True,
				worker_init_fn=worker_init_fn)
	
	if cfg.GPUS > 1:
Example #20
def main(argv):

    ## Create and load client configuration
    cfg_ = Configuration()
    cfg_.load(CONFIG_FILE)
    print(cfg_)

    ## Create UnrealCV client and connect
    ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                     cfg_.m_unrealengine_port)

    ## Load sequence configuration file and properties and save description
    seq_ = Sequence()
    seq_.load(cfg_.m_sequence_filename)
    print(seq_)
    seq_.save("../data/" + seq_.m_name + "/sequence.json")

    ## Generate camera description
    cam_ = Camera()
    cam_.m_fx = cfg_.m_camera_fx
    cam_.m_fy = cfg_.m_camera_fy
    cam_.m_cx = cfg_.m_camera_cx
    cam_.m_cy = cfg_.m_camera_cy
    cam_.m_fov = cfg_.m_camera_fov
    cam_.m_depthmin = cfg_.m_camera_depthmin
    cam_.m_depthmax = cfg_.m_camera_depthmax
    print(cam_)
    cam_.save("../data/" + seq_.m_name + "/camera.json")

    ## Generate objects description
    object_list_ = ucv_client_.request("vget /objects").split(' ')

    objects_ = {}
    objects_["total_number"] = len(object_list_)
    objects_["objects"] = {}

    print("There are " + str(objects_["total_number"]) +
          " objects in this scene...")

    ### Load object instance to object class mapping
    instance_class_ = {}
    with open("../config/instance_class.json") as f:
        instance_class_ = json.load(f)

    ### Load class information
    classes_ = {}
    with open("../config/classes.json") as f:
        classes_json_ = json.load(f)

        for i_classid in classes_json_.keys():
            obj_class_ = ObjectClass()
            obj_class_.parse_json(classes_json_[i_classid])
            classes_[i_classid] = obj_class_

    ### Describe each object
    for i_objname in object_list_:

        object_ = SceneObject()

        object_.m_instance_name = i_objname
        print("Getting color for {0}".format(i_objname))
        object_color_ = Color(
            ucv_client_.request("vget /object/{0}/color".format(i_objname)))
        print("Object {0} has color {1}".format(i_objname, object_color_))
        object_.m_instance_color = object_color_

        if i_objname in instance_class_:
            object_.m_class = classes_[instance_class_[i_objname]]
        else:
            object_.m_class = classes_["none"]

        objects_["objects"][i_objname] = object_.to_json()

    with open("../data/" + seq_.m_name + "/objects.json", 'w') as f:
        json.dump(objects_, f, indent=2)

    ## Get frames
    for i in range(seq_.m_total_frames):

        print("Getting frame {0} out of {1}...".format(i, seq_.m_total_frames))

        if i < FRAME_START:
            print("Skipping frame " + str(i))
            continue

        frame_ = seq_.m_frames[i]

        frame_id_ = frame_["id"]
        frame_timestamp_ = frame_["timestamp"]
        frame_camera_ = frame_["camera"]
        frame_objects_ = frame_["objects"]

        print(frame_timestamp_)
        res_camera_ = ucv.place_camera(ucv_client_, frame_camera_)
        while (res_camera_ is None):
            print("ERROR: Trying to place camera again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            res_camera_ = ucv.place_camera(ucv_client_, frame_camera_)

        ucv.place_objects(ucv_client_, frame_objects_)

        frame_rgb_ = cli_rgb.get_rgb(ucv_client_)
        while (frame_rgb_ is None):
            print("ERROR: Trying to get RGB frame again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            frame_rgb_ = cli_rgb.get_rgb(ucv_client_)

        frame_rgb_im_ = Image.fromarray(frame_rgb_)
        frame_rgb_im_.save("../data/" + seq_.m_name + "/rgb/" + frame_id_ +
                           ".png")

        frame_mask_ = cli_sgm.get_object_mask(ucv_client_)
        while (frame_mask_ is None):
            print("ERROR: Trying to get mask frame again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            frame_mask_ = cli_sgm.get_object_mask(ucv_client_)

        frame_mask_im_ = Image.fromarray(frame_mask_)
        frame_mask_im_.save("../data/" + seq_.m_name + "/mask/" + frame_id_ +
                            ".png")

        frame_depth_ = cli_dpt.get_depth(ucv_client_)
        while (frame_depth_ is None):
            print("ERROR: Trying to get depth frame again...")
            ucv_client_.disconnect()
            ucv_client_ = ucv.connect_client(cfg_.m_unrealengine_host,
                                             cfg_.m_unrealengine_port)
            frame_depth_ = cli_dpt.get_depth(ucv_client_)

        frame_depth_im_ = Image.fromarray(frame_depth_)
        cli_utils.save_16bit_png(
            frame_depth_,
            "../data/" + seq_.m_name + "/depth/" + frame_id_ + ".png",
            cam_.m_depthmin, cam_.m_depthmax)
Example #21
import os
import cv2
from utils.image import image_resize
from processing.filter import BaseFilter


from utils.configuration import Configuration
conf = Configuration("resources/config/configuration.ini")

class Filter(BaseFilter):
    def __init__(self, conf_section, conf_file):
        BaseFilter.__init__(self, conf_section, conf_file)
        self.path = conf.get("path","common")
        if not os.access(self.path, os.R_OK):
            self.path="resources/default-image.png"

    def meta(self):
        return {
            "name": "Select Image",
            "description": "Select the Input Image to process",
            "parameters": [
                {
                    "name": "path",
                    "label": "File",
                    "type": "string",
                    "value": self.path
                }
            ],
            "input": "filepicker",
            "output": "image"
        }
Example #22
### How to start to work with A.R.M.O.R
    1. helm repository is hosted here: https://yaroslavnikolaev.github.io/A.R.M.O.R./
    2. Deploy to your central cluster or to 
'''

COLLECTORS = '''
### ARMOR supports following collectors:
<table style="width:100%">  <tr>    <th>Application</th>    <th>Armor annotation key</th>    <th>Description</th>  </tr>
'''

STORAGES = '''\n### ARMOR supports following storages: 
- Prometheus \n'''

if __name__ == '__main__':
    '''Automatically generate Readme.md'''
    configuration = Configuration()
    factory = CollectorFactory(configuration)
    with open("./docs/README.md", "w") as readme:
        readme.write(INTRO)
        readme.write(STRUCTURE)
        readme.write(HOWTO)
        readme.write(COLLECTORS)
        # todo add description to collectors and storages use __doc__
        description = ""
        for key in sorted(factory.collectors.keys()):
            application = key.split(".")[-1]
            readme.write(
                f'''<tr>    <th>{application}</th>    <th>armor.io/{key}</th>    <th>{description}</th>  </tr>\n'''
            )
        readme.write("""</table> \n""")
        readme.write(STORAGES)
Example #23
    def __init__(self,
                 config: dict = {},
                 helper: PredictHelper = None,
                 py_logger=None,
                 tb_logger=None):
        self.na_config = Configuration({
            'debug': False,
            'pred_horizon': 6,
            'obs_horizon': 2,
            'freq': 2,
            'load_dataset': False,
            'version': 'v1.0-mini',
            'py_logging_path': None,
            'tb_logging_path': None
        })

        self.na_config.update(config)
        self.name = None

        self.py_logger = py_logger
        self.tb_logger = tb_logger

        self.dataroot = None
        if 'mini' in self.na_config['version']:
            self.dataroot = mini_path
        else:
            self.dataroot = full_path
        if self.py_logger is None and self.na_config[
                'py_logging_path'] is not None:
            print(f"py logging path: {self.na_config['py_logging_path']}")
            self.py_logger = logger
            self.py_logger.add(self.na_config['py_logging_path'] + "/log.txt")

        #     self.py_logger = logging.getLogger(self.name)
        #     print(f"py logging path: {self.na_config['py_logging_path']}")
        #     self.py_logger.addHandler(logging.FileHandler(os.path.join(self.na_config['py_logging_path'], 'log.txt'),  mode='w'))
        # if self.py_logger is not None:
        #     self.py_logger.propagate = False

        if self.tb_logger is None and self.na_config[
                'tb_logging_path'] is not None:
            self.tb_logger = SummaryWriter(
                log_dir=os.path.join(self.na_config['tb_logging_path']))

        self.helper = helper
        self.nusc = None
        if self.helper is not None:
            self.nusc = self.helper.data
        else:
            if self.dataroot is not None and self.na_config[
                    'version'] is not None and self.na_config[
                        'load_dataset'] and self.helper is None:
                self.nusc = NuScenes(dataroot=self.dataroot,
                                     version=self.na_config['version'],
                                     verbose=True)
                self.helper = PredictHelper(self.nusc)

        #### Initialize Map ####
        self.nusc_map_dict = {
            'boston-seaport':
            NuScenesMap(dataroot=self.dataroot, map_name='boston-seaport'),
            'singapore-hollandvillage':
            NuScenesMap(dataroot=self.dataroot,
                        map_name='singapore-hollandvillage'),
            'singapore-onenorth':
            NuScenesMap(dataroot=self.dataroot, map_name='singapore-onenorth'),
            'singapore-queenstown':
            NuScenesMap(dataroot=self.dataroot,
                        map_name='singapore-queenstown'),
        }

        #### Initialize CAN ####
        self.nusc_can = NuScenesCanBus(dataroot=self.dataroot)

        ####
        self.all_info = {'config': self.na_config}
Example #24
import os
import base64
import logging
import time

from flask import Flask, render_template, make_response, send_file, request

from utils.webgui import FlaskUI  # get the FlaskUI class

from pipeline import VideoPipeline
from grbl import GrblWriter
from utils.configuration import Configuration

configuration_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "config",
                 "configuration.ini"))
conf = Configuration(configuration_dir)

POOL_TIME = conf.get_int("image-read-ms") / 1000  # convert to Seconds
PIPELINE_FOLDER = os.path.abspath(
    os.path.join(os.path.dirname(__file__), conf.get("pipelines")))
SERIAL_PORT = conf.get("serial-port")
SERIAL_BAUD = conf.get_int("serial-baud")

grbl = GrblWriter(SERIAL_PORT, SERIAL_BAUD)

log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
ui = FlaskUI(app=app, port=8080)
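Example #24 reads four settings (image-read-ms, pipelines, serial-port, serial-baud) from config/configuration.ini via get and get_int. The file itself is not shown; a hypothetical layout consistent with those calls, with the section name and all values assumed, can be produced like this:

# Hypothetical configuration.ini for Example #24, written with configparser.
# The [common] section name and every value are assumptions, not project data.
import configparser

cfg = configparser.ConfigParser()
cfg["common"] = {
    "image-read-ms": "250",        # poll interval, converted to seconds above
    "pipelines": "../pipelines",   # folder scanned for pipeline .ini files
    "serial-port": "/dev/ttyUSB0",
    "serial-baud": "115200",
}
with open("configuration.ini", "w") as f:
    cfg.write(f)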
Example #25
 def __init__(self, request, client_address, server):
     """
     Creates an instance of the class and fetches the server configuration.
     """
     self._configuration = Configuration()
     super().__init__(request, client_address, server)
Example #26
    def __init__(self,
                 env_params={},
                 seed=0,
                 base_env=None,
                 suffix="",
                 reset=None,
                 logger=None,
                 port_num=None):
        self.FsaAugmentedEnv_config = Configuration(default_config)
        self.FsaAugmentedEnv_config.update(env_params)

        #### construct base env ####
        self.base_env = base_env
        if self.base_env is None and self.FsaAugmentedEnv_config.get(
            ['base_env', 'type']) is not None:
            # construct base
            self.base_env = self.FsaAugmentedEnv_config.get([
                'base_env', 'type'
            ])(self.FsaAugmentedEnv_config.get(['base_env', 'config']),
               port_num=port_num,
               suffix=suffix,
               reset=reset,
               seed=seed,
               logger=logger)

        if self.base_env is not None:
            if not os.path.isdir(
                    self.FsaAugmentedEnv_config.get('fsa_save_dir')):
                os.makedirs(self.FsaAugmentedEnv_config.get('fsa_save_dir'))
            self.base_env.set_seed(seed)
            self.logger = logger
            # construct fsa reward
            self.spec = self.FsaAugmentedEnv_config.get('spec')
            self.predicate_robustness = self.spec['predicate_robustness']
            self.fsa = Fsa()
            self.fsa.from_formula(
                self.FsaAugmentedEnv_config.get(['spec', 'predicate_form']))
            self.fsa.add_trap_state()
            self.fsa.visualize(
                draw='pydot',
                save_path=self.FsaAugmentedEnv_config.get('fsa_save_dir'),
                dot_file_name=self.FsaAugmentedEnv_config.get('dot_file_name'),
                svg_file_name=self.FsaAugmentedEnv_config.get('svg_file_name'))

            self.fsa_reward = FsaReward(self.fsa,
                                        self.FsaAugmentedEnv_config.get(
                                            ['spec', 'predicate_robustness']),
                                        logger=self.logger)

            self.qs = [
                v for k, v in viewitems(self.fsa_reward.aut_states_dict)
                if v != 1
            ]
            self.q = None  # this is the numerical representation of the automata state (we use Q to represent the string version)

        # seed the environment
        self.seed(seed)

        self.all_info = {}

        self.FsaAugmentedEnv_reset = reset

        self.state = None

        #### hack ####
        # self.load_switchon_policy()
        self.condimentapplied = -10

        from tl_utils.tl_config import TLConfig
        from utils.utils import get_object_goal_pose

        self.tl_conf = TLConfig(
            config={
                'robot': self.FsaAugmentedEnv_config.get('robot'),
                'mode': 'sim'
            })
        self.OBJECT_RELATIVE_POSE = self.tl_conf.OBJECT_RELATIVE_POSE
        self.get_object_goal_pose = get_object_goal_pose
Example #27
import time
import numpy as np
import torch

from config import config_dict
from datasets.generateData import generate_dataset
from net.generateNet import generate_net
import torch.optim as optim
from net.sync_batchnorm.replicate import patch_replication_callback
from torch.utils.data import DataLoader
from utils.configuration import Configuration
from utils.finalprocess import writelog
from utils.imutils import img_denorm
from utils.DenseCRF import dense_crf
from utils.test_utils import single_gpu_test
from utils.imutils import onehot

cfg = Configuration(config_dict, False)

def ClassLogSoftMax(f, category):
	exp = torch.exp(f)
	exp_norm = exp/torch.sum(exp*category, dim=1, keepdim=True)
	softmax = exp_norm*category
	logsoftmax = torch.log(exp_norm)*category
	return softmax, logsoftmax

def test_net():
	period = 'val'
	dataset = generate_dataset(cfg, period=period, transform='none')
	def worker_init_fn(worker_id):
		np.random.seed(1 + worker_id)
	dataloader = DataLoader(dataset, 
				batch_size=1, 
Example #28
 def __init__(self):
     self.configuration = Configuration()
Example #29
    def __init__(self, config={}, port_num=19997, suffix="", reset=None, seed=None, logger=None):

        super(CookingEnv, self).__init__(config, port_num)
        
        self.CookingEnv_config = Configuration(default_config)
        self.CookingEnv_config.update(config)

        
        self.CookingEnv_reset = reset
        self.object_handles = None
        
        self.logger = logger
        
        self.suffix = suffix
        if not suffix:
            self.suffix = self.CookingEnv_config.get('suffix')

        self.all_info = {}

        self.robot = self.CookingEnv_config.get('arm')
        ####
        if self.CookingEnv_config.get('arm') == 'baxter':
            rh = robot_handles['Baxter']
            self.init_angles = [-2.69, -61.47, -3.35, -23.27, 89.80, -2.68]
        elif self.CookingEnv_config.get('arm') == 'jaco':
            rh = robot_handles['Jaco']
            self.init_angles = [-2.69, -61.47, -3.35, -23.27, 89.80, -2.68]
        else:
            raise ValueError('arm not supported')

        #### world frame ####
        rc, self.world_frame_handle = vrep.simxGetObjectHandle(self.clientID, rh['world_frame_handle'], vrep.simx_opmode_blocking)

        #### joint handles ####
        self.joint_handles = []
        for jh in rh['joint_handles']:
            rc, h = vrep.simxGetObjectHandle(self.clientID, jh, vrep.simx_opmode_blocking) 
            self.joint_handles.append(h)
            
        #### gripper handles ####
        self.gripper_toggle_signal_name = rh['gripper_handles']['toggle_handle']
        
        _, self.gripper_attachpoint_handle = vrep.simxGetObjectHandle(self.clientID,
                                                                      rh['gripper_handles']['attachpoint_handle'],
                                                                      vrep.simx_opmode_blocking)
       
        rc, self.gripper_prox_sensor_handle = vrep.simxGetObjectHandle(self.clientID,
                                                                       rh['gripper_handles']['prox_sensor_handle'],
                                                                       vrep.simx_opmode_blocking)
        
        # rc, ds, dp, dh, dn = vrep.simxReadProximitySensor(self.clientID, self.gripper_prox_sensor_handle, vrep.simx_opmode_streaming)
        # while rc != 0:
        #     rc, ds, dp, dh, dn = vrep.simxReadProximitySensor(self.clientID, self.gripper_prox_sensor_handle, vrep.simx_opmode_buffer)
        self.gripper_state = 0
        self.set_gripper_state(self.gripper_state)    

        #### ee handle ####
        if self.CookingEnv_config.get('particle_test'):
            _, self.ee_handle = vrep.simxGetObjectHandle(self.clientID,
                                                         rh['particle_handle'],
                                                         vrep.simx_opmode_blocking)
        else:
            _, self.ee_handle = vrep.simxGetObjectHandle(self.clientID,
                                                         rh['ee_handle'],
                                                         vrep.simx_opmode_blocking)
        

        #### target handle ####
        if self.CookingEnv_config.get('particle_test'):    
            _, self.target_handle = vrep.simxGetObjectHandle(self.clientID,
                                                             rh['particle_target_handle'],
                                                             vrep.simx_opmode_blocking)
        else:
            _, self.target_handle = vrep.simxGetObjectHandle(self.clientID,
                                                             rh['ee_target_handle'],
                                                             vrep.simx_opmode_blocking)
            
        #### goal handle ####
        _, self.goal_handle = vrep.simxGetObjectHandle(self.clientID,
                                                         rh['goal_handle'],
                                                         vrep.simx_opmode_blocking)
  
        
        #### object handles ####
        self.object_handles = {}
        for oh in object_handles:
            _, h = vrep.simxGetObjectHandle(self.clientID, oh['handle'], vrep.simx_opmode_blocking)
            self.object_handles[oh['name']] = h
        

        #### obstacle handles ####
        self.obstacle_handles = []
        for obs_h in rh['obstacle_handles']:
            _, h = vrep.simxGetObjectHandle(self.clientID, obs_h['handle'], vrep.simx_opmode_blocking)
            self.obstacle_handles.append(dict(name=obs_h['name'], handle=h))
        
        
        #### ee sample region ####
        _, self.ee_sample_region_handle = vrep.simxGetObjectHandle(self.clientID, rh['ee_sample_region_handle'], vrep.simx_opmode_blocking)

        #### ee motion region ####
        _, self.ee_motion_region_handle = vrep.simxGetObjectHandle(self.clientID, rh['ee_motion_region_handle'], vrep.simx_opmode_blocking)