Beispiel #1
0
class RPCProxy(object):
    
    def __init__(self, uri, method=None, namespaces=None, sid=''):
        """Create an RPC proxy bound to a service URL.

        :param uri: service endpoint URL.
        :param method: remote method name this proxy will invoke.
        :param namespaces: a ready ClassLoader, or a plain list of namespaces.
        :param sid: session id forwarded with every call.
        """
        self.__serviceURL = uri
        self.__serviceName = method
        # Explicit branch replaces the old `cond and a or b` idiom, which
        # would wrongly rebuild a ClassLoader if a falsy ClassLoader instance
        # were ever passed in.
        if isinstance(namespaces, ClassLoader):
            self.namespaces = namespaces
        else:
            self.namespaces = ClassLoader(namespaces or [])
        self._seril = Serializer(self.namespaces)
        self.sid = sid
        self.logger = None
        # Hooks invoked around remote calls (see __call__).
        self.start_call_listener = []
        self.end_call_listener = []
    
    def __call__(self, *args, **kw):
        args = self._seril.serialize((args, kw))
        
        post_data = {"method": self.__serviceName, 
                     'params': args, 
                     'id':'httprpc',
                     'sid':self.sid}
        
        #@todo xx
        for l in self.start_call_listener:
            l(name=self.__serviceName, args=args, kw_args=kw)
        
        rpc_response = self.post_request(self.__serviceURL, post_data)
        try:
            respdata = rpc_response.read()
            ret, e = self._seril.deserialize(respdata)
        except Exception, e:
            raise RPCException("Failed to deserialize response data:%s, exception:%s" % 
                               (respdata, e))
        finally:
 def testSimpleSerialization(self):
     """Round-trip a test message through Serializer and compare attributes."""
     original = self.createTestMessage()
     stream = StringIO(Serializer.serialize(original))
     restored = Serializer.deserialize(stream, TestMessage1)
     self.assertDictEqual(original.__dict__, restored.__dict__)
Beispiel #3
0
    def __init__(self):
        """Message template 5007: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 5007
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('ClOrdID', 'Q'),
            ('QueueSize', 'I'),
            ('PenaltyRemain', 'I'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #4
0
def fit_implementation(project_name, version, registry_add):
    """Fit the estimator for a stored project/version and persist results.

    :param project_name: project whose model folder is resolved via Serializer.
    :param version: model version to load and fit.
    :param registry_add: callback(project_name, model, registered_instance)
        invoked when the fitted model declares a registered instance.
    :returns: the fit log messages, ``{'error': True}`` when the model could
        not be read, or ``{'exception': ...}`` when fitting failed.
    """
    path = Serializer().get_folder_name(project_name)
    # Renamed from `json` to avoid shadowing the stdlib module name.
    model_json = Serializer().read_model(path, version)

    if 'error' in model_json:
        return {'error': True}

    estimator, model, version = get_estimator(path, model_json, version)
    messages = []
    try:
        producer = estimator.fit(model)
        # fit() may be a generator of progress messages or a plain value.
        if inspect.isgenerator(producer):
            messages = list(producer)
        else:
            messages = producer
        Serializer().save_changes(path, version, model, add=False)
        Serializer().save_fit_logs(path, version, messages)
        if model.get_dashboard():
            Serializer().set_dashboard(path, model.get_dashboard())
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; the traceback is reported to the caller.
        return {'exception': traceback.format_exc()}
    if model.registered_instance is not None:
        registry_add(project_name, model, model.registered_instance)
    return messages if messages is not None else {}
Beispiel #5
0
class Logger:
    """Persist job log records through a ``Serializer`` over ``job_log``."""

    def __init__(self, db_connection, job_id=None):
        # All records are written through this serializer.
        self.log_serializer = Serializer(db_connection, job_log)
        self.job_id = job_id

    def set_job_id(self, job_id):
        """Attach subsequent log records to *job_id*."""
        self.job_id = job_id

    def log(self, summary, details='', level=LogLevel.INFO):
        """Write one timestamped record at *level* for the current job."""
        self.log_serializer.post({
            'job_id': self.job_id,
            'logged_at': datetime.now(),
            'log_level': level,
            'summary': summary,
            'details': details,
        })

    def warn(self, summary, details=''):
        self.log(summary, details, LogLevel.WARNING)

    def info(self, summary, details=''):
        self.log(summary, details, LogLevel.INFO)

    def error(self, summary, details=''):
        self.log(summary, details, LogLevel.ERROR)

    def exception(self):
        """Log the exception currently being handled (call inside ``except``)."""
        try:
            # Renamed from `eval`, which shadowed the builtin.
            etype, exc_value, tb = exc_info()
            summary = str(exc_value)[:512]
            # NOTE(review): format_exception returns a list of strings; it is
            # stored as-is, matching the original behavior — confirm the
            # serializer accepts a list for 'details'.
            details = traceback.format_exception(etype, exc_value, tb)
            self.error(summary, details)
        except Exception:
            self.error('Exception occurred while catching exception.')
Beispiel #6
0
    def __init__(self):
        """Message template 5001: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 5001
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('RequestTimestamp', 'Q'),
            ('KeepaliveInterval', 'L'),
            ('NextSeqNo', 'Q'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #7
0
    def __init__(self, public_key, private_key):
        """Store the key pair, register the miner, and build the CLI menu."""
        self.private_key = private_key
        self.public_key = public_key
        self.public_key_string = public_key.exportKey().decode("utf-8")

        # The wallet is keyed by the exported public-key string.
        self.wallet = Wallet(self.public_key_string)

        # Ask for a pseudonym only when this key has not been registered yet.
        pseudonym = resolve_pseudonym(self.public_key_string)
        if pseudonym == "":
            pseudonym = input("\nChoose a pseudonym: ")
        else:
            print(
                "\nThis key has already been registered.\nRegistered pseudonym: "
                + pseudonym)
        self.miner = Miner(pseudonym, self.public_key_string)
        self.s = Serializer()

        # Assemble the interactive menu text.
        self.menu = "".join([
            "\nchoose an number:",
            "\n1.) mine",
            "\n2.) check balance",
            "\n3.) make transaction",
            "\nchoice: ",
        ])
Beispiel #8
0
 def getAudit(self):
     """Fetch the iOS/Android audit and share switch states from the DB.

     Queries four single-row control tables and returns their flags as a
     dict. On failure the transaction is rolled back and an empty dict is
     returned. The session is always closed.
     """
     allStatus = {}  # fix: was unbound -> NameError on return when the query failed
     try:
         status = self.session.query(IsAudit).filter(IsAudit.id == 1).all()
         status_maJia = self.session.query(IsAuditPro).filter(IsAuditPro.id == 1).all()
         moments_control = self.session.query(MomentsControl).filter(MomentsControl.id == 1).all()
         moments_control_android = self.session.query(MomentsControlAndroid).filter(MomentsControlAndroid.id == 1).all()
         statusResult = Serializer(status, many=True).data
         statusPro = Serializer(status_maJia, many=True).data
         s_mc = Serializer(moments_control, many=True).data
         s_mca = Serializer(moments_control_android, many=True).data
         # iOS share switch
         iOSShareSwitch = s_mc[0]['control_status']
         # Android share switch
         androidShareSwitch = s_mca[0]['control_status']
         # iOS review (audit) switch
         iOSReviewSwitch = statusResult[0]['is_audit']
         # iOS alternate-bundle ("majia") review switch
         iOSMaJiaReviewSwitch = statusPro[0]['is_audit']
         allStatus = {
             'isAudit': iOSReviewSwitch,
             'iOSMaJiaAudit': iOSMaJiaReviewSwitch,
             'iOSShare': iOSShareSwitch,
             'androidShare': androidShareSwitch
         }
     except Exception:
         # Narrowed from a bare `except:`; undo the failed transaction.
         self.session.rollback()
     self.session.close()
     return allStatus
Beispiel #9
0
def serialize_file(filename, action):
    """Encrypt or decrypt *filename* via Serializer.

    :returns: the resulting file path, or None for an unrecognized action.
    """
    if action == 'encrypt':
        return Serializer.encrypt(filename)
    if action == 'decrypt':
        return Serializer.decrypt(filename)
    return None
Beispiel #10
0
    def __init__(self):
        """Message template 7011: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 7011
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('Timestamp', 'Q'),
            ('TradingSessionID', 'i'),
            ('TradSesEvent', 'B'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #11
0
def updateUTXO():
    """Rebuild the UTXO set from the latest block in db/blk.json.

    For each transaction in the newest block, inputs that reference a prior
    output are queued for deletion, and every output is queued for insertion
    keyed by the transaction's double-SHA256 id.
    """
    db = TinyDB('db/blk.json')
    latest_transactions = db.all()[-1]['Transactions']
    spent = []    # outputs consumed by this block's inputs
    created = []  # outputs created by this block
    for tx in latest_transactions:
        serialized = Serializer(Param(tx)).make()
        # txid = double SHA-256 of the serialized tx, byte-reversed, hex.
        # (renamed from `hash`, which shadowed the builtin)
        txid = sha256(sha256(
            binascii.unhexlify(serialized)).digest()).digest()[::-1].hex()
        for tx_in in tx['tx_in']:
            # An all-zero previous txid marks an input that spends nothing.
            if tx_in['Previous txid'] != "0000000000000000000000000000000000000000000000000000000000000000":
                spent.append({
                    'txid': tx_in['Previous txid'],
                    'index': tx_in['Previous Tx Index']
                })
        for index, tx_out in enumerate(tx['tx_out']):
            created.append({
                'value': tx_out['value'],
                'txid': txid,
                'index': index,
                'Public_Script': tx_out['Public Script']
            })
    deleteUTXO(spent)
    appendUTXO(created)
Beispiel #12
0
    def __init__(self):
        """Message template 6002: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 6002
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('ClOrdID', 'Q'),
            ('OrderID', 'q'),
            ('Account', '7s'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #13
0
    def prepare_different_items_list(self, rest_client, headers):
        """Match commodities from a remote XML feed against Magento products.

        :param rest_client: client used to fetch the XML feed.
        :param headers: headers forwarded to the Magento product listing.
        :returns: list of linked products for every commodity whose SKU was
            found in Magento.
        :raises SystemExit: when the XML feed is not reachable (non-200).
        """
        serializer = Serializer()
        url = self.config["global"]["xml_url"]
        xml = rest_client.send_get_binary(url, None, None)
        if xml[1] != 200:
            logging.info("No file found under the URI: %s, exiting now..." %
                         str(url))
            # `exit()` relies on the `site` module; raising SystemExit
            # directly has the same effect and always works.
            raise SystemExit(-1)

        commodities = serializer.deserialize_xml(str(xml[0], "utf8"))

        magento_products = list(self.get_product_list(headers)[0]["items"])
        # Index Magento products by SKU for O(1) lookup.
        products_by_sku = {product["sku"]: product
                           for product in magento_products}

        read_item_list = []
        for commodity in commodities:
            sku = commodity[1].text
            if not sku:
                # Commodities without an SKU are silently skipped,
                # matching the original behavior.
                continue
            if sku in products_by_sku:
                linked = self.link_products(products_by_sku[sku], commodity)
                if linked:
                    read_item_list.append(linked)
            else:
                logging.warning(
                    "There is no product of sku {0} in Magento database.".
                    format(sku))

        return read_item_list
Beispiel #14
0
    def __init__(self):
        """Message template 5008: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 5008
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('ClOrdID', 'Q'),
            ('RefTagID', 'I'),
            ('SessionRejectReason', 'B'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #15
0
def start_workflow(shared_state, start_date, review_number=0):
    """Run the download/upload workflow; mark the job FAILED on any error.

    Limits are read from the environment: MAX_DOWNLOADS (optional),
    MAX_UPLOADERS (default 20) and ALLOW_REPEAT ('TRUE'/'FALSE').
    """
    db_connection = setup_db().connect()
    logger = Logger(db_connection)

    shared_state.job_id = None
    shared_state.completed = False

    raw_max_downloads = environ.get('MAX_DOWNLOADS')
    max_downloads = int(raw_max_downloads) if raw_max_downloads is not None else None
    max_upload_workers = int(environ.get('MAX_UPLOADERS', 20))

    try:
        workflow = Workflow(
            db_connection,
            logger,
            start_date,
            max_downloads,
            max_upload_workers,
            environ.get('ALLOW_REPEAT', 'FALSE') == 'TRUE',
        )
        workflow.start(shared_state)
    except Exception:
        logger.exception()
        # Only a started job (job_id assigned) can be marked as failed.
        if shared_state.job_id is not None:
            job_serializer = Serializer(db_connection, job)
            job_serializer.put(shared_state.job_id, {
                'status': JobStatus.FAILED,
            })
Beispiel #16
0
    def __init__(self):
        """Message template 5005: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 5005
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('NextSeqNo', 'Q'),
            ('RequestTimestamp', 'Q'),
            ('Count', 'I'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #17
0
    def __init__(self):
        """Message template 5000: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 5000
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('Timestamp', 'Q'),
            ('KeepaliveInterval', 'L'),
            ('Credentials', '20s'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #18
0
def blockReturnHash():
    """HTTP endpoint: look up a block by its header hash.

    Recomputes each stored block's header hash; on a match, attaches the
    per-transaction hashes and returns the block as JSON (200), otherwise
    returns a 404 "not find" payload.
    """
    # Renamed from `hash`, which shadowed the builtin.
    target_hash = request.form['data']
    db = TinyDB('db/blk.json')
    blocks = db.all()

    # Default response when no block matches.
    response = app.response_class(response=json.dumps({"status": "not find"}),
                                  status=404,
                                  mimetype='application/json')
    for block in blocks:
        header = block["Block Header"]
        header_hash = sha256(
            (header['Version']).to_bytes(4, "little") +
            binascii.unhexlify(header['Previous Block Hash']) +
            binascii.unhexlify(header['Merkle Root']) +
            (header['Timestamp']).to_bytes(4, "little") +
            (binascii.unhexlify(header["Difficulty Target"])[::-1]) +
            (header['Nonce']).to_bytes(4, "little")).hexdigest()
        if header_hash == target_hash:
            tx_hashes = []
            for tx in block['Transactions']:
                serialized = Serializer(Param(tx)).make()
                tx_hashes.append(
                    sha256(binascii.unhexlify(serialized)).hexdigest())
            block["Transaction Data"] = tx_hashes
            response = app.response_class(response=json.dumps(block),
                                          status=200,
                                          mimetype='application/json')
            break
    return response
Beispiel #19
0
 def expectRepeat(self, tokens, level=0, debug=False):
     """Match one-or-more repetitions of the first child pattern.

     Generator protocol: first yields a success placeholder Serializer,
     then one Serializer per successful (recursive) parse, and finally a
     failure Serializer carrying the last error. Token position is saved
     and restored around every attempt so the stream is left untouched on
     failure.
     """
     yield Serializer(True, [], None)
     # if we get to this point we need at least one instance of this pattern.
     index = tokens.getIndex()
     exhausted = tokens.isExhausted()
     for value in self.children[0].expect(tokens, level + 1, debug):
         if value and (not tokens.isExhausted()):
             # Remember the position after this successful match so it can
             # be restored after each recursive continuation attempt.
             newIndex = tokens.getIndex()
             newExhaust = tokens.isExhausted()
             tValue = value.transform(lambda x: [x])
             # create a new instance of the repeat pattern
             # since the first thing this does is yield True,
             # we don't do it here.
             for newValue in self.expectRepeat(tokens, level, debug):
                 if newValue:
                     # Prepend this match's args to the continuation's.
                     tNewValue = newValue.transform(
                         lambda x: tValue.getArgs() + x)
                     yield tNewValue
                 tokens.setIndex(newIndex, newExhaust)
         elif value and tokens.isExhausted():
             # Stream consumed: this single match terminates the repeat.
             tValue = value.transform(lambda x: [x])
             yield tValue
         tokens.setIndex(index, exhausted)
     # NOTE(review): if expect() above yields nothing, `value` (and
     # `newValue`) are unbound here and the lines below raise NameError —
     # confirm expect() always yields at least once.
     err = "Unknown Error in expectRepeat"
     if not value:
         err = value.getError()
     else:
         if not newValue:
             err = newValue.getError()
     yield Serializer(False, None, err)
Beispiel #20
0
    def __init__(self):
        """Message template 7006: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 7006
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('ClOrdID', 'Q'),
            ('Timestamp', 'Q'),
            ('OrdRejReason', 'i'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
Beispiel #21
0
 def __init__(self, problem, config):
     """Load the configuration next to this module and run *problem*."""
     self.abs_path = os.path.dirname(os.path.abspath(__file__))
     self.serializer = Serializer()
     self.config_file = config
     config_path = self.abs_path + "/" + config
     self.config = self.serializer.read_config(config_path)
     self.ik_solution_generator = IKSolutionGenerator()
     self.clear_stats(problem)
     self.run(problem)
Beispiel #22
0
class Collect:
    """Run an EBC data-collection session: show stimuli full-screen while
    capturing pictures and signals, then ask for SAM ratings and persist
    everything via Serializer.

    All parameters come from EBC_* environment variables.
    """

    def __init__(self):
        # Full-screen OpenCV window used to display the stimuli.
        cv2.namedWindow('stimulus', cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty('stimulus', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        self.serializer = Serializer(int(os.getenv("EBC_SUBJECT_ID")),
                                     int(os.getenv("EBC_SESSION_ID")))

    def __del__(self):
        # Best-effort cleanup of the display window.
        cv2.destroyWindow('stimulus')

    def run(self):
        """Execute the capture loop: one iteration per stimulus picture."""
        with Manager() as manager:
            # Shared between the capture subprocesses and this loop.
            capture_index = manager.Value(int, value=0)
            with PicutureCapture(
                    float(os.getenv("EBC_PICTURE_CAPTURE_INTERVAL")),
                    os.getenv("EBC_PICTURE_CAPTURE_OUTPUT_PATH"),
                    capture_index, manager.Event(),
                    manager.Event()) as picture_capture:
                with SignalCapture(os.getenv("EBC_SIGNAL_PORT"),
                                   float(os.getenv("EBC_SIGNAL_BOUND_RATE")),
                                   manager.list(), 0, capture_index,
                                   manager.Event(),
                                   manager.Event()) as signal_capture:
                    stimulus = Stimulus(
                        int(os.getenv("EBC_STIMULUS_EXPOSITION_TIME")),
                        os.getenv("EBC_STIMULUS_IAPS_PATH"),
                        list(
                            map(
                                str,
                                json.loads(
                                    os.getenv("EBC_STIMULUS_SELECTED_SET")))))
                    sam = SelfAssessmentManikin(
                        int(os.getenv('EBC_SAM_TIME_LIMIT', "15")))
                    for i, picture in enumerate(stimulus.stimulus_list):
                        capture_index.value = i
                        logger.info('Capture {} - Picture {}'.format(
                            i, picture))
                        # Capture brackets the stimulus exhibition.
                        signal_capture.start_capture()
                        picture_capture.start_capture()
                        stimulus.exhibit_stimulus()
                        picture_capture.stop_capture()
                        signal_capture.stop_capture()
                        signals = signal_capture.pop_buffer()
                        try:
                            sam.apply_test()
                        except ValueError:
                            # SAM rating invalid/timed out; the capture is
                            # logged as missed but the session continues.
                            logger.warning(
                                "Incorrect response in SAM, missed capture {:04}",
                                capture_index.value)
                        # NOTE(review): brace-style args suggest a loguru-like
                        # logger; stdlib logging would not format these —
                        # confirm which logger is in use. ("Singnals" typo is
                        # in the runtime string and left untouched.)
                        logger.info("{:03} Singnals: {}", len(signals),
                                    signals)
                        logger.info("Valence: {} | Arousal: {}", sam.valence,
                                    sam.arousal)
                        self.serializer.serialize(capture_index.value,
                                                  stimulus.stimulus,
                                                  sam.valence, sam.arousal,
                                                  signals)
Beispiel #23
0
 def save_to_file(self):
     """Ask the user for a target path and serialize the current data there."""
     self.update_data()
     try:
         # getSaveFileName returns (path, selected_filter); only the path is used.
         dialog_result = QtWidgets.QFileDialog.getSaveFileName(
             self, 'Save file...', os.path.dirname(os.getcwd()))
         Serializer.serialize(dialog_result[0], self.data)
     except OSError:
         QtWidgets.QMessageBox.critical(
             self, 'Error', 'Во время сохранения файла произошла ошибка')
Beispiel #24
0
 def __init__(self, pseudonym, public_key):
     """Register this miner with the local registry service."""
     self.public_key = public_key
     self.serializer = Serializer()
     payload = {
         "pseudonym": pseudonym,
         "public_key": public_key,
     }
     response = requests.post("http://localhost:5000/register_miner", data=payload)
     # The endpoint responds with an empty body on success and an error
     # message otherwise.
     if response.text != "":
         raise ValueError(response.text)
Beispiel #25
0
 def on_get(self, request, response):
     """Return the predictions model of the requested project (token-gated)."""
     if not check_token(request):
         response.media = {'error': True, 'message': 'Not authorized'}
         return
     data = json.loads(request.stream.read().decode())
     folder = Serializer().get_folder_name(data.get('project', ''))
     result = Serializer().read_project(folder)
     if 'predictions_model' in result:
         response.media = {"predictions_model": result['predictions_model']}
     else:
         response.media = result
    def play(self):
        """
        Run the main game loop until the player dies or wins.

        Each turn: show status and map, read a direction (or 'save'/'load'),
        move the player, and check the end conditions.

        :returns: None.
        :rtype: None.
        """

        custom_log.logger.info(
            "---------------------------------------------------")

        while True:

            valid_direction = self.game_map.get_valid_directions()

            custom_log.logger.info(
                "Input 'save'/'load' to save/load the game.")
            custom_log.logger.info(f"Valid directions - {valid_direction}")

            self.player.check_status(self.game_map)
            self.player.print_status()

            self.game_map.print_map()

            direction = DungeonInput.get_direction()

            # 'save'/'load' are handled before movement validation.
            if direction == "save":
                Serializer.save_map(self.game_map)
                continue

            elif direction == "load":
                new_game_map = Serializer.load_map()

                # load_map signals failure with an empty string.
                if new_game_map == "":
                    custom_log.logger.error("Failed to Load Game")
                    continue

                self.game_map = new_game_map
                continue

            if direction not in valid_direction:
                custom_log.logger.warning("Can't move there!")
                continue

            self.player.move(self.game_map, direction)

            custom_log.logger.info(
                "---------------------------------------------------")

            # End conditions: death or collecting the winning treasure count.
            if self.player.hp == 0:
                self.end_game()
                break
            elif self.player.bag == Game.TREASURE_TO_WIN:
                self.win_game()
                break
Beispiel #27
0
    def __init__(self):
        """Message template 7008: four header fields plus message payload.

        Field layout is given as (name, struct-format-code) pairs.
        """
        tag = 7008
        layout = [
            ('blockLength', 'H'),
            ('templateId', 'H'),
            ('schemaId', 'H'),
            ('version', 'H'),
            ('ClOrdID', 'Q'),
            ('Timestamp', 'Q'),
            ('OrderID', 'q'),
            ('TrdMatchID', 'q'),
            ('Flags', 'q'),
            ('LastPx', 'q'),
            ('LastQty', 'I'),
            ('OrderQty', 'I'),
            ('TradingSessionID', 'i'),
            ('ClOrdLinkID', 'i'),
            ('SecurityID', 'i'),
            ('Side', 'B'),
        ]
        fields = [{'name': name, 'fmt': fmt} for name, fmt in layout]
        Serializer.__init__(self, tag, fields)
def test_serializer_for_no_chinese_split():
    """With do_chinese_split=False the serializer lowercases, strips
    surrounding whitespace, and splits Chinese text per character."""
    english = "\nI\'m  his pupp\'peer, and i have a ball\t"
    mixed = '\t叫Stam一起到nba打篮球\n'
    dated = '\n\n现在时刻2014-04-08\t\t'

    serializer = Serializer(do_chinese_split=False)
    results = [serializer.serialize(sample)
               for sample in (english, mixed, dated)]

    assert results[0] == ['i', "'", 'm', 'his', 'pupp', "'", 'peer', ',', 'and', 'i', 'have', 'a', 'ball']
    assert results[1] == ['叫', 'stam', '一', '起', '到', 'nba', '打', '篮', '球']
    assert results[2] == ['现', '在', '时', '刻', '2014', '-', '04', '-', '08']
class Calculator:
    """Manage a Serializer of figures: compute areas and persist/restore
    the serializer to a binary file."""

    def __init__(self, side_length):
        self._serializer = Serializer(side_length)
        self._file_name = "serializer.bin"

    @property
    def serializer(self):
        """The managed Serializer instance."""
        return self._serializer

    def calculate_sum_area(self):
        """Recompute each figure's area and store the rounded total."""
        sum_areas = 0
        for figure in self._serializer.figures_list:
            figure.calc_area()
            sum_areas += figure.area
        self._serializer.sum_areas = round(sum_areas)

    def save(self):
        """Serialize the current state to the backing file."""
        with open(self._file_name, "wb") as file:
            file.write(self._serializer.serialize())
            file.flush()
            # fix: removed redundant file.close() — the `with` block closes it.

    def restore(self):
        """Replace the current serializer with the one stored on disk."""
        with open(self._file_name, "rb") as file:
            self._serializer = Serializer.deserialize(file.read())
            # fix: removed redundant file.close() — the `with` block closes it.

    def show(self):
        """Print the serializer's objects."""
        self._serializer.print_objects()

    def update_figures(self, side_length):
        """Resize the figures and refresh the total area if one was computed."""
        self._serializer.update_figures(side_length)
        if self._serializer.sum_areas != 0:
            self.calculate_sum_area()

    @staticmethod
    def binary_to_integer(binary_value):
        """
        This function converts binary value as a string to decimal integer value.

        Note: any character other than '1' counts as 0, and an empty string
        yields 0 (so this is deliberately looser than int(s, 2)).

        :param binary_value: string binary value
        :return: decimal integer value
        """
        integer_value = 0
        # Walk from least-significant bit (rightmost char) upward.
        for binary_index, char in enumerate(binary_value[::-1]):
            if char == '1':
                integer_value += pow(2, binary_index)

        return int(integer_value)
Beispiel #30
0
 def __init__(self, device, dataset, verbose, save, save_aux):
     """Set up data loaders and default training attributes."""
     self.dataset_name = dataset
     self.device = device
     self.train_loader, self.valid_loader = get_data(dataset=dataset)
     self.serializer = Serializer()
     self.target = 'reconstruction'
     self.verbose = verbose
     self.save = save
     self.save_aux = save_aux
     # Populated later by the training pipeline.
     self.trainer_basis = None
     self.writer = None
     self.model_name = None
     self.config_dict = None
     self.transformer = None
Beispiel #31
0
    def check_serialized_name(self, name, expected_indexes_with_ones):
        """Serialize *name* and verify its one-hot encoding.

        :param name: value passed to serialize_example.
        :param expected_indexes_with_ones: per-position index of the 1 in
            each one-hot vector, or None when serialization should fail.
        """
        serializer = Serializer(['Germany', 'France'])
        serialized_example = serializer.serialize_example(name)

        if expected_indexes_with_ones is None:
            self.assertIsNone(serialized_example)
            return

        self.assertEqual(len(expected_indexes_with_ones), len(serialized_example))

        # Each position must be one-hot with the 1 at the expected index.
        # (zip replaces the range(len(...)) index loop; lengths were just
        # asserted equal.)
        for expected_index, serialized_char in zip(expected_indexes_with_ones,
                                                   serialized_example):
            self.assertEqual(sum(serialized_char), 1.0)
            self.assertEqual(serialized_char[expected_index], 1)
Beispiel #32
0
def record_voice():
    """Record audio while a GPIO button is held, save it to out.wav, then
    send it to a transcription service and return the transcribed text.

    Hardware: button on GPIO 27, LED on GPIO 18, default audio input.
    """
    # globals
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    CHUNK = 1024
    RECORD_SECONDS = 3
    button = Button(27)
    led = LED(18)

    # start recording
    audio = pyaudio.PyAudio()
    stream = audio.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)
    # print("recording...")
    frames = []

    # record for RECORD_SECONDS
    # (NOTE(review): despite the comment and RECORD_SECONDS constant, the
    # loop actually records for as long as the button is held.)
    button.wait_for_press()
    led.on()
    while button.is_pressed:
        # for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK, exception_on_overflow=False)
        frames.append(data)
    led.off()
    # print("finished recording")

    # stop Recording
    stream.stop_stream()
    stream.close()
    audio.terminate()

    # write your new .wav file with built in Python 3 Wave module
    waveFile = wave.open("out.wav", 'wb')
    waveFile.setnchannels(CHANNELS)
    waveFile.setsampwidth(audio.get_sample_size(FORMAT))
    waveFile.setframerate(RATE)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()

    # stt
    # The serialized wav bytes are posted to the transcription endpoint.
    s = Serializer()
    with open("out.wav", "rb") as fin:
        r = requests.post(url="http://192.168.1.172:8000/transcribe",
                          data={"audio": s.serialize(fin.read())})
    print(r.json())
    return r.json()["msg"]
Beispiel #33
0
class RPCStub(object):
    """Server-side RPC stub (Python 2 syntax): deserializes a request,
    invokes the named method on `stub`, and logs the outcome.

    NOTE(review): `process` assigns `ret`/`exception` but, as visible in
    this snippet, never returns or serializes them — the method may be
    truncated here; confirm against the full source.
    """
    def __init__(self, uri, stub, namespace=None):
        self.uri = uri
        self.stub = stub
        self.namespace = namespace
        self._seril = Serializer(namespace)
        self.logger = None
        
    def process(self, method, data):
        """Dispatch `method` with serialized arguments `data`."""
        ret = exception = None
        try:
            args, kw = self._seril.deserialize(data)
            try:
                # Logging is best-effort: a failure to log must not fail the call.
                self.logger and self.logger.info(u"method:%s, args:%s, kw:%s" % (method,
                                                args, kw))
            except Exception, e:
                #@todo:  fix the decode problem.
                self.logger and self.logger.info(str(e))
                
            
            # Resolve the target handler on the stub object.
            h = self._local_service(self.stub, method)
            if h is None: raise RPCException(u"Not found interface '%s'" % method)
            
            ret = h(*args, **kw)
            
            self.logger and self.logger.info("return:%s" % (ret, ))
        except BaseException, e:
            exception = e
            self.logger and self.logger.exception(e)
Beispiel #34
0
class NodeMaker:
    """Builds a word co-occurrence graph from tokenized Japanese text.

    Morphemes are filtered to adjectives/nouns (see PosNo), frequency-counted
    in ``self.words`` and linked in ``self.graph`` within a sliding window.
    """
    def __init__(self):
        self.ser = Serializer()        # encodes (posid, surface, role) node keys
        self.words = defaultdict(int)  # (posid, original) -> frequency
        self.graph = Graph()
    def PosNo(self, morph):
        # Returns 2 for adjectives, 3 for nouns (i + 2 over the list below),
        # and 0 for any other part of speech.
        for i, p in enumerate([u'形容詞', u'名詞']):
            if p in morph.pos():
                return i+2
        return 0
    def regist(self, text):
        """Tokenize ``text`` and feed morpheme windows into consume() per
        blank-line-delimited block."""
        lines = text.split('\n')
        lst = []
        for lnum, line in enumerate(lines):
            # NOTE(review): this parses the *whole* ``text`` once per line --
            # ``wakachi.parse(line)`` was probably intended; confirm before use.
            morphs = wakachi.parse(text)
            for morph in morphs:
                if self.PosNo(morph):
                    lst.append(morph)
                    self.words[(morph.posid, morph.original)] += 1
                else:
                    # Placeholder keeps window distances intact.
                    lst.append(None)
            lst += [None]*5
            if line == '':
                self.consume(lst)
                lst = []
        self.consume(lst)
    def consume(self, lst, back=3, fore=10): #0:N, 1:V, 2:Y
        """Register nodes and window-based edges for each retained morpheme."""
        size = len(lst)
        for i in xrange(size):
            if lst[i] is None: continue
            posno = self.PosNo(lst[i])
            node = []
            # NOTE(review): ``lst[i].original()`` is *called* here but accessed
            # as an attribute (``morph.original``) in regist() above -- one of
            # the two is likely wrong; verify against the morph API.
            for x in xrange(posno):
                node.append(self.ser.encode((lst[i].posid, lst[i].original(), x)))
                self.graph.registerNode(node[x])
            #for node = V
            for j in xrange(max(0,i-fore), min(size,i+back)):
                if lst[j] is None or self.PosNo(lst[j]) == 2: continue
                ny = self.ser.encode((lst[j].posid, lst[j].original(), 2))
                self.graph.addEdge(node[1], ny)
            #for node = Y
            if posno == 3:
                for j in xrange(max(0,i-back), min(size,i+fore)):
                    if lst[j] is None: continue
                    nv = self.ser.encode((lst[j].posid, lst[j].original(), 1))
                    self.graph.addEdge(node[2],nv)
Beispiel #35
0
 def __init__(self, problem, config):
     """Load the YAML config located next to this module and immediately run
     the whole experiment pipeline for ``problem``.
     """
     self.abs_path = os.path.dirname(os.path.abspath(__file__))
     self.serializer = Serializer()
     self.config_file = config
     # ``config`` is a filename relative to this module's directory.
     self.config = self.serializer.read_config(self.abs_path + "/" + config)        
     self.ik_solution_generator = IKSolutionGenerator()        
     # Reset any previous statistics, then run everything from the constructor.
     self.clear_stats(problem)
     self.run(problem)
    def _fetch_metadata_by_json(self, json_obj):
        """ parses incoming json object representation and fetches related 
        object metadata from the server. Returns None or the Metadata object.

        This method is called only when core object is not found in cache, and 
        if metadata should be requested anyway, it could be done faster with 
        this method that uses GET /neo/<obj_type>/198272/metadata/

        Currently not used, because inside the pull function (in cascade mode) 
        it may be faster to collect all metadata after the whole object tree is 
        fetched. For situations, when, say, dozens of objects tagged with the
        same value are requested, it's faster to fetch this value once at the 
        end rather than requesting related metadata for every object.
        """
        # dict.has_key() was removed in Python 3; the ``in`` operator is the
        # portable membership test and behaves identically here.
        if 'metadata' not in json_obj['fields'] or \
            not json_obj['fields']['metadata']:
            return None # no metadata field or empty metadata

        url = json_obj['permalink']

        # TODO move requests to the remote class
        resp = requests.get( url + 'metadata' , cookies=self._meta.cookie_jar )
        raw_json = get_json_from_response( resp )

        # Map non-200 responses onto the project's error hierarchy.
        if resp.status_code != 200:
            message = '%s (%s)' % (raw_json['message'], raw_json['details'])
            raise errors.error_codes[resp.status_code]( message )

        if not raw_json['metadata']: # if no objects exist return empty result
            return None

        mobj = Metadata()
        # Each metadata entry is a (property, value) pair: deserialize both,
        # attach the value to its property, and cache the two objects.
        for p, v in raw_json['metadata']:
            prp = Serializer.deserialize(p, self)
            val = Serializer.deserialize(v, self)
            prp.append( val )

            # save both objects to cache
            self._cache.add_object( prp )
            self._cache.add_object( val )

            setattr( mobj, prp.name, prp )

        return mobj # Metadata object with list of properties (tags)
Beispiel #37
0
 def __init__(self, uri, method=None, namespaces=None, sid=''):
     """Configure the proxy endpoint, target method name and serializer.

     ``namespaces`` may be an already-built ClassLoader, or a plain list
     (or None) from which a ClassLoader is constructed.
     """
     self.__serviceURL = uri
     self.__serviceName = method
     # Conditional expression replaces the fragile ``X and Y or Z`` idiom:
     # the old form would wrongly fall through to the ``or`` branch if the
     # ClassLoader instance ever evaluated falsy.
     self.namespaces = namespaces if isinstance(namespaces, ClassLoader) \
                           else ClassLoader(namespaces or [])
     self._seril = Serializer(self.namespaces)
     self.sid = sid
     self.logger = None
     # Hooks invoked around each remote call (see RPCProxy.__call__).
     self.start_call_listener = []
     self.end_call_listener = []
Beispiel #38
0
 def add(self, product_id, unit_cost, quantity=1, **extra_data_dict):
     """Add *quantity* units of *product_id* at *unit_cost* to the cart.

     Extra attributes may be supplied as keyword arguments and are stored
     alongside the product entry.
     """
     # Build the product payload and persist it in the user's redis hash.
     payload = self.__product_dict(unit_cost, quantity, extra_data_dict)
     self.redis_connection.hset(
         self.user_redis_key, product_id, Serializer.dumps(payload))
     # Refresh the cached cart-existence flag and (re)arm the cart TTL.
     self.user_cart_exists = self.cart_exists(self.user_id)
     self.set_ttl()
Beispiel #39
0
 def get_product(self, product_id):
     """Return the stored cart entry for *product_id* as a dict.

     An empty dict is returned when the product is not present; an
     ErrorMessage is raised when the user has no cart at all.
     """
     # Guard clause: no cart means nothing can be looked up.
     if not self.user_cart_exists:
         raise ErrorMessage("The user cart is Empty")
     raw = self.redis_connection.hget(self.user_redis_key, product_id)
     return Serializer.loads(raw) if raw else {}
Beispiel #40
0
    def write(self, buf, offset=0):
        """
        Updates object information 

        :param buf:     a YAML representation of an object.
        :type buf:      str

        :return:        0 on success or a negative error code.
        """
        try:
            # Build a fresh model instance from the YAML buffer, but keep the
            # identity of the object being updated.
            new = Serializer.deserialize(self.model_instance.__class__, buf)
            new.id = self.model_instance.id

        except Exception, e:
            return -1 # TODO find a way to handle exceptions better..
Beispiel #41
0
 def __init__(self, model, **kwargs):
     """Describe a model for serialization.

     Derives the field list, relation accessor names, and id/set naming
     conventions from the model's ``_meta`` unless overridden via kwargs.
     (``_meta.get_all_field_names`` / ``get_accessor_name`` look like the
     Django ORM API — TODO confirm.)
     """
     self.model = model
     # An explicit "fields" kwarg wins; otherwise serialize every model field.
     self.fields = kwargs.get("fields") or self.model._meta.get_all_field_names()
     self.name = kwargs.get("name")
     self.parent_name = kwargs.get("parent_name", False)
     if self.parent_name:
         self.parent_id = self.parent_name + "_id"
     else:
         self.parent_id = ""
     # Conventional foreign-key and reverse-set attribute names.
     self.id = self.name + "_id"
     self.set_name = self.name + "_set"
     self.relations = kwargs.get("relations") or [
         rel.get_accessor_name() for rel in model._meta.get_all_related_objects()
     ]
     self.parent_model = kwargs.get("parent_model")
     self.serialize = Serializer(self.fields)
    def __push_data_from_obj(self, obj):
        """ saves array data to disk in HDF5s and uploads new datafiles to the 
        server according to the arrays of the given obj. Saves datafile objects 
        to cache.

        Returns a dict mapping each array attribute name to either
        {'data': <permalink>, 'units': <units>} or None for attributes that
        were not changed/present.
        """
        data_refs = {} # collects all references to the related data - output
        model_name = self._meta.get_type_by_obj( obj )

        data_attrs = self._meta.get_array_attr_names( model_name )
        attrs_to_sync = self._cache.detect_changed_data_fields( obj )

        for attr in data_attrs: # attr is like 'times', 'signal' etc.

            arr = None
            if attr in attrs_to_sync:
                # 1. get current array and units
                fname = self._meta.app_definitions[model_name]['data_fields'][attr][2]
                if fname == 'self':
                    arr = obj # some NEO objects like signal inherit array
                else:
                    arr = getattr(obj, fname)

            # Explicit type check instead of ``arr is None`` / ``arr == None``
            # because NEO overloads __eq__ (see original comment).
            if not type(arr) == type(None): # because of NEO __eq__
                units = Serializer.parse_units(arr)

                # 2. persist locally, then upload to the server.
                datapath = self._cache.save_data(arr)
                json_obj = self._remote.save_data(datapath)

                # update cache data map
                datalink = json_obj['permalink']
                fid = str(get_id_from_permalink( datalink ))

                # Rename the local file so its name embeds the server id,
                # keeping the original extension.
                folder, tempname = os.path.split(datapath)
                new_path = os.path.join(folder, fid + tempname[tempname.find('.'):])
                os.rename(datapath, new_path)
                self._cache.update_data_map(fid, new_path)

                data_refs[ attr ] = {'data': datalink, 'units': units}

            else:
                # Unchanged (or absent) attributes are recorded as None.
                data_refs[ attr ] = None

        return data_refs
def fix_peewee_obj(obj):
    """Serialize a single peewee model instance using a fresh Serializer."""
    return Serializer().serialize_object(obj)
Beispiel #44
0
    def __init__(self, selfNodeAddr, otherNodesAddrs, conf=None):
        """Initialize Raft state, serializer, poller and RPC method tables
        for this node; optionally start the background tick thread.

        NOTE(review): Python 2 module naming (``Queue.Queue``).
        """
        if conf is None:
            self.__conf = SyncObjConf()
        else:
            self.__conf = conf

        # --- Raft protocol state (follower by default, log seeded with a
        # sentinel entry so commit/applied indices start at 1). ---
        self.__selfNodeAddr = selfNodeAddr
        self.__otherNodesAddrs = otherNodesAddrs
        self.__unknownConnections = {}  # descr => _Connection
        self.__raftState = _RAFT_STATE.FOLLOWER
        self.__raftCurrentTerm = 0
        self.__votesCount = 0
        self.__raftLeader = None
        self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
        self.__raftLog = []  # (command, logID, term)
        self.__raftLog.append((None, 1, self.__raftCurrentTerm))
        self.__raftCommitIndex = 1
        self.__raftLastApplied = 1
        self.__raftNextIndex = {}
        self.__raftMatchIndex = {}
        self.__lastSerializedTime = time.time()
        self.__forceLogCompaction = False
        self.__socket = None
        self.__resolver = DnsCachingResolver(self.__conf.dnsCacheTime, self.__conf.dnsFailCacheTime)
        self.__serializer = Serializer(self.__conf.fullDumpFile, self.__conf.logCompactionBatchSize)
        self.__poller = createPoller()
        self.__isInitialized = False
        self.__lastInitTryTime = 0

        # --- RPC method tables: sorted names give every node an identical
        # method<->id mapping without negotiation. ---
        self._methodToID = {}
        self._idToMethod = {}
        methods = sorted([m for m in dir(self) if callable(getattr(self, m))])
        for i, method in enumerate(methods):
            self._methodToID[method] = i
            self._idToMethod[i] = getattr(self, method)

        self.__thread = None
        self.__mainThread = None
        self.__initialised = None
        self.__commandsQueue = Queue.Queue(self.__conf.commandsQueueSize)
        self.__nodes = []
        self.__newAppendEntriesTime = 0

        self.__commandsWaitingCommit = collections.defaultdict(list)  # logID => [(termID, callback), ...]
        self.__commandsLocalCounter = 0
        self.__commandsWaitingReply = {}  # commandLocalCounter => callback

        # Snapshot the attribute names that exist at construction time
        # (used elsewhere to distinguish infrastructure from user state).
        self.__properies = set()
        for key in self.__dict__:
            self.__properies.add(key)

        if self.__conf.autoTick:
            self.__mainThread = threading.current_thread()
            self.__initialised = threading.Event()
            self.__thread = threading.Thread(target=SyncObj._autoTickThread, args=(weakref.proxy(self),))
            self.__thread.start()
            # Busy-wait until the tick thread signals it finished init.
            while not self.__initialised.is_set():
                pass
        else:
            self.__initInTickThread()
Beispiel #45
0
 def __init__(self, uri, stub, namespace=None):
     """Bind the served endpoint and stub object, and build the serializer
     used to decode incoming requests."""
     self.logger = None
     self.uri = uri
     self.stub = stub
     self.namespace = namespace
     self._seril = Serializer(namespace)
Beispiel #46
0
 def __init__(self):
     # Delegate straight to the base Serializer initializer; this subclass
     # adds no state of its own here.
     Serializer.__init__(self)
Beispiel #47
0
 def __init__(self, save):
     """Run the whole plotting/statistics pipeline from the constructor:
     load kinematics and logs from ./stats, then emit videos and plots,
     tolerating individual plot failures.

     NOTE(review): the bare ``except:`` clauses below swallow *all*
     exceptions (including KeyboardInterrupt); narrowing them to
     ``except Exception`` would be safer but changes behavior.
     """
     logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
     if not os.path.isdir("stats"):
         os.makedirs("stats")      
     self.save = save
     self.utils = Utils()
     logging.info("Loading cartesian coordinates...") 
     serializer = Serializer()
     
     # Pull robot link dimensions out of a throwaway environment instance.
     rave_env = Environment()
     f = "stats/model/test.urdf"
     rave_env.initializeEnvironment(f)
     self.link_dimensions = rave_env.getRobotLinkDimensions()
     rave_env.destroy()
     
     self.setup_kinematics(serializer, "stats")
     # Collect one process-covariance entry per log file in ./stats.
     self.process_covariances = []
     for logfile in glob.glob("stats" + "/*.log"):
         self.process_covariances.append(serializer.read_process_covariance(logfile))
     #arr = serializer.deserialize_joint_angles(path="stats", file="state_path1.txt")        
     #self.plot_state_path(serializer, arr)
     #sleep()
     
     self.create_video(serializer, "stats")
     logging.info("plotting paths")  
     try:  
         self.plot_paths(serializer)
     except:
         logging.warn("Paths could not be plotted")        
     logging.info("Reading config") 
     config = serializer.read_config("config.yaml", path="stats")
         
     #try:
     #self.plot_end_effector_paths(serializer, plot_scenery=True, plot_manipulator=True)
     #except:
     #    print "Error. End effector paths could not be plotted"
     
     logging.info("Plotting mean planning times")
     self.plot_mean_planning_times(serializer, 
                                   dir="stats", 
                                   filename="mean_planning_times_per_step*.yaml", 
                                   output="mean_planning_times_per_step.pdf")
     self.plot_mean_planning_times(serializer, 
                                   dir="stats", 
                                   filename="mean_planning_times_per_run*.yaml", 
                                   output="mean_planning_times_per_run.pdf")
     
     self.plot_particles(serializer, particle_limit=config['particle_plot_limit'])
     
     try:
         self.plot_paths(serializer, best_paths=True)
     except:
         logging.warn("Best_paths could not be plotted")
         
     # NOTE(review): if this load fails, ``cart_coords`` stays unbound and
     # the plots below that use it raise NameError (silently caught by
     # their own except clauses).
     try:
         cart_coords = serializer.load_cartesian_coords(path="stats")
     except:
         logging.warn("Cartesian_coords could not be plotted")
     
     logging.info("Plotting average distance to goal" )       
     try:
         self.plot_average_dist_to_goal(serializer, cart_coords)
     except:
         logging.warn("Average distance to goal could not be plotted")
     
     logging.info("plotting mean rewards")
     try:
         self.plot_mean_rewards(serializer)
     except Exception as e:
         logging.warn("Mean rewards could not be plotted: " + str(e))       
     
     logging.info("Plotting EMD graph...")       
     try:
         self.plot_emd_graph(serializer, cart_coords) 
     except:
         logging.warn("EMD graphs could not be plotted")
     
     logging.info("Plotting histograms...") 
     try:            
         self.save_histogram_plots(serializer, cart_coords) 
     except:
         logging.warn("Histograms could not be plotted")  
Beispiel #48
0
 def read(self, size=-1, offset=0):
     """
     Returns an attached object representation as YAML file (bytestring).

     ``size`` and ``offset`` are accepted for a file-like interface but are
     ignored here -- the full serialized object is always returned.
     """
     return Serializer.serialize(self.model_instance) # binary?
Beispiel #49
0
class LQG:
    def __init__(self, show_scene, deserialize, append_paths):
        self.abs_path = os.path.dirname(os.path.abspath(__file__))
        """ Reading the config """
        warnings.filterwarnings("ignore")
        self.init_serializer()
        config = self.serializer.read_config("config_lqg.yaml", path=self.abs_path)
        self.set_params(config)
        if self.seed < 0:
            """
            Generate a random seed that will be stored
            """
            self.seed = np.random.randint(0, 4294967)
        np.random.seed(self.seed)  
        
        logging_level = logging.WARN
        if config['verbose']:
            logging_level = logging.DEBUG
        logging.basicConfig(format='%(levelname)s: %(message)s', level=logging_level)        
        np.set_printoptions(precision=16)
        dir = self.abs_path + "/stats/lqg"
        tmp_dir = self.abs_path + "/tmp/lqg"        
        self.utils = Utils()
        if not self.init_robot(self.robot_file):
            logging.error("LQG: Couldn't initialize robot")
            return               
        if not self.setup_scene(self.environment_file, self.robot):
            return
        #self.show_state_distribution(urdf_model_file, environment_file)
        #self.check_continuous_collide(self.robot_file, self.environment_file)
        if show_scene:
            self.test_sensor(self.robot_file, self.environment_file, self.abs_path + "/model/sensor.xml")            
            self.run_viewer(self.robot_file, self.environment_file)
        self.clear_stats(dir)
        logging.info("Start up simulator")
        sim = Simulator()
        path_evaluator = PathEvaluator()
        path_planner = PathPlanningInterface()
        
                
        logging.info("LQG: Generating goal states...")        
        goal_states = get_goal_states("lqg",
                                      self.abs_path,
                                      self.serializer, 
                                      self.obstacles,                                                                           
                                      self.robot,                                    
                                      self.max_velocity,
                                      self.delta_t,
                                      self.start_state,
                                      self.goal_position,
                                      self.goal_radius,
                                      self.planning_algortihm,
                                      self.path_timeout,
                                      self.num_generated_goal_states,
                                      self.continuous_collision,
                                      self.environment_file,
                                      self.num_cores)  
        if len(goal_states) == 0:
            logging.error("LQG: Couldn't generate any goal states. Problem seems to be infeasible")
            return
        logging.info("LQG: Generated " + str(len(goal_states)) + " goal states")         
        sim.setup_reward_function(self.discount_factor, self.step_penalty, self.illegal_move_penalty, self.exit_reward)  
        path_planner.setup(self.robot,                           
                           self.obstacles,  
                           self.max_velocity, 
                           self.delta_t, 
                           self.use_linear_path,
                           self.planning_algortihm,
                           self.path_timeout,
                           self.continuous_collision,
                           self.num_cores)
        
        if self.dynamic_problem:
            path_planner.setup_dynamic_problem(self.robot_file,
                                               self.environment_file,
                                               self.simulation_step_size,
                                               self.num_control_samples,
                                               self.min_control_duration,
                                               self.max_control_duration,
                                               self.add_intermediate_states,
                                               self.rrt_goal_bias,
                                               self.control_sampler)       
        path_planner.set_start_and_goal(self.start_state, goal_states, self.goal_position, self.goal_radius)         
        A, H, B, V, W, C, D, M_base, N_base = self.problem_setup(self.delta_t, self.robot_dof)
        time_to_generate_paths = 0.0
        if check_positive_definite([C, D]):
            m_covs = None
            if self.inc_covariance == "process":
                m_covs = np.linspace(self.min_process_covariance, 
                                     self.max_process_covariance, 
                                     self.covariance_steps)                 
            elif self.inc_covariance == "observation":          
                m_covs = np.linspace(self.min_observation_covariance, 
                                     self.max_observation_covariance,
                                     self.covariance_steps)            
            emds = []
            mean_planning_times = []
            time_to_generate_paths = 0.0            
            paths = []
            if ((not append_paths) and deserialize):
                paths = self.serializer.deserialize_paths(self.abs_path + "/paths.txt", self.robot_dof)
                #paths = [paths[26]]
            if len(paths) == 0:
                print "LQG: Generating " + str(self.num_paths) + " paths from the inital state to the goal position..."
                t0 = time.time()
                paths = path_planner.plan_paths(self.num_paths, 0)
                if len(paths) == 0:
                    logging.error("LQG: Couldn't create any paths within the given time.")
                    return
                time_to_generate_paths = time.time() - t0 
                print "LQG: Time to generate paths: " + str(time_to_generate_paths) + " seconds"
                if self.plot_paths:                
                    self.serializer.save_paths(paths, "paths.yaml", self.overwrite_paths_file, path=dir)                
                if deserialize or append_paths:                    
                    self.serializer.serialize_paths(self.abs_path + "/paths.txt", paths, append_paths, self.robot_dof)                    
                    paths = self.serializer.deserialize_paths(self.abs_path + "/paths.txt", self.robot_dof)
            
            """ Determine average path length """
            avg_path_length = self.get_avg_path_length(paths)            
            self.serializer.save_avg_path_lengths(avg_path_length, path=dir)                                       
            cart_coords = []  
            best_paths = []
            all_rewards = []
            successes = []            
                                           
            for j in xrange(len(m_covs)):
                print "LQG: Evaluating " + str(len(paths)) + " paths for covariance value " + str(m_covs[j]) + "..."
                M = None
                N = None
                if self.inc_covariance == "process":
                    """ The process noise covariance matrix """
                    M = self.calc_covariance_value(self.robot, m_covs[j], M_base)
                    #M = m_covs[j] * M_base
                    
                    """ The observation error covariance matrix """
                    N = self.calc_covariance_value(self.robot, 
                                                   self.min_observation_covariance, 
                                                   N_base, 
                                                   covariance_type='observation')                    
                elif self.inc_covariance == "observation":
                    M = self.calc_covariance_value(self.robot, 
                                                   self.min_process_covariance,
                                                   M_base)
                    N = self.calc_covariance_value(self.robot, 
                                                   m_covs[j],
                                                   N_base, 
                                                   covariance_type='observation')                    
                P_t = np.array([[0.0 for k in xrange(2 * self.robot_dof)] for l in xrange(2 * self.robot_dof)]) 
                path_evaluator.setup(A, B, C, D, H, M, N, V, W,                                     
                                     self.robot, 
                                     self.sample_size, 
                                     self.obstacles,
                                     self.goal_position,
                                     self.goal_radius,
                                     self.show_viewer_evaluation,
                                     self.robot_file,
                                     self.environment_file,
                                     self.num_cores)
                if self.dynamic_problem:
                    path_evaluator.setup_dynamic_problem()
                path_evaluator.setup_reward_function(self.step_penalty, self.illegal_move_penalty, self.exit_reward, self.discount_factor)
                t0 = time.time()
                 
                (path_index,
                 xs, 
                 us, 
                 zs, 
                 control_durations, 
                 objective, 
                 state_covariances,
                 deviation_covariances,
                 estimated_deviation_covariances) = path_evaluator.evaluate_paths(paths, P_t, 0)
                 
                te = time.time() - t0
                print "LQG: Time to evaluate " + str(len(paths)) + " paths: " + str(te) + "s"
                mean_planning_time = time_to_generate_paths + te
                print "LQG: Best objective value: " + str(objective)
                print "LQG: Length of best path: " + str(len(xs))
                print "LQG: Best path has index " + str(path_index)  
                best_paths.append([[xs[i] for i in xrange(len(xs))], 
                                   [us[i] for i in xrange(len(us))],
                                   [zs[i] for i in xrange(len(zs))],
                                   [control_durations[i] for i in xrange(len(control_durations))]])
                
                sim.setup_problem(A, B, C, D, H, V, W, M, N,
                                  self.robot, 
                                  self.enforce_control_constraints,
                                  self.obstacles, 
                                  self.goal_position, 
                                  self.goal_radius,
                                  self.max_velocity,                                  
                                  self.show_viewer_simulation,
                                  self.robot_file,
                                  self.environment_file)
                if self.dynamic_problem:
                    sim.setup_dynamic_problem(self.simulation_step_size)                
                successes = 0
                num_collisions = 0 
                rewards_cov = []
                final_states= []
                num_steps = 0
                collided_num = 0
                print "LQG: Running " + str(self.num_simulation_runs) + " simulations..."              
                for k in xrange(self.num_simulation_runs):
                    self.serializer.write_line("log.log", tmp_dir, "RUN #" + str(k + 1) + " \n")
                    print "simulation run: " + str(k)
                    (x_true, 
                     x_tilde,
                     x_tilde_linear, 
                     x_estimate, 
                     P_t, 
                     current_step, 
                     total_reward,
                     terminal,
                     estimated_s,
                     estimated_c,                     
                     history_entries) = sim.simulate_n_steps(xs, us, zs,                                                             
                                                             control_durations,
                                                             xs[0],
                                                             np.array([0.0 for i in xrange(2 * self.robot_dof)]),
                                                             np.array([0.0 for i in xrange(2 * self.robot_dof)]),
                                                             xs[0],
                                                             np.array([[0.0 for k in xrange(2 * self.robot_dof)] for l in xrange(2 * self.robot_dof)]),
                                                             0.0,                                                           
                                                             0,
                                                             len(xs) - 1,
                                                             deviation_covariances,
                                                             estimated_deviation_covariances)
                    if terminal:
                        successes += 1
                    rewards_cov.append(total_reward)
                    #n, min_max, mean, var, skew, kurt = scipy.stats.describe(np.array(rewards_cov))                    
                    collided = False
                    for l in xrange(len(history_entries)):
                        history_entries[l].set_estimated_covariance(state_covariances[l])                        
                        history_entries[l].serialize(tmp_dir, "log.log")
                        if history_entries[l].collided:                            
                            num_collisions += 1
                            collided = True                        
                    if collided:
                        collided_num += 1
                    num_steps += history_entries[-1].t
                    final_states.append(history_entries[-1].x_true)                                         
                    self.serializer.write_line("log.log", tmp_dir, "Reward: " + str(total_reward) + " \n") 
                    self.serializer.write_line("log.log", tmp_dir, "\n")
                    
                """ Calculate the distance to goal area
                """  
                ee_position_distances = [] 
                for state in final_states:
                    joint_angles = v_double()
                    joint_angles[:] = [state[y] for y in xrange(len(state) / 2)]
                    ee_position = v_double()
                    self.robot.getEndEffectorPosition(joint_angles, ee_position)
                    ee_position = np.array([ee_position[y] for y in xrange(len(ee_position))])
                    dist = np.linalg.norm(np.array(self.goal_position - ee_position))
                    if dist < self.goal_radius:
                        dist = 0.0
                    ee_position_distances.append(dist)
                n, min_max, mean_distance_to_goal, var, skew, kurt = scipy.stats.describe(np.array(ee_position_distances))                         
                
                self.serializer.write_line("log.log", tmp_dir, "################################# \n")
                self.serializer.write_line("log.log",
                                           tmp_dir,
                                           "inc_covariance: " + str(self.inc_covariance) + "\n")
                if self.inc_covariance == "process":                  
                    self.serializer.write_line("log.log",
                                               tmp_dir,
                                               "Process covariance: " + str(m_covs[j]) + " \n")
                    self.serializer.write_line("log.log",
                                               tmp_dir,
                                               "Observation covariance: " + str(self.min_observation_covariance) + " \n")
                    
                elif self.inc_covariance == "observation":                    
                    self.serializer.write_line("log.log",
                                               tmp_dir,
                                               "Process covariance: " + str(self.min_process_covariance) + " \n")
                    self.serializer.write_line("log.log",
                                               tmp_dir,
                                               "Observation covariance: " + str(m_covs[j]) + " \n")
                    
                num_steps /= self.num_simulation_runs
                self.serializer.write_line("log.log", tmp_dir, "Mean number of steps: " + str(num_steps) + " \n")
                self.serializer.write_line("log.log", tmp_dir, "Objective value of best path: " + str(objective) + " \n")                
                self.serializer.write_line("log.log", tmp_dir, "Mean num collisions per run: " + str(float(num_collisions) / float(self.num_simulation_runs)) + " \n")
                print "collision_prob " + str(collided_num / float(self.num_simulation_runs))
                print "total num collisions " + str(num_collisions)    
                print "mean num collisions " + str(float(num_collisions) / float(self.num_simulation_runs))
                
                self.serializer.write_line("log.log", tmp_dir, "Length best path: " + str(len(xs)) + " \n")
                self.serializer.write_line("log.log", tmp_dir, "Index of best path: " + str(path_index) + " \n")
                self.serializer.write_line("log.log", 
                                           tmp_dir, 
                                           "Average distance to goal area: " + str(mean_distance_to_goal) + " \n")
                self.serializer.write_line("log.log", tmp_dir, "Num successes: " + str(successes) + " \n")
                print "succ " + str((100.0 / self.num_simulation_runs) * successes)
                self.serializer.write_line("log.log", tmp_dir, "Percentage of successful runs: " + str((100.0 / self.num_simulation_runs) * successes) + " \n")
                self.serializer.write_line("log.log", tmp_dir, "Mean planning time: " + str(mean_planning_time) + " \n")
                
                n, min_max, mean, var, skew, kurt = scipy.stats.describe(np.array(rewards_cov))
                print "mean_rewards " + str(mean)
                #plt.plot_histogram_from_data(rewards_cov)
                #sleep
                self.serializer.write_line("log.log", tmp_dir, "Mean rewards: " + str(mean) + " \n")
                self.serializer.write_line("log.log", tmp_dir, "Reward variance: " + str(var) + " \n")
                self.serializer.write_line("log.log", 
                                      tmp_dir, 
                                      "Reward standard deviation: " + str(np.sqrt(var)) + " \n")
                self.serializer.write_line("log.log", tmp_dir, "Seed: " + str(self.seed) + " \n")
                cmd = "mv " + tmp_dir + "/log.log " + dir + "/log_lqg_" + str(m_covs[j]) + ".log"
                os.system(cmd)
                
            
            if self.plot_paths:
                self.serializer.save_paths(best_paths, 'best_paths.yaml', True, path=dir)                      
            #self.serializer.save_stats(stats, path=dir)
            
            cmd = "cp " + self.abs_path + "/config_lqg.yaml " + dir           
            os.system(cmd)
            
            if not os.path.exists(dir + "/environment"):
                os.makedirs(dir + "/environment") 
            
            cmd = "cp " + self.abs_path + "/" + str(self.environment_file) + " " + str(dir) + "/environment"
            os.system(cmd) 
            
            if not os.path.exists(dir + "/model"):
                os.makedirs(dir + "/model")
                
            cmd = "cp " + self.abs_path + "/" + self.robot_file + " " + dir + "/model"
            os.system(cmd)
        print "LQG: Time to generate paths: " + str(time_to_generate_paths) + " seconds"                   
        print "Done"
        
    def calc_covariance_value(self, robot, error, covariance_matrix, covariance_type='process'):
        """Fill the diagonal of covariance_matrix from a percentage error.

        Each diagonal entry is ((range / 100) * error)^2, where the range is
        taken from the robot's joint limits: torque ranges for a dynamic
        process model, velocity for a kinematic one, and position/velocity
        ranges for the observation model.  The (modified) matrix is returned.
        """
        joints = v_string()
        robot.getActiveJoints(joints)
        if covariance_type == 'process':
            if self.dynamic_problem:
                limits = v_double()
                robot.getJointTorqueLimits(joints, limits)
                for idx in xrange(len(limits)):
                    torque_span = 2.0 * limits[idx]
                    covariance_matrix[idx, idx] = np.square((torque_span / 100.0) * error)
            else:
                # Kinematic model: a single shared velocity range per joint.
                for idx in xrange(self.robot_dof):
                    covariance_matrix[idx, idx] = np.square((self.max_velocity / 100.0) * error)
        else:
            lower = v_double()
            upper = v_double()
            vel_limits = v_double()
            robot.getJointLowerPositionLimits(joints, lower)
            robot.getJointUpperPositionLimits(joints, upper)
            robot.getJointVelocityLimits(joints, vel_limits)
            for idx in xrange(self.robot_dof):
                pos_span = upper[idx] - lower[idx]
                covariance_matrix[idx, idx] = np.square((pos_span / 100.0) * error)
                vel_span = 2.0 * vel_limits[idx]
                covariance_matrix[idx + self.robot_dof, idx + self.robot_dof] = np.square((vel_span / 100.0) * error)
        return covariance_matrix
        
        
    def init_robot(self, urdf_model_file):
        """Construct the robot from a URDF model and validate the start state.

        Returns True on success; returns False (after logging an error) when
        the configured start state doesn't have 2 * DOF entries.
        """
        self.robot = Robot(self.abs_path + "/" + urdf_model_file)
        self.robot.enforceConstraints(self.enforce_constraints)
        self.robot.setGravityConstant(self.gravity_constant)
        self.robot.setAccelerationLimit(self.acceleration_limit)
        self.robot_dof = self.robot.getDOF()
        # State = joint angles + joint velocities, hence 2 * DOF.
        if len(self.start_state) == 2 * self.robot_dof:
            return True
        logging.error("LQG: Start state dimension doesn't fit to the robot state space dimension")
        return False
        
    """
    Analyzing functions (will be removed later)
    =====================================================================
    """
           
           
    def sample_control_error(self, M):
        """Draw one zero-mean control-error sample with covariance M."""
        zero_mean = np.zeros(2 * self.robot_dof)
        return np.random.multivariate_normal(zero_mean, M)
    
    def show_state_distribution(self, model_file, env_file):
        self.robot.setupViewer(model_file, env_file)       
        M = 30.0 * np.identity(2 * self.robot_dof)
        active_joints = v_string()
        self.robot.getActiveJoints(active_joints)
        self.robot_dof = len(active_joints)
        
        #x = [0.0, -np.pi / 2.0, 0.0, 0.0, 0.0, 0.0]
        states = []
        for z in xrange(2000):
            u = [70.0, 70.0, 70.0, 0.0, 0.0, 0.0]
            current_state = v_double()
            current_state[:] = x
            control = v_double()
            control[:] = u            
            control_error = v_double()
            ce = self.sample_control_error(M)
            #ce = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
            control_error[:] = ce
            x = None
            for k in xrange(1):
                result = v_double()
                self.robot.propagate(current_state,
                                     control,
                                     control_error,
                                     self.simulation_step_size,
                                     self.delta_t,
                                     result)                               
                x = [result[i] for i in xrange(len(result))]
                
                current_state[:] = x
                print x
            states.append(np.array(x[3:6]))
            x = [0.0, -np.pi/ 2.0, 0.0, 0.0, 0.0, 0.0]
            
            cjvals = v_double()
            cjvels = v_double()
            cjvals_arr = [x[i] for i in xrange(len(x) / 2)]
            cjvels_arr = [x[i] for i in xrange(len(x) / 2, len(x))]
            cjvals[:] = cjvals_arr
            cjvels[:] = cjvels_arr
            particle_joint_values = v2_double()
            particle_joint_colors = v2_double()
            self.robot.updateViewerValues(cjvals, 
                                          cjvels,
                                          particle_joint_values,
                                          particle_joint_values)
        from plot import plot_3d_points
        mins = []
        maxs = []
        
        x_min = min([states[i][0] for i in xrange(len(states))])
        x_max = max([states[i][0] for i in xrange(len(states))])
        y_min = min([states[i][1] for i in xrange(len(states))])
        y_max = max([states[i][1] for i in xrange(len(states))])
        z_min = min([states[i][2] for i in xrange(len(states))])
        z_max = max([states[i][2] for i in xrange(len(states))])
        
        scale = [-0.2, 0.2]
        plot_3d_points(np.array(states), 
                       x_scale = [x_min, x_max], 
                       y_scale = [y_min, y_max], 
                       z_scale=  [z_min, z_max])
        sleep
        
    def check_continuous_collide(self, model_file, env_file):
        """Benchmark continuous collision checking between two fixed states.

        Runs an endless loop that checks each robot link's swept volume
        between hard-coded states x1 and x2 against every obstacle, sums the
        wall-clock time spent checking, prints the average after 10000
        iterations, and continuously renders x1 in the viewer.
        Debug/analysis helper (see the "Analyzing functions" section note).
        """
        self.robot.setViewerBackgroundColor(0.6, 0.8, 0.6)
        self.robot.setViewerSize(1280, 768)
        self.robot.setupViewer(model_file, env_file)        
        # Two hard-coded 8-element states; the slicing below treats the first
        # half as joint angles and the second half as joint velocities.
        x1 = [0.0115186,
              0.1404386,
              -0.17235397,
              -0.22907283,
              0.88922755,
              1.35271925,
              -3.38231963,
              -1.51419397]
        x2 = [0.04028825,
              0.18442375,
              -0.25677913,
              -0.33145854,
              0.81281939,
              1.51040733,
              -2.37524124,
              -3.43838968]
        #x1 = [np.pi - np.pi / 4, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        #x1 = [np.pi / 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        #x2 = [np.pi / 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        
        # Accumulated collision-check time and iteration counter.
        coll_t = 0.0
        k = 0
        while True:
            k += 1            
            cjvals = v_double()
            cjvels = v_double()
            # Split x1 into angles (first half) / velocities (second half)
            # for the viewer update at the bottom of the loop.
            cjvals_arr = [x1[i] for i in xrange(len(x1) / 2)]
            cjvels_arr = [x1[i] for i in xrange(len(x1) / 2, len(x1))]
            cjvals[:] = cjvals_arr
            cjvels[:] = cjvels_arr
            particle_joint_values = v2_double()
            
            # Collision objects for the joint angles of both endpoint states.
            ja_start = v_double()
            ja_goal = v_double()
            ja_start[:] = [x1[i] for i in xrange(len(x1) / 2)]
            ja_goal[:] = [x2[i] for i in xrange(len(x2) / 2)]
            collision_objects_start = self.robot.createRobotCollisionObjects(ja_start)
            collision_objects_goal = self.robot.createRobotCollisionObjects(ja_goal)
            in_collision_discrete = False
            in_collision_continuous = False             
            t0 = time.time()
            breaking = False      
            # Continuous (swept) check per link against each obstacle; stop
            # at the first hit.
            for o in self.obstacles:                
                #in_collision_discrete_start = o.inCollisionDiscrete(collision_objects_start)
                #in_collision_discrete_goal = o.inCollisionDiscrete(collision_objects_goal)
                for i in xrange(len(collision_objects_start)):
                    in_collision_continuous = o.inCollisionContinuous([collision_objects_start[i], collision_objects_goal[i]])
                    if in_collision_continuous:                        
                        break
                if in_collision_continuous:
                    break                              
                '''if in_collision_discrete_start:                    
                    print "start collides discrete"
                    in_collision_discrete = True
                    break
                if in_collision_discrete_goal:
                    print "goal collides discrete"
                    in_collision_discrete = True
                    break
                if in_collision_continuous:
                    print "collides continuous"
                    break                     
            print "in collision discrete " + str(in_collision_discrete)
            print "in collision continuous " + str(in_collision_continuous)'''
            print "in collision continuous " + str(in_collision_continuous)
            coll_t += time.time() - t0
            # After 10000 iterations, report the mean check time and halt
            # via the deliberate NameError on the bare name 'sleep' (hack).
            if k == 10000:
                print coll_t / k
                sleep
            # NOTE(review): particle_joint_values is passed for both of the
            # last two arguments -- presumably values and colors; confirm.
            self.robot.updateViewerValues(cjvals, 
                                          cjvels,
                                          particle_joint_values,
                                          particle_joint_values)
            #time.sleep(10)
            time.sleep(0.03)
             
    def test_sensor(self, model_file, env_file, sensor_file):
        """Endlessly propagate the robot with zero control and update the sensor transform.

        Starting from the zero state, applies zero control/zero noise at a
        fixed 0.03s step, feeds the resulting joint angles to
        setSensorTransform, and (optionally) renders each state.
        Debug/analysis helper; never returns.

        NOTE(review): sensor_file is unused -- the addSensor call is
        commented out.
        """
        show_viewer = True
        if show_viewer:
            self.robot.setViewerBackgroundColor(0.6, 0.8, 0.6)
            self.robot.setViewerSize(1280, 768)
            self.robot.setupViewer(model_file, env_file) 
            #self.robot.addSensor(sensor_file)
        # 6-element state (angles + velocities), 3-element control and noise.
        x = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        u = [0.0, 0.0, 0.0]
        u_err = [0.0, 0.0, 0.0]
        
        current_state = v_double()
        control = v_double()
        control[:] = u
        
        control_error = v_double()
        control_error[:] = u_err
        
        result = v_double()
        angles = v_double()
        # NOTE(review): called with a still-empty current_state here --
        # presumably initializes the transform; confirm against the API.
        self.robot.setSensorTransform(current_state)
        while True:
            current_state[:] = x
            self.robot.propagate(current_state,
                                 control,
                                 control_error,
                                 self.simulation_step_size,
                                 0.03,
                                 result)            
            # First half of the result vector are the joint angles.
            angles[:] = [result[i] for i in xrange(len(result) / 2)]
            self.robot.setSensorTransform(angles) 
            x = np.array([result[i] for i in xrange(len(result))])
            cjvals = v_double()
            cjvels = v_double()
            cjvals_arr = [x[i] for i in xrange(len(x) / 2)]
            cjvels_arr = [x[i] for i in xrange(len(x) / 2, len(x))]
            cjvals[:] = cjvals_arr
            cjvels[:] = cjvels_arr
            particle_joint_values = v2_double()
            if show_viewer:                
                self.robot.updateViewerValues(cjvals, 
                                              cjvels,
                                              particle_joint_values,
                                              particle_joint_values)
            time.sleep(0.03)
            
            print "hello"   
        
    def run_viewer(self, model_file, env_file):        
        """Endless propagate-and-render loop with timing instrumentation.

        Applies a constant control to the first joint, propagates the state
        with 0.03s steps, performs discrete collision checks against all
        obstacles, renders each state, and after 1000 iterations prints mean
        integration / collision-check times and halts (deliberate NameError).
        Debug/analysis helper; never returns.
        """
        show_viewer = True
        # Viewer camera pose (quaternion + translation); currently unused
        # beyond being set up here.
        rot = v_double()
        trans = v_double()
        rot[:] = [-1.0, 0.0, 0.0, 0.0]
        trans[:] = [0.0, 0.0, 3.0]        
        if show_viewer:
            self.robot.setViewerBackgroundColor(0.6, 0.8, 0.6)
            self.robot.setViewerSize(1280, 768)
            self.robot.setupViewer(model_file, env_file)
        # External force placeholders (unused below).
        fx = 0.0
        fy = 0.0
        fz = 0.0
        f_roll = 0.0
        f_pitch = 0.0
        f_yaw = 0.0         
        # Zero state with the second joint angle set to 0.6.
        x = [0.0 for i in xrange(2 * self.robot_dof)]
        ''''x = [0.0, 0.0, 0.0, 0.0,
             0.0,
             0.0,
             0.0,
             0.0]'''
        #x = [1.78349, 0.868347, -0.927761]
        x[1] = 0.6 
        x_true = [0.0 for i in xrange(2 * self.robot_dof)]        
        # Timing accumulators reported after 1000 iterations.
        integration_times = [] 
        collision_check_times1 = []
        collision_check_times2 = []     
        current_state = v_double()
        y = 0
        while True:
            # Constant torque/velocity command on the first joint only.
            u_in = [0.0 for i in xrange(self.robot_dof)]
            u_in[0] = 150.0
                        
            current_state[:] = x            
            control = v_double()            
            control[:] = u_in
            control_error = v_double()
            ce = [0.0 for i in xrange(self.robot_dof)]
            control_error[:] = ce
            result = v_double()
            
            
            ###########################
            # End-effector Jacobian at the current state (computed but unused).
            ee_jacobian = v2_double()            
            self.robot.getEndEffectorJacobian(current_state, ee_jacobian);            
            ###########################
            t0 = time.time()                    
            self.robot.propagate(current_state,
                                 control,
                                 control_error,
                                 self.simulation_step_size,
                                 0.03,
                                 result) 
            #print result[11]          
            t = time.time() - t0
            integration_times.append(t)
            # After 1000 iterations print the mean timings and halt via the
            # deliberate NameError on the bare name 'sleep' (hack).
            if y == 1000:
                t_sum = sum(integration_times)
                t_mean = t_sum / len(integration_times)
                print "mean integration times: " + str(t_mean)
                t_sum = sum(collision_check_times1)
                t_mean = t_sum / len(collision_check_times1)
                print "mean collision check times old " + str(t_mean)
                t_mean = sum(collision_check_times2) / len(collision_check_times2)
                print "mean collision check times new " + str(t_mean)
                sleep            
            
            # Collision objects for the joint angles (first half of the state)
            # before and after the propagation step.
            ja_start = v_double()            
            ja_start[:] = [current_state[i] for i in xrange(len(current_state) / 2)]                     
            collision_objects_start = self.robot.createRobotCollisionObjects(ja_start)
            joint_angles = v_double()
            joint_angles[:] = [result[i] for i in xrange(len(result) / 2)]
            collision_objects_goal = self.robot.createRobotCollisionObjects(joint_angles)
            #print "ee_velocity " + str([ee_velocity[i] for i in xrange(len(ee_velocity))])
            # NOTE(review): no work happens between these two timestamps, so
            # collision_check_times2 effectively measures ~0 -- confirm intent.
            t_coll_check1 = time.time()            
            t2 = time.time() - t_coll_check1
            collision_check_times2.append(t2)            
            
            '''
            Get the end effector position
            '''
            #ee_position = v_double()            
            #self.robot.getEndEffectorPosition(joint_angles, ee_position)                      
            #ee_collision_objects = self.robot.createEndEffectorCollisionObject(joint_angles)
            # Discrete collision check of the post-step configuration.
            in_collision = False
            t_c = time.time()            
            for o in self.obstacles:                
                if o.inCollisionDiscrete(collision_objects_goal):                                        
                    in_collision = True
                    print "IN COLLISION"
                    break                              
                '''for i in xrange(len(collision_objects_start)):                        
                    if o.inCollisionContinuous([collision_objects_start[i], collision_objects_goal[i]]):
                        in_collision = True
                        break'''               
                if in_collision:
                    break
                    
            t = time.time() - t_c            
            collision_check_times1.append(t)          
                                
                                                                          
            # Adopt the propagated state and render it.
            x = np.array([result[i] for i in xrange(len(result))])
            cjvals = v_double()
            cjvels = v_double()
            cjvals_arr = [x[i] for i in xrange(len(x) / 2)]
            cjvels_arr = [x[i] for i in xrange(len(x) / 2, len(x))]
            cjvals[:] = cjvals_arr
            cjvels[:] = cjvels_arr
            particle_joint_values = v2_double()
            if show_viewer:
                self.robot.updateViewerValues(cjvals, 
                                              cjvels,
                                              particle_joint_values,
                                              particle_joint_values)
            #time.sleep(0.03)
            time.sleep(0.03) 
            
            y += 1
            print y
            
    """
    ================================================================
    """
        
        
    def setup_scene(self,                    
                    environment_file,
                    robot):
        """Load the obstacles and the goal area from the environment file.

        Sets self.obstacles, self.goal_position (x, y, z) and
        self.goal_radius.  Returns False when the file defines no goal area,
        True otherwise.
        """
        env_path = self.abs_path + "/" + environment_file
        self.obstacles = self.utils.loadObstaclesXML(env_path)
        goal_area = v_double()
        self.utils.loadGoalArea(env_path, goal_area)
        if len(goal_area) == 0:
            print "ERROR: Your environment file doesn't define a goal area"
            return False
        # goal_area layout: [x, y, z, radius]
        self.goal_position = [goal_area[i] for i in xrange(0, 3)]
        self.goal_radius = goal_area[3]
        return True
            
    def init_serializer(self):
        """Create the serializer and its temporary working directory."""
        serializer = Serializer()
        serializer.create_temp_dir(self.abs_path, "lqg")
        self.serializer = serializer
            
    def clear_stats(self, dir):        
        """Empty the statistics directory, creating it if it doesn't exist.

        Replaces the previous shell-based ``os.system("rm -rf <dir>/*")``:
        pure-Python deletion is portable and safe with paths containing
        spaces or shell metacharacters.

        :param dir: statistics directory path (parameter name kept for
            backward compatibility although it shadows the builtin).
        """
        import shutil  # local import: keeps the file-level import block untouched
        if os.path.isdir(dir):
            # Remove the directory's contents but keep the directory itself,
            # mirroring the old "rm -rf <dir>/*" semantics.
            for entry in os.listdir(dir):
                path = os.path.join(dir, entry)
                if os.path.isdir(path) and not os.path.islink(path):
                    shutil.rmtree(path)
                else:
                    os.remove(path)
        else:
            os.makedirs(dir)
            
    def get_average_distance_to_goal_area(self, goal_position, goal_radius, cartesian_coords):        
        """Return the mean Euclidean distance of the coordinates to the goal area.

        Points inside the goal sphere (distance < goal_radius) count as
        distance 0.  Returns 0.0 when every point is inside the goal area or
        when cartesian_coords is empty.

        Fix: ``np.asscalar`` was deprecated in NumPy 1.16 and removed in
        1.23; ``float()`` is the documented portable replacement.
        """
        total_dist = 0.0
        goal_pos = np.array(goal_position)
        for coord in cartesian_coords:
            dist = np.linalg.norm(goal_pos - np.array(coord))
            if dist < goal_radius:
                dist = 0.0
            total_dist += dist
        if total_dist == 0.0:
            # Also guards the division when cartesian_coords is empty.
            return total_dist
        return float(total_dist) / len(cartesian_coords)
            
    def problem_setup(self, delta_t, num_links):
        """Build the LQG problem matrices for a num_links-joint system.

        Returns (A, H, B, V, W, C, D, M_base, N_base): identity dynamics /
        observation / noise matrices over the 2*num_links state, cost
        matrices C (path deviation) and D (control deviation), the control
        matrix B = delta_t * [I; 0], V = [I; 0], and base covariance
        matrices sized by the robot's DOF.
        """
        state_dim = 2 * num_links
        A = np.identity(state_dim)
        H = np.identity(state_dim)
        W = np.identity(state_dim)
        C = self.path_deviation_cost * np.identity(state_dim)

        # Controls act on the velocity half of the state only.
        eye = np.identity(num_links)
        zeros = np.zeros((num_links, num_links))
        B = delta_t * np.vstack((eye, zeros))
        V = np.vstack((eye, zeros))
        D = self.control_deviation_cost * np.identity(num_links)
        M_base = np.identity(self.robot_dof)
        N_base = np.identity(2 * self.robot_dof)

        return A, H, B, V, W, C, D, M_base, N_base
            
    def get_avg_path_length(self, paths):
        """Return the mean length of the first component of each path."""
        lengths = [len(path[0]) for path in paths]
        return sum(lengths) / float(len(paths))
        
    def set_params(self, config):        
        """Copy all configuration values from the config mapping onto self.

        Most attributes share their config key's name and are assigned in a
        table-driven loop; the handful whose attribute name differs from the
        key (or that are derived) are assigned explicitly below.
        """
        # Attributes whose name equals their config key.
        direct_keys = [
            'use_linear_path', 'max_velocity', 'start_state',
            'num_simulation_runs', 'num_bins', 'min_process_covariance',
            'max_process_covariance', 'covariance_steps',
            'min_observation_covariance', 'max_observation_covariance',
            'discount_factor', 'illegal_move_penalty', 'step_penalty',
            'exit_reward', 'stop_when_terminal', 'enforce_constraints',
            'enforce_control_constraints', 'sample_size', 'plot_paths',
            'dynamic_problem', 'simulation_step_size', 'path_timeout',
            'show_viewer_evaluation', 'show_viewer_simulation',
            'path_deviation_cost', 'control_deviation_cost',
            'num_control_samples', 'min_control_duration',
            'max_control_duration', 'inc_covariance',
            'add_intermediate_states', 'num_generated_goal_states',
            'robot_file', 'environment_file', 'rrt_goal_bias',
            'control_sampler', 'seed', 'num_cores', 'acceleration_limit',
        ]
        for key in direct_keys:
            setattr(self, key, config[key])

        # Attributes whose name differs from the config key, or are derived.
        self.num_paths = config['num_generated_paths']
        self.delta_t = 1.0 / config['control_rate']
        self.planning_algortihm = config['planning_algorithm']  # attr typo kept for compatibility
        self.continuous_collision = config['continuous_collision_check']
        self.gravity_constant = config['gravity']
class TestSerialize(unittest.TestCase):
    """Unit tests for the Serializer's message (de)serialization.

    Exercises round-trips, field length/type validation errors, message
    lookup by plain and dotted name, name hashing and definition reloading
    against the generated test_pb2 message module.

    NOTE(review): Python 2 only -- ``__import__`` is called with level -1
    (implicit relative import), which Python 3 rejects.
    """

    @classmethod
    def _clearMsgDefs(self):
        # Pop every registered message module until the list is empty, so the
        # tests see only the test_pb2 module registered in setUpClass.
        # (classmethods here use 'self' instead of the conventional 'cls'.)
        # Clear all non-test messages
        try:
            while True:
                messages.MESSAGES.pop()
        except IndexError:
            pass

    @classmethod
    def setUpClass(self):
        """Register the generated test message module and prepare shared fixtures."""
        self._clearMsgDefs()
        # Python 2 only: level -1 enables implicit relative imports.
        messages.MESSAGES.append(__import__("test_pb2", globals(), locals(), [], -1))
        # Argument vectors for the "Test" message: values interleaved with
        # their length options, followed by four string fields.
        self.blank_args = [1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, True, 0, "a", "b", "c", "d"]
        self.neg_args = [-1.0, 0, -1.0, 0, -1, -1, -1, -1, 0, 0, 0, 0, -1, -1, -1, -1, 0, 0, 0, 0, -1, -1, -1, -1, 1, 1, "", "", "", ""]
        # Length options unsupported by the serializer (floats as lengths).
        self.illegal_len_args = [
                                [0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""]
                            ]
        # Values exceeding their declared field width (+/-32768 past 16-bit
        # bounds, over-long strings).
        self.too_long_args = [
                                [0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32768, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, -32769, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, -32769, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -32769, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -32769, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -32769, 0, 0, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -32769, 0, 0, "", "", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "aaa", "", ""],
                                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", "", "", "aaa"]
                            ]

    def setUp(self):
        """Fresh Serializer per test."""
        self.s = Serializer()

    def test_id(self):
        """The encoded message starts with the padded module+message name."""
        enc = self.s.serialize("Test", *self.blank_args)
        (name, ) = struct.unpack_from('20s', enc)

        self.assertTrue(name.startswith('test_pb2Test'))

    def test_values(self):
        """Round-trip: every decoded field value appears in the input args."""
        enc = self.s.serialize("Test", *self.blank_args)

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            for (_, value) in obj.ListFields():
                self.assertIn(value, self.blank_args)

        self.s.add_handler("Test", f)
        self.s.unserialize(enc)

    def test_explicit_call(self):
        """Serializing via the dotted 'module.Message' form round-trips too."""
        enc = self.s.serialize("test.Test", *self.blank_args)

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            for (_, value) in obj.ListFields():
                self.assertIn(value, self.blank_args)

        self.s.add_handler("Test", f)
        self.s.unserialize(enc)

    def test_negative_values(self):
        """Round-trip with negative numeric values."""
        enc = self.s.serialize("Test", *self.neg_args)

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            for (_, value) in obj.ListFields():
                self.assertIn(value, self.neg_args)

        self.s.add_handler("Test", f)
        self.s.unserialize(enc)

    def test_illegal_length_option(self):
        """Non-integer length options are rejected."""
        for args in self.illegal_len_args:
            with self.assertRaises(FieldLengthUnsupportedException):
                self.s.serialize("Test", *args)

    def test_too_long_option(self):
        """Values exceeding their declared field width are rejected."""
        for args in self.too_long_args:
            with self.assertRaises(FieldTooLongException):
                self.s.serialize("Test", *args)

    def test_unknown_message(self):
        """Unregistered message names raise."""
        with self.assertRaises(UnknownMessageException):
            self.s.serialize("Unknown")

    def test_unknown_message_explicit(self):
        """Unknown message in a known module raises."""
        with self.assertRaises(UnknownMessageException):
            self.s.serialize("test.Unknown")

    def test_unknown_message_explicit2(self):
        """Unknown module prefix raises as well."""
        with self.assertRaises(UnknownMessageException):
            self.s.serialize("t.Unknown")

    def test_incomplete_definition_empty(self):
        """Serializing with no field values raises."""
        with self.assertRaises(FieldNotDefinedException):
            self.s.serialize("Test")

    def test_incomplete_definition_unnamed(self):
        """Too few positional field values raises."""
        with self.assertRaises(FieldNotDefinedException):
            self.s.serialize("Test", 0)

    def test_incomplete_definition_named(self):
        """A single keyword field without the rest raises."""
        with self.assertRaises(FieldNotDefinedException):
            self.s.serialize("Test", boolnolen = 0)

    def test_incomplete_definition_hybrid(self):
        """Mixing one positional and one keyword field still raises."""
        with self.assertRaises(FieldNotDefinedException):
            self.s.serialize("Test", 0, boolnolen = 0)

    def test_wrong_type(self):
        """A string where a number is expected raises."""
        args = self.blank_args[:]
        args[0] = ""
        with self.assertRaises(FieldWrongTypeException):
            self.s.serialize("Test", *args)

    def test_wrong_type_value(self):
        """A numeric value outside the field's representable range raises."""
        args = self.blank_args[:]
        args[5] = 9999999999999999
        with self.assertRaises(FieldWrongTypeException):
            self.s.serialize("Test", *args)

    def test_hash_name(self):
        """_hash_name truncates/hashes to exactly the requested length."""
        name = "test"
        for i in range(1,9):
            self.assertEqual(len(self.s._hash_name(name, i)),
                                i)

    def test_reload_definitions(self):
        """Reloading definitions keeps known messages and logs (filter fires)."""
        # Logging filter used as a spy: records that a log message was emitted.
        class T(logging.Filter):
            def filter(self, record):
                self.called = True
                return 0
        filter = T()
        filter.called = False
        logging.getLogger().addFilter(filter)

        self.assertIn("'test_pb2Test\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'", self.s.messages)
        self.s.load_definitions()
        self.assertIn("'test_pb2Test\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'", self.s.messages)
        self.assertTrue(filter.called)

        logging.getLogger().removeFilter(filter)

    def test_wrong_type_value_check(self):
        """A float in a length-option slot is reported as unsupported length."""
        args = self.blank_args[:]
        args[1] = 1.10
        with self.assertRaises(FieldLengthUnsupportedException):
            self.s.serialize("Test", *args)
 def setUp(self):
     self.s = Serializer()
Beispiel #52
0
 def init_serializer(self):        
     self.serializer = Serializer()
     self.serializer.create_temp_dir(self.abs_path, "mpc") 
Beispiel #53
0
class MPC:
    def __init__(self, plot):
        """Run the full MPC experiment: load config, set up robot/scene, then
        execute `num_simulation_runs` closed-loop simulations per covariance
        value and write statistics to <abs_path>/stats/mpc.

        NOTE(review): the `plot` parameter is never used in this method —
        presumably kept for interface compatibility with sibling drivers;
        confirm before removing.
        """
        self.abs_path = os.path.dirname(os.path.abspath(__file__))
        """ Reading the config """
        warnings.filterwarnings("ignore")
        self.init_serializer()
        config = self.serializer.read_config("config_mpc.yaml", path=self.abs_path)
        self.set_params(config)
        if self.seed < 0:
            """
            Generate a random seed that will be stored
            """
            self.seed = np.random.randint(0, 4294967295)
        np.random.seed(self.seed)
        
        logging_level = logging.WARN
        if config['verbose']:
            logging_level = logging.DEBUG
        logging.basicConfig(format='%(levelname)s: %(message)s', level=logging_level)        
        np.set_printoptions(precision=16)
        # dir: final statistics output; tmp_dir: scratch log written during runs.
        dir = self.abs_path + "/stats/mpc"
        tmp_dir = self.abs_path + "/tmp/mpc"       
        self.utils = Utils()
        if not self.init_robot(self.robot_file):
            logging.error("MPC: Couldn't initialize robot")
            return               
        if not self.setup_scene(self.environment_file, self.robot):
            return   
        #self.run_viewer(self.robot_file, self.environment_file)     
        self.clear_stats(dir)
        logging.info("Start up simulator")
        sim = Simulator()        
        path_evaluator = PathEvaluator()
        path_planner = PathPlanningInterface()
        logging.info("MPC: Generating goal states...")        
        goal_states = get_goal_states("mpc",
                                      self.abs_path,
                                      self.serializer, 
                                      self.obstacles,                                                                           
                                      self.robot,                                    
                                      self.max_velocity,
                                      self.delta_t,
                                      self.start_state,
                                      self.goal_position,
                                      self.goal_radius,
                                      self.planning_algortihm,
                                      self.path_timeout,
                                      self.num_generated_goal_states,
                                      self.continuous_collision,
                                      self.environment_file,
                                      self.num_cores)
        
        if len(goal_states) == 0:
            logging.error("MPC: Couldn't generate any goal states. Problem seems to be infeasible")
            return
        logging.info("MPC: Generated " + str(len(goal_states)) + " goal states")
        sim.setup_reward_function(self.discount_factor, self.step_penalty, self.illegal_move_penalty, self.exit_reward)
        path_planner.setup(self.robot,                         
                           self.obstacles,  
                           self.max_velocity, 
                           self.delta_t, 
                           self.use_linear_path,
                           self.planning_algortihm,
                           self.path_timeout,
                           self.continuous_collision,
                           self.num_cores)
        if self.dynamic_problem:
            path_planner.setup_dynamic_problem(self.robot_file,
                                               self.environment_file,
                                               self.simulation_step_size,
                                               self.num_control_samples,
                                               self.min_control_duration,
                                               self.max_control_duration,
                                               self.add_intermediate_states,
                                               self.rrt_goal_bias,
                                               self.control_sampler)
             
        # LQG problem matrices (dynamics, observation, costs, base noise).
        A, H, B, V, W, C, D, M_base, N_base = self.problem_setup(self.delta_t, self.robot_dof)
        time_to_generate_paths = 0.0
        if check_positive_definite([C, D]):
            # Sweep either process or observation covariance, per config.
            m_covs = None
            if self.inc_covariance == "process":
                m_covs = np.linspace(self.min_process_covariance, 
                                     self.max_process_covariance, 
                                     self.covariance_steps)                 
            elif self.inc_covariance == "observation":          
                m_covs = np.linspace(self.min_observation_covariance, 
                                     self.max_observation_covariance,
                                     self.covariance_steps)
        # One full batch of simulation runs per covariance value.
        for j in xrange(len(m_covs)):
            M = None
            N = None
            if self.inc_covariance == "process":
                """ The process noise covariance matrix """
                M = self.calc_covariance_value(self.robot, m_covs[j], M_base)
                #M = m_covs[j] * M_base
                    
                """ The observation error covariance matrix """
                N = self.calc_covariance_value(self.robot, 
                                               self.min_observation_covariance, 
                                               N_base, 
                                               covariance_type='observation')                    
            elif self.inc_covariance == "observation":
                M = self.calc_covariance_value(self.robot, 
                                               self.min_process_covariance,
                                               M_base)
                N = self.calc_covariance_value(self.robot, 
                                               m_covs[j],
                                               N_base, 
                                               covariance_type='observation')                    
            P_t = np.array([[0.0 for k in xrange(2 * self.robot_dof)] for l in xrange(2 * self.robot_dof)])
            
            path_planner.setup_path_evaluator(A, B, C, D, H, M, N, V, W,                                     
                                              self.robot, 
                                              self.sample_size, 
                                              self.obstacles,
                                              self.goal_position,
                                              self.goal_radius,                                              
                                              self.robot_file,
                                              self.environment_file)
            sim.setup_problem(A, B, C, D, H, V, W, M, N,
                              self.robot, 
                              self.enforce_control_constraints,
                              self.obstacles, 
                              self.goal_position, 
                              self.goal_radius,
                              self.max_velocity,                                  
                              self.show_viewer_simulation,
                              self.robot_file,
                              self.environment_file)
            sim.set_stop_when_colliding(self.replan_when_colliding)
            if self.dynamic_problem:
                path_evaluator.setup_dynamic_problem()
                sim.setup_dynamic_problem(self.simulation_step_size)
            path_planner.setup_reward_function(self.step_penalty, self.exit_reward, self.illegal_move_penalty, self.discount_factor)
            # Per-covariance accumulators for the statistics written below.
            mean_number_planning_steps = 0.0
            number_of_steps = 0.0
            mean_planning_time = 0.0
            num_generated_paths_run = 0.0
            successful_runs = 0
            num_collisions = 0.0
            final_states= []
            rewards_cov = []
            for k in xrange(self.num_simulation_runs):
                print "MPC: Run " + str(k + 1)                                
                self.serializer.write_line("log.log", tmp_dir, "RUN #" + str(k + 1) + " \n")
                current_step = 0
                x_true = self.start_state
                x_estimate = self.start_state
                x_tilde_linear = np.array([0.0 for i in xrange(2 * self.robot_dof)])
                P_t = np.array([[0.0 for i in xrange(2 * self.robot_dof)] for i in xrange(2 * self.robot_dof)]) 
                deviation_covariance = np.array([[0.0 for i in xrange(2 * self.robot_dof)] for i in xrange(2 * self.robot_dof)])
                estimated_deviation_covariance = np.array([[0.0 for i in xrange(2 * self.robot_dof)] for i in xrange(2 * self.robot_dof)])              
                total_reward = 0.0
                terminal = False
                # Receding-horizon loop: replan from the current estimate,
                # execute a few steps, repeat until goal or step limit.
                while current_step < self.max_num_steps and not terminal:
                    path_planner.set_start_and_goal(x_estimate, goal_states, self.goal_position, self.goal_radius)
                    t0 = time.time()
                    (xs, 
                     us, 
                     zs,
                     control_durations, 
                     num_generated_paths, 
                     best_val,
                     state_covariances,
                     deviation_covariances,
                     estimated_deviation_covariances, 
                     mean_gen_times, 
                     mean_eval_times,
                     total_gen_times,
                     total_eval_times) = path_planner.plan_and_evaluate_paths(self.num_paths, 
                                                                              0, 
                                                                              current_step, 
                                                                              self.evaluation_horizon, 
                                                                              P_t,
                                                                              deviation_covariance,
                                                                              estimated_deviation_covariance, 
                                                                              self.timeout)
                    mean_planning_time += time.time() - t0
                    mean_number_planning_steps += 1.0  
                    num_generated_paths_run += num_generated_paths
                    if len(xs) == 0:
                        logging.error("MPC: Couldn't find any paths from start state" + 
                                      str(x_estimate) + 
                                      " to goal states") 
                                      
                        total_reward = np.array([-self.illegal_move_penalty])[0]
                        current_step += 1                                             
                        break
                    x_tilde = np.array([0.0 for i in xrange(2 * self.robot_dof)])
                    # Execute at most num_execution_steps of the new plan,
                    # clamped to the plan length and the global step budget.
                    n_steps = self.num_execution_steps
                    
                    if n_steps > len(xs) - 1:
                       n_steps = len(xs) - 1
                    if current_step + n_steps > self.max_num_steps:
                        n_steps = self.max_num_steps - current_step
                    (x_true, 
                     x_tilde,
                     x_tilde_linear, 
                     x_estimate, 
                     P_t, 
                     current_step, 
                     total_reward,
                     terminal,
                     estimated_s,
                     estimated_c,
                     history_entries) = sim.simulate_n_steps(xs, us, zs,
                                                             control_durations,
                                                             x_true,                                                              
                                                             x_tilde,
                                                             x_tilde_linear,
                                                             x_estimate,
                                                             P_t,
                                                             total_reward,                                                                 
                                                             current_step,
                                                             n_steps,
                                                             0.0,
                                                             0.0,
                                                             max_num_steps=self.max_num_steps)
                    #print "len(hist) " + str(len(history_entries))
                    #print "len(deviation_covariances) " + str(len(deviation_covariances))
                    #print "len(estimated_deviation_covariances) " + str(len(estimated_deviation_covariances))
                    # Carry the covariances of the last executed step into the
                    # next planning round.
                    deviation_covariance = deviation_covariances[len(history_entries) - 1]
                    estimated_deviation_covariance = estimated_deviation_covariances[len(history_entries) - 1]
                    
                    if (current_step == self.max_num_steps) or terminal:
                        final_states.append(history_entries[-1].x_true)
                        print "len " + str(len(history_entries))
                        print "t " + str(history_entries[-1].t)
                        if terminal:
                            successful_runs += 1
                        
                    history_entries[0].set_replanning(True)                        
                    for l in xrange(len(history_entries)):
                        history_entries[l].set_estimated_covariance(state_covariances[l])                        
                        history_entries[l].serialize(tmp_dir, "log.log")
                        if history_entries[l].collided:                            
                            num_collisions += 1
                            collided = True
                    #logging.warn("MPC: Execution finished. True state is " + str(x_true))
                    #logging.warn("MPC: Estimated state is " + str(x_estimate))
                    logging.warn("MPC: Executed " + str(current_step) + " steps") 
                    logging.warn("MPC: terminal " + str(terminal))
                    if terminal:
                        print "MPC: Final state: " + str(x_true)
                rewards_cov.append(total_reward)
                self.serializer.write_line("log.log", 
                                           tmp_dir,
                                           "Reward: " + str(total_reward) + " \n") 
                self.serializer.write_line("log.log", 
                                           tmp_dir,
                                           "\n")
                number_of_steps += current_step
            mean_planning_time_per_step = mean_planning_time / mean_number_planning_steps 
            mean_planning_time /= self.num_simulation_runs
                                
            """ Calculate the distance to goal area
            """  
            ee_position_distances = [] 
            for state in final_states:
                # First half of the state vector are the joint angles.
                joint_angles = v_double()
                joint_angles[:] = [state[y] for y in xrange(len(state) / 2)]
                ee_position = v_double()
                self.robot.getEndEffectorPosition(joint_angles, ee_position)
                ee_position = np.array([ee_position[y] for y in xrange(len(ee_position))])
                dist = np.linalg.norm(np.array(self.goal_position - ee_position))
                if dist < self.goal_radius:
                    dist = 0.0
                ee_position_distances.append(dist)
            logging.info("MPC: Done. total_reward is " + str(total_reward))
            try:
                n, min_max, mean_distance_to_goal, var, skew, kurt = scipy.stats.describe(np.array(ee_position_distances))
            except:
                print ee_position_distances
                # NOTE(review): bare name `sleep` raises NameError here —
                # apparently used as a deliberate debug crash; confirm intent.
                sleep                         
                
            # Dump the per-covariance statistics to the scratch log, then move
            # it into the stats directory, tagged with the covariance value.
            self.serializer.write_line("log.log", tmp_dir, "################################# \n")
            self.serializer.write_line("log.log",
                                       tmp_dir,
                                       "inc_covariance: " + str(self.inc_covariance) + "\n")
            if self.inc_covariance == "process":                  
                self.serializer.write_line("log.log",
                                           tmp_dir,
                                           "Process covariance: " + str(m_covs[j]) + " \n")
                self.serializer.write_line("log.log",
                                           tmp_dir,
                                           "Observation covariance: " + str(self.min_observation_covariance) + " \n")
            elif self.inc_covariance == "observation":                    
                self.serializer.write_line("log.log",
                                           tmp_dir,
                                           "Process covariance: " + str(self.min_process_covariance) + " \n")
                self.serializer.write_line("log.log",
                                           tmp_dir,
                                           "Observation covariance: " + str(m_covs[j]) + " \n")
            number_of_steps /= self.num_simulation_runs
            self.serializer.write_line("log.log", tmp_dir, "Mean number of steps: " + str(number_of_steps) + " \n")                            
            self.serializer.write_line("log.log", tmp_dir, "Mean num collisions per run: " + str(float(num_collisions) / float(self.num_simulation_runs)) + " \n")
            self.serializer.write_line("log.log", 
                                       tmp_dir, 
                                       "Average distance to goal area: " + str(mean_distance_to_goal) + " \n")
            self.serializer.write_line("log.log", tmp_dir, "Num successes: " + str(successful_runs) + " \n")
            print "succ " + str((100.0 / self.num_simulation_runs) * successful_runs)
            self.serializer.write_line("log.log", tmp_dir, "Percentage of successful runs: " + str((100.0 / self.num_simulation_runs) * successful_runs) + " \n")
            self.serializer.write_line("log.log", tmp_dir, "Mean planning time per run: " + str(mean_planning_time) + " \n")
            self.serializer.write_line("log.log", tmp_dir, "Mean planning time per planning step: " + str(mean_planning_time_per_step) + " \n")
            
            n, min_max, mean, var, skew, kurt = scipy.stats.describe(np.array(rewards_cov))
            print "mean_rewards " + str(mean)
            #plt.plot_histogram_from_data(rewards_cov)
            #sleep
            self.serializer.write_line("log.log", tmp_dir, "Mean rewards: " + str(mean) + " \n")
            self.serializer.write_line("log.log", tmp_dir, "Reward variance: " + str(var) + " \n")
            self.serializer.write_line("log.log", 
                                       tmp_dir, 
                                       "Reward standard deviation: " + str(np.sqrt(var)) + " \n")
            self.serializer.write_line("log.log", tmp_dir, "Seed: " + str(self.seed) + " \n")
            cmd = "mv " + tmp_dir + "/log.log " + dir + "/log_mpc_" + str(m_covs[j]) + ".log"
            os.system(cmd)
        
        # Archive the config, environment and robot model next to the stats.
        cmd = "cp " + self.abs_path + "/config_mpc.yaml " + dir           
        os.system(cmd)
        
        if not os.path.exists(dir + "/environment"):
            os.makedirs(dir + "/environment") 
            
        cmd = "cp " + self.abs_path + "/" + str(self.environment_file) + " " + str(dir) + "/environment"
        os.system(cmd) 
            
        if not os.path.exists(dir + "/model"):
            os.makedirs(dir + "/model")
                
        cmd = "cp " + self.abs_path + "/" + self.robot_file + " " + dir + "/model"
        os.system(cmd)
        print "Done."
        
                
    def init_robot(self, urdf_model_file):
        """Build the robot from its URDF model and validate the start state.

        Sets ``self.robot`` and ``self.robot_dof``.  Returns False (with an
        error logged) when the configured start state does not have
        2 * DOF entries, True otherwise.
        """
        robot = Robot(self.abs_path + "/" + urdf_model_file)
        robot.enforceConstraints(self.enforce_constraints)
        robot.setGravityConstant(self.gravity_constant)
        robot.setAccelerationLimit(self.acceleration_limit)
        self.robot = robot
        self.robot_dof = robot.getDOF()
        if len(self.start_state) == 2 * self.robot_dof:
            return True
        logging.error("MPC: Start state dimension doesn't fit to the robot state space dimension")
        return False
    
    def run_viewer(self, model_file, env_file):
        """Debug helper: propagate the robot forever under zero control,
        render it in the viewer and benchmark integration/collision checks.

        Never returns normally; after 10000 iterations it deliberately raises
        NameError (bare ``sleep``) after printing the timing summary.
        """
        show_viewer = True
        rot = v_double()
        trans = v_double()
        rot[:] = [-1.0, 0.0, 0.0, 0.0]
        trans[:] = [0.0, 0.0, 3.0]
        if show_viewer:
            self.robot.setViewerBackgroundColor(0.6, 0.8, 0.6)
            self.robot.setViewerSize(1280, 768)
            self.robot.setupViewer(model_file, env_file)
        # External forces/torques (currently all zero).
        fx = 0.0
        fy = 0.0
        fz = 0.0
        f_roll = 0.0
        f_pitch = 0.0
        f_yaw = 0.0         
        # Initial state: first half joint angles, second half velocities.
        # The hard-coded vector below overwrites the zero state.
        x = [0.0 for i in xrange(2 * self.robot_dof)]
        x = [0.8110760662014179,
             -0.2416850600576381,
             1.5922944779127346,
             5.1412306972285471,
             -10.0,
             2.5101115097604483]
                
        integration_times = [] 
        collision_check_times1 = []
        collision_check_times2 = []     
        
        y = 0
        while True:                    
            #u_in = [3.0, 1.5, 0.0, 0.0, 0.0, 0.0]
            u_in = [0.0 for i in xrange(self.robot_dof)]
            #u_in[0] = 150.0
            #u_in[1] = 70.0
            current_state = v_double()            
            current_state[:] = x            
            control = v_double()            
            control[:] = u_in
                       
            control_error = v_double()
            ce = [0.0 for i in xrange(self.robot_dof)]
            control_error[:] = ce
            result = v_double()            
            
            # Integrate one simulation step and time it.
            t0 = time.time()                    
            self.robot.propagate(current_state,
                                 control,
                                 control_error,
                                 self.simulation_step_size,
                                 0.03,
                                 result)
            t = time.time() - t0
            integration_times.append(t)
            if y == 10000:
                # Print benchmark summary, then crash on purpose (bare `sleep`).
                t_sum = sum(integration_times)
                t_mean = t_sum / len(integration_times)
                print "mean integration times: " + str(t_mean)
                t_sum = sum(collision_check_times1)
                t_mean = t_sum / len(collision_check_times1)
                print "mean collision check times old " + str(t_mean)
                t_mean = sum(collision_check_times2) / len(collision_check_times2)
                print "mean collision check times new " + str(t_mean)
                sleep            
            
            # Collision objects before and after the step (first half of the
            # state vector are the joint angles).
            ja_start = v_double()            
            ja_start[:] = [current_state[i] for i in xrange(len(current_state) / 2)]                     
            collision_objects_start = self.robot.createRobotCollisionObjects(ja_start)
            joint_angles = v_double()
            joint_angles[:] = [result[i] for i in xrange(len(result) / 2)]
            collision_objects_goal = self.robot.createRobotCollisionObjects(joint_angles)
            #print "ee_velocity " + str([ee_velocity[i] for i in xrange(len(ee_velocity))])
            t_coll_check1 = time.time()            
            t2 = time.time() - t_coll_check1
            collision_check_times2.append(t2)            
            
            '''
            Get the end effector position
            '''
            #ee_position = v_double()            
            #self.robot.getEndEffectorPosition(joint_angles, ee_position)                      
            #ee_collision_objects = self.robot.createEndEffectorCollisionObject(joint_angles)
            # Discrete collision check of the post-step configuration against
            # every obstacle.
            in_collision = False
            t_c = time.time()                      
            for o in self.obstacles:                
                if o.inCollisionDiscrete(collision_objects_goal):                                                            
                    in_collision = True
                    break                              
                '''for i in xrange(len(collision_objects_start)):                        
                    if o.inCollisionContinuous([collision_objects_start[i], collision_objects_goal[i]]):
                        in_collision = True
                        break'''               
            if in_collision:
                print "COLLISION"
                
                
                    
            t = time.time() - t_c            
            collision_check_times1.append(t)          
                                
                                                                          
            # Feed the new state back into the loop and update the viewer.
            x = np.array([result[i] for i in xrange(len(result))])
            cjvals = v_double()
            cjvels = v_double()
            cjvals_arr = [x[i] for i in xrange(len(x) / 2)]
            cjvels_arr = [x[i] for i in xrange(len(x) / 2, len(x))]
            cjvals[:] = cjvals_arr
            cjvels[:] = cjvels_arr
            particle_joint_values = v2_double()
            if show_viewer:
                self.robot.updateViewerValues(cjvals, 
                                              cjvels,
                                              particle_joint_values,
                                              particle_joint_values)
            time.sleep(0.03) 
            
            y += 1
            print y
    
    def setup_scene(self, environment_file, robot):
        """Load the obstacles and the goal area from the environment XML.

        Sets ``self.obstacles``, ``self.goal_position`` (x, y, z) and
        ``self.goal_radius``.  Returns False (after printing an error) when
        the environment defines no goal area, True otherwise.  ``robot`` is
        accepted for interface compatibility but not used here.
        """
        env_path = self.abs_path + "/" + environment_file
        self.obstacles = self.utils.loadObstaclesXML(env_path)
        goal_area = v_double()
        self.utils.loadGoalArea(env_path, goal_area)
        if len(goal_area) == 0:
            print "ERROR: Your environment file doesn't define a goal area"
            return False
        # First three entries are the goal centre, the fourth its radius.
        self.goal_position = [goal_area[i] for i in xrange(0, 3)]
        self.goal_radius = goal_area[3]
        return True
                
    def init_serializer(self):
        """Create the Serializer and its temporary working directory under abs_path."""
        self.serializer = Serializer()
        self.serializer.create_temp_dir(self.abs_path, "mpc")
                
    def clear_stats(self, dir):
        """Empty the statistics directory, creating it when it does not exist.

        The previous implementation shelled out via ``os.system("rm -rf " +
        dir + "/*")``, which breaks on paths containing spaces or shell
        metacharacters, is non-portable, and silently ignores failures.
        Use the standard library instead.
        """
        import shutil
        if os.path.isdir(dir):
            # Remove the directory's contents but keep the directory itself
            # (same effect as the old "rm -rf dir/*", plus hidden files).
            for entry in os.listdir(dir):
                path = os.path.join(dir, entry)
                if os.path.isdir(path) and not os.path.islink(path):
                    shutil.rmtree(path)
                else:
                    os.remove(path)
        else:
            os.makedirs(dir)
                
    def get_average_distance_to_goal_area(self, goal_position, goal_radius, cartesian_coords):
        """Return the mean Euclidean distance of ``cartesian_coords`` to the goal.

        Points that fall inside ``goal_radius`` count as distance 0.  An empty
        coordinate list yields 0.0.

        Fix: ``np.asscalar`` was deprecated in NumPy 1.16 and removed in 1.23;
        ``float()`` is the supported conversion and returns the same value.
        """
        if len(cartesian_coords) == 0:
            return 0.0
        goal_pos = np.array(goal_position)
        avg_dist = 0.0
        for coord in cartesian_coords:
            dist = np.linalg.norm(goal_pos - np.array(coord))
            if dist < goal_radius:
                dist = 0.0
            avg_dist += dist
        if avg_dist == 0.0:
            # All points already inside the goal area.
            return avg_dist
        return float(avg_dist) / len(cartesian_coords)
            
    def calc_covariance_value(self, robot, error, covariance_matrix, covariance_type='process'):
        """Fill the diagonal of ``covariance_matrix`` from a percentage error.

        ``error`` is interpreted as a percentage of each quantity's range:
        torque range (dynamic process noise), max velocity (kinematic process
        noise), or position/velocity limits (observation noise).  The matrix
        is modified in place and also returned.
        """
        active_joints = v_string()
        robot.getActiveJoints(active_joints)        
        if covariance_type == 'process':
            if self.dynamic_problem:            
                # Dynamic problem: noise scales with each joint's torque range.
                torque_limits = v_double()
                robot.getJointTorqueLimits(active_joints, torque_limits)
                torque_limits = [torque_limits[i] for i in xrange(len(torque_limits))]
                for i in xrange(len(torque_limits)):
                    torque_range = 2.0 * torque_limits[i]
                    covariance_matrix[i, i] = np.square((torque_range / 100.0) * error)
            else:
                # Kinematic problem: noise scales with the velocity limit.
                for i in xrange(self.robot_dof):
                    covariance_matrix[i, i] = np.square((self.max_velocity / 100.0) * error)
        else:
            # Observation noise: position part uses the joint position range,
            # velocity part (offset by robot_dof) uses the velocity range.
            lower_position_limits = v_double()
            upper_position_limits = v_double()
            velocity_limits = v_double()            
            robot.getJointLowerPositionLimits(active_joints, lower_position_limits)
            robot.getJointUpperPositionLimits(active_joints, upper_position_limits)
            robot.getJointVelocityLimits(active_joints, velocity_limits)            
            for i in xrange(self.robot_dof):
                position_range = upper_position_limits[i] - lower_position_limits[i]
                covariance_matrix[i, i] = np.square((position_range / 100.0) * error)            
                velocity_range = 2.0 * velocity_limits[i]
                covariance_matrix[i + self.robot_dof, i + self.robot_dof] = np.square((velocity_range / 100.0) * error)          
        return covariance_matrix            
        
            
    def problem_setup(self, delta_t, num_links):
        """Assemble the LQG problem matrices for ``num_links`` links.

        Returns (A, H, B, V, W, C, D, M_base, N_base): dynamics, observation,
        control/noise input, cost weights, and base noise covariances.
        """
        state_dim = 2 * num_links
        # Identity dynamics, observation and noise models in the full state.
        A = np.identity(state_dim)
        H = np.identity(state_dim)
        W = np.identity(state_dim)
        # Quadratic penalty on path deviation.
        C = self.path_deviation_cost * np.identity(state_dim)
        # Inputs of size num_links mapped into the 2*num_links state.
        B = delta_t * np.vstack((np.identity(num_links),
                                 np.zeros((num_links, num_links))))
        V = np.vstack((np.identity(num_links),
                       np.zeros((num_links, num_links))))
        D = self.control_deviation_cost * np.identity(num_links)
        # NOTE(review): M_base is sized with self.robot_dof while everything
        # else uses num_links -- presumably always equal here; confirm.
        M_base = np.identity(self.robot_dof)
        N_base = np.identity(2 * self.robot_dof)
        return A, H, B, V, W, C, D, M_base, N_base
        
    def set_params(self, config):
        """Copy the configuration dictionary onto instance attributes.

        Most entries map 1:1 from config key to attribute; a handful use a
        different attribute name, and ``delta_t`` is derived from the control
        rate.  A missing key raises KeyError, as before.
        """
        same_name = [
            "use_linear_path", "max_velocity", "start_state",
            "num_simulation_runs", "num_bins", "min_process_covariance",
            "max_process_covariance", "covariance_steps",
            "min_observation_covariance", "max_observation_covariance",
            "discount_factor", "illegal_move_penalty", "step_penalty",
            "exit_reward", "stop_when_terminal", "enforce_constraints",
            "enforce_control_constraints", "sample_size", "plot_paths",
            "dynamic_problem", "simulation_step_size", "path_timeout",
            "show_viewer_evaluation", "show_viewer_simulation",
            "path_deviation_cost", "control_deviation_cost",
            "num_control_samples", "min_control_duration",
            "max_control_duration", "inc_covariance",
            "add_intermediate_states", "num_generated_goal_states",
            "robot_file", "environment_file", "rrt_goal_bias",
            "control_sampler", "max_num_steps", "timeout", "seed",
            "num_cores", "replan_when_colliding", "acceleration_limit",
            "num_execution_steps",
        ]
        for key in same_name:
            setattr(self, key, config[key])
        renamed = [
            ("num_paths", "num_generated_paths"),
            # Attribute name keeps the historical typo; other methods read it.
            ("planning_algortihm", "planning_algorithm"),
            ("continuous_collision", "continuous_collision_check"),
            ("gravity_constant", "gravity"),
            ("evaluation_horizon", "horizon"),
        ]
        for attr, key in renamed:
            setattr(self, attr, config[key])
        # The control rate (Hz) determines the fixed integration time step.
        self.delta_t = 1.0 / config['control_rate']
Beispiel #54
0
class ApiController:
    """Generic JSON CRUD controller for a Django model.

    Routes HTTP verbs on collection and member endpoints to the
    create/list/update/destroy handlers below and serializes results
    through ``Serializer``.
    """

    def __init__(self, model, **kwargs):
        self.model = model
        self.fields = kwargs.get("fields") or self.model._meta.get_all_field_names()
        self.name = kwargs.get("name")
        self.parent_name = kwargs.get("parent_name", False)
        # URL parameter names derived from the resource / parent names.
        self.parent_id = self.parent_name + "_id" if self.parent_name else ""
        self.id = self.name + "_id"
        self.set_name = self.name + "_set"
        self.relations = kwargs.get("relations") or [
            related.get_accessor_name()
            for related in model._meta.get_all_related_objects()
        ]
        self.parent_model = kwargs.get("parent_model")
        self.serialize = Serializer(self.fields)

    @csrf_exempt
    def nested_all(self, request, **kwargs):
        # Collection endpoint scoped under a parent resource.
        handler = self.create if request.method == "POST" else self.nested_list
        return run_response(handler, request, kwargs)

    @csrf_exempt
    def all(self, request, **kwargs):
        # Top-level collection endpoint.
        handler = self.create if request.method == "POST" else self.list
        return run_response(handler, request, kwargs)

    @csrf_exempt
    def all_or_authenticate(self, request, **kwargs):
        # POST registers a user (and returns a JWT); anything else lists.
        handler = self.create_user if request.method == "POST" else self.list
        return run_response(handler, request, kwargs)

    @csrf_exempt
    def single(self, request, **kwargs):
        # Member endpoint: update, delete or show a single record.
        dispatch = {
            "POST": self.update,
            "PATCH": self.update,
            "PUT": self.update,
            "DELETE": self.destroy,
        }
        handler = dispatch.get(request.method, self.show)
        return run_response(handler, request, kwargs)

    def create_user(self, request, params):
        """Create a user record and hand back a signed JWT for it."""
        attributes = self.serialize.to_dict(request.data)
        if params.get(self.parent_id):
            attributes[self.parent_id] = params[self.parent_id]
        user = self.model(**attributes)
        user.save()
        token = jwt.encode({"user_id": user.id}, SECRET_KEY)
        return {"success": True, "token": token, "user": self.serialize.model_to_dict(user)}

    def create(self, request, params):
        """Insert a new record, attaching the parent id when present."""
        attributes = self.serialize.to_dict(request.data)
        if params.get(self.parent_id):
            attributes[self.parent_id] = params[self.parent_id]
        record = self.model(**attributes)
        record.save()
        return self.serialize.model(record)

    def update(self, request, params):
        """Apply the request payload to the record with the given pk."""
        matches = self.model.objects.filter(pk=params[self.id])
        matches.update(**self.serialize.to_dict(request.data))
        return self.serialize.model(matches[0])

    def destroy(self, request, params):
        """Delete the record with the given pk."""
        self.model.objects.get(pk=params[self.id]).delete()
        return {"status": "success", "message": "Record deleted."}

    def list(self, request, params):
        """Serialize every record of the model."""
        return self.serialize.list(self.model.objects.all())

    def nested_list(self, request, params):
        """Serialize the records belonging to the parent record."""
        # Probe the accessor first so a bad set_name fails loudly.
        getattr(self.parent_model, self.set_name)
        parent = self.parent_model.objects.get(pk=params.get(self.parent_id))
        return self.serialize.list(getattr(parent, self.set_name).all())

    def show(self, request, params):
        """Serialize one record together with its related collections."""
        record = self.model.objects.get(pk=params[self.id])
        return self.serialize.model(record, self.add_relations(record))

    def add_relations(self, record):
        """Collect each related queryset of *record* as serialized dicts."""
        related_data = {}
        print >>sys.stderr, "relations", self.relations
        if self.relations:
            for accessor in self.relations:
                related_data[accessor] = self.serialize.list_to_dict(getattr(record, accessor).all())
                print >>sys.stderr, "DATA:", "add relations", getattr(record, accessor).all()
        print >>sys.stderr, "relations", related_data
        return related_data
 def setUp(self):
     """Build a fresh Serializer and reset the handler-call counter."""
     self.s = Serializer()
     self.calls = 0
Beispiel #56
0
class SimulationRunner:
    """Drives a full simulation experiment for one named problem.

    Reads the experiment configuration, clears stale statistics and log
    files, then executes the run — all as side effects of construction.
    """

    def __init__(self, problem, config):
        # problem: directory name under ../problems/
        # config:  config file name, resolved relative to this script's dir
        self.abs_path = os.path.dirname(os.path.abspath(__file__))
        self.serializer = Serializer()
        self.config_file = config
        self.config = self.serializer.read_config(self.abs_path + "/" + config)
        self.ik_solution_generator = IKSolutionGenerator()
        # Construction has side effects: wipe old stats, then run.
        self.clear_stats(problem)
        self.run(problem)

    def clear_stats(self, problem):
        """Delete old per-problem log files and reset the stats directory."""
        for file in glob.glob(self.abs_path + "/../problems/" + problem + "/log.log"):
            os.remove(file)
        # NOTE(review): this loop duplicates the one above verbatim — likely
        # a copy-paste leftover; confirm whether a second glob was intended.
        for file in glob.glob(self.abs_path + "/../problems/" + problem + "/log.log"):
            os.remove(file)
        if os.path.isdir(self.abs_path + "/stats"):
            cmd = "rm -rf " + self.abs_path + "/stats/*"
            os.system(cmd)
        else:
            os.makedirs(self.abs_path + "/stats")

    def write_rewards(self, log_file_path):
        """Parse per-run rewards from the log and append variance/stddev lines."""
        rewards = []
        for line in tuple(open(log_file_path, 'r')):
            if "Reward" in line:
                rewards.append(float(line.split(":")[1][1:]))
        n, min_max, mean_rewards, reward_variances, skew, kurt = stats.describe(np.array(rewards))
        # scipy reports NaN variance for a single sample; treat that as zero.
        if np.isnan(np.asscalar(reward_variances)):
            reward_variances = np.array(0.0)

        with open(log_file_path, "a+") as f:
            f.write("Reward variance: " + str(np.asscalar(reward_variances)) + " \n")
        with open(log_file_path, "a+") as f:
            f.write("Reward standard deviation: " + str(np.asscalar(np.sqrt(reward_variances))) + " \n")

    def read_float_data(self, log_file_path, searchstring):
        """Return the float after the last occurrence of *searchstring* (0.0 if absent)."""
        data = 0.0
        for line in tuple(open(log_file_path, 'r')):
            if searchstring in line:
                data = float(line.split(":")[1].split(" ")[1].rstrip("\n"))
        return data

    def read_int_data(self, log_file_path, searchstring):
        """Return the int after the last occurrence of *searchstring* (0.0 if absent)."""
        data = 0.0
        for line in tuple(open(log_file_path, 'r')):
            if searchstring in line:
                data = int(float(line.split(":")[1].split(" ")[1].rstrip("\n")))
        return data

    def read_avg_path_length(self, log_file_path, num_runs):
        """Average the number of 'S:' lines in the log over *num_runs*."""
        l = 0
        for line in tuple(open(log_file_path, 'r')):
            if "S:" in line:
                l += 1
        # NOTE(review): integer division under Python 2 — result truncates.
        return l / num_runs

    def replace_option_in_config(self, option_name, value, problem, is_bool=False):
        """Rewrite one option line in the problem's default.cfg in place.

        Booleans are rendered as the strings "true"/"false"; if the option
        name is not found the file is rewritten unchanged.
        """
        bool_str = None
        if is_bool:
            bool_str = "false"
            if value:
                bool_str = "true"
        lines = list(open(self.abs_path + "/../problems/" + problem + "/default.cfg", 'r'))
        idx = -1
        # Remember the *last* line mentioning the option (substring match).
        for i in xrange(len(lines)):
            if option_name in lines[i]:
                idx = i
        if not idx == -1:
            if is_bool:
                lines[idx] = option_name + " = " + str(bool_str) + "\n"
            else:
                lines[idx] = option_name + " = " + str(value) + "\n"
        os.remove(self.abs_path + "/../problems/" + problem + "/default.cfg")
        with open(self.abs_path + "/../problems/" + problem + "/default.cfg", 'a+') as f:
            for line in lines:
                f.write(line)

    def compareEnvironmentToTmpFiles(self, problem, model_file, environment_path, environment_file):
        """Return True iff the cached tmp copies match the current inputs.

        On a full match the cached goalstates.txt is copied back into the
        problem directory so previously generated goal states are reused.
        Any mismatch (or missing cache) returns False.
        """
        if not os.path.exists(self.abs_path + "/tmp/" + problem):
            os.makedirs(self.abs_path + "/tmp/" + problem)
            return False

        if not (os.path.exists(self.abs_path + '/tmp/' + problem + '/' + environment_file) and
                os.path.exists(self.abs_path + '/tmp/' + problem + '/' + self.config_file) and
                os.path.exists(self.abs_path + '/tmp/' + problem + '/' + model_file)):
            return False

        # Environment file must be line-for-line identical to the cached copy.
        with open(self.abs_path + '/../problems/' + problem + "/environment/" + environment_file, 'r') as f1, open(self.abs_path + '/tmp/' + problem + '/' + environment_file, 'r') as f2:
            missing_from_b = [
                diff[2:] for diff in Differ().compare(f1.readlines(), f2.readlines())
                if diff.startswith('-')
            ]
            if len(missing_from_b) != 0:
                return False

        # Same check for the robot model file.
        with open(self.abs_path + '/../problems/' + problem + "/model/" + model_file, 'r') as f1, open(self.abs_path + '/tmp/' + problem + '/' + model_file, 'r') as f2:
            missing_from_b = [
                diff[2:] for diff in Differ().compare(f1.readlines(), f2.readlines())
                if diff.startswith('-')
            ]
            if len(missing_from_b) != 0:
                return False


        # The config may differ, but only in options that do not affect
        # goal-state generation; these four invalidate the cache.
        with open(self.abs_path + "/" + self.config_file, 'r') as f1, open(self.abs_path + '/tmp/' + problem + '/' + self.config_file, 'r') as f2:
            missing_from_b = [
                diff[2:] for diff in Differ().compare(f1.readlines(), f2.readlines())
                if diff.startswith('-')
            ]

            for i in xrange(len(missing_from_b)):
                if ("num_links" in missing_from_b[i] or
                    "workspace_dimensions" in missing_from_b[i] or
                    "goal_position" in missing_from_b[i] or
                    "goal_radius" in missing_from_b[i]):
                    return False

        """ If same, use existing goalstates """
        try:
            shutil.copy2(self.abs_path + '/tmp/' + problem + '/goalstates.txt', self.abs_path + '/../problems/' + problem + "/goalstates.txt")
        except:
            return False
        return True

    def copyToTmp(self, problem, model_file, environment_file):
        """Cache environment, model, goal states and config under tmp/<problem>."""
        shutil.copy2(self.abs_path + '/../problems/' + problem + "/environment/" + environment_file, self.abs_path + '/tmp/' + problem + '/' + environment_file)
        shutil.copy2(self.abs_path + '/../problems/' + problem + "/model/" + model_file, self.abs_path + '/tmp/' + problem + '/' + model_file)
        shutil.copy2(self.abs_path + '/../problems/' + problem + "/goalstates.txt", self.abs_path + '/tmp/' + problem + '/goalstates.txt')
        shutil.copy2(self.abs_path + "/" + self.config_file, self.abs_path + '/tmp/' + problem + '/' + self.config_file)

    def get_average_distance_to_goal_area(self, goal_position, goal_radius, cartesian_coords):
        """Mean Euclidean distance of *cartesian_coords* to the goal sphere.

        Points inside the goal radius contribute a distance of zero.
        """
        avg_dist = 0.0
        goal_pos = np.array(goal_position)
        for i in xrange(len(cartesian_coords)):
            cart = np.array(cartesian_coords[i])
            dist = np.linalg.norm(goal_pos - cart)
            if dist < goal_radius:
                dist = 0.0
            avg_dist += dist
        if avg_dist == 0.0:
            return avg_dist
        return np.asscalar(avg_dist) / len(cartesian_coords)

    def write_mean_num_collisions(self, log_file_path, num_runs):
        """Count collision lines in the log and append the per-run mean."""
        num_collisions = 0.0
        with open(log_file_path, 'r') as f:
            for line in f:
                if "Trans: Collision detected: True" in line:
                    num_collisions += 1
        mean_num_collisions = num_collisions / num_runs
        with open(log_file_path, "a+") as f:
            f.write("Mean num collisions per run: " + str(mean_num_collisions) + " \n")

    def run(self, problem):
        """Set up and execute the experiment for *problem*.

        Loads obstacles, goal area and robot model from the problem
        directory using the configured options.

        NOTE(review): this method ends abruptly after printing the goal
        position — the remainder of the implementation appears to have
        been lost; confirm against the original source.
        """
        alg = "alg"
        if "manipulator_discrete" in problem:
            alg = "abt"
        elif "manipulator_continuous" in problem:
            alg = "gps"
        num_runs = self.config['num_simulation_runs']
        init_state = self.config['init_state']
        discount = self.config['discount_factor']
        control_rate = self.config['control_rate']
        illegal_move_penalty = self.config['illegal_move_penalty']
        illegal_action_penalty = self.config['illegal_action_penalty']
        step_penalty = self.config['step_penalty']
        exit_reward = self.config['exit_reward']
        planning_time = self.config['planning_time']
        particle_count = self.config['particle_count']
        num_steps = self.config['num_steps']
        save_particles = self.config['save_particles']
        plot_particles = self.config['plot_particles']
        verbose = self.config['verbose']
        verbose_rrt = self.config['verbose_rrt']
        rrt_stretching_factor = self.config['rrt_stretching_factor']
        histories_per_step = self.config['histories_per_step']
        prune_every_step = self.config['prune_every_step']
        continuous_collision = self.config['continuous_collision_check']
        check_linear_path = self.config['check_linear_path']
        enforce_constraints = self.config['enforce_constraints']
        planner = self.config['planner']
        dynamic_problem = self.config['dynamic_problem']
        simulation_step_size = self.config['simulation_step_size']
        planning_velocity = self.config['planning_velocity']
        show_viewer = self.config['show_viewer']
        max_observation_distance = self.config['max_observation_distance']
        particle_replenish_timeout = self.config['particle_replenish_timeout']
        state_sampling_strategy = self.config['state_sampling_strategy']
        num_samples_weighted_lazy = self.config['num_samples_weighted_lazy']
        gravity = self.config['gravity']
        particle_plot_limit = self.config['particle_plot_limit']
        num_effective_particles = self.config['num_effective_particles']
        save_policy = self.config['save_policy']
        load_initial_policy = self.config['load_initial_policy']
        initial_planning_time = self.config['initial_planning_time']
        particle_filter = self.config['particle_filter']
        num_actions_per_joint = self.config['num_actions_per_joint']
        environment_file = self.config['environment_file']
        robot_file = self.config['robot_file']
        num_generated_goal_states = self.config['num_generated_goal_states']

        model_file = "test.xml"
        environment_path = os.path.abspath(self.abs_path + "/../problems/" + problem + "/environment/" + environment_file)
        utils = Utils()
        if not os.path.exists(environment_path):
            print "FILE DOES NOT EXISTS"
        print "load obstacles"
        obstacles = utils.loadObstaclesXML(environment_path)
        print "loaded obstacles"

        # Goal area is encoded as [x, y, z, radius] in the environment file.
        goal_area = v_double()
        utils.loadGoalArea(environment_path, goal_area)
        if len(goal_area) == 0:
            print "ERROR: Your environment file doesn't define a goal area"
            return False
        goal_position = [goal_area[i] for i in xrange(0, 3)]
        goal_radius = goal_area[3]
        print goal_position
        print goal_radius
        logging_level = logging.WARN
        if verbose:
            logging_level = logging.DEBUG
        logging.basicConfig(format='%(levelname)s: %(message)s', level=logging_level)
        if not os.path.exists(self.abs_path + "/../problems/" + problem + "/model/" + robot_file):
            print "what"
        robot = Robot(os.path.abspath(self.abs_path + "/../problems/" + problem + "/model/" + robot_file))


        # init_state holds joint angles followed by joint velocities; only
        # the first half (the angles) is used here — TODO confirm.
        ja = v_double()
        init_angles = [init_state[i] for i in xrange(len(init_state) / 2)]
        ja[:] = init_angles


        print [goal_position[i] for i in xrange(len(goal_position))]
class TestSerialize(unittest.TestCase):
    """Round-trip tests for Serializer against generated protobuf messages."""

    @classmethod
    def _clearMsgDefs(self):
        """Empty the global message-module registry."""
        # Clear all non-test messages
        try:
            while True:
                messages.MESSAGES.pop()
        except IndexError:
            pass

    @classmethod
    def setUpClass(self):
        # Register only the generated test module; level -1 is the Python 2
        # implicit-relative-import mode of __import__.
        self._clearMsgDefs()
        messages.MESSAGES.append(__import__("test_complex_pb2", globals(), locals(), [], -1))

    def setUp(self):
        # calls counts how many times a registered handler fired.
        self.calls = 0
        self.s = Serializer()

    def test_unnamed_serialize(self):
        """Positional (tuple/list) field specs survive a serialize round-trip."""
        enc = self.s.serialize("TestComplex", [("normal", ["m1", "m2"])])

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            self.assertEqual(obj.nesteds[0].normal, "normal")
            self.assertEqual(obj.nesteds[0].multiple[0], "m1")
            self.assertEqual(obj.nesteds[0].multiple[1], "m2")
            self.calls += 1

        self.s.add_handler("TestComplex", f)
        self.s.unserialize(enc)
        self.assertEqual(self.calls, 1)

    def test_named_serialize(self):
        """Keyword (dict) field specs survive a serialize round-trip."""
        enc = self.s.serialize("TestComplex",
                            nesteds = [{
                                            "normal": "normal",
                                            "multiple": [
                                                        "m1",
                                                        "m2"
                                                        ]
                                        }]
                            )

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            self.assertEqual(obj.nesteds[0].normal, "normal")
            self.assertEqual(obj.nesteds[0].multiple[0], "m1")
            self.assertEqual(obj.nesteds[0].multiple[1], "m2")
            self.calls += 1

        self.s.add_handler("TestComplex", f)
        self.s.unserialize(enc)
        self.assertEqual(self.calls, 1)

    def test_nested_length(self):
        """Over-long repeated-field values are rejected."""
        with self.assertRaises(FieldTooLongException):
            self.s.serialize("TestComplex", [("", ["longerthantwentycharacters"])])

    def test_obj_onto_repeated(self):
        """A tuple where a repeated list is expected raises a type error."""
        with self.assertRaises(FieldWrongTypeException):
            self.s.serialize("TestComplex", [("", ("m1",))])

    def test_repeated_onto_obj(self):
        """A list where a message tuple is expected raises a type error."""
        with self.assertRaises(FieldWrongTypeException):
            self.s.serialize("TestComplex", [["", ["m1"]]])

    def test_unicode_strings(self):
        """Arbitrary (non-ASCII) byte strings round-trip byte for byte."""
        sbs = '}\xc8A\xc1\x8a}D\xe8\xea\x93}'
        enc = self.s.serialize("TestComplex", [(sbs, ["m1", "m2"])])

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            # Compare character codes to sidestep str/unicode coercion.
            for i in range(len(sbs)):
                self.assertEqual(ord(obj.nesteds[0].normal[i]), ord(sbs[i]))
            self.assertEqual(obj.nesteds[0].multiple[0], "m1")
            self.assertEqual(obj.nesteds[0].multiple[1], "m2")
            self.calls += 1

        self.s.add_handler("TestComplex", f)
        self.s.unserialize(enc)
        self.assertEqual(self.calls, 1)

    def test_optional_missing(self):
        """An omitted optional field deserializes as falsy."""
        enc = self.s.serialize("TestOptional")

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            self.assertTrue(not obj.field)
            self.calls += 1

        self.s.add_handler("TestOptional", f)
        self.s.unserialize(enc)
        self.assertEqual(self.calls, 1)

    def test_optional_set(self):
        """A provided optional field round-trips intact."""
        enc = self.s.serialize("TestOptional", "test")

        def f(obj):
            self.assertTrue(obj.IsInitialized())
            self.assertEqual(obj.field, "test")
            self.calls += 1

        self.s.add_handler("TestOptional", f)
        self.s.unserialize(enc)
        self.assertEqual(self.calls, 1)
Beispiel #58
0
 def parse_from_string(self, serialized):
     """Decode *serialized* into a new instance of this message class."""
     return Serializer().deserialize(serialized, self.__class__)
Beispiel #59
0
class SyncObj(object):
    def __init__(self, selfNodeAddr, otherNodesAddrs, conf=None):

        if conf is None:
            self.__conf = SyncObjConf()
        else:
            self.__conf = conf

        self.__selfNodeAddr = selfNodeAddr
        self.__otherNodesAddrs = otherNodesAddrs
        self.__unknownConnections = {}  # descr => _Connection
        self.__raftState = _RAFT_STATE.FOLLOWER
        self.__raftCurrentTerm = 0
        self.__votesCount = 0
        self.__raftLeader = None
        self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
        self.__raftLog = []  # (command, logID, term)
        self.__raftLog.append((None, 1, self.__raftCurrentTerm))
        self.__raftCommitIndex = 1
        self.__raftLastApplied = 1
        self.__raftNextIndex = {}
        self.__raftMatchIndex = {}
        self.__lastSerializedTime = time.time()
        self.__forceLogCompaction = False
        self.__socket = None
        self.__resolver = DnsCachingResolver(self.__conf.dnsCacheTime, self.__conf.dnsFailCacheTime)
        self.__serializer = Serializer(self.__conf.fullDumpFile, self.__conf.logCompactionBatchSize)
        self.__poller = createPoller()
        self.__isInitialized = False
        self.__lastInitTryTime = 0

        self._methodToID = {}
        self._idToMethod = {}
        methods = sorted([m for m in dir(self) if callable(getattr(self, m))])
        for i, method in enumerate(methods):
            self._methodToID[method] = i
            self._idToMethod[i] = getattr(self, method)

        self.__thread = None
        self.__mainThread = None
        self.__initialised = None
        self.__commandsQueue = Queue.Queue(self.__conf.commandsQueueSize)
        self.__nodes = []
        self.__newAppendEntriesTime = 0

        self.__commandsWaitingCommit = collections.defaultdict(list)  # logID => [(termID, callback), ...]
        self.__commandsLocalCounter = 0
        self.__commandsWaitingReply = {}  # commandLocalCounter => callback

        self.__properies = set()
        for key in self.__dict__:
            self.__properies.add(key)

        if self.__conf.autoTick:
            self.__mainThread = threading.current_thread()
            self.__initialised = threading.Event()
            self.__thread = threading.Thread(target=SyncObj._autoTickThread, args=(weakref.proxy(self),))
            self.__thread.start()
            while not self.__initialised.is_set():
                pass
        else:
            self.__initInTickThread()

    def __initInTickThread(self):
        try:
            self.__lastInitTryTime = time.time()
            self.__bind()
            self.__nodes = []
            for nodeAddr in self.__otherNodesAddrs:
                self.__nodes.append(Node(self, nodeAddr))
                self.__raftNextIndex[nodeAddr] = 0
                self.__raftMatchIndex[nodeAddr] = 0
            self.__needLoadDumpFile = True
            self.__isInitialized = True
        except:
            LOG_CURRENT_EXCEPTION()

    def _applyCommand(self, command, callback):
        try:
            self.__commandsQueue.put_nowait((command, callback))
        except Queue.Full:
            callback(None, FAIL_REASON.QUEUE_FULL)

    def _checkCommandsToApply(self):
        for i in xrange(self.__conf.maxCommandsPerTick):
            if self.__raftLeader is None and self.__conf.commandsWaitLeader:
                break
            try:
                command, callback = self.__commandsQueue.get_nowait()
            except Queue.Empty:
                break

            requestNode, requestID = None, None
            if isinstance(callback, tuple):
                requestNode, requestID = callback

            if self.__raftState == _RAFT_STATE.LEADER:
                idx, term = self.__getCurrentLogIndex() + 1, self.__raftCurrentTerm
                self.__raftLog.append((command, idx, term))
                if requestNode is None:
                    if callback is not None:
                        self.__commandsWaitingCommit[idx].append((term, callback))
                else:
                    self.__send(requestNode, {
                        'type': 'apply_command_response',
                        'request_id': requestID,
                        'log_idx': idx,
                        'log_term': term,
                    })
                if not self.__conf.appendEntriesUseBatch:
                    self.__sendAppendEntries()
            elif self.__raftLeader is not None:
                if requestNode is None:
                    message = {
                        'type': 'apply_command',
                        'command': command,
                    }

                    if callback is not None:
                        self.__commandsLocalCounter += 1
                        self.__commandsWaitingReply[self.__commandsLocalCounter] = callback
                        message['request_id'] = self.__commandsLocalCounter

                    self.__send(self.__raftLeader, message)
                else:
                    self.__send(requestNode, {
                        'type': 'apply_command_response',
                        'request_id': requestID,
                        'error': FAIL_REASON.NOT_LEADER,
                    })
            else:
                if requestNode is None:
                    callback(None, FAIL_REASON.MISSING_LEADER)
                else:
                    self.__send(requestNode, {
                        'type': 'apply_command_response',
                        'request_id': requestID,
                        'error': FAIL_REASON.NOT_LEADER,
                    })

    def _autoTickThread(self):
        self.__initInTickThread()
        self.__initialised.set()
        time.sleep(0.1)
        try:
            while True:
                if not self.__mainThread.is_alive():
                    break
                self._onTick(self.__conf.autoTickPeriod)
        except ReferenceError:
            pass

    def _onTick(self, timeToWait=0.0):
        if not self.__isInitialized:
            if time.time() >= self.__lastInitTryTime + self.__conf.bindRetryTime:
                self.__initInTickThread()

        if not self.__isInitialized:
            time.sleep(timeToWait)
            return

        if self.__needLoadDumpFile:
            if self.__conf.fullDumpFile is not None and os.path.isfile(self.__conf.fullDumpFile):
                self.__loadDumpFile()
            self.__needLoadDumpFile = False

        if self.__raftState in (_RAFT_STATE.FOLLOWER, _RAFT_STATE.CANDIDATE):
            if self.__raftElectionDeadline < time.time():
                self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
                self.__raftLeader = None
                self.__raftState = _RAFT_STATE.CANDIDATE
                self.__raftCurrentTerm += 1
                self.__votesCount = 1
                for node in self.__nodes:
                    node.send({
                        'type': 'request_vote',
                        'term': self.__raftCurrentTerm,
                        'last_log_index': self.__getCurrentLogIndex(),
                        'last_log_term': self.__getCurrentLogTerm(),
                    })
                self.__onLeaderChanged()

        if self.__raftState == _RAFT_STATE.LEADER:
            while self.__raftCommitIndex < self.__getCurrentLogIndex():
                nextCommitIndex = self.__raftCommitIndex + 1
                count = 1
                for node in self.__nodes:
                    if self.__raftMatchIndex[node.getAddress()] >= nextCommitIndex:
                        count += 1
                if count > (len(self.__nodes) + 1) / 2:
                    self.__raftCommitIndex = nextCommitIndex
                else:
                    break

            if time.time() > self.__newAppendEntriesTime:
                self.__sendAppendEntries()

        if self.__raftCommitIndex > self.__raftLastApplied:
            count = self.__raftCommitIndex - self.__raftLastApplied
            entries = self.__getEntries(self.__raftLastApplied + 1, count)
            for entry in entries:
                currentTermID = entry[2]
                subscribers = self.__commandsWaitingCommit.pop(entry[1], [])
                res = self.__doApplyCommand(entry[0])
                for subscribeTermID, callback in subscribers:
                    if subscribeTermID == currentTermID:
                        callback(res, FAIL_REASON.SUCCESS)
                    else:
                        callback(None, FAIL_REASON.DISCARDED)

                self.__raftLastApplied += 1

        self._checkCommandsToApply()
        self.__tryLogCompaction()

        for node in self.__nodes:
            node.connectIfRequired()

        self.__poller.poll(timeToWait)

    def _getLastCommitIndex(self):
        return self.__raftCommitIndex

    def _printStatus(self):
        LOG_DEBUG('self', self.__selfNodeAddr)
        LOG_DEBUG('leader', self.__raftLeader)
        LOG_DEBUG('partner nodes', len(self.__nodes))
        for n in self.__nodes:
            LOG_DEBUG(n.getAddress(), n.getStatus())
        LOG_DEBUG('log len:', len(self.__raftLog))
        LOG_DEBUG('log size bytes:', len(zlib.compress(cPickle.dumps(self.__raftLog, -1))))
        LOG_DEBUG('last applied:', self.__raftLastApplied)
        LOG_DEBUG('commit idx:', self.__raftCommitIndex)
        LOG_DEBUG('next node idx:', self.__raftNextIndex)

    def _forceLogCompaction(self):
        self.__forceLogCompaction = True

    def __doApplyCommand(self, command):
        args = []
        kwargs = {
            '_doApply': True,
        }
        if not isinstance(command, tuple):
            funcID = command
        elif len(command) == 2:
            funcID, args = command
        else:
            funcID, args, newKwArgs = command
            kwargs.update(newKwArgs)

        return self._idToMethod[funcID](*args, **kwargs)

    def _onMessageReceived(self, nodeAddr, message):

        if message['type'] == 'request_vote':

            if message['term'] > self.__raftCurrentTerm:
                self.__raftCurrentTerm = message['term']
                self.__raftState = _RAFT_STATE.FOLLOWER

            if self.__raftState in (_RAFT_STATE.FOLLOWER, _RAFT_STATE.CANDIDATE):
                lastLogTerm = message['last_log_term']
                lastLogIdx = message['last_log_index']
                if message['term'] >= self.__raftCurrentTerm:
                    if lastLogTerm < self.__getCurrentLogTerm():
                        return
                    if lastLogTerm == self.__getCurrentLogTerm() and \
                            lastLogIdx < self.__getCurrentLogIndex():
                        return

                    self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
                    self.__send(nodeAddr, {
                        'type': 'response_vote',
                        'term': message['term'],
                    })

        if message['type'] == 'append_entries' and message['term'] >= self.__raftCurrentTerm:
            self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
            if self.__raftLeader != nodeAddr:
                self.__onLeaderChanged()
            self.__raftLeader = nodeAddr
            self.__raftCurrentTerm = message['term']
            self.__raftState = _RAFT_STATE.FOLLOWER
            newEntries = message.get('entries', [])
            serialized = message.get('serialized', None)
            leaderCommitIndex = message['commit_index']

            # Regular append entries
            if 'prevLogIdx' in message:
                prevLogIdx = message['prevLogIdx']
                prevLogTerm = message['prevLogTerm']
                prevEntries = self.__getEntries(prevLogIdx)
                if not prevEntries:
                    self.__sendNextNodeIdx(nodeAddr, reset=True)
                    return
                if prevEntries[0][2] != prevLogTerm:
                    self.__deleteEntriesFrom(prevLogIdx)
                    self.__sendNextNodeIdx(nodeAddr, reset=True)
                    return
                if len(prevEntries) > 1:
                    self.__deleteEntriesFrom(prevLogIdx + 1)
                self.__raftLog += newEntries

            # Install snapshot
            elif serialized is not None:
                if self.__serializer.setTransmissionData(serialized):
                    self.__loadDumpFile()

            self.__sendNextNodeIdx(nodeAddr)

            self.__raftCommitIndex = min(leaderCommitIndex, self.__getCurrentLogIndex())

        if message['type'] == 'apply_command':
            if 'request_id' in message:
                self._applyCommand(message['command'], (nodeAddr, message['request_id']))
            else:
                self._applyCommand(message['command'], None)

        if message['type'] == 'apply_command_response':
            requestID = message['request_id']
            error = message.get('error', None)
            callback = self.__commandsWaitingReply.pop(requestID, None)
            if callback is not None:
                if error is not None:
                    callback(None, error)
                else:
                    idx = message['log_idx']
                    term = message['log_term']
                    assert idx > self.__raftLastApplied
                    self.__commandsWaitingCommit[idx].append((term, callback))

        if self.__raftState == _RAFT_STATE.CANDIDATE:
            if message['type'] == 'response_vote' and message['term'] == self.__raftCurrentTerm:
                self.__votesCount += 1

                if self.__votesCount > (len(self.__nodes) + 1) / 2:
                    self.__onBecomeLeader()

        if self.__raftState == _RAFT_STATE.LEADER:
            if message['type'] == 'next_node_idx':
                reset = message['reset']
                nextNodeIdx = message['next_node_idx']

                currentNodeIdx = nextNodeIdx - 1
                if reset:
                    self.__raftNextIndex[nodeAddr] = nextNodeIdx
                self.__raftMatchIndex[nodeAddr] = currentNodeIdx

    def __sendNextNodeIdx(self, nodeAddr, reset=False):
        """Tell nodeAddr the next log index we expect (our last index + 1).

        With reset=True the leader receiving this reply will rewind its
        next-index bookkeeping for us before retrying replication.
        """
        reply = {
            'type': 'next_node_idx',
            'next_node_idx': self.__getCurrentLogIndex() + 1,
            'reset': reset,
        }
        self.__send(nodeAddr, reply)

    def __generateRaftTimeout(self):
        """Return a random election timeout between raftMinTimeout and raftMaxTimeout."""
        lowTimeout = self.__conf.raftMinTimeout
        highTimeout = self.__conf.raftMaxTimeout
        # uniform(a, b) computes a + (b - a) * random() -- same distribution.
        return random.uniform(lowTimeout, highTimeout)

    def __bind(self):
        """Open the non-blocking listening socket and register it with the poller.

        Incoming connections are dispatched to __onNewConnection.
        """
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv = self.__socket
        for level, option, value in (
            (socket.SOL_SOCKET, socket.SO_SNDBUF, self.__conf.sendBufferSize),
            (socket.SOL_SOCKET, socket.SO_RCVBUF, self.__conf.recvBufferSize),
            (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
            (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),
        ):
            srv.setsockopt(level, option, value)
        srv.setblocking(0)
        host, port = self.__selfNodeAddr.split(':')
        srv.bind((host, int(port)))
        srv.listen(5)
        self.__poller.subscribe(srv.fileno(),
                                self.__onNewConnection,
                                POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR)

    def __onNewConnection(self, localDescr, event):
        """Accept an incoming connection on the listening socket.

        The accepted socket gets the same buffer/no-delay options as the
        server socket, is wrapped in a Connection, and is parked in
        __unknownConnections until the peer identifies itself.
        """
        if event & POLL_EVENT_TYPE.READ:
            try:
                sock, addr = self.__socket.accept()
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__conf.sendBufferSize)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__conf.recvBufferSize)
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                sock.setblocking(0)
                conn = Connection(socket=sock, timeout=self.__conf.connectionTimeout)
                descr = conn.fileno()
                self.__unknownConnections[descr] = conn
                self.__poller.subscribe(descr,
                                        self.__processUnknownConnections,
                                        POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR)
            except socket.error as e:
                # EAGAIN only means no pending connection on the non-blocking
                # listener; anything else marks the listener as broken.
                if e.errno != socket.errno.EAGAIN:
                    self.__isInitialized = False
                    LOG_WARNING('Error in main socket:' + str(e))

        if event & POLL_EVENT_TYPE.ERROR:
            self.__isInitialized = False
            LOG_WARNING('Error in main socket')

    def __getCurrentLogIndex(self):
        """Global index of the newest entry in the in-memory log.

        Log entries are tuples whose element 1 is the global index.
        """
        lastEntry = self.__raftLog[-1]
        return lastEntry[1]

    def __getCurrentLogTerm(self):
        """Term of the newest entry in the in-memory log.

        Log entries are tuples whose element 2 is the term.
        """
        lastEntry = self.__raftLog[-1]
        return lastEntry[2]

    def __getPrevLogIndexTerm(self, nextNodeIndex):
        """Return (index, term) of the entry just before nextNodeIndex.

        Returns (None, None) when that entry is no longer held in the
        in-memory log (e.g. compacted away).
        """
        prevIndex = nextNodeIndex - 1
        found = self.__getEntries(prevIndex, 1)
        if not found:
            return None, None
        return prevIndex, found[0][2]

    def __getEntries(self, fromIDx, count=None):
        """Return log entries starting at global index fromIDx.

        Yields an empty list when fromIDx is None or precedes the first
        entry still in memory. With count=None the whole tail is returned,
        otherwise at most count entries.
        """
        firstIdx = self.__raftLog[0][1]
        if fromIDx is None or fromIDx < firstIdx:
            return []
        start = fromIDx - firstIdx
        stop = None if count is None else start + count
        return self.__raftLog[start:stop]

    def _isLeader(self):
        """True when this node is currently in the LEADER raft state."""
        return _RAFT_STATE.LEADER == self.__raftState

    def _getLeader(self):
        """Address of the node currently considered the raft leader."""
        return self.__raftLeader

    def _getRaftLogSize(self):
        """Number of entries currently held in the in-memory raft log."""
        return len(self.__raftLog)

    def __deleteEntriesFrom(self, fromIDx):
        """Drop the entry at global index fromIDx and everything after it.

        A no-op when fromIDx precedes the first in-memory entry.
        """
        firstIdx = self.__raftLog[0][1]
        offset = fromIDx - firstIdx
        if offset >= 0:
            self.__raftLog = self.__raftLog[:offset]

    def __deleteEntriesTo(self, toIDx):
        """Drop every entry before global index toIDx (log compaction).

        A no-op when toIDx precedes the first in-memory entry.
        """
        firstIdx = self.__raftLog[0][1]
        offset = toIDx - firstIdx
        if offset >= 0:
            self.__raftLog = self.__raftLog[offset:]

    def __onBecomeLeader(self):
        """Switch to LEADER state and immediately start replicating to peers."""
        self.__raftLeader = self.__selfNodeAddr
        self.__raftState = _RAFT_STATE.LEADER

        # Optimistically assume every follower is already up to date.
        nextIdx = self.__getCurrentLogIndex() + 1
        for peer in self.__nodes:
            peerAddr = peer.getAddress()
            self.__raftNextIndex[peerAddr] = nextIdx
            self.__raftMatchIndex[peerAddr] = 0

        self.__sendAppendEntries()

    def __onLeaderChanged(self):
        """Fail every command still awaiting a reply: the leader changed."""
        # 'requestID' instead of 'id' -- avoids shadowing the builtin.
        for requestID in sorted(self.__commandsWaitingReply):
            self.__commandsWaitingReply[requestID](None, FAIL_REASON.LEADER_CHANGED)
        self.__commandsWaitingReply = {}

    def __sendAppendEntries(self):
        """Leader side of replication: push log entries (or a snapshot) to peers.

        For each connected node, sends batches of entries starting at that
        node's next-index. When the next-index was already compacted out of
        the in-memory log, streams serialized full-dump chunks instead.
        Breaks out once appendEntriesPeriod has elapsed so the event loop is
        not monopolized.
        """
        self.__newAppendEntriesTime = time.time() + self.__conf.appendEntriesPeriod

        startTime = time.time()

        for node in self.__nodes:
            nodeAddr = node.getAddress()

            if not node.isConnected():
                # (sic) spelling matches the serializer's API name.
                self.__serializer.cancelTransmisstion(nodeAddr)
                continue

            # Always send at least one (possibly empty) message as a heartbeat.
            sendSingle = True
            sendingSerialized = False
            nextNodeIndex = self.__raftNextIndex[nodeAddr]

            while nextNodeIndex <= self.__getCurrentLogIndex() or sendSingle or sendingSerialized:
                if nextNodeIndex >= self.__raftLog[0][1]:
                    # The requested entries are still in the in-memory log.
                    prevLogIdx, prevLogTerm = self.__getPrevLogIndexTerm(nextNodeIndex)
                    entries = []
                    if nextNodeIndex <= self.__getCurrentLogIndex():
                        entries = self.__getEntries(nextNodeIndex, self.__conf.appendEntriesBatchSize)
                        self.__raftNextIndex[nodeAddr] = entries[-1][1] + 1

                    message = {
                        'type': 'append_entries',
                        'term': self.__raftCurrentTerm,
                        'commit_index': self.__raftCommitIndex,
                        'entries': entries,
                        'prevLogIdx': prevLogIdx,
                        'prevLogTerm': prevLogTerm,
                    }
                    node.send(message)
                else:
                    # Entries were compacted away -- install a snapshot chunk.
                    transmissionData = self.__serializer.getTransmissionData(nodeAddr)
                    message = {
                        'type': 'append_entries',
                        'term': self.__raftCurrentTerm,
                        'commit_index': self.__raftCommitIndex,
                        'serialized': transmissionData,
                    }
                    node.send(message)
                    if transmissionData is not None:
                        # Element 2 of the chunk flags the final snapshot piece.
                        isLast = transmissionData[2]
                        if isLast:
                            self.__raftNextIndex[nodeAddr] = self.__raftLog[0][1]
                            sendingSerialized = False
                        else:
                            sendingSerialized = True
                    else:
                        sendingSerialized = False

                nextNodeIndex = self.__raftNextIndex[nodeAddr]

                sendSingle = False

                # Time-slice: stop early and resume on the next period.
                delta = time.time() - startTime
                if delta > self.__conf.appendEntriesPeriod:
                    break

    def __send(self, nodeAddr, message):
        """Deliver message to the first known node whose address matches nodeAddr."""
        for candidate in self.__nodes:
            if candidate.getAddress() != nodeAddr:
                continue
            candidate.send(message)
            return

    def __processUnknownConnections(self, descr, event):
        """Handle I/O on a connection whose peer node is not yet identified.

        The first message on an inbound connection is expected to carry the
        peer's node address; once matched against the configured nodes the
        connection is handed over to that node. Connections from unknown
        addresses, failed or disconnected connections are dropped.
        """
        conn = self.__unknownConnections[descr]
        partnerNode = None
        remove = False
        if event & POLL_EVENT_TYPE.READ:
            conn.read()
            nodeAddr = conn.getMessage()
            if nodeAddr is not None:
                for node in self.__nodes:
                    if node.getAddress() == nodeAddr:
                        partnerNode = node
                        break
                else:
                    # for/else: address does not belong to any configured node.
                    remove = True

        if event & POLL_EVENT_TYPE.ERROR:
            remove = True

        if remove or conn.isDisconnected():
            self.__unknownConnections.pop(descr)
            self.__poller.unsubscribe(descr)
            conn.close()
            return

        if partnerNode is not None:
            # Hand the live connection over; the poller subscription is
            # managed by the partner node from here on.
            self.__unknownConnections.pop(descr)
            assert conn.fileno() is not None
            partnerNode.onPartnerConnected(conn)

    def _getSelfNodeAddr(self):
        """This node's own 'host:port' address string."""
        return self.__selfNodeAddr

    def _getConf(self):
        """The configuration object (timeouts, buffer sizes, compaction limits)."""
        return self.__conf

    def _getResolver(self):
        """The resolver instance used by this node."""
        return self.__resolver

    def _getPoller(self):
        """The I/O poller driving socket events for this node."""
        return self.__poller

    def __tryLogCompaction(self):
        """Reap a pending full-dump serialization and maybe start a new one.

        On SUCCESS of a previous serialization, entries up to the serialized
        index are dropped from the in-memory log. A new serialization starts
        when the log exceeds logCompactionMinEntries, logCompactionMinTime
        elapsed since the last dump, or compaction was explicitly forced.
        """
        currTime = time.time()
        serializeState, serializeID = self.__serializer.checkSerializing()

        if serializeState == SERIALIZER_STATE.SUCCESS:
            self.__lastSerializedTime = currTime
            self.__deleteEntriesTo(serializeID)

        if serializeState == SERIALIZER_STATE.FAILED:
            LOG_WARNING("Failed to store full dump")

        if serializeState != SERIALIZER_STATE.NOT_SERIALIZING:
            # A serialization is still in flight -- don't start another.
            return

        if len(self.__raftLog) <= self.__conf.logCompactionMinEntries and \
                currTime - self.__lastSerializedTime <= self.__conf.logCompactionMinTime and\
                not self.__forceLogCompaction:
            return

        self.__forceLogCompaction = False

        # Need the last applied entry plus its predecessor to re-seed the log.
        lastAppliedEntries = self.__getEntries(self.__raftLastApplied - 1, 2)
        if not lastAppliedEntries:
            return

        # (sic) '__properies' matches the attribute name used elsewhere; it
        # names the attributes excluded from the serialized state dict.
        data = dict([(k, self.__dict__[k]) for k in self.__dict__.keys() if k not in self.__properies])
        self.__serializer.serialize((data, lastAppliedEntries[1], lastAppliedEntries[0]), lastAppliedEntries[1][1])

    def __loadDumpFile(self):
        """Restore node state from the serializer's full dump.

        The dump is the tuple (state-dict, last-applied-entry,
        previous-entry) written by __tryLogCompaction: attributes are
        restored onto this instance, the in-memory log is re-seeded with the
        two retained entries, and the applied index is advanced. On failure
        a warning is logged and current state is left untouched.
        """
        try:
            data = self.__serializer.deserialize()
            for k, v in data[0].iteritems():
                self.__dict__[k] = v
            # Log order is (previous entry, last applied entry).
            self.__raftLog = [data[2], data[1]]
            self.__raftLastApplied = data[1][1]
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception so shutdown signals propagate.
            LOG_WARNING('Failed to load full dump')
            LOG_CURRENT_EXCEPTION()
# Beispiel #60
# 0
 def serialize_to_string(self):
     """Serialize this object to a string via the project's Serializer.

     NOTE(review): Serializer is not defined in this chunk -- presumably an
     imported project class; confirm serialize() accepts this object.
     """
     serializer = Serializer()
     return serializer.serialize(self)