def removeObject(self, networkObject):
    """
    Remove the indicated object and any dependent objects from the network and any displays.

    >>> network.removeObject(network.findNeuron('AVAL'))
    """

    if networkObject in self.objects:
        # Determine all of the objects that will need to be removed
        objectsToRemove = set([networkObject])
        objectsToInspect = [networkObject]
        while objectsToInspect:
            objectToInspect = objectsToInspect.pop(0)
            dependentObjects = set(objectToInspect.dependentObjects())
            objectsToInspect += list(dependentObjects.difference(objectsToRemove))
            objectsToRemove = objectsToRemove.union(dependentObjects)

        # Remove all of the objects.
        for objectToRemove in objectsToRemove:
            objectToRemove.disconnectFromNetwork()
            self.objects.remove(objectToRemove)
            del self.idDict[objectToRemove.networkId]

            # Keep the NetworkX graph in sync.
            if objectToRemove.networkId in self.graph:
                self.graph.remove_node(objectToRemove.networkId)

        # Let anyone who cares know that the network was changed.
        dispatcher.send('deletion', self, affectedObjects = objectsToRemove)
def _run(self):
    for event in self.schedule:
        if self.alive:
            if event['time'] not in [ON_LAST_EVENT_COMPLETION, ORIGIN]:
                tokens = self.regex.match(event['time'])
                if tokens:
                    tokens_dict = tokens.groupdict()
                    parameters = {}
                    for key, value in tokens_dict.items():
                        if value:
                            parameters[key] = int(value)
                        else:
                            parameters[key] = 0
                    sleep_s = datetime.timedelta(**parameters)
                    while self.alive and sleep_s.total_seconds() > 0:
                        sleep_step = min(self.step, sleep_s)
                        time.sleep(sleep_step.total_seconds())
                        sleep_s -= sleep_step
                        self.remaining -= sleep_step
                else:
                    self.stop()
                    break
            if 'parameters' in event.keys():
                dispatcher.send(self.actions[event['action']], sender=self, **event['parameters'])
            else:
                dispatcher.send(self.actions[event['action']], sender=self)
            self.last_time = datetime.datetime.now()
        else:
            # terminate the thread
            break
    # end of the schedule
    self.remaining = datetime.timedelta(seconds=0)
    if self.alive:
        self.alive = False
        self.on_complete_callback()
def __protoAppend(self, x, appendFunc, popfunc):
    was_empty = self.__len__() == 0
    appendFunc(x)
    not_empty_now = self.__len__() != 0
    if was_empty and not_empty_now:
        dispatcher.send(signal=self.NoLongerEmptySignal, sender=self)
    self.__shrinkIfTooBig(popfunc)
def newSlave(self, ip, signal, sender):
    if not self.__driver.addSlave(ip):
        self.replyMsg = 'Slave: %s added' % (ip)
    else:
        self.replyMsg = 'Slave: %s already exists' % (ip)
        dispatcher.send('ERROR', 'dwgetd', 'Slave: %s already exists' % (ip))
def main():
    logger = Logger('dwgetds', 2000, '', True)
    slaveMgr = slaveManager()
    logger.addStuff(slaveMgr)
    tThread = talkerThread(slaveMgr, '')
    tThread.start()
    dispatcher.send('DEBUG', 'dwgetds', 'Started dwget slave daemon.')
def __protoPop(self, popFunc):
    was_not_empty = self.__len__() != 0
    return_value = popFunc()
    is_empty_now = self.__len__() == 0
    if was_not_empty and is_empty_now:
        dispatcher.send(signal=self.EmptySignal, sender=self)
    return return_value
def check_ip():
    """
    Before every request, check if the IP address is allowed.
    """
    if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
        dispatcher.send("[!] %s on the blacklist/not on the whitelist requested resource" % (request.remote_addr), sender="listeners/http")
        return make_response(self.default_response(), 200)
def delSlave(self, ip, signal, sender):
    if not self.__driver.removeSlave(ip):
        self.replyMsg = 'Slave %s deleted from slave list' % (ip)
    else:
        self.replyMsg = 'No such slave: %s' % (ip)
        dispatcher.send('ERROR', 'dwgetd', 'Can\'t delete: there is no slave with ip: %s' % (ip))
def __changeWrapper(self, function, *args, **kwargs):
    before_length = self.__len__()
    return_value = function(*args, **kwargs)
    after_length = self.__len__()
    if before_length != after_length:
        dispatcher.send(signal=self.ChangedSignal, sender=self)
    return return_value
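# Usage sketch (not part of the original module): a minimal, self-contained
# illustration of how a consumer might listen for the Empty / NoLongerEmpty /
# Changed signals emitted by the wrappers above. `SignallingList` and the
# handler are hypothetical stand-ins; only the pydispatch calls are real API.
from pydispatch import dispatcher

class SignallingList(object):
    EmptySignal = 'list-empty'
    NoLongerEmptySignal = 'list-no-longer-empty'
    ChangedSignal = 'list-changed'

    def __init__(self):
        self._items = []

    def append(self, x):
        # same pattern as __protoAppend: compare emptiness before and after
        was_empty = len(self._items) == 0
        self._items.append(x)
        if was_empty:
            dispatcher.send(signal=self.NoLongerEmptySignal, sender=self)
        dispatcher.send(signal=self.ChangedSignal, sender=self)

def on_no_longer_empty(sender):
    print('first item arrived in %r' % (sender,))

lst = SignallingList()
dispatcher.connect(on_no_longer_empty, signal=SignallingList.NoLongerEmptySignal, sender=lst)
lst.append(1)  # triggers on_no_longer_empty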
def notification(node_id, network=None):
    """Fire a notification."""
    dispatcher.send(
        MockNetwork.SIGNAL_NOTIFICATION,
        args={'nodeId': node_id}, network=network
    )
def BufferConfigDialog(self):
    """Displays a dialog in which you can set buffer-specific properties."""
    if not hasattr(self.session, 'buffers') or len(self.session.buffers) == 0:
        return output.speak(_("No buffers in this session."), 1)
    frame = self.session.frame #FIXME!
    try:
        if self.session.kind == "twitter":
            new = gui.configuration.twitter.BufferConfigDialog(self.session, frame, wx.ID_ANY, title=_("%s Buffer Configuration") % self.session.name)
        elif self.session.kind == "solona":
            new = core.gui.solona.buffers.BufferConfigDialog(self.session, frame, wx.ID_ANY, title=_("%s Buffer Configuration") % self.session.name)
        elif self.session.kind == "facebook":
            new = core.gui.facebook.buffers.BufferConfigDialog(self.session, frame, wx.ID_ANY, title=_("%s Buffer Configuration") % self.session.name)
        elif self.session.kind == "bing":
            new = core.gui.bing.buffers.BufferConfigDialog(self.session, frame, wx.ID_ANY, title=_("%s Buffer Configuration") % self.session.name)
        elif self.session.kind == "rss":
            new = core.gui.rss.buffers.BufferConfigDialog(self.session, frame, wx.ID_ANY, title=_("%s Buffer Configuration") % self.session.name)
        else:
            # undefined name: deliberately lands in the except clause below
            new = ThrowException
    except:
        return logging.exception("%s: Failure in buffer configuration in session" % self.session.name)
    new.SetDefaultValues()
    if new.ShowModal() == wx.ID_OK:
        new.SetNewConfigValues()
        output.speak(_("Configuration saved."), 1)
        dispatcher.send(sender=self.session, signal=signals.config_saved)
    else:
        output.speak(_("Configuration canceled."), True)
    self.session.frame.Show(False)
    new.Destroy()
def create(self, properties, expand=None, password_format=None, registration_workflow_enabled=None, **params):
    """If password_format is specified, the account will be created using an
    existing password hash.

    If registration_workflow_enabled is specified (and set to false), no
    workflow emails will be sent when this account is created.

    http://docs.stormpath.com/python/product-guide/#create-an-account-with-an-existing-password-hash
    """
    data, params = self._prepare_for_create(properties, expand, **params)

    create_path = self._get_create_path()
    if password_format:
        create_path += '?passwordFormat=' + password_format
    if registration_workflow_enabled is False:
        # use '&' if the query string was already started by passwordFormat
        if 'passwordFormat' in create_path:
            create_path += '&registrationWorkflowEnabled=false'
        else:
            create_path += '?registrationWorkflowEnabled=false'

    created = self.resource_class(
        self._client,
        properties=self._store.create_resource(
            create_path,
            data,
            params=params
        )
    )

    dispatcher.send(signal=SIGNAL_RESOURCE_CREATED, sender=self.resource_class, data=data, params=params)
    return created
def configure(filename):
    """
    Configures the gateway. A gateway can be re-configured during runtime.
    This will issue a 'configuration-update' event carrying the new
    configuration as named parameters.

    The following configuration parameters are valid:

        name       The gateway process name
        workdir    Path to the working directory
        logdir     Path to the logging directory
        daemonize  If true, start as a UNIX daemon
    """
    global logger, _log_file_handler

    # Execute config file
    if filename is not None:
        try:
            with open(filename) as f:
                code = compile(f.read(), filename, 'exec')
                exec(code, globals())
        except FileNotFoundError as ex:
            logger.error("Cannot find file: {}".format(ex.filename))

    # Adding a file _handler
    logger.handlers.clear()
    logfile = os.path.join(logging_directory, _name + '.log')
    _log_file_handler = logging.FileHandler(logfile)
    logger.addHandler(_log_file_handler)

    # Inform others
    logger.info("{} configured from {}".format(_name, filename))
    dispatcher.send(CONFIG_UPDATE_SIGNAL, _name)
def workdir_prepare_checkout(revision, folders):
    """
    Returns an absolute path to a directory containing a git checkout of
    the given treeish revision.

    Registers a hook with atexit to delete the returned directory.

    Dispatches signals to pre_workdir_checkout and post_workdir_checkout.
    """
    dispatcher.send(
        signal=signals.pre_workdir_prepare_checkout,
        sender=workdir_prepare_checkout,
        revision=revision,
    )
    workdir = tempfile.mkdtemp()
    atexit.register(shutil.rmtree, workdir, True)
    with lcd(get_project_root_directory()):
        local("git archive %s %s | tar -x -C %s" % (revision, " ".join(folders), workdir))
    dispatcher.send(
        signal=signals.post_workdir_prepare_checkout,
        sender=workdir_prepare_checkout,
        revision=revision,
        workdir=workdir,
    )
    return workdir
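# Hook sketch (an assumption, not from the source): deployment code could
# subscribe to the pre/post signals dispatched above to log each checkout.
# `log_checkout` is hypothetical; `signals` is the same module already
# referenced by workdir_prepare_checkout.
from pydispatch import dispatcher

def log_checkout(revision, workdir=None, **kwargs):
    # workdir is only present on the post signal
    print('checkout of %s -> %s' % (revision, workdir or '(pending)'))

dispatcher.connect(log_checkout, signal=signals.pre_workdir_prepare_checkout)
dispatcher.connect(log_checkout, signal=signals.post_workdir_prepare_checkout)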
def register_buffer(self, name, type, set_focus=True, announce=True, prelaunch_message="", postlaunch_message="", prevent_duplicates=True, *args, **kwargs):
    """Registers buffer in session buffer list."""
    logging.debug("%s: Registering %s" % (self, name))
    if self.get_buffer_by_name(name) is not None and prevent_duplicates:
        logging.debug("Buffer %s already exists." % name)
        num = self.get_buffer_index(self.get_buffer_by_name(name))
        if set_focus:
            self.set_buffer(self.get_navigation_index(buf_index=num))
        if announce:
            self.announce_buffer(interrupt=False)
        return num
    prelaunch_message and output.speak(prelaunch_message, True)
    try:
        new = type(name=name, session=self, *args, **kwargs)
    except:
        logging.exception("Unable to initialize an instance of buffer.%s" % type)
        return None
    if self.buffer_exists(new):
        logging.warning("%s: Prevented duplicate buffer registration of buffer %s." % (self.name, name))
        return None
    if new is None:
        # Something strange is going on.
        logging.debug("Attempted new buffer creation but got back a None object. Aborting.")
        return None
    dispatcher.send(sender=self, signal=signals.buffer_created, buffer=new)
    num = self.add_buffer(new)
    if set_focus and num in self.nav_order:
        self.set_buffer(self.get_navigation_index(buf_index=num))
    postlaunch_message and output.speak(postlaunch_message, True)
    if announce:
        self.announce_buffer(interrupt=False)
    return num
def __init__(self):
    message = Message(SuccessParagraph('IF2 was initialised'))
    dispatcher.send(
        signal=DYNAMIC_MESSAGE_SIGNAL, sender=self, message=message)
    self.count = 0
def loadLines(self):
    if self.lineType == 'Limit':
        _ret = QtGui.QFileDialog.getOpenFileName(self, "Open TLM3", "", "TLM3 (*.Tlm3)")
    else:
        _ret = QtGui.QFileDialog.getOpenFileName(self, "Open TKR3", "", "TKR3 (*.Tkr3)")
    if _ret == "":
        return
    self.tkrSource = _ret
    self.ui.lineEdit.setText(self.tkrSource)
    _lineSource = Tpl3Lines(self.tkrSource, 0)
    if self.lineType == "Corr":
        _ret = _lineSource.readCorrIDs()
    if self.lineType == "Limit":
        _ret = _lineSource.readLimitIDs()
    if _ret == False:
        return
    for _id in _lineSource.lineIDs:
        _lineSource.line_id = _id
        _ret = _lineSource.read()
        if _ret == False:
            return
        _lineDestination = copy.copy(_lineSource)
        _lineDestination.line_id = 0
        self.ticket.data = _lineDestination
        dispatcher.send(self.signals.WB_GET_LINE_EXISTS, dispatcher.Anonymous, self.ticket)
        _exists = 'yes'
        if _lineDestination.line_id == 0:
            _exists = 'no'
        self.tm.addData([_lineSource.line_id, _lineSource.type, _lineSource.title,
                         _lineSource.version, _lineSource.date, _exists])
def _on_move(self, uuid, row, col):
    """Handles the AiMove response."""
    self.log.debug('_on_move: UUID = {uuid}, self.uuid={uuid2}', uuid=uuid, uuid2=self.uuid)
    self.log.debug('_on_move: type(uuid) = {t}, type(self.uuid) = {t2}', t=type(uuid), t2=type(self.uuid))
    if str(self.uuid) != str(uuid):
        return
    i = int(row)
    j = int(col)
    self.log.debug('_on_move: game {uuid} row {row} col {col}', uuid=uuid, row=i, col=j)
    if (row == -1) or (col == -1):
        self.log.debug('It seems that we are done. Game was over!')
        # stops the AI process
        self._sendQuitCmd()
        return
    dispatcher.send(Events.aiResponse, uuid=self.uuid, row=i, col=j)
def add_agent_task(self, sessionID, taskName, task=""):
    """
    Add a task to the specified agent's buffer.
    """
    agentName = sessionID

    # see if we were passed a name instead of an ID
    nameid = self.get_agent_id(sessionID)
    if nameid:
        sessionID = nameid

    if sessionID not in self.agents:
        print helpers.color("[!] Agent " + str(agentName) + " not active.")
    else:
        if sessionID:
            dispatcher.send("[*] Tasked " + str(sessionID) + " to run " + str(taskName), sender="Agents")
            self.agents[sessionID][1].append([taskName, task])

            # write out the last tasked script to "LastTask.ps1" if in debug mode
            if self.args and self.args.debug:
                f = open(self.installPath + '/LastTask.ps1', 'w')
                f.write(task)
                f.close()

            # report the agent tasking in the reporting database
            cur = self.conn.cursor()
            cur.execute("INSERT INTO reporting (name,event_type,message,time_stamp) VALUES (?,?,?,?)",
                        (sessionID, "task", taskName + " - " + task[0:30], helpers.get_datetime()))
            cur.close()
def setup_folders():
    if not test_token(token['access_token']):
        raise ValueError("Could not set up folders, access token invalid")

    base_object = s.get("%s/drive/root:/%s" % (base_url, base_folder))
    if base_object.status_code != 200:
        print helpers.color("[*] Creating %s folder" % base_folder)
        params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': base_folder}
        base_object = s.post("%s/drive/items/root/children" % base_url, json=params)
    else:
        message = "[*] {} folder already exists".format(base_folder)
        signal = json.dumps({
            'print': True,
            'message': message
        })
        dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))

    for item in [staging_folder, taskings_folder, results_folder]:
        item_object = s.get("%s/drive/root:/%s/%s" % (base_url, base_folder, item))
        if item_object.status_code != 200:
            print helpers.color("[*] Creating %s/%s folder" % (base_folder, item))
            params = {'@microsoft.graph.conflictBehavior': 'rename', 'folder': {}, 'name': item}
            item_object = s.post("%s/drive/items/%s/children" % (base_url, base_object.json()['id']), json=params)
        else:
            message = "[*] {}/{} already exists".format(base_folder, item)
            signal = json.dumps({
                'print': True,
                'message': message
            })
            dispatcher.send(signal, sender="listeners/onedrive/{}".format(listener_name))
def __init__(self, name, style, pipe=None, node=None):
    super().__init__(name, style, pipe, node)
    self._branch = node
    dispatcher.send(self._CONNECTOR_CREATED, sender=self, node=node)
def send_command(self, job):
    obj = job[1]
    command_sent = job[3]
    self.set_status(obj, "Connecting")
    try:
        telnet_session = self.establish_telnet(obj.ip_address)
        telnet_session.read_until('>', int(job[2]))
        #self.get_connection(obj, telnet_session, int(job[2]))
        command = command_sent + " \r"
        #print command
        telnet_session.write(str(command))
        telnet_session.read_until('Sending', int(job[2]))
        result_raw = telnet_session.read_until('>', int(job[2]))
        #print result_raw.split()
        if result_raw.split()[0] != 'command:':
            raise Exception('Command not sent')
        else:
            dispatcher.send(signal="send_command result", sender='Sending ' + str(result_raw)[:-1])
        telnet_session.close()
        self.set_status(obj, "Success")
        self.notify_send_command_window(obj)
    except Exception as error:
        self.error_processing(obj, error)
def create(self, properties, expand=None, **params):
    resource_attrs = self.resource_class.get_resource_attributes()
    data = {}
    for k, v in properties.items():
        if isinstance(v, dict) and k in resource_attrs:
            v = self._wrap_resource_attr(resource_attrs[k], v)
        if k in self.resource_class.writable_attrs:
            data[self.to_camel_case(k)] = self._sanitize_property(v)

    params = {self.to_camel_case(k): v for k, v in params.items()}
    if expand:
        params.update({'expand': expand.get_params()})

    created = self.resource_class(
        self._client,
        properties=self._store.create_resource(
            self._get_create_path(),
            data,
            params=params
        )
    )

    dispatcher.send(
        signal=SIGNAL_RESOURCE_CREATED,
        sender=self.resource_class,
        data=data,
        params=params)
    return created
def node_changed(node):
    """Fire a node changed."""
    dispatcher.send(
        MockNetwork.SIGNAL_NODE,
        node=node, network=node._network
    )
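# Test-side sketch (an assumption): how a test might observe the two mock
# helpers above. `on_notification` is hypothetical; the SIGNAL_* constants
# come from the surrounding MockNetwork fixture.
from pydispatch import dispatcher

def on_notification(args, network):
    print('notification for node %s' % args['nodeId'])

dispatcher.connect(on_notification, signal=MockNetwork.SIGNAL_NOTIFICATION)
notification(node_id=3)  # prints: notification for node 3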
def poll_for_changes(self):
    while not self._stop.isSet():
        errors = {}
        responses = {}
        for ci_server in self.ci_servers:
            name = ci_server['name']
            url = ci_server['url']
            username = ci_server.get('username')
            token = ci_server.get('token')
            auth = None
            if username is not None and token is not None:
                auth = requests.auth.HTTPBasicAuth(username, token)
            try:
                response = requests.get('{}/cc.xml'.format(url), auth=auth)
                if response.status_code == 200:
                    responses[name] = response
                else:
                    raise Exception('ci server {} returned {}: {}'.format(url, response, response.text))
            except Exception as ex:
                logger.warning(ex)
                errors[name] = ex
        dispatcher.send(signal="CI_UPDATE", sender=self, responses=responses, errors=errors)
        time.sleep(self._poll_rate)
def on_ok(self, sender):
    """
    This callback is called when one task reaches status S_OK.
    It executes on_all_ok when all tasks in self have reached S_OK.
    """
    logger.debug("in on_ok with sender %s" % sender)

    if self.all_ok:
        if self.finalized:
            return AttrDict(returncode=0, message="Workflow has been already finalized")
        else:
            # Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single workflow)
            self._finalized = True
            try:
                results = AttrDict(**self.on_all_ok())
            except:
                self._finalized = False
                raise

            # Signal to possible observers that the `Workflow` reached S_OK
            logger.info("Workflow %s is finalized and broadcasts signal S_OK" % str(self))
            logger.info("Workflow %s status = %s" % (str(self), self.status))

            if self._finalized:
                dispatcher.send(signal=self.S_OK, sender=self)

            return results

    return AttrDict(returncode=1, message="Not all tasks are OK!")
def run(self):
    self._initializeSession()
    ctx = ServiceContext()
    queue = ctx.getQueueService()
    while True:
        event = queue.get()
        dispatcher.send(signal=event.eventName, sender=event, session=self.session)
def save_module_file(self, sessionID, path, data):
    """
    Save a module output file to the appropriate path.
    """
    # see if we were passed a name instead of an ID
    nameid = self.get_agent_name(sessionID)
    if nameid:
        sessionID = nameid

    parts = path.split("/")

    # construct the appropriate save path
    savePath = self.installPath + "/downloads/" + str(sessionID) + "/" + "/".join(parts[0:-1])
    filename = parts[-1]

    # make the recursive directory structure if it doesn't already exist
    if not os.path.exists(savePath):
        os.makedirs(savePath)

    # save the file out
    f = open(savePath + "/" + filename, 'w')
    f.write(data)
    f.close()

    # notify everyone that the file was downloaded
    dispatcher.send("[+] File " + path + " from " + str(sessionID) + " saved", sender="Agents")

    return "/downloads/" + str(sessionID) + "/" + "/".join(parts[0:-1]) + "/" + filename
def on_ok(self, event):
    """send fake DHCP"""
    ip = self.fake_ip_txt.GetValue()
    hostname = 'Fake DHCP host'
    mac_address = self.mac_address_txt.GetValue()
    dispatcher.send(signal="Incoming Packet", sender=(hostname, mac_address, ip))
def shutdown_listener(self, listenerId):
    """
    Shut down the server associated with a listenerId/name, but DON'T
    delete it from the database.

    If the listener is a pivot, task the associated agent to kill the redirector.
    """
    try:
        # get the listener information
        [ID, name, host, port, cert_path, staging_key, default_delay, default_jitter,
         default_profile, kill_date, working_hours, listener_type, redirect_target,
         default_lost_limit] = self.get_listener(listenerId)

        listenerId = int(ID)

        if listenerId in self.listeners:
            # can't shut down hop, foreign, or meter listeners
            if listener_type == "hop" or listener_type == "foreign" or listener_type == "meter":
                pass
            # if this listener is a pivot, task the associated agent to shut it down
            elif listener_type == "pivot":
                print helpers.color("[*] Tasking pivot listener to shut down on agent " + name)
                killCmd = "netsh interface portproxy reset"
                self.agents.add_agent_task(name, "TASK_SHELL", killCmd)
            else:
                # otherwise get the server object associated with this listener and shut it down
                self.listeners[listenerId].shutdown()

            # remove the listener object from the internal cache
            del self.listeners[listenerId]

    except Exception as e:
        dispatcher.send("[!] Error shutting down listener " + str(listenerId), sender="Listeners")
def run(self):
    """
    Runs an entire Training epoch on the data provided by the DataLoader
    passed as argument during initialization. PyDispatch events will be
    generated as a result of the computation so that model saving, summaries
    and logs can be generated via associated hooks.

    :return: None
    """
    logging.info('INFO: Training epoch {}'.format(self.epoch))

    self.model.train()

    with self.epoch_aggregator as ea:
        for i, batch in enumerate(self.data_loader):
            if self.gpu:
                for key in batch.keys():
                    if isinstance(batch[key], Tensor):
                        batch[key] = batch[key].cuda()

            logging.debug('DEBUG: Training epoch {}, batch {}'.format(self.epoch, i))

            output_dictionary = self.get_output_dictionary(batch)

            dispatcher.send(
                message=output_dictionary,
                signal=EISEN_END_BATCH_EVENT,
                sender=self.id
            )

            ea(output_dictionary)

    dispatcher.send(
        message=ea.epoch_data,
        signal=EISEN_END_EPOCH_EVENT,
        sender=self.id
    )

    self.epoch += 1
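# Hook sketch (an assumption): hooks attach to these events via pydispatch,
# so a minimal logging hook for the batch event dispatched above could look
# like this. `logging_hook` is hypothetical; EISEN_END_BATCH_EVENT is the
# constant used in run() above.
from pydispatch import dispatcher

def logging_hook(message, sender):
    # message is the per-batch output dictionary, sender the workflow id
    logging.debug('batch output from {}: keys={}'.format(sender, sorted(message.keys())))

dispatcher.connect(logging_hook, signal=EISEN_END_BATCH_EVENT)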
def _forward(self, sender, signal, destination, destinationField, event=None, value=None, **arguments):
    """Do the low-level forwarding of the value to a target field"""
    if event is None:
        from vrml import event as eventmodule
        event = eventmodule.Event()
    signal, sourceField = signal
    if signal == 'del':
        value = sourceField.fget(sender)
    if destination and destinationField:
        destinationField = protofunctions.getField(destination, destinationField)
        if event and hasattr(event, "visited"):
            if event.visited((destination, destinationField),):
                ### Short-circuit before a cycle is created...
                return
            event.visited((destination, destinationField), 1)
        if isinstance(destinationField, field.Field):
            try:
                value = destinationField.fset(destination, value, notify=0)
            except (ValueError, TypeError):
                traceback.print_exc()
        else:
            try:
                value = destinationField.__set__(destination, value)
            except (ValueError, TypeError):
                traceback.print_exc()
        dispatcher.send(
            signal=('route', destinationField),
            sender=destination,
            value=value,
            event=event,
        )
def stage_update(self):
    if self.finish_wave and len(self.game_state.enemies) == 0:
        self.finish_wave = False
        self.start_stage_time = datetime.datetime.now()
    pass_time = datetime.datetime.now() - self.start_stage_time
    if pass_time > datetime.timedelta(seconds=6) and len(self.game_state.enemies) == 0:
        if len(self.stage_data.road_map) == 0:
            logger.info("Game is Over")
            dispatcher.send(signal=SignalMapper.SCREEN_TYPE_CHANGE, event=ScreenType.MENU)
            return
        current_enemies = self.stage_data.road_map.pop(0)
        for enemy_dict in current_enemies:
            for key in enemy_dict:
                # spawn `key` copies of this enemy type
                for num in range(int(key)):
                    self.add_enemy(enemy_dict[key])
        self.finish_wave = True
def _init_io():
    dispatcher.connect(_handle_io_event, signal=Constant.SIGNAL_GPIO, sender=dispatcher.Any)
    # get list of input gpio ports and communicate them to gpio modules for proper port setup as "IN"
    port_list = []
    local_sensors = m.IOSensor.find({m.IOSensor.host_name: Constant.HOST_NAME})
    for sensor in local_sensors:
        gpio_pin = m.GpioPin()
        gpio_pin.host_name = Constant.HOST_NAME
        gpio_pin.contact_type = sensor.sensor_type
        if sensor.relay_type == Constant.GPIO_PIN_TYPE_PI_FACE_SPI:
            gpio_pin.board_index, gpio_pin.pin_direction, gpio_pin.pin_index_bcm = io_common.decode_piface_pin(
                sensor.io_code)
        else:
            gpio_pin.pin_index_bcm = int(sensor.io_code)
        gpio_pin.pin_type = sensor.relay_type
        gpio_pin.pin_code = sensor.io_code
        port_list.append(gpio_pin)
    L.l.info('Init {} IO sensor input ports'.format(len(port_list)))
    dispatcher.send(signal=Constant.SIGNAL_GPIO_INPUT_PORT_LIST, gpio_pin_list=port_list)
def received_batch_transfer(
        self,
        tx_hash: str,
        sender: str,
        amount: int,
        closure_time: int) -> None:
    expected = Income.select().where(
        Income.payer_address == sender,
        Income.accepted_ts > 0,
        Income.accepted_ts <= closure_time,
        Income.transaction.is_null(),
        Income.settled_ts.is_null())

    expected_value = sum([e.value_expected for e in expected])
    if expected_value == 0:
        # Probably already handled event
        return

    if expected_value != amount:
        logger.warning(
            'Batch transfer amount does not match, expected %r, got %r',
            expected_value / denoms.ether,
            amount / denoms.ether)

    amount_left = amount
    for e in expected:
        received = min(amount_left, e.value_expected)
        e.value_received += received
        amount_left -= received
        e.transaction = tx_hash[2:]
        e.save()

        if e.value_expected == 0:
            dispatcher.send(
                signal='golem.income',
                event='confirmed',
                node_id=e.sender_node,
            )
async def send_async(self, signal, *args, **kwargs) -> List[Tuple]:
    """
    Use when all receivers are coroutines.

    :param signal:
    :param kwargs:
    :return: list of tuple pairs [(receiver, response), ... ]
    """
    async def _process_data(data_: List[Tuple]):
        ret = await asyncio.gather(*map(lambda x: x[1], data_))
        return list(zip(map(lambda x: x[0], data_), ret))

    data = dispatcher.send(signal, self.sender, *args, **kwargs)
    return await _process_data(data)
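# Usage sketch (an assumption): send_async gathers whatever the connected
# coroutine receivers return. `bus` stands in for an instance of the class
# defining send_async above; only the pydispatch/asyncio calls are real API.
import asyncio
from pydispatch import dispatcher

async def on_ping(value):
    await asyncio.sleep(0)  # pretend to do async work
    return value * 2

async def demo(bus):
    dispatcher.connect(on_ping, signal='ping')
    results = await bus.send_async('ping', value=21)
    print(results)  # [(on_ping, 42)]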
def OnGo(self, event):
    self.gobutton.Disable()
    if self.modebox.GetSelection() == 0:
        offset = int(self.offset.GetValue(), 16)
        data = self.readfpicker.GetPath()
        if self.parent.ecuinfo["state"] != ECUSTATE.READ:
            self.bootwait = True
            self.statusbar.SetStatusText("Turn off ECU!", 0)
        dispatcher.send(signal="ReadPanel", sender=self, data=data, offset=offset)
    else:
        if self.htfoffset is not None:
            offset = int(self.htfoffset, 16)
        else:
            offset = int(self.offset.GetValue(), 16)
        self.gobutton.Disable()
        dispatcher.send(signal="WritePanel", sender=self, data=self.byts, offset=offset)
def update(self, notif):
    # update state
    StateElem.update(self)
    if len(self.data) == 0:
        self.data.append({})
    self.data[0]['isDAGroot'] = notif.isDAGroot
    self.data[0]['isBridge'] = notif.isBridge
    if 'my16bID' not in self.data[0]:
        self.data[0]['my16bID'] = typeAddr.typeAddr()
    self.data[0]['my16bID'].update(notif.my16bID_type, notif.my16bID_bodyH, notif.my16bID_bodyL)
    if 'my64bID' not in self.data[0]:
        self.data[0]['my64bID'] = typeAddr.typeAddr()
    self.data[0]['my64bID'].update(notif.my64bID_type, notif.my64bID_bodyH, notif.my64bID_bodyL)
    if 'myPANID' not in self.data[0]:
        self.data[0]['myPANID'] = typeAddr.typeAddr()
    self.data[0]['myPANID'].update(notif.myPANID_type, notif.myPANID_bodyH, notif.myPANID_bodyL)
    if 'myPrefix' not in self.data[0]:
        self.data[0]['myPrefix'] = typeAddr.typeAddr()
    self.data[0]['myPrefix'].update(notif.myPrefix_type, notif.myPrefix_bodyH, notif.myPrefix_bodyL)

    # announce information about the DAG root to the eventBus
    if self.data[0]['isDAGroot'] == 1:
        # dispatch
        dispatcher.send(
            signal='infoDagRoot',
            sender='StateIdManager',
            data={
                'ip': self.moteConnector.moteProbeIp,
                'tcpPort': self.moteConnector.moteProbeTcpPort,
                'eui64': self.data[0]['my64bID'].addr,
            },
        )
def parse_result_packet(packet, offset=0):
    """
    Parse a result packet-

    [2 bytes] - type
    [2 bytes] - total # of packets
    [2 bytes] - packet #
    [2 bytes] - task/result ID
    [4 bytes] - length
    [X...]    - result data

    +------+--------------------+----------+---------+--------+-----------+
    | Type | total # of packets | packet # | task ID | Length | task data |
    +------+--------------------+----------+---------+--------+-----------+
    |  2   |         2          |    2     |    2    |   4    | <Length>  |
    +------+--------------------+----------+---------+--------+-----------+

    Returns a tuple with (responseName, totalPackets, packetNum, taskID, length, data, remainingData)
    """
    try:
        responseID = struct.unpack('=H', packet[0 + offset:2 + offset])[0]
        totalPacket = struct.unpack('=H', packet[2 + offset:4 + offset])[0]
        packetNum = struct.unpack('=H', packet[4 + offset:6 + offset])[0]
        taskID = struct.unpack('=H', packet[6 + offset:8 + offset])[0]
        length = struct.unpack('=L', packet[8 + offset:12 + offset])[0]

        # struct.unpack returns an int, so compare against 0, not '0'
        if length != 0:
            data = base64.b64decode(packet[12 + offset:12 + offset + length])
        else:
            data = None
        remainingData = packet[12 + offset + length:]

        return (PACKET_IDS[responseID], totalPacket, packetNum, taskID, length, data, remainingData)
    except Exception as e:
        dispatcher.send("[*] parse_result_packet(): exception: %s" % (e), sender='Packets')
        return (None, None, None, None, None, None, None)
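# Round-trip sketch mirroring the layout documented above: the length field
# counts the base64-encoded bytes, since parse_result_packet slices the raw
# packet before decoding. The response type 0 and task ID 42 are illustrative
# and assume PACKET_IDS has an entry for 0.
import base64
import struct

payload = base64.b64encode(b"hello")  # 8 bytes: "aGVsbG8="
packet = struct.pack('=HHHHL', 0, 1, 1, 42, len(payload)) + payload
# parse_result_packet(packet) -> (PACKET_IDS[0], 1, 1, 42, 8, "hello", "")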
def agent_rename(old_name, new_name):
    """
    Helper function for reporting agent name changes.

    old_name - agent's old name
    new_name - what the agent is being renamed to
    """
    # make sure to include new_name in there so it will persist if the agent
    # is renamed again - that way we can still trace the trail back if needed
    message = "[*] Agent {} has been renamed to {}".format(old_name, new_name)
    signal = json.dumps({
        'print': False,
        'message': message,
        'old_name': old_name,
        'new_name': new_name,
        'event_type': 'rename'
    })
    # signal twice, once for each name (that way, if you search by sender,
    # the last thing in the old agent and the first thing in the new is that
    # it has been renamed)
    dispatcher.send(signal, sender="agents/{}".format(old_name))
    dispatcher.send(signal, sender="agents/{}".format(new_name))
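# Subscriber sketch (an assumption): these events use the JSON string itself
# as the pydispatch signal, so a generic monitor must listen on
# dispatcher.Any and decode whatever arrives. `log_event` is hypothetical.
from pydispatch import dispatcher

def log_event(signal, sender):
    event = json.loads(signal)
    print("{}: {}".format(sender, event['message']))

dispatcher.connect(log_event, signal=dispatcher.Any, sender=dispatcher.Any)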
def run(self):
    while True:
        self.threshold -= self.descrate
        rawdata = self.queue.get()
        data = bpm.applylowpass(rawdata, self.filter['num'], self.filter['denom'])
        beat = bpm.beatdetection(data, self.threshold)
        if beat != False:
            beattime = time.clock()
            self.beats.append(beattime - self.timelast)
            self.timelast = beattime
            beatdata = [self.beats]
            self.message_peers(beatdata)
            self.threshold = beat
            LOGGER.info('BEAT:' + str(self.threshold))
            dispatcher.send(signal='beats', sender=self.channel_id, data=True)
        else:
            dispatcher.send(signal='beats', sender=self.channel_id, data=False)
def print_msg(class_name, method, message, level='DEBUG'):
    if level.lower() != 'error' and Commons.quiet:
        return
    log_level = '[' + level + ']'
    log_message = '{:7s} {:11s} {:35s} {!s:s}'.format(log_level, class_name, method, message)
    try:
        print(log_message)
        Logger(log_message)
    except:
        print(log_message.encode('utf-8'))
    if level == 'ERROR':
        SIGNAL = 'publish-error-signal'
        sender = {}
        new_message = ''.join(str(v) for v in message)
        dispatcher.send(signal=SIGNAL, sender=sender, message=new_message,
                        class_name=class_name, method_name=method)
def __init__(self, name, candles):
    """Initializes a new candles object

    :param name: channel name (i.e. 'candles_' + symbol)
    :param candles: a candles snapshot received from the exchange
    :raises WSException

    Keeps candles in a deque with size limited to MAX_CANDLES elements
    (just a safety).
    """
    super(BitfinexCandles, self).__init__()
    self.name = name
    try:
        for candle in candles:
            # make all values float except for the timestamp.
            # Bitfinex will send rounded price as int (i.e., 92.0 as 92)
            for i in range(1, len(candle)):
                candle[i] = float(candle[i])
            # swap columns 3->2 4->3 2->4
            candle[2], candle[3] = candle[3], candle[2]
            candle[3], candle[4] = candle[4], candle[3]
        self.candles = deque(list(reversed(candles)), maxlen=self.MAX_CANDLES)
    except Exception as e:
        raise WSException("Error initializing candles channel {}: {}".format(self.name, e))
    dispatcher.send(signal=self.name, sender='bitfinex', data=('snapshot', list(self.candles)))
def target():
    if data[0] >> 7:
        # DAM MPDO
        return
    if can_id - 0x480 != data[0]:
        return
    idx = int.from_bytes(data[1:3], signed=False, byteorder='little')  # Shall be 0x4E00
    subIdx = data[3]  # x: 1-32: Analog x, 33-64: Digital x - 32
    unit = data[7]  # Unit
    unitObj = units[unit]
    rawValue = data[4:6]
    if idx != 0x4E00:
        return
    if subIdx == 0 or subIdx > 64:
        return
    if subIdx <= 32:
        if subIdx in self._canAnalogOutputs:
            output = self._canAnalogOutputs[subIdx]
            output.update(rawValue)
        else:
            self._canAnalogOutputs[subIdx] = TACANAnalogOutput(
                self, subIdx, unitObj, rawValue)
            dispatcher.send(num=subIdx, signal="ADD_CAN-ANALOG-OUTPUT", sender=self)
    elif 33 <= subIdx <= 64:
        input_num = subIdx - 32
        if input_num in self._canDigitalOutputs:
            output = self._canDigitalOutputs[input_num]
            output.update(rawValue)
        else:
            self._canDigitalOutputs[input_num] = TACANDigitalOutput(
                self, input_num, rawValue)
            dispatcher.send(num=input_num, signal="ADD_CAN-DIGITAL-OUTPUT", sender=self)
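# Frame sketch (an assumption, values illustrative only): one SAM-MPDO frame
# the handler above would accept for a node at can_id 0x485. Byte 0 carries
# the node number (high bit clear), bytes 1-2 the index 0x4E00 little-endian,
# byte 3 the sub-index (5 = Analog 5), bytes 4-5 the raw value, byte 7 the
# unit code.
can_id = 0x485
data = bytes([0x05, 0x00, 0x4E, 0x05, 0x34, 0x12, 0x00, 0x01])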
def _handle_order_update(self, msg):
    """Handles order updates received from a websocket

    :param msg: order update message
    :returns: None

    Processes received order updates and sends them to listeners via
    dispatcher. If the order was executed it generates a trades update.
    """
    order_id = int(msg['i'])
    timestamp = int(msg['T'])
    symbol = msg['s']
    order_type = msg['o'].lower()
    side = msg['S']
    price = float(msg['p'])
    amount = float(msg['q'])
    filled = float(msg['z'])
    total = price * amount
    percent_filled = '{:.2f}'.format(100 * filled / amount)
    status = msg['x'].lower()
    if status == 'trade':
        status = 'executed'
    is_active = msg['x'] == 'NEW'

    order_update = [order_id, timestamp, symbol, order_type, side, price,
                    amount, percent_filled, total]
    trade_update = [timestamp, symbol, order_type, side, price, amount,
                    filled, total, status]

    if is_active:
        # is it an update of an existing order?
        is_new_order = True
        for i, order in enumerate(self._orders):
            if order[0] == order_id:
                is_new_order = False
                self._orders[i] = order_update
                break
        if is_new_order:
            self._orders.append(order_update)
        dispatcher.send(signal='orders', sender='binance', data=list(reversed(self._orders)))
    else:
        # delete from orders
        for i, order in enumerate(self._orders):
            if order[0] == order_id:
                del self._orders[i]
                dispatcher.send(signal='orders', sender='binance', data=list(reversed(self._orders)))
        # add to trades
        self._trades.append(trade_update)
        print('Sending trade update')
        dispatcher.send(signal='user_trades', sender='binance', data=list(reversed(self._trades)))
def runsm(self):
    while self.__running:
        time.sleep(0.2)
        if self.timer_counter > 0:
            self.timer_counter -= 1
            if self.timer_counter % 10000 == 0:
                dispatcher.send(message=" ", signal=SIGNALS.TERM_SHOWTIMER, sender=ENTITIES.RUNSM)
        else:
            # Execute @ states stack top
            if len(self._states_stack) == 0:
                self.__running = False
            else:
                # Do signal housekeeping
                if not self.__signal_queue.empty():
                    self.__signal = self.__signal_queue.get()
                    self.push_cmd(self.__signal)
                # Now execute the next cmd on line
                state_index = self._states_stack.pop()
                payload = self.smstates.getPayload(state_index)
                states_to_load = self._states_lookup.get(state_index)(payload)
                if states_to_load is not None:
                    for lstate in states_to_load:
                        statereturn = self.smstates.sindex(lstate)
                        if statereturn != 2:
                            logger.debug(
                                'State returned: return={} / lstate={} states_to_load={}\n\r'
                                .format(statereturn, lstate, states_to_load))
                            self._states_stack.append(statereturn)
                if self.timer_reload != 0:
                    self.timer_counter = self.timer_reload
def doQuery(self, query, message_h, diff_h=None, end_h=None, s_scope=Scope.Week, author=""):
    dispatcher.connect(message_h, signal=MESSAGE, sender=dispatcher.Any)
    query_regex = self.make_query(query)
    if diff_h is None:
        dispatcher.connect(message_h, signal=DIFF)
    else:
        dispatcher.connect(diff_h, signal=DIFF)
    if end_h is None:
        dispatcher.connect(message_h, signal=END)
    else:
        dispatcher.connect(end_h, signal=END)

    scan_commits = []
    kwargs = {}
    kwargs['after'] = make_after_date(s_scope)
    if author != "":
        kwargs['author'] = author
    logging.info("kwargs : %s", kwargs.__str__())
    for commit in self.repo.iter_commits(**kwargs):
        scan_commits.append(commit)

    # check in head
    for commit in scan_commits:
        logging.debug("scanned %s", commit.message)
        m = query_regex.search(commit.message)
        if m:
            dispatcher.send(signal=MESSAGE, sender={
                'signal': MESSAGE,
                'message': commit.message,
                'commit': commit.hexsha
            })

    prev_commit = None
    diffs = []
    for commit in scan_commits:
        if prev_commit:
            diffs.append({
                'diff': commit.diff(prev_commit, create_patch=True),
                'a_sha': commit.hexsha,
                'b_sha': prev_commit.hexsha
            })
        prev_commit = commit

    for diff in diffs:
        logging.debug("diff %s - %s", diff['a_sha'], diff['b_sha'])
        for patch in diff['diff']:
            b_src = patch.diff
            m = query_regex.search(b_src)
            if m:
                logging.debug(patch)
                dispatcher.send(signal=DIFF, sender={'signal': DIFF, 'diff': patch})

    dispatcher.send(signal=END, sender={'signal': END})
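# Caller sketch (an assumption): doQuery reports everything through signals,
# so the caller passes handlers instead of consuming a return value. The
# handlers and `searcher` are hypothetical; note that each handler receives
# the dict doQuery passes as `sender`.
def on_message(sender):
    print('commit %s matched' % sender['commit'])

def on_end(sender):
    print('query finished')

# searcher.doQuery('TODO', on_message, end_h=on_end, s_scope=Scope.Week)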
def _handle_sub(self):
    """
    Receive remote events from the ZeroMQ subscriber socket and re-dispatch
    them onto the local event bus.

    :return:
    """
    context = zmq.Context()
    self.subscriber = context.socket(zmq.SUB)
    self.subscriber.setsockopt(zmq.IPV6, 1)
    self.subscriber.setsockopt(zmq.IPV4ONLY, 0)
    self.subscriber.setsockopt(zmq.SUBSCRIBE, "")
    log.info("Subscriber started")
    count = 0
    while True:
        event = self.subscriber.recv_json()
        if count > 10:
            log.info("Received remote event\n" + json.dumps(event) + "\nDispatching to event bus")
            count = 0
        dispatcher.send(sender=event['sender'].encode("utf8"),
                        signal=event['signal'].encode("utf8"),
                        data=event['data'])
        count += 1
def addAttribute(self, name = None, type = None, value = None):    # pylint: disable=W0622
    """
    Add a user-defined attribute to this network.

    >>> network.addAttribute('Preliminary', Attribute.BOOLEAN_TYPE, True)

    The type parameter should be one of the :class:`Attribute.*_TYPE <Network.Attribute.Attribute>` values.

    Returns the attribute object that is created.
    """

    if name is None or type is None or value is None:
        raise ValueError, gettext('The name, type and value parameters must be specified when adding an attribute.')
    if not isinstance(name, str):
        raise TypeError, 'The name parameter passed to addAttribute() must be a string.'
    if type not in Attribute.TYPES:
        raise TypeError, 'The type parameter passed to addAttribute() must be one of the Attribute.*_TYPE values.'
    # TODO: validate value based on the type?

    attribute = Attribute(self, name, type, value)
    self._attributes.append(attribute)
    dispatcher.send(('set', 'attributes'), self)
    return attribute
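# Listener sketch (an assumption): addAttribute announces itself with the
# tuple signal ('set', 'attributes'), so an observer can watch a network's
# attribute list like this. `attributesChanged` is hypothetical; `network`
# is the object from the doctest above.
def attributesChanged(sender):
    print 'attributes of %s changed' % sender

dispatcher.connect(attributesChanged, ('set', 'attributes'), network)
network.addAttribute('Preliminary', Attribute.BOOLEAN_TYPE, True)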
def init():
    L.l.info('TEST module initialising')
    # test1()
    # sensor_address = "ZMNHTDx Smart meter S4 S5 S6:2"
    # current_record = models.Sensor.query.filter_by(address=sensor_address).first()
    # thread_pool.add_interval_callable(test_run.thread_run, run_interval_second=60)
    global initialised
    dispatcher.send(Constant.SIGNAL_GPIO, gpio_pin_code='0:in:2',
                    direction=Constant.GPIO_PIN_DIRECTION_IN, pin_value=1, pin_connected=False)
    initialised = True
    P.list[1] = {'one': 'unu'}
    P.list[2] = {'two': 'doi'}
    P.list[3] = {'three': 'trei'}
    getsome()['two'] = 'zwei'
    print(P.list)
    r = 'front_gate_relay_test'
    rel = m.ZoneCustomRelay.find_one({m.ZoneCustomRelay.relay_pin_name: r})
    rel.relay_is_on = True
    rel.save_changed_fields()
def thread_run_recv():
    prctl.set_name("mqtt_recv")
    threading.current_thread().name = "mqtt_recv"
    P.thread_recv = threading.current_thread()
    obj = None
    try:
        if len(mqtt_io.P.received_mqtt_list) > 10:
            L.l.info('Mqtt RECV len={}'.format(len(mqtt_io.P.received_mqtt_list)))
        if len(mqtt_io.P.received_mqtt_list) == 0:
            delta = (utils.get_base_location_now_date() - mqtt_io.P.last_rec).total_seconds()
            if delta > 60:
                L.l.warning('No mqtt received since {} sec'.format(delta))
        for obj in list(mqtt_io.P.received_mqtt_list):
            mqtt_io.P.received_mqtt_list.remove(obj)
            dispatcher.send(signal=Constant.SIGNAL_MQTT_RECEIVED, obj=obj)
    except Exception as ex:
        L.l.error('Error on mqtt receive process, err={}, obj={}'.format(ex, obj))
    finally:
        prctl.set_name("idle_mqtt_recv")
        threading.current_thread().name = "idle_mqtt_recv"
def save_item_data(item_id, data):
    """Save item data to the database.

    Fires the following signals:

    1. icecrate.items.preupdate
    2. icecrate.items.postupdate

    During pre-update, item data is mutable. Handlers of this event are
    free to modify the data. Order of handler execution is undefined.
    Keys and values must always be strings or numbers.

    When post-update is triggered, item data has already been inserted
    into the database. Postupdate sends only the item id, rather than
    full item data.
    """
    dispatcher.send("icecrate.items.preupdate", item=data)

    # icecrate.items.update
    database.hmset(itemkey(item_id), data)
    database.sadd(itemkey(meta="all"), item_id)

    dispatcher.send("icecrate.items.postupdate", item_id=item_id)
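# Handler sketch (an assumption): a pre-update hook using the mutability
# described in the docstring above to normalize a field before it reaches
# the database. `normalize_name` is hypothetical; the signal name comes from
# save_item_data.
from pydispatch import dispatcher

def normalize_name(item):
    if 'name' in item:
        item['name'] = str(item['name']).strip()

dispatcher.connect(normalize_name, signal="icecrate.items.preupdate")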
def removeSlave(self, ip):
    toDelList = [slave for slave in self.__slaveList if slave.ip == ip]
    for slave in self.__slaveList:
        print slave.ip
    if len(toDelList):
        # @TODO: dispatch the removed slave's tasks to other slaves.
        print "!!!!!!!!!!!!!!!!!!!!!!!"
        print "!!!!!!!!!!!!!!!!!!!!!!!"
        print toDelList[0].ip
        print ip
        print "!!!!!!!!!!!!!!!!!!!!!!!"
        print "!!!!!!!!!!!!!!!!!!!!!!!"
        print toDelList[0] in self.__slaveList
        self.__slaveList.remove(toDelList[0])
        print "After removal: "
        for slave in self.__slaveList:
            print slave.ip
        return 0
    else:
        dispatcher.send(
            'ERROR', 'dwgetd',
            'Can\'t delete: there is no slave with ip: %s' % (ip))
        return -1
def addObject(self, objectToAdd):
    if objectToAdd.networkId in self.idDict:
        raise ValueError, gettext('All objects in a network must have unique identifiers.')

    self.objects.append(objectToAdd)
    self.idDict[objectToAdd.networkId] = objectToAdd
    if objectToAdd.networkId > self._nextUniqueId:
        self._nextUniqueId = objectToAdd.networkId

    # Update the NetworkX graph representation of the object and its connections.
    if not self._bulkLoading:
        self._updateGraph(objectToAdd)

    # Watch for any changes to the object so we can update our dirty state and the graph.
    dispatcher.connect(self._objectChanged, dispatcher.Any, objectToAdd)

    # Let anyone who cares know that the network was changed.
    if self._bulkLoading:
        self._bulkAddObjects += [objectToAdd]
    else:
        dispatcher.send('addition', self, affectedObjects=[objectToAdd])
def hard_reset(self):
    """
    Hard Reset a PC Z-Wave Controller.
    Resets a controller and erases its network configuration settings.
    The controller becomes a primary controller ready to add devices to a new network.

    This command fires a lot of louie signals.
    Louie's clients must disconnect from nodes and values signals:

    .. code-block:: python

        dispatcher.send(self._network.SIGNAL_NETWORK_RESETTED, **{'network': self._network})

    """
    self._network.state = self._network.STATE_RESETTED
    dispatcher.send(self._network.SIGNAL_NETWORK_RESETTED,
                    **{'network': self._network})
    self._network.manager.resetController(self._network.home_id)
    try:
        self.network.network_event.wait(5.0)
    except AssertionError:
        # For gevent AssertionError: impossible to call a blocking function in the event loop callback
        pass
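# Client-side sketch (an assumption): the docstring above warns that clients
# must disconnect from node and value signals before a hard reset. A guard
# could look like this; the handler names are the client's own, and the
# SIGNAL_* names are assumed to match earlier dispatcher.connect calls.
from pydispatch import dispatcher

def prepare_for_reset(network, on_node_update, on_value_update):
    dispatcher.disconnect(on_node_update, signal=network.SIGNAL_NODE)
    dispatcher.disconnect(on_value_update, signal=network.SIGNAL_VALUE)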
def run(self):
    # Initiate with genesis account
    self.state.greedy_genesis_state()

    # Listens for a broadcast from the beacon chain that it is the shard's turn to write its crosslink
    dispatcher.connect(self.submit_to_beacon, signal=f"BEACON_TO_SHARD_{self.name}")

    while True:
        transactions = self.state.generate_random_transactions()
        # deep copy state so history is kept (vs. mutating state array)
        block = Block(
            len(self.blocks) - 1,
            new_hash(),
            transactions,
            copy.deepcopy(self.state.state))
        self.blocks.append(block)

        # notify visualizer and prediction market that a block has been submitted
        dispatcher.send(signal=f"SHARD_{self.name}", message=block)
        logging.info(f"dispatched {block}")

        # wait slot period
        time.sleep(system_config["SHARD_SLOT_TIME"])
def update_username(self, uid, username):
    """
    Update a user's username.
    Currently only used when Empire is started up with the username arg.
    """
    conn = self.get_db_connection()
    try:
        self.lock.acquire()
        cur = conn.cursor()
        cur.execute("UPDATE users SET username=? WHERE id=?", (username, uid))

        # dispatch the event
        signal = json.dumps({
            'print': True,
            'message': "Username updated"
        })
        dispatcher.send(signal, sender="Users")
    finally:
        cur.close()
        self.lock.release()

    return True
def run(self):
    last = time.time()
    while not self.stop:
        # Read the input and process data if available
        now = time.time()
        if (now - last) > 5.0:
            last = now
            self._log.debug("5s loop")
            rxc = Cargo.new_cargo()
            rxc.nodeid = 10
            rxc.realdata = [100, 200, 300]
            for channel in self._settings["pubchannels"]:
                dispatcher.send(channel, cargo=rxc)
                self._log.debug(str(rxc.uri) + " sent to channel: " + str(channel))
        # Don't loop too fast
        time.sleep(0.1)
def addAttribute(self, name = None, type = None, value = None):
    """
    Add a user-defined attribute to this object.

    >>> neuron1.addAttribute('Confirmed', Attribute.BOOLEAN_TYPE, True)

    It is allowable to have multiple attributes on the same object which have the same name.

    The type parameter should be one of the :class:`Attribute.*_TYPE <Network.Attribute.Attribute>` values.

    Returns the attribute object that is created.
    """

    if name is None or type is None or value is None:
        raise ValueError, gettext('The name, type and value parameters must be specified when adding an attribute.')
    if not isinstance(name, str):
        raise TypeError, 'The name parameter passed to addAttribute() must be a string.'
    if type not in Attribute.TYPES:
        raise TypeError, 'The type parameter passed to addAttribute() must be one of the Attribute.*_TYPE values.'
    # TODO: validate value based on the type?

    attribute = Attribute(self, name, type, value)
    self._attributes.append(attribute)
    dispatcher.send(('set', 'attributes'), self)
    return attribute
def _handle_robot_state_changed(self, name):
    """Forward the currently active robot controller's state

    Arguments:
        name {string} -- Name of the publisher
    """

    # Handle change only from the current controller, drop the rest
    if name == self.current_controller.name:
        RobotControllerHandler.current_state = self.current_controller.current_state

        # Set the isSimulation flag
        RobotControllerHandler.current_state.isSimulation = RobotControllerHandler.simulation

        # Forward the robot state as a general rosweld robot_state
        self.proxy_robot_state(RobotControllerHandler.current_state)

        # Update the current step
        RobotControllerHandler.current_step = RobotControllerHandler.current_state.step

        dispatcher.send(
            signal=Signals['STEP_CHANGED'],
            sender=self,
            step=RobotControllerHandler.current_step)