def _scene(self, query, keyword: list, st: State) -> str:
    """
    Internal helper: every scene branch is handled uniformly through the
    keyword list. `query` is passed so that generated or retrieved answers
    are never identical to the user's query.

    :param query:
    :param keyword:
    :param st:
    :return:
    """
    ai_sentiment = st.get_ai_sentiment()
    candidate_ans = []
    candidate_lv2 = []  # every source feeds lv2 for downstream filtering
    # TODO text-based, generative-model and learning-to-ask candidates
    pair_data = self._read_pair_data_sence(keyword, st)
    candidate_lv2.extend(pair_data)
    say_data = self._read_user_say_data_sence(keyword, st)
    candidate_lv2.extend(say_data)
    # print(say_data)

    # filter by sentiment and by sentences the AI has already said
    ai_ever_say = st.get_AI_ever_say()
    candidate_lv3 = []
    for item in candidate_lv2:
        response = item["response"]
        if response in ai_ever_say or response == query:
            continue
        sentiment = item["sentiment"]
        if sentiment == _Sentiment.Negative and ai_sentiment == _Sentiment.Negative:
            candidate_lv3.append(response)
        if sentiment != _Sentiment.Negative and ai_sentiment != _Sentiment.Negative:
            candidate_lv3.append(response)
    self.logger.info("candidate lv2-lv3:{}".format(candidate_lv3))

    # one pass of semantic filtering
    candidate_lv4 = []
    for response in candidate_lv3:
        # print("query:", query, " response:", response)
        score = self._match_score(query, response)
        if score >= config.MATCH_THRESHOLD:
            candidate_lv4.append(response)

    # sort, then assign selection probability by rank
    candidate_lv4 = self._check_ai_profile(candidate_lv4, st.user_id)
    candidate_lv4 = self._rerank_topic(keyword, candidate_lv4)
    candidate_lv4 = sorted(candidate_lv4, key=lambda x: x["weight"],
                           reverse=True)
    self.logger.info("candidate lv3-lv4:{}".format(candidate_lv4))

    # each item in candidate_lv4 is a dict
    temp = [{"response": item["response"],
             "weight": self.get_score_by_rank(i, len(candidate_lv4))}
            for i, item in enumerate(candidate_lv4)]
    candidate_ans.extend(self._dropout(temp))
    if len(candidate_ans) == 0:
        candidate_ans.extend(self.default_candidate + ["怎么了?", "什么事呀?"])
    ans = random.choice(candidate_ans)
    return ans
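# --- Hedged sketch (not part of the snippet above): one plausible way the
# rank-to-probability step could work. get_score_by_rank and _dropout are
# not shown in this corpus, so the geometric decay and the sampling helper
# below are assumptions for illustration only.
import random

def rank_weight(rank: int, decay: float = 0.8) -> float:
    # Rank 0 (best candidate) gets the largest weight; weights decay
    # geometrically so lower-ranked answers are still sometimes picked.
    return decay ** rank

def pick_by_rank(candidates: list) -> str:
    # candidates: dicts like {"response": str, "weight": float}, already
    # sorted best-first as in _scene above.
    weights = [rank_weight(i) for i in range(len(candidates))]
    responses = [c["response"] for c in candidates]
    return random.choices(responses, weights=weights, k=1)[0]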
def activate_say(self, query, st: State):
    if not config.WEIXIN_STYLE:
        return ""
    ai_record = st.get_last_AI_record()
    ai_seg = ai_record.segment
    # ai_response = ai_record.content
    files = [
        x.path
        for x in os.scandir(os.path.join(config.EMOJI_PATH, "response"))
        if x.path.endswith(("jpg", "jpeg", "png"))
    ]
    response = []
    for fp in files:
        basename = os.path.basename(fp)
        if self._sentence_with_word(ai_seg, basename):
            response.append("#Image:" + fp)
    if len(response) > 0:
        return random.choice(response)
    else:
        general_emoji = [
            "#Image:" + x.path
            for x in os.scandir(os.path.join(config.EMOJI_PATH, "general"))
            if x.path.endswith(("jpg", "jpeg", "png"))
        ]
        return random.choice(general_emoji)
def main():
    opt = Configuration()
    opt.backing_store = '/tmp'
    opt.instance_type = 'master'
    opt.forcedinitialtype = 'master'
    opt.backingstorestate = '../tests/mangler.bsc'
    opt.server_port = 8000
    opt.validate()

    # fake, and never instantiated
    opt2 = Configuration()
    opt2.backing_store = '/tmp'
    opt2.instance_type = 'master'
    opt2.forcedinitialtype = 'master'
    opt2.backingstorestate = '../tests/mangler2.bsc'
    opt2.server_port = 6666
    opt2.initial_connect_list = [('blah', 3330)]
    opt2.validate()

    import pdb  # pdb.set_trace()
    k = kernel.Kernel(opt)
    k.fsinit()
    some_state = State(opt2, ("me2", 6666))  # also never instantiated
def _get_key_word_id(self, st: State):
    user_record = st.get_last_User_record()
    # use the word segmentation rather than entities to build keywords
    IDS = set()
    keyword = []
    for i, (word, tag) in enumerate(user_record.segment):
        # a single-character surname tagged "nr": merge it with the next
        # token to recover the full name (the original bound check was
        # inverted and could never fire)
        if "nr" == tag and len(word) == 1 and (i + 1) < len(
                user_record.segment):
            w, _ = user_record.segment[i + 1]
            name = word + w
            keyword.append(name)
        elif "n" in tag:
            keyword.append(word)
    for entityobj in user_record.entity:
        keyword.append(entityobj.entity)
    if len(keyword) == 0:
        keyword.append(user_record.content)
    condition_sql = "or".join([
        " instr({}, '{}') > 0 ".format("KEYWORD", key) for key in keyword
    ])
    sql = "select KEYWORD,ID_LIST from cQA_index where {}".format(
        condition_sql)
    result = self.cqa_database.execute(sql)
    for row in result:
        ids = row[1].split(",")
        for id in ids:
            IDS.add(id)
    return IDS
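# --- Hedged sketch: the query above splices keywords directly into the SQL
# string. Assuming the backing store is sqlite3 (instr() suggests it), a
# parameterized variant avoids quoting problems. `conn` is a hypothetical
# sqlite3 connection; the table and column names mirror the snippet.
import sqlite3

def keyword_ids(conn: sqlite3.Connection, keywords: list) -> set:
    condition = " or ".join(["instr(KEYWORD, ?) > 0"] * len(keywords))
    sql = "select KEYWORD, ID_LIST from cQA_index where {}".format(condition)
    ids = set()
    for _, id_list in conn.execute(sql, keywords):
        ids.update(id_list.split(","))
    return ids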
def get_response(self, query, st: State, keyword=None, keyword_priorty=False):
    """
    A reply can be generated/retrieved from two kinds of evidence: the query
    alone, or the query plus the keyword list (OR relation).
    When keyword_priorty is true, search with the keywords alone first and
    fall back to the query if that returns nothing.

    :param query:
    :param st:
    :param keyword:
    :param keyword_priorty:
    :return:
    """
    user_record = st.get_last_User_record()
    # the USER's current entities
    user_entity = user_record.entity
    seg = user_record.segment
    query_keyword = [word for word, _ in seg
                     if word not in self.dictionary.stop_word]
    if keyword or len(user_entity) > 0:
        if keyword is None:
            self.logger.info("[Scene] Scene No keyword")
            ans = self._scene(query, query_keyword, st)
        else:
            # extra keywords were supplied
            if not keyword_priorty:
                # no priority: query both together
                self.logger.info("[Scene] Scene keyword no priorty")
                ans = self._scene(query, query_keyword + keyword, st)
            else:
                self.logger.info("[Scene] Scene keyword priorty")
                ans = self._scene(query, keyword, st)
                if len(ans) == 0:
                    ans = self._scene(query, query_keyword, st)
    else:
        self.logger.info("[Scene] No Scene")
        ans = self._no_scene(query, st)
    # TODO the answer may need rewriting, e.g. for persona consistency
    return ans
def activate_say(self, query, st: State):
    ai_sentiment = st.get_ai_sentiment()
    if ai_sentiment == _Sentiment.Positive:
        ans = random.choice([
            "开心心",
            "{},我现在心情不错噢".format(
                self.ai_being.get_value("your_name", st.user_id)),
            "啦啦啦"
        ])
    elif ai_sentiment == _Sentiment.Negative:
        ans = random.choice([
            "坏人,我不开心了",
            "不开心",
            "不允许骂人"
        ])
    else:
        ans = random.choice([
            "{},今天心情一般般啦".format(
                self.ai_being.get_value("your_name", st.user_id)),
            "今天过得还行"
        ])
    return ans
def record(self, st: State):
    user_id = st.user_id
    path = self.path.format(user_id=user_id)
    if user_id not in self.user_id_file:
        if os.path.exists(path):
            self.user_id_file[user_id] = open(path, "a", encoding="utf-8")
        else:
            self.user_id_file[user_id] = open(path, "w", encoding="utf-8")
    file = self.user_id_file[user_id]
    ai_record = st.get_last_AI_record()
    user_record = st.get_last_User_record()
    data = {
        # the AI's previous sentence
        "query": "" if ai_record is None else ai_record.content,
        "query_entity": [] if ai_record is None else
            [item.to_dict() for item in ai_record.entity],
        # the user's current sentiment
        "response_sentiment": st.get_user_sentiment(),
        "response": user_record.content,
        "response_entity": [item.to_dict() for item in user_record.entity],
    }
    string = json.dumps(data)
    file.write(string + "\n")
    file.flush()
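# --- Companion sketch (an assumption, not taken from the project): reading
# the JSON-lines file that record() writes back into memory, e.g. to build
# the retrieval corpus consumed elsewhere.
import json

def load_records(path: str) -> list:
    with open(path, encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]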
def _is_repeater(self, query, st: State):
    user_says = []
    frq = {}
    for item in st.get_dialogue_window():
        if isinstance(item, Record):
            if item.speaker == _Speaker.USER:
                user_says.append(item.content)
                frq[item.content] = frq.get(item.content, 0) + 1
    if len(frq) > 0:
        max_frq_sentence = max(frq, key=lambda x: frq[x])
        p = user_says.count(max_frq_sentence) / len(user_says)
        return (p >= 0.5 and query == max_frq_sentence
                and len(st._dialogue_window) > int(0.5 * config.WINDOW_SIZE))
    else:
        return False
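# --- Standalone restatement of the repeater heuristic above, for clarity
# (an assumption mirroring _is_repeater; Record/_Speaker/config are replaced
# by plain arguments): flag the user as a repeater when one sentence covers
# at least half of their turns, equals the current query, and the dialogue
# window is more than half full.
def is_repeater(user_says: list, query: str, window_len: int,
                window_size: int) -> bool:
    if not user_says:
        return False
    top = max(set(user_says), key=user_says.count)
    p = user_says.count(top) / len(user_says)
    return p >= 0.5 and query == top and window_len > int(0.5 * window_size)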
def __init__(self):
    # query preprocessing module
    self.preprocess_query = PreProcess()
    # sentiment module
    self.sentiment = global_sentiment
    # global state, one State per user
    self.user_id_table = {}
    for master_id in config.MASTER_ID:
        self.user_id_table[master_id] = State(master_id)
    # self.user_id = config.MASTER_ID[0]
    # callback for proactively starting a conversation
    self.callback = None
    global_bot_list.initialization()
    # repeater flag
    self.flag_rep = False
class Game:
    """
    Description:
        Provides a game entity that represents the entirety of the game.
        Game is composed of an engine, which renders graphics and maintains
        the frame rate, and a state, which handles game logic.
    """

    engine = Engine()
    state = State()

    def __init__(self):
        pass

    def run(self) -> None:
        """
        Runs the game. Calls run_event on the state to signal that the next
        event should be executed. should_run returns False when the "X" in
        the top right corner of the game window is clicked.
        """
        while self.engine.should_run():
            self.engine.clear_screen()
            self.state.run_event(self.engine.get_dt())
            self.engine.update_screen(self.state.get_objects_to_render(),
                                      self.state.get_sprites())
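# --- Minimal usage sketch (assumes Engine and State behave as the docstring
# above describes): construct the game and enter its loop.
if __name__ == "__main__":
    game = Game()
    game.run()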
def init_userid(self, user_id):
    # self.user_id = user_id
    if user_id not in self.user_id_table:
        self.user_id_table[user_id] = State(user_id)
def __init__(self, root):
    super().__init__(root, state=State({'bg': root['bg']}))
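# --- Hedged sketch of the State wrapper these widgets pass to their parent:
# the calls State({'bg': ...}) here and State({'click': 0}) (in the last
# snippet below) are both consistent with a thin dict-backed container like
# this; the project's real State class is not shown and may differ.
class State(dict):
    """Dict-backed state container, e.g. State({'click': 0})['click'] == 0."""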
class MainWindow:
    """Display EFrame's main window."""

    def __init__(self, rootLogger, thLevel, expFile):
        self.experiment = None
        self.logger = logging.getLogger("mainWindow")
        self.prepareGUI()
        # add logging to output tab (using the global logger)
        rootLogger.debug("Add GUI log handler.")
        th = QTextEditHandler(self.ui.outputEdit)
        th.setLevel(thLevel)
        rootLogger.addHandler(th)
        # initialize state
        self.s = State(self.mainWindow)
        # display main window
        self.mainWindow.closeEvent = self.closeEvent
        self.mainWindow.show()
        # initialize GUI update
        self.updateInterval = 200  # ms
        self.running = False
        self.startUpdate()
        self.ui.fileNameEdit.setText(expFile)
        self.app.exec_()

    def prepareGUI(self):
        self.logger.info("Start to initialize GUI.")
        self.app = QtGui.QApplication([""])
        self.mainWindow = QtGui.QMainWindow()
        self.mainWindow.setWindowIcon(QtGui.QIcon("ui/icon.png"))
        self.app.setActiveWindow(self.mainWindow)
        self.ui = EFrame_UI.Ui_MainWindow()
        self.ui.setupUi(self.mainWindow)
        self.ui.loadFileButton.clicked.connect(self.loadFile)
        self.ui.saveFileButton.clicked.connect(self.saveFile)
        self.ui.runFileButton.clicked.connect(self.runFile)

    def closeEvent(self, event):
        self.stopUpdate()
        self.s.removeAllModules()

    def startUpdate(self):
        if self.running:
            self.logger.warning("Already running GUI update.")
        else:
            self.running = True
            QtCore.QTimer.singleShot(self.updateInterval, self.update)

    def stopUpdate(self):
        self.running = False

    def update(self):
        """
        Update the mainWindow GUI and all module GUIs.

        This is called by :meth:`.updateTimer` at a default interval of
        200 ms. We dynamically adjust the update interval between 200 and
        500 ms. This is also useful to optimize modules and to investigate
        where e.g. blocking calls to remote servers cause unnecessary
        delays. Note that module-level update methods should never query
        remote hosts and should avoid calling methods from other EFrame
        modules. Ideally, all values to be set in the GUI should be read
        from a local dictionary or list. See the documentation for
        :meth:`~baseModule.baseModule.update` for details on this, where
        some underlying restrictions due to Python's implementation are
        discussed as well.
        """
        if self.running:
            start = time.time()
            try:
                self.s.updateAllModules()
            except Exception as e:
                self.logger.critical(
                    "Unhandled exception during GUI update: %s", e)
                self.logger.error(traceback.format_exc())
            finally:
                delta = (time.time() - start) * 1000  # duration in ms
                self._computeUpdateInterval(delta)
                # wait out the remainder of the interval (the original
                # subtraction was inverted, which defeated the interval)
                wait = max(self.updateInterval - delta, 0)
                QtCore.QTimer.singleShot(wait, self.update)
        else:
            self.logger.info("GUI update thread stopped.")

    def _computeUpdateInterval(self, delta):
        if delta < 0.35 * self.updateInterval:
            if self.updateInterval > 200:
                if 0.85 * self.updateInterval > 200.0:
                    self.updateInterval = int(0.85 * self.updateInterval)
                else:
                    self.updateInterval = 200
                self.logger.debug("Reducing GUI updateInterval to %d ms.",
                                  self.updateInterval)
        elif delta < 0.5 * self.updateInterval:
            self.logger.info("GUI update took %d ms (> 35/100).", delta)
            if self.updateInterval * 1.15 < 500 and delta < 500:
                self.updateInterval = max(int(self.updateInterval * 1.15),
                                          delta)
                self.logger.debug("Increasing GUI updateInterval to %d ms.",
                                  self.updateInterval)
            else:
                self.updateInterval = 500
                self.logger.critical("Already running at maximum GUI update "
                                     "interval of 500 ms.")
        else:
            self.logger.warning("GUI update took %d ms (> 50/100).", delta)
            if self.updateInterval * 1.5 < 500 and delta < 500:
                self.updateInterval = max(int(self.updateInterval * 1.5),
                                          delta)
                self.logger.debug("Increasing GUI updateInterval to %d ms.",
                                  self.updateInterval)
            else:
                self.updateInterval = 500
                self.logger.critical("Already running at maximum GUI update "
                                     "interval of 500 ms.")

    def loadFile(self):
        """Load an experiment configuration."""
        fileName = QtGui.QFileDialog.getOpenFileName(parent=self.mainWindow,
                                                     caption="Load File",
                                                     directory="config/",
                                                     filter="*.conf")
        if str(fileName).strip() != "":
            self.ui.fileNameEdit.setText(fileName)
        else:
            self.logger.error("'%s' is not a valid filename.", fileName)

    def saveFile(self):
        """Save an experiment configuration."""
        fileName = str(self.ui.fileNameEdit.text())
        if fileName.strip() != "":
            self.logger.info("Saving configuration for %s.", fileName)
            # save the XML configuration
            self.storeGeometry()
            self.s.config.currentFileName = fileName
            self.s.config.saveXML()
            # save the window state
            self.saveWindowState()
            self.logger.info("Done saving.")
        else:
            self.logger.error("'%s' is not a valid filename.", fileName)

    def storeGeometry(self):
        self.s.config.width = self.mainWindow.geometry().width()
        self.s.config.height = self.mainWindow.geometry().height()
        self.s.config.x = self.mainWindow.x()
        self.s.config.y = self.mainWindow.y()

    def saveWindowState(self):
        """Save the GUI state (i.e. position of widgets) to fileName.layout."""
        fileName = self.s.config.currentFileName
        if fileName is None:
            self.logger.warning("No window state to save. Need to run an "
                                "experiment first.")
        else:
            fileName = "%s.layout" % fileName
            try:
                with open(fileName, "w") as f:
                    f.write(self.mainWindow.saveState())
            except IOError:
                self.logger.error("Failed to write '%s'.", fileName)
            else:
                self.logger.info("Wrote window state to '%s'.", fileName)

    def loadWindowState(self):
        """Load a GUI state."""
        self.stopUpdate()
        fileName = self.s.config.currentFileName
        if fileName is None:
            self.logger.warning("No window state to load. Please specify a "
                                "configuration file first.")
        else:
            fileName = "%s.layout" % fileName
            try:
                with open(fileName, "r") as f:
                    data = f.read()
            except IOError:
                self.logger.warning("No window state found for configuration "
                                    "'%s'.", fileName)
            else:
                self.mainWindow.restoreState(data)
                self.logger.info("Loaded window state from '%s'.", fileName)
        self.s.loadingCompleted.emit()
        self.startUpdate()

    def reloadModule(self, name):
        self.stopUpdate()
        self.saveWindowState()
        self.s.reloadModule(name)
        QtCore.QTimer.singleShot(1, self.loadWindowState)

    def runFile(self):
        """Run an experiment."""
        fileName = str(self.ui.fileNameEdit.text()).strip()
        dirPath, expName = os.path.split(fileName)
        # ensure the file is a .conf file, then strip the extension
        if expName[-5:] != '.conf':
            self.logger.error("Bad extension. Experiment configuration needs "
                              "to be a .conf file. Submitted filename: '%s'.",
                              fileName)
            return
        else:
            expName = expName[:-5]
        self.logger.info("Clear state...")
        self.stopUpdate()
        self.s.removeAllModules()
        self.app.processEvents()
        self.logger.info("Starting experiment configuration '%s'.", expName)
        self.ui.fileNameEdit.setText(fileName)
        try:
            with open(fileName, "r") as config:
                # skip blank lines as well as comments (a bare blank line
                # would otherwise crash the [0] index)
                toBeLoaded = [line.strip() for line in config
                              if line.strip() and not line.strip()[0] == "#"]
        except IOError:
            self.logger.error("Cannot read file %s.", fileName)
            return
        self.logger.info("Loading XML configuration file.")
        self.s.config.currentFileName = fileName
        self.s.config.loadXML()
        self.logger.debug("Restoring window geometry and position.")
        self.mainWindow.resize(self.s.config.width, self.s.config.height)
        self.mainWindow.move(self.s.config.x, self.s.config.y)
        self.logger.info("Loading %d modules.", len(toBeLoaded))
        try:
            for moduleName in toBeLoaded:
                if self.s.loaded(moduleName):
                    # this can happen through baseModule.requiresModule()
                    self.logger.debug("Module %s is already loaded.",
                                      moduleName)
                else:
                    self.s.addModule(moduleName)
        except Exception as e:
            self.logger.error("Caught exception during initialization: "
                              "'%s: %s'.", e.__class__.__name__, e)
            self.logger.error("%s", traceback.format_exc())
        self.logger.debug("Restoring window state.")
        # TB: This does not work properly if it is done directly, but
        # calling it in a single shot timer seems to fix this
        # https://bugreports.qt-project.org/browse/QTBUG-15080
        # This is apparently not the whole story. I also had to change the
        # names of the objects to avoid using the name 'remoteControl'.
        # KK: Maybe we can run processEvents instead of the singleShot?
        QtCore.QTimer.singleShot(1, self.loadWindowState)
class __kernel(object):
    """ Implementation of the singleton interface """

    def __init__(self, options):
        if options is None:
            raise Exception("A configuration instance must be supplied")
        self.options = options
        self.oft_lock = threading.RLock()
        self.oft_rwlock = ReadWriteLock.ReadWriteLock()
        self.oft = {}

    def fsinit(self):
        self.logger = Logger(self.options.logdir,
                             "%s.%s.log" % (self.options.serveraddress,
                                            self.options.serverport),
                             False, self.options.logverbosity,
                             self.options.logquiet)
        self.logger.debug("> Kernel.fsinit")
        try:
            self.state = State(self.logger, self.options,
                               (self.options.serveraddress,
                                self.options.serverport))
            self.logger.debug("> Kernel.fsinit state created")
            # instance type is handled within state
            self.storage = storage_impl(self.options)
            self.network = network_impl
            self.storage.start()
            self.network.start(self, self.options)
            self.logger.debug("> Kernel.fsinit storage and network modules up")
            self.maintenance = _system(self)
            self.maintenance.start()  # start the periodic heartbeat
            self.logger.debug("> Kernel.fsinit maintenance thread up")
            # pick the initial dispatcher
            self.dispatcher = self._pick_dispatcher()
        except Exception as v:
            self.logger.error(
                "Crash in fsinit - filesystem will hang now %s" % v)
        self.logger.debug("< Leaving Kernel.fsinit")

    def fshalt(self):
        """Shut down in an orderly fashion"""
        self.dispatcher.force_sync()
        self.maintenance.stop = True
        self.maintenance.join()
        self.network.stop()

    # PRIVATE/INTERNAL

    # communication internal
    def __broadcast(self, force_election, op, *args):
        ret = []
        self.logger.debug("__broadcast '%s/%s'" % (op, str(args)))
        for connected_peer in self.state.active_members:
            try:
                ret.append((connected_peer,
                            getattr(connected_peer.link, op)(*args)))
            except Exception as v:
                self.logger.error("Broadcast - peer %s faulted, error '%s'"
                                  % (connected_peer, v))
                self._handle_dead_peer(connected_peer, force_election)
        return ret

    # statekeeping and process management
    def _pick_dispatcher(self):
        """ Pick a dispatcher """
        # Changing dispatcher requires that all pending operations have
        # been consistently flushed.
        dispatch_class = None
        itype = self.state.instancetype
        if itype == SINGULAR:
            dispatch_class = _dispatcher  # special case for local-only
        elif itype == MASTER:
            dispatch_class = _master_dispatcher
        elif itype == REPLICA:
            dispatch_class = _replica_dispatcher
        elif itype == SPARE:
            dispatch_class = _ondemandfetch_dispatcher
        else:
            raise Exception(
                "Invalid instance_type %d when picking dispatcher" % itype)
        self.logger.info("Changing dispatcher to '%s'"
                         % dispatch_class.__name__)
        self.state._mark_can_replicate()
        return dispatch_class(self)
    #END_DEF pick_dispatcher

    def _add_new_peer(self, peer):
        """
        peer: a Peer object which may or may not be empty (other than
        connection information)
        """
        self.logger.info("Seeing if we need to add %s to group of %s."
                         % (peer, self.state.group))
        existing = self.state.find(peer.connection)
        if existing is None:
            self.logger.debug("Adding %s TO %s" % (peer, self.state.group))
            self.state.group.append(peer)
            self.state.group = sorted(self.state.group)
            self.state._mark_can_replicate()
        else:
            # it's a (non-critical) bug if this happens
            self.logger.warning("Peer %s already known" % peer)

    def _handle_dead_peer(self, peer, force_election=True):
        if not peer.dead:
            self.logger.info("Lost contact with %s, removing from group "
                             "and reconfiguring" % peer)
            peer.dead = True
            self.state._mark_can_replicate()
            if force_election:
                self.state.must_run_election = True

    def _connect_to_known_peers(self):
        """
        Connects to any peers that we might know of that aren't already
        connected. A state lock should be in place when entering this
        function.
        """
        hold_election = False
        for unconnected_peer in self.state.unconnected_nodes:
            self.logger.debug("Connecting to %s" % str(unconnected_peer))
            retval = self.network.connect_to_peer(unconnected_peer,
                                                  self.state.identification)
            if retval is None:
                self.logger.warning(
                    "Transmission failure '%s', dropping. Group is now %s"
                    % (str(unconnected_peer), self.state.group))
                self.state.group.remove(unconnected_peer)
                self.state._mark_can_replicate()
                continue
            (link, data) = retval
            unconnected_peer.link = link
            if unconnected_peer.recontact:
                self.state.must_run_election = True
            if data is None:
                # rebound request - just ignore it.
                self.logger.debug("Received counter-connect from %s, returning"
                                  % str(unconnected_peer.connection))
                continue
            (me_type, step, score, others, remote_instancetype) = data
            self.logger.info(
                "_connect_to_known_peers data package (me %d/remote %d) %s"
                % (me_type, remote_instancetype, data))
            if me_type == -1:
                continue
            unconnected_peer.link = link
            unconnected_peer.score = score
            unconnected_peer.recontact = False
            if me_type == -2:
                unconnected_peer.instancetype = SPARE
            # FIXME make it so that spares cannot dictate to us
            if me_type == SPARE:
                self.state.me.instancetype = SPARE
            elif me_type == REPLICA:
                self.state.me.instancetype = REPLICA
                hold_election = True
            else:
                raise GRSException(
                    "Unknown me_type %s in %s when connecting to %s"
                    % (me_type, data, unconnected_peer.connection))
            for other in others:
                new_peer = Peer(connection=tuple(other), recontact=True)
                if self.state.find(new_peer.connection) is None:
                    self._add_new_peer(new_peer)

    def _run_election(self):
        """ A global lock should be in place when entering this function """
        # TODO Should scores be updated every once in a while
        self.state.election_in_progress = True
        self.logger.info("*** ELECTION PROCESS RUNNING ***")
        try:
            for connected_peer in self.state.active_members:
                try:
                    (status, step, score) = connected_peer.link.election_begin(
                        self.state.current_step, self.state.score,
                        self.state.conn_info_all)
                    connected_peer.score = score
                except socket.error:
                    self._handle_dead_peer(connected_peer)
                    continue
                if status == 'IN_PROGRESS' or status == 'HOLD':
                    # election in progress elsewhere
                    self.logger.info(
                        "Attempted to hold election but %s beat us to it."
                        % connected_peer)
                    return
                elif status == 'I_WIN':
                    self.logger.info(
                        "Held election but %s has higher and took over."
                        % connected_peer)
                    self.state.me.instancetype = REPLICA
                    self.state.must_run_election = False
                    return
                elif status != 'I_LOSE':
                    self.logger.error(
                        "Invalid response from election voter %s " % status)
                    self.state.must_run_election = False
            # I win
            self.logger.info("I won the election")
            # make sure all members are connectable still
            self.__broadcast(False, 'ping', self.state.me.connection, None)
            all_members = sorted(self.state.active_members)
            all_members.extend(self.state.spares)
            for i in range(len(all_members)):
                itype = REPLICA if i < self.options.maxcopies - 1 else SPARE
                self.logger.debug("Assigning %s to type %s"
                                  % (all_members[i], itype))
                all_members[i].instancetype = itype
            self.state.me.instancetype = MASTER
            group_update = [(peer.connection[0], peer.connection[1],
                             peer.score, peer.instancetype)
                            for peer in self.state.group]
            for i in range(len(all_members)):
                try:
                    getattr(all_members[i].link,
                            "election_finished")(group_update)
                except Exception as v:
                    self.logger.error(
                        "%s failed to communicate new status to %s (%s)"
                        % (__name__, all_members[i], v))
                    self._handle_dead_peer(all_members[i], False)
            self.state.must_run_election = False
        except Exception as v:
            self.logger.error("Crash in _run_election. Cause '%s'" % v)
            raise
        finally:
            self.__election_finished()

    def __election_finished(self):
        self.state.election_in_progress = False
        # TODO consider not clearing this here, but rather in e_finish and
        # master set.
        # Done, and everybody knows their place. Now pick a dispatcher
        # that suits.
        self.dispatcher = self._pick_dispatcher()

    def _spare_upgrade(self):
        """
        Master operation - upgrade a spare to full
        TODO: split this operation up so we can transfer partial datasets
        """
        if len(self.state.spares) < 1:
            # bail out: without this return, spares[0] below would raise
            self.logger.error("%s no spares to upgrade" % (__name__))
            return
        spare = self.state.spares[0]
        self.logger.info(" ** BEGINNING UPGRADE OF SPARE %s ** " % spare)
        binary_data = self.network.wrapbinary(
            self.storage.get_full_dataset())
        spare.node_upgrade(self.state.me.connection, binary_data,
                           self.state.group.conn_info_all)

    # NETWORK COMMAND AND CONTROL
    def node_register(self, ident):
        """ Called when a new peer arrives and wants to enter the group """
        with self.storage.getlock(None).writelock:
            try:
                self.logger.info("Got a hello from a peer with ident %s"
                                 % (ident,))
                (server, port, step, score, neverparticipate) = ident
                if (server, port) in self.state.conn_info_all:
                    self.logger.debug(
                        "Preexisting peer said hello %s. Returning." % ident)
                    peer_type = -1
                elif self.state.instancetype == SPARE:
                    peer_type = -2
                else:
                    if (step < self.state.current_step or neverparticipate
                            or self.state.active_group_size
                            >= self.options.maxcopies):
                        peer_type = SPARE
                    elif step >= self.state.current_step:
                        peer_type = REPLICA
                        if step > self.state.current_step:
                            # FIXME: validate buffer length in
                            # write_dispatcher if master and delayed writes
                            pass
                    new_peer = Peer(connection=(server, port), score=score,
                                    recontact=False, initial_type=peer_type)
                    # FIXME allow spares
                    self._add_new_peer(new_peer)
                    # This tells everybody current about peer, which in turn
                    # causes peer to know about them.
                    # FIXME stop this, and instead let peer handle it itself
                    #self.__broadcast(False, "network_update_notification",
                    #                 "PEER_UP", (server, port))
                returnvalue = (peer_type, self.state.current_step,
                               self.state.score, self.state.conn_info_all,
                               self.state.instancetype)
                self.logger.debug("Node_register returns %s to %s"
                                  % (returnvalue, (server, port)))
                return returnvalue
            except Exception as v:
                self.logger.error("Node_register failed for %s: %s"
                                  % (ident, v))
                raise

    def node_unregister(self, peer):
        self.logger.info("Controlled goodbye from a peer %s" % peer)
        for p in self.state.group:
            if p.connection == tuple(peer):
                p.dead = True

    def node_upgrade(self, master, data, step):
        """ Sent to spares to upgrade to full copy """
        with self.storage.getlock(None).writelock:
            if self.state.instancetype < SPARE:
                raise GRSException(
                    "Received request to upgrade when not a spare")
            unwrapped = self.network.unwrapbinary(data)
            self.storage.set_full_dataset(unwrapped)
            self.state.me.instancetype = REPLICA
            self.state.current_step = step
            self.state.checkpoint_id()

    def network_update_notification(self, cmd, arg):
        """
        Used to receive updates from other network parties. Does NOT ask
        for an election - that is the job of the originating party.
        """
        self.logger.info("network_update_notification cmd=%s, arg={%s}",
                         cmd, arg)
        if cmd == 'PEER_DOWN':
            arg = tuple(arg)
            if self.state.me.connection == arg:
                raise GRSException(
                    "Internal error: invalid kill attempt from remote peer")
            self._handle_dead_peer(arg, False)
        elif cmd == 'PEER_UP':
            arg = tuple(arg)
            if self.state.me.connection == arg:
                self.logger.error("PEER_UP received about myself")
                return
            new_peer = Peer(connection=arg)
            self._add_new_peer(new_peer)

    def ping(self, whoami, dumb_data_carrier):
        """
        Ping a service. dumb_data_carrier is used solely to test
        responsiveness at different workloads.
        """
        #self.logger.debug("Ping received")
        del dumb_data_carrier
        p = self.state.find(tuple(whoami))
        if p is None:
            self.logger.warning(
                "%s group of %s heard from %s but this was not found"
                % (__name__, self.state.group, whoami))
            return 'LOST'
        p.last_seen = time.time()
        return 'OK'

    def election_begin(self, remote_current_step, remote_current_score,
                       remote_conn_info_all):
        if self.state.election_in_progress:
            self.logger.debug(
                "Election request received - signaling election in progress")
            return ("IN_PROGRESS", self.state.current_step, self.state.score)
        with self.storage.getlock(None).writelock:
            self.logger.debug(
                "%s Comparing my group %s with remote %s. Identical? %s"
                % (__name__, self.state.conn_info_all, remote_conn_info_all,
                   self.state.compare(remote_conn_info_all)))
            if not self.state.compare(remote_conn_info_all):
                self.logger.debug(
                    "States not identical, asking remote to hold")
                return ("HOLD", self.state.current_step, self.state.score)
            if (remote_current_step < self.state.current_step
                    or (remote_current_step == self.state.current_step
                        and remote_current_score <= self.state.score)):
                self.logger.debug("Election request received - I won")
                self.state.must_run_election = True
                return ("I_WIN", self.state.current_step, self.state.score)
            # if remote_current_step > self.state.current_step or
            # remote_current_score > self.state.score:
            self.logger.debug("Election request received - I lost")
            return ('I_LOSE', self.state.current_step, self.state.score)

    def election_finished(self, group_update):
        with self.storage.getlock(None).writelock:
            self.logger.info("Received election finished, group update %s"
                             % (group_update,))
            for peer in group_update:
                #self.logger.debug("election_finished: updating %s !!" % peer)
                (server, port, score, instancetype) = peer
                existing = self.state.find((server, port))
                if existing is None:
                    raise GRSException(
                        "Election_finished received. Could not find peer %s "
                        "in group of %s!" % (peer, self.state.group))
                existing.score = score
                existing.instancetype = instancetype
            self.state.must_run_election = False
            self.__election_finished()

    # EXTERNAL COMMAND AND CONTROL
    def branch(self, subtree_root, destination, message):
        if self.state.instancetype > MASTER:
            return "NOT_MASTER"
        try:
            with self.storage.getlock(None).writelock:
                code = self.storage.branch(subtree_root, destination, message)
                if code < 0:
                    return "INTERNAL_ERROR"
        except:
            raise
        return "OK"

    # OPEN FILE HANDLING
    def _oft_get(self, args):
        """
        Retrieve the cached open file proxy.
        Note: do not acquire oft_lock around this.
        """
        (path, flags, mode) = args
        l = (path, flags, tuple(mode))
        #self.oft_lock.acquire()
        if l not in self.oft:
            # raises IOError if it fails, otherwise has the side effect of
            # opening the file
            ret = self.open(path, flags, *tuple(mode))
        file_fd = self.oft[l]
        self.logger.debug("_oft_get %s - %s" % (l, file_fd))
        #self.oft_lock.release()
        return file_fd

    def _oft_add(self, path, flags, mode, retval):
        """Called ONLY by open (adds the opened file information to the cache)"""
        l = (path, flags, tuple(mode))
        #self.oft_lock.acquire()
        self.logger.debug("_oft_added %s - %s - %s"
                          % ((path, flags), self.oft.get(l), retval))
        if l not in self.oft:
            self.oft[l] = retval
        #self.oft_lock.release()

    def _oft_release(self, path, flags, mode):
        """Called ONLY by release"""
        l = (path, flags, tuple(mode))
        try:
            self.logger.debug("_oft_release %s - %s" % (l, self.oft[l]))
            del self.oft[l]
        except KeyError:
            pass  # legit

    # Convenience wrappers

    ### DIRECTORY OPS ###
    def readdir(self, path, offset, internal):
        # TODO It might make sense here to cache stat data IF we are
        # operating per remote.
        return self.dispatcher.do_read_op(internal, "readdir", path, offset)

    ### FILE OPS ###
    # READS
    def open(self, path, flags, additional_mode=[], internal={'local': None}):
        """
        File open operation

        No creation (O_CREAT, O_EXCL) and by default also no truncation
        (O_TRUNC) flags will be passed to open(). If an application
        specifies O_TRUNC, fuse first calls truncate() and then open().
        Only if 'atomic_o_trunc' has been specified and the kernel version
        is 2.6.24 or later, O_TRUNC is passed on to open.

        Unless the 'default_permissions' mount option is given, open should
        check if the operation is permitted for the given flags. Optionally
        open may also return an arbitrary filehandle in the fuse_file_info
        structure, which will be passed to all file operations.

        NOTE: The internal parameter is disregarded and only included to
        present a consistent API
        """
        if 'remote' in internal:
            raise GRSException("Open is never called remotely")
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if isinstance(additional_mode, int):  # HACK
            additional_mode = [additional_mode]
        # The reason why we need to replicate opens is that open can be
        # used to make a file of size 0 (O_CREAT).
        if (flags & accmode) != os.O_RDONLY:
            self.logger.debug("%s, %s, %s, %s, %s"
                              % (internal, "open", path, flags,
                                 list(additional_mode)))
            retval = self.dispatcher.do_write_op(internal, "open", path,
                                                 flags,
                                                 list(additional_mode))
        else:
            retval = self.dispatcher.do_read_op(internal, "open", path,
                                                flags, list(additional_mode))
        if retval <= 0:
            raise IOError(abs(retval), "Error ROFS", path)
        else:
            self._oft_add(path, flags, tuple(additional_mode), retval)
        return (path, flags, additional_mode)

    def read(self, size, offset, internal):
        return self.dispatcher.do_read_op(internal, "read", size, offset)

    def fgetattr(self, internal):
        return self.dispatcher.do_read_op(internal, "fgetattr")

    def flush(self, internal):
        """
        Possibly flush cached data

        BIG NOTE: This is not equivalent to fsync(). It's not a request to
        sync dirty data.

        Flush is called on each close() of a file descriptor. So if a
        filesystem wants to return write errors in close() and the file
        has cached dirty data, this is a good place to write back data and
        return any errors. Since many applications ignore close() errors
        this is not always useful.

        NOTE: The flush() method may be called more than once for each
        open(). This happens if more than one file descriptor refers to an
        opened file due to dup(), dup2() or fork() calls. It is not
        possible to determine if a flush is final, so each flush should be
        treated equally. Multiple write-flush sequences are relatively
        rare, so this shouldn't be a problem.

        Filesystems shouldn't assume that flush will always be called
        after some writes, or that it will be called at all.
        """
        return self.dispatcher.do_read_op(internal, "flush")

    def release(self, flags, internal):
        """
        Release an open file

        Release is called when there are no more references to an open
        file: all file descriptors are closed and all memory mappings are
        unmapped. For every open() call there will be exactly one
        release() call with the same flags and file descriptor. It is
        possible to have a file opened more than once, in which case only
        the last release will mean that no more reads/writes will happen
        on the file. The return value of release is ignored.

        NOTE: the given flag parameter is overwritten by the stored value
        """
        (path, flags, mode) = internal['file_id']
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            self.dispatcher.do_write_op(internal, "release", flags)
        else:
            self.dispatcher.do_read_op(internal, "release", flags)
        self._oft_release(*internal['file_id'])

    def lock(self, cmd, owner, additional_args={}, internal={}):
        pass

    # WRITES
    def write(self, buf, offset, internal):
        return self.dispatcher.do_write_op(internal, "write", buf, offset)

    def fsync(self, isfsyncfile, internal):
        # TODO: can limit the buffer flush by using the file reference
        return self.dispatcher.force_sync()

    def ftruncate(self, length, internal):
        return self.dispatcher.do_write_op(internal, "ftruncate", length)

    #### NON FILE/DIRECTORY-SPECIFIC OPERATIONS ####

    ## Reads
    def getattr(self, path, internal):
        return self.dispatcher.do_read_op(internal, "getattr", path)

    def readlink(self, path, internal):
        return self.dispatcher.do_read_op(internal, "readlink", path)

    def access(self, path, mode, internal):
        return self.dispatcher.do_read_op(internal, "access", path, mode)

    ## Writes
    def unlink(self, path, internal):
        return self.dispatcher.do_write_op(internal, "unlink", path)

    def symlink(self, path, path2, internal):
        return self.dispatcher.do_write_op(internal, "symlink", path, path2)

    def rename(self, path, path2, internal):
        return self.dispatcher.do_write_op(internal, "rename", path, path2)

    def link(self, path, path2, internal):
        return self.dispatcher.do_write_op(internal, "link", path, path2)

    def chmod(self, path, mode, internal):
        return self.dispatcher.do_write_op(internal, "chmod", path, mode)

    def chown(self, path, user, group, internal):
        return self.dispatcher.do_write_op(internal, "chown", path, user,
                                           group)

    def truncate(self, path, length, internal):
        return self.dispatcher.do_write_op(internal, "truncate", path,
                                           length)

    def mknod(self, path, mode, dev, internal):
        return self.dispatcher.do_write_op(internal, "mknod", path, mode,
                                           dev)

    def mkdir(self, path, mode, internal):
        return self.dispatcher.do_write_op(internal, "mkdir", path, mode)

    def rmdir(self, path, internal):
        return self.dispatcher.do_write_op(internal, "rmdir", path)

    def utime(self, path, times, internal):
        return self.dispatcher.do_write_op(internal, "utime", path, times)

    # The Python stdlib doesn't know of subsecond precision in
    # access/modify times.
    def utimens(self, internal, path, ts_acc, ts_mod):
        self.dispatcher.do_write_op(internal, "utime", path, ts_acc, ts_mod)

    def statfs(self):
        """
        Should return an object with statvfs attributes (f_bsize,
        f_frsize...). E.g., the return value of os.statvfs() is such a
        thing (since py 2.2). If you are not reusing an existing statvfs
        object, start with fuse.StatVFS(), and define the attributes.

        To provide usable information (i.e., you want sensible df(1)
        output), you are suggested to specify the following attributes:

        - f_bsize - preferred size of file blocks, in bytes
        - f_frsize - fundamental size of file blocks, in bytes
          [if you have no idea, use the same as blocksize]
        - f_blocks - total number of blocks in the filesystem
        - f_bfree - number of free blocks
        - f_files - total number of file inodes
        - f_ffree - number of free file inodes
        """
        return os.statvfs(self.options.backingstore)
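# --- Hedged sketch: __kernel is documented as "Implementation of the
# singleton interface", and the test harness earlier calls kernel.Kernel(opt).
# A module-level accessor along these lines would reconcile the two; the
# project's actual accessor is not shown here, so this is an assumption.
_instance = None

def Kernel(options=None):
    global _instance
    if _instance is None:
        _instance = __kernel(options)  # construct exactly once
    return _instance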
def activate_match(self, query, st: State):
    ai_sentiment = st.get_ai_sentiment()
    # TODO decide whether to send an emoji sticker, and which one, based
    # on sentiment matching; for now the choice is random
    return random.uniform(0, 1) < 0.2
def _parse_query(self, query, st: State, call_self=True):
    """
    Interpret the user's query.

    Entities = nouns of length > 1 from the word segmentation.
    1. Initial state, utterance without entities: fall back to ordinary
       chit-chat.
    2. During a transition, utterance without entities: let the agent
       carry the scene forward on its own.
    3. Entities detected, but no matching scene has been prepared.
    4. The scene changes mid-conversation.
    5. Normal conversation with content words detected (in scene 1 this
       means finding a cQA link worth recommending).

    The list returned after a scene completes contains the target word.

    :param query:
    :param st:
    :param call_self: whether this function may call itself
    :return: list of target words expected in the next utterance
    """
    state = self.user_id_table[st.user_id]
    user_record = st.get_last_User_record()
    seg = [
        word for word, tag in user_record.segment
        if word not in self.dictionary.stop_word
        and ("n" in tag or "v" in tag) and len(word) > 1
    ]
    keyword = seg
    # print(keyword)
    if state == Chat.Init:
        if len(keyword) == 0:
            # equivalent to meaningless chit-chat
            return None
        else:
            # look for a suitable scene; in scene 1 this means finding a
            # cQA link worth recommending
            ids = self._get_key_word_id(st)
            result_items = self._get_ans_by_id(ids)
            # n samples allow at most n draws; if no keyword matches we
            # fall back to ordinary chit-chat
            random.shuffle(result_items)  # randomize the order first
            for item in result_items:
                target_word = self._select_target_word(item.title, keyword)
                if target_word:
                    # url = item.to_url()
                    print("Find the target word:", target_word)
                    self.user_id_Record[st.user_id] = item
                    self.user_id_agent_target[st.user_id] = target_word
                    next_word = self._find_next_word(query, keyword,
                                                     target_word)
                    self.user_id_table[st.user_id] = Chat.Wait
                    return next_word
            return None
    elif state == Chat.Wait:
        target_word = self.user_id_agent_target[st.user_id]
        print("New turn target word", target_word)
        if len(keyword) == 0:
            # the agent has to steer the topic on its own, e.g. when the
            # user merely says things like "haha" or "awesome"; in this
            # case the keywords come from the AI's previous utterance
            # (segmenting ai_record here; the original re-segmented
            # user_record, contradicting its own comment)
            ai_record = st.get_last_AI_record()
            seg = [
                word for word, tag in ai_record.segment
                if word not in self.dictionary.stop_word
                and ("n" in tag or "v" in tag) and len(word) > 1
            ]
            keyword = ai_record.entity or seg
        # check whether the target has already been reached
        next_word = self._find_next_word(query, keyword, target_word)
        if not next_word:
            # no next word found: the user has changed the topic
            self._init_user_id_state(st.user_id)
            # recurse once for the new topic, guarding against infinite
            # recursion
            if call_self:
                next_word = self._parse_query(query, st, False)
                return next_word
        return next_word
    else:
        raise ValueError("[user_id:{}] Unknown state: {} query:{}".format(
            st.user_id, state, query))
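# --- Hedged sketch of the Chat state enum that _parse_query dispatches on
# (only the two states referenced above are shown; the real definition is
# not in this corpus and may hold more states).
from enum import Enum

class Chat(Enum):
    Init = 0  # no scene selected yet
    Wait = 1  # scene chosen; steering the user toward the target word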
def main():
    parser = argparse.ArgumentParser(description='QRL node')
    parser.add_argument('--quiet', '-q', dest='quiet', action='store_true',
                        required=False, default=False,
                        help="Avoid writing data to the console")
    parser.add_argument('--datapath', '-d', dest='data_path',
                        default=config.user.data_path,
                        help="Retrieve data from a different path")
    parser.add_argument('--walletpath', '-w', dest='wallet_path',
                        default=config.user.wallet_path,
                        help="Retrieve wallet from a different path")
    parser.add_argument('--no-colors', dest='no_colors', action='store_true',
                        default=False, help="Disables color output")
    parser.add_argument("-l", "--loglevel", dest="logLevel",
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR',
                                 'CRITICAL'],
                        help="Set the logging level")
    parser.add_argument("--get-wallets", dest="get_wallets",
                        action='store_true', default=False,
                        help="Returns wallet address and stops the node")
    args = parser.parse_args()

    node_state = NodeState()

    # Logging configuration
    log_level = logging.INFO
    if args.logLevel:
        log_level = getattr(logging, args.logLevel)
    logger.initialize_default(
        force_console_output=not args.quiet).setLevel(log_level)
    custom_filter = ContextFilter(node_state)
    logger.logger.addFilter(custom_filter)
    file_handler = logger.log_to_file()
    file_handler.addFilter(custom_filter)
    file_handler.setLevel(logging.DEBUG)
    logger.set_colors(not args.no_colors, LOG_FORMAT_CUSTOM)
    logger.set_unhandled_exception_handler()
    logger_twisted.enable_twisted_log_observer()

    logger.debug(
        "====================================================================================="
    )

    #######
    logger.info("Data Path: %s", args.data_path)
    logger.info("Wallet Path: %s", args.wallet_path)
    config.user.data_path = args.data_path
    config.user.wallet_path = args.wallet_path
    config.create_path(config.user.data_path)
    config.create_path(config.user.wallet_path)

    ntp.setDrift()

    logger.info('Initializing chain..')
    state_obj = State()
    chain_obj = Chain(state=state_obj)

    logger.info('Reading chain..')
    chain_obj.m_load_chain()
    logger.info(str(len(chain_obj.m_blockchain)) + ' blocks')
    logger.info('Verifying chain')
    logger.info('Building state leveldb')

    if args.get_wallets:
        address_data = chain_obj.wallet.list_addresses()
        addresses = [a[0] for a in address_data]
        print(addresses[0])
        quit()

    p2p_factory = P2PFactory(chain=chain_obj, nodeState=node_state)
    pos = node.POS(chain=chain_obj, p2pFactory=p2p_factory,
                   nodeState=node_state, ntp=ntp)
    p2p_factory.setPOS(pos)

    api_factory = ApiFactory(pos, chain_obj, state_obj,
                             p2p_factory.peer_connections)

    welcome = 'QRL node connection established. Try starting with "help"' + '\r\n'
    wallet_factory = WalletFactory(welcome, chain_obj, state_obj, p2p_factory)

    logger.info('>>>Listening..')
    reactor.listenTCP(2000, wallet_factory, interface='127.0.0.1')
    reactor.listenTCP(9000, p2p_factory)
    reactor.listenTCP(8080, api_factory)

    webwallet.WebWallet(chain_obj, state_obj, p2p_factory)

    pos.restart_monitor_bk(80)

    logger.info('Connect to the node via telnet session on port 2000: '
                'i.e "telnet localhost 2000"')

    p2p_factory.connect_peers()
    reactor.callLater(20, pos.unsynced_logic)

    reactor.run()
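# --- Hedged sketch of a context filter like the ContextFilter used above: a
# logging.Filter subclass that stamps every record with node state so a
# custom format string can reference it. The class and attribute names here
# are assumptions; the real ContextFilter is not shown in this corpus.
import logging

class NodeStateFilter(logging.Filter):
    def __init__(self, node_state):
        super().__init__()
        self.node_state = node_state

    def filter(self, record):
        record.node_state = self.node_state  # exposed to the formatter
        return True  # never suppress the record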
def __init__(self, root, number):
    self.number = number
    super().__init__(root, state=State({'click': 0}))