def handle_channel_requests(self, client_id, owner_id, channel_template, paired_devices):
    logger.debug(
        "\n\n\n\n\n\t\t\t\t\t*******************HANDLE_CHANNEL_REQUEST****************************"
    )
    logger.info(
        f"Client_id {client_id}; Owner_id: {owner_id}; Channel_template: {channel_template}; "
        f"Paired_devices: {paired_devices}")

    credentials = self.db.get_credentials(client_id, owner_id)
    channels = []

    if paired_devices:
        loop = asyncio.new_event_loop()
        responses = loop.run_until_complete(
            self.send_channel_requests(paired_devices, client_id, owner_id,
                                       channel_template, credentials))
        channels = [{"id": channel_id} for channel_id in responses if channel_id]
        loop.close()

    if channels:
        if settings.config_refresh.get('enabled') is True:
            ignore_keys = []
            old_credentials = {}
            channel_id = None

            for channel in channels:
                channel_id = channel['id']
                credentials = self.implementer.auth_response(credentials)
                credentials = self.implementer.update_expiration_date(credentials)
                old_credentials = self.implementer.auth_response(
                    self.db.get_credentials(client_id, owner_id, channel_id))

                if 'client_man_id' not in old_credentials:
                    credentials, has_error = self.implementer.check_manager_client_id(
                        owner_id, channel_id, credentials, old_credentials)
                else:
                    credentials['client_man_id'] = old_credentials['client_man_id']

                self.db.set_credentials(credentials, client_id, owner_id, channel_id)
                ignore_keys.append(f'credential-owners/{owner_id}/channels/{channel_id}')

            self.thread_pool.add_task(handle_credentials, credentials,
                                      old_credentials, client_id, owner_id,
                                      channel_id, ignore_keys)
        else:
            for channel in channels:
                self.db.set_credentials(credentials, client_id, owner_id,
                                        channel['id'])

    logger.info(f"Channels: {channels}")
    return channels, credentials
def member_join_post(db):
    nodes = [(n.id, n.node_name) for n in db.query(models.SlcNode)]
    form = forms.member_join_form(nodes)
    if not form.validates(source=request.params):
        return render("join", form=form)
    if db.query(exists().where(models.SlcMember.member_name == form.d.username)).scalar():
        return render("join", form=form, msg=u"Username {0} is already in use".format(form.d.username))
    if db.query(exists().where(models.SlcMember.email == form.d.email)).scalar():
        return render("join", form=form, msg=u"Email {0} is already in use".format(form.d.email))
    member = models.SlcMember()
    member.node_id = form.d.node_id
    member.realname = form.d.realname
    member.member_name = form.d.username
    member.password = md5(form.d.password.encode()).hexdigest()
    member.idcard = form.d.idcard
    member.sex = form.d.sex
    member.age = int(form.d.age)
    member.email = form.d.email
    member.mobile = form.d.mobile
    member.address = form.d.address
    member.create_time = utils.get_currtime()
    member.update_time = utils.get_currtime()
    db.add(member)
    db.commit()
    logger.info(u"New member registered successfully, member_name=%s" % member.member_name)
    redirect('/login')
def slave_sync_data(self, port):
    """ Sync data to a slave server """
    import time
    time.sleep(1)
    self._server.is_sync = True
    client = PyCachedClient(self._address[0], port)
    self.add_slave(client)
    logger.info('Slave[%s:%d] Sync data start' % (self._address[0], port))
    items = {}
    pos = 0
    MAX_SEND = 20000  # number of keys to send per sync batch
    all_len = len(self._server.memory.caches)
    for key, val in self._server.memory.caches.items():
        pos += 1
        items[key] = val
        if pos % MAX_SEND == 0:
            client.sync_data(pos, all_len, msgpack.packb(items))
            items = {}
    # flush the final partial batch, if any
    if pos % MAX_SEND != 0:
        client.sync_data(pos, all_len, msgpack.packb(items))
    self._server.is_sync = False
    client.sync_ok()
    logger.info('Slave[%s:%d] Sync data success' % (self._address[0], port))
def handle_request(self, request):
    logger.debug(
        "\n\n\n\n\n\t\t\t\t\t*******************HANDLE_REQUEST****************************"
    )
    logger.info(f"Request {request}")

    downstream_result = self.implementer.downstream(request)
    downstream_list = downstream_result if isinstance(downstream_result, list) else [downstream_result]

    for downstream_tuple in downstream_list:
        try:
            case = downstream_tuple[0]
            data = downstream_tuple[1]
            if case is not None and data is not None:
                try:
                    custom_mqtt = downstream_tuple[3]
                    custom_mqtt.publisher(io="iw", data=data, case=case)
                except (IndexError, AttributeError):
                    # no custom MQTT client in the tuple; fall back to the queue
                    self.queue.put({"io": "iw", "data": data, "case": case})
        except (IndexError, TypeError):
            # assumption: the original except clause was lost in extraction;
            # guard against malformed downstream tuples
            logger.error(f"Malformed downstream tuple: {downstream_tuple}")
def main():
    optParser = OptionParser()
    optParser.add_option(
        "-t",
        "--timeout",
        action="store",
        dest="testTimeout",
        default=75,
        type='int',
        help="The program will return with timeout after specified seconds.",
        metavar="<timeout-second>",
    )
    optParser.add_option(
        "-a",
        "--address",
        action="store",
        dest="deviceAddress",
        default='',
        type='str',
        help="Address of the device",
        metavar="<device-addr>",
    )
    (options, remainingArgs) = optParser.parse_args(sys.argv[1:])

    timeoutTicker = TestTimeout(options.testTimeout)
    timeoutTicker.start()

    test = BaseTestHelper(nodeid=112233)

    FailIfNot(
        test.TestKeyExchange(ip=options.deviceAddress,
                             setuppin=20202021,
                             nodeid=1),
        "Failed to finish key exchange")

    FailIfNot(
        test.TestNetworkCommissioning(nodeid=1,
                                      endpoint=ENDPOINT_ID,
                                      group=GROUP_ID,
                                      dataset=TEST_THREAD_NETWORK_DATASET_TLV,
                                      network_id=TEST_THREAD_NETWORK_ID),
        "Failed to finish network commissioning")

    FailIfNot(test.TestOnOffCluster(nodeid=1, endpoint=ENDPOINT_ID, group=GROUP_ID),
              "Failed to test on off cluster")

    FailIfNot(
        test.TestReadBasicAttribiutes(nodeid=1, endpoint=ENDPOINT_ID, group=GROUP_ID),
        "Failed to test Read Basic Attributes")

    timeoutTicker.stop()

    logger.info("Test finished")

    # TODO: The Python device controller sometimes cannot be shut down cleanly
    # and blocks on AsyncDNSResolverSockets shutdown.
    # Call os._exit(0) to force-close it.
    os._exit(0)
def _cmd_hset_handler(self):
    key = self.parse_string_val()
    field = self.parse_string_val()
    val = self.parse_string_val()
    code, val = self._memory.hset(key, field, val)
    logger.info("HSet: %s: %s => %s, %d" % (key, field, val, code))
    return code, val
def run(self):
    # start bluetooth server
    self.bt_server = BtServer(recv_callback=self.update_location)
    self.bt_server.start()

    # start sending latest captures and sys status to phone
    RepeatedTimer(func=self.send_latest_captures_sys_status, interval=5).start()

    # time synchronization
    is_ntpped = self.ntp()
    if not is_ntpped and not self.time_synchronized:
        logger.info('waiting for time synchronization...', extra=self.log_extra)
        while not is_ntpped and not self.time_synchronized:
            time.sleep(1)

    # start channel switch
    self.channel_switch = ChannelSwitch(self.interface)
    self.channel_switch.start()

    # start handlers
    self.handlers = create_handlers(self.frm_queues, self.event_queue)
    for handler in self.handlers:
        handler.start()

    # start sniffer
    logger.info('start sniffing', extra=self.log_extra)
    scapy.all.conf.iface = self.interface
    scapy.all.sniff(prn=self.dispatch, store=False)

    for handler in self.handlers:
        handler.join()
    self.channel_switch.join()
def send_notification(self):
    try:
        from systemd.daemon import notify
        event = threading.Event()

        # send first notification on init
        logger.debug('[Watchdog]... everything is ok')
        notify('WATCHDOG=1')

        while not event.wait(self.interval - 1):
            main_thread_alive = threading.main_thread().is_alive()
            logger.debug('[Watchdog] is alive {}'.format(main_thread_alive))
            if main_thread_alive:
                logger.debug('[Watchdog]...')
                url = settings.config_http['bind']
                resp = requests.get(url)
                if resp.status_code == 200:
                    logger.debug('[Watchdog] everything is ok')
                    notify('WATCHDOG=1')
                else:
                    logger.warning(
                        f'[Watchdog] Watchdog not sent. Response status: {resp.status_code}; '
                        f'Response: {resp.__dict__}')
            else:
                logger.critical('[Watchdog] Main thread is not alive.')
    except (KeyError, TypeError, ValueError):
        logger.info('[Watchdog] not enabled, keep_alive missing')
    except ImportError:
        logger.warning('[Watchdog] systemd not imported {}'.format(
            traceback.format_exc(limit=5)))
    except Exception:
        logger.alert('[Watchdog] Unexpected exception {}'.format(
            traceback.format_exc(limit=5)))
def start(self):
    self._spider_init()
    self._create_table()
    list_resp = self.get_list_resp()
    logger.info("List Resp: {}".format(list_resp))
    if list_resp and list_resp.status_code == 200:
        body = list_resp.text
        ret = list(self._parse_list(body))
        items = []
        for one in ret:
            item = dict()
            link = one.get("l")
            item['link'] = link
            item['title'] = one.get("t")
            pub_date = one.get("p")
            item['pub_date'] = pub_date
            article = self._parse_detail(one.get("l"))
            if article:
                item['article'] = article
                items.append(item)
            if len(items) > 30:
                break
    else:
        raise Exception("List request returned no response")
    if items:
        ret = self._batch_save(self.spider_client, items, self.table_name, self.fields)
        print(len(items))
        print(ret)
def ethernet_commissioning(test: BaseTestHelper, discriminator: int, setup_pin: int,
                           address_override: str, device_nodeid: int):
    logger.info("Testing discovery")
    address = test.TestDiscovery(discriminator=discriminator)
    FailIfNot(address, "Failed to discover any devices.")

    # FailIfNot(test.SetNetworkCommissioningParameters(dataset=TEST_THREAD_NETWORK_DATASET_TLV),
    #           "Failed to finish network commissioning")

    if address_override:
        address = address_override
    else:
        address = address.decode("utf-8")

    logger.info("Testing key exchange")
    FailIfNot(test.TestKeyExchange(ip=address,
                                   setuppin=setup_pin,
                                   nodeid=device_nodeid),
              "Failed to finish key exchange")

    ok = asyncio.run(test.TestMultiFabric(ip=address, setuppin=20202021, nodeid=1))
    FailIfNot(ok, "Failed to commission multi-fabric")
def docker_run_spider(self, spider_name, spider_file_path, restart=False):
    local_int = 1 if LOCAL else 0
    try:
        spider_container = self.docker_containers_col.get(spider_name)
    except Exception:
        spider_container = None

    if spider_container:
        spider_status = spider_container.status
        logger.info("{} spider status: {}".format(spider_name, spider_status))
        if spider_status in ("exited", ):
            spider_container.start()
        elif spider_status in ("running", ):
            if restart:
                spider_container.restart()
        else:
            logger.warning("other status: {}".format(spider_status))
    else:
        self.docker_containers_col.run(
            "registry.cn-shenzhen.aliyuncs.com/jzdev/jzdata/spi:v1",
            environment={"LOCAL": local_int},
            name='{}'.format(spider_name),
            command='python {}'.format(spider_file_path),
            detach=True,  # run detached, as a daemon
        )
def get_response(self, endpoint_conf, credentials, channel_id, cred_key):
    url = endpoint_conf['url']
    method = endpoint_conf['method']
    data = endpoint_conf.get('data')
    params = endpoint_conf.get('params')

    if '{device_id}' in url:
        url = self.replace_device_id(url, cred_key.split('/')[-1])

    response = requests.request(method,
                                url,
                                params=params,
                                data=data,
                                headers=self.authorization(credentials))

    if response.status_code == requests.codes.ok:
        logger.info('[Polling] polling request successful with {}'.format(cred_key))
        return {
            'response': response.json(),
            'channel_id': channel_id,
            'credentials': credentials
        }
    else:
        logger.warning(
            f'[Polling] Error in polling request: CHANNEL_ID: {channel_id}; '
            f'URL: {url}; RESPONSE: {response}')
        return {}
def start(self):
    """
    If polling is enabled in the config file, retrieve the polling
    configuration from the implementer and start the worker thread.
    """
    try:
        if settings.config_polling.get('enabled') is True:
            logger.info('[Polling] **** starting polling ****')
            conf_data = self.implementer.get_polling_conf()
            if not isinstance(conf_data, list):
                conf_data = [conf_data]

            n_processes = settings.config_polling.get(
                'requests_pool', DEFAULT_THREAD_MAX_WORKERS)
            self.pool_requests = ThreadPool(processes=n_processes)

            self.thread = threading.Thread(target=self.worker,
                                           args=[conf_data],
                                           name="Polling")
            self.thread.daemon = True
            self.thread.start()
        else:
            logger.info('[Polling] **** polling is not enabled ****')
    except NotImplementedError as e:
        logger.error("[Polling] NotImplementedError: {}".format(e))
    except Exception:
        logger.alert(
            f"[Polling] Unexpected exception: {traceback.format_exc(limit=5)}")
async def make_requests(self, conf_data):
    try:
        logger.info(
            f"[Polling] {threading.current_thread().getName()} starting {datetime.datetime.now()}"
        )

        loop = asyncio.get_event_loop()

        with concurrent.futures.ThreadPoolExecutor(
                max_workers=DEFAULT_THREAD_MAX_WORKERS) as executor:
            futures = [
                loop.run_in_executor(executor, self.send_request, conf_data,
                                     channel_id)
                for channel_id in self.db.get_channels()
            ]
            for response in await asyncio.gather(*futures):
                if response:
                    for resp in response:
                        self.implementer.polling(resp)

        logger.info("[Polling] {} finishing {}".format(
            threading.current_thread().getName(), datetime.datetime.now()))
    except Exception:
        logger.error("[Polling] Error on make_requests: {}".format(
            traceback.format_exc(limit=5)))
def rename_key(self, new_key, old_key):
    try:
        value = self.get_key(old_key)
        if not value:
            logger.warning(f"[DB] Key {old_key} not found in database.")

        created = False
        deleted = False
        if value:
            created = self.set_key(new_key, value)
            if not created:
                logger.warning(f"[DB] error while creating {new_key} in database.")
        if created:
            deleted = self.delete_key(old_key)
            if not deleted:
                logger.warning(f"[DB] error while deleting {old_key} from database.")
        if created and deleted:
            logger.info(f"[DB] Key {old_key} renamed to {new_key} successfully.")
        return created and deleted
    except Exception:
        logger.error(
            f"[DB] Failed to rename key {old_key} to {new_key}. {traceback.format_exc(limit=5)}"
        )
        return False
def _parse_list(self, page, list_url):
    """Parse a list page."""
    list_resp = self.get(list_url)
    if list_resp and list_resp.status_code == 200:
        list_page = list_resp.text
        doc = html.fromstring(list_page)
        news_list = doc.xpath('//div[@class="wrap_left"]/dl[@class="item clearfix"]')
        items = []
        for news in news_list:
            item = {}
            link = news.xpath('./dd[@class="intro"]/a/@href')[0]
            item['link'] = link
            title = news.xpath("./dd/a/@title")[0]
            title = self._process_content(title)
            # strip the byte-order mark
            title = title.replace("\ufeff", '')
            item['title'] = title
            pub_date = news.xpath("./dd[@class='sort']/text()")[0]
            pub_date = self._process_pub_dt(pub_date)
            item['pub_date'] = pub_date
            ret = self._parse_detail(link)
            item['article'] = ret.get("content")
            item['source'] = ret.get("source")
            items.append(item)
        self._spider_init()
        page_save_num = self._batch_save(self.spider_client, items,
                                         self.table_name, self.fields)
        logger.info("Saved {} items from page {}".format(page_save_num, page))
def __get_credentials_old(self, client_id, owner_id, channel_id):
    """
    Due to legacy code, this method retrieves credentials stored just by uuid.
    """
    logger.info("[DB] No credentials found w/ new format! Search w/ old format")
    key_list = [
        owner_id,
        "/".join([str(client_id), str(owner_id)]),
        "/".join([
            "/v3", "managers",
            str(settings.client_id),
            str(owner_id),
            str(channel_id)
        ]),
    ]

    result = None
    for key in key_list:
        result = self.get_key(key)
        if result:
            break

    if result:
        self.set_credentials(result, client_id, owner_id, channel_id)

    return result
def start(self):
    self._create_table()
    self._spider_init()
    resp = self.get(self.list_url)
    if resp and resp.status_code == 200:
        body = resp.text
        doc = html.fromstring(body)
        news_list = doc.xpath(
            '//div[@class="wrap-l js-list fl_dib"]/ul/li/div[@class="list-text fr_dib"]'
        )
        items = []
        for news in news_list:
            item = {}
            title = news.xpath('./h1/a')[0].text_content()
            item['title'] = title
            link = news.xpath('./h1/a/@href')[0]
            item['link'] = link
            pub_date = news.xpath('./div[@class="date"]')[0].text_content()
            item['pub_date'] = pub_date
            detail_resp = self.get(link)
            if detail_resp and detail_resp.status_code == 200:
                detail_page = detail_resp.text
                article = self._parse_detail(detail_page)
                if article:
                    item['article'] = article
            items.append(item)
        fk_save_num = self._batch_save(self.spider_client, items,
                                       self.table_name, self.fields)
        logger.info('Saved {} "fengkou" (hot-topic) items'.format(fk_save_num))
def get_list_page(self, list_url):
    resp = self.get(list_url)
    logger.info("List resp: {}".format(resp))
    if resp and resp.status_code == 200:
        return resp.text
    else:
        logger.warning(resp)
        return None
def __init__(self):
    self.interval = None
    self.thread = None
    try:
        self.interval = int(settings.config_boot['keep_alive'])
        logger.debug('[Watchdog] interval {}'.format(self.interval))
    except (KeyError, TypeError, ValueError):
        logger.info('[Watchdog] not enabled, keep_alive missing')
def slave_command_send(self, buf):
    """ Send data to the slave servers """
    if self._server.slave_clients:
        logger.info("Slave Command send %d, %s" %
                    (len(self._server.slave_clients),
                     self._server.memory.last_cmd_time))
        for hashid, client in self._server.slave_clients.items():
            client.send_data(self._server.memory.last_cmd_time, buf)
def test_t(self):
    url = 'https://{{pet_name}}/{{pet_id}}'
    props = {'pet_id': '12345', 'pet_name': '黄狗'}
    keys = re.findall('{{(.*?)}}', url)
    logger.info('keys=' + str(keys))
    for key in keys:
        url = re.sub('{{' + key + '}}', props[key], url)
    logger.info(url)
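# The substitution loop above recompiles a regex per key and would misbehave if
# a key contained regex metacharacters. A minimal sketch of the same {{key}}
# templating using literal string replacement instead; the helper name
# render_template is illustrative, not part of the original code.
import re

def render_template(template: str, props: dict) -> str:
    """Replace each {{key}} placeholder with props[key] via literal substitution."""
    for key in re.findall(r'{{(.*?)}}', template):
        template = template.replace('{{' + key + '}}', props[key])
    return template

# e.g. render_template('https://{{pet_name}}/{{pet_id}}',
#                      {'pet_id': '12345', 'pet_name': '黄狗'})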
def parse_list_body(self, body):
    doc = html.fromstring(body)
    first = utils.parse_list_first(doc)
    logger.info(first)
    columns = self.list_parse_func(doc)
    columns = [column for column in columns if self.add_article(column)]
    if self.add_article(first):
        columns.append(first)
    return columns
def report(self, beacon, data):
    if self.mqtt_client:
        topic = self.topic_id + '%s' % beacon
        d = datetime.utcnow()
        timestamp = calendar.timegm(d.utctimetuple())
        data['tst'] = timestamp
        self.beacons[beacon]['datetime'] = d
        self.mqtt_client.publish(topic, json.dumps(data))
        logger.info("Published beacon: {}'s data: {}".format(beacon, data))
def worker(self, conf_data):
    asyncio.set_event_loop(self.loop)
    loop = asyncio.get_event_loop()
    while True:
        logger.info('[TokenRefresher] new refresh process {}'.format(
            datetime.datetime.now()))
        loop.run_until_complete(self.make_requests(conf_data))
        time.sleep(self.interval)
        # drop the cached relations so the next cycle rebuilds them; the guard
        # prevents an AttributeError on the second iteration
        if hasattr(self, 'channel_relations'):
            del self.channel_relations
def zip_attachment(files):
    """
    :param files: list of pathlib.Path objects to add to the archive
    """
    logger.info(str(files))
    logger.info('archive_path: ' + archive_path)  # archive_path is presumably module-level
    with zipfile.ZipFile(archive_path, 'w') as archive:
        for i in files:
            archive.write(i, arcname=i.name)
def start_master(self):
    # Handles iterating through job stages
    self.curr_job = self.PARTITION  # Partition, Map, or Reduce
    self.job_status = self.NOT_STARTED
    self.index_status = self.RUNNING
    # Directory where partitions are stored; the original '\partions' is
    # presumably a typo ('\p' is not a valid escape and the word is misspelled)
    self.storage_dir = 'partitions'
    logger.info('Starting Indexing Job')
def get_known_beacons(self):
    beacons = {}
    with open("known_devices", "a+") as file:
        file.seek(0)
        for device in file:
            device = device.strip()
            if device not in beacons:
                beacons[device] = {}
                logger.info('Loaded known_device: {}'.format(device))
    return beacons
def update_db_indexes(cursor):
    commands = ("""REINDEX DATABASE transactional""", )
    try:
        for command in commands:
            cursor.execute(command)
    except Exception as e:
        logger.exception("Error in update_db_indexes: %s", e)
        return -1
    else:
        logger.info("update_db_indexes run successfully")
        return 0
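# A hedged usage sketch for update_db_indexes. The connection details below are
# assumptions, not from the source. Note that in PostgreSQL, REINDEX DATABASE
# cannot run inside a transaction block, so autocommit must be enabled first.
import psycopg2

conn = psycopg2.connect(dbname="transactional")
conn.autocommit = True  # REINDEX DATABASE refuses to run inside a transaction
with conn.cursor() as cur:
    if update_db_indexes(cur) != 0:
        logger.error("reindex failed; see the exception logged above")
conn.close()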
def run(self):
    self.init_socket()
    while True:
        logger.info('waiting for connections...', extra=self.log_extra)
        sock, info = self.server_socket.accept()
        logger.info('{} connected!'.format(str(info[0])), extra=self.log_extra)
        self.socks.append(sock)
        t = threading.Thread(target=self.serve_socket, args=(sock, info[0]))
        t.start()
def run(self):
    while True:
        # NOTE: busy-waits when the queue is empty; a blocking get() would avoid spinning
        if not self.task_queue.empty():
            task = self.task_queue.get_nowait()
            logger.info("Starting Task: " + repr(task))
            task_type = task['task']
            if task_type == 'map':
                self.run_map(task)
            if task_type == 'reduce':
                self.run_reduce(task)
def start(self):
    self._create_table()
    self._spider_init()
    for page in range(1, self.page + 1):
        if page == 1:
            list_url = self.first_url
        else:
            list_url = self.format_url.format(page)
        items = self._parse_list(list_url)
        page_save_num = self._batch_save(self.spider_client, items,
                                         self.table_name, self.fields)
        logger.info("Saved {} items from page {}".format(page_save_num, page))
def _process_data(self, ret_this, ret_last, threshold, r_threshold,
                  title_format, content_format, change_type):
    this_end_date = ret_this.get("EndDate")
    quarter_info = "{} Q{}".format(this_end_date.year,
                                   self.quarter_map.get(this_end_date.month))
    # Convert to a two-decimal percentage and take the absolute value, since
    # the surrounding wording already states whether the figure rose or fell.
    threshold = abs(self.re_percent_data(threshold))
    r_threshold = abs(self.re_percent_data(r_threshold))
    # Convert operating revenue from yuan to 亿元 (hundreds of millions of yuan).
    this_operating_revenue = self.re_money_data(ret_this.get("OperatingRevenue"))
    # Convert net profit from yuan to 万元 (tens of thousands of yuan).
    this_net_profit = self.re_money_data(ret_this.get("NPParentCompanyOwners"))
    last_net_profit = self.re_money_data(ret_last.get("NPParentCompanyOwners"))
    this_basic_EPS = self.re_decimal_data(ret_this.get("BasicEPS"))
    last_basic_EPS = self.re_decimal_data(ret_last.get("BasicEPS"))

    item = dict()
    item['EndDate'] = this_end_date  # end date of the latest quarter
    item['InfoPublDate'] = ret_this.get("InfoPublDate")  # publication date of the latest quarterly report
    item['CompanyCode'] = self.company_code
    item['SecuCode'] = self.secu_code
    item['SecuAbbr'] = self.secu_addr
    item['ChangeType'] = change_type
    # Keep the metric parameters in the generated record as well.
    item['NPParentCompanyOwners'] = ret_this.get("NPParentCompanyOwners")
    title = title_format.format(self.secu_addr, quarter_info, this_net_profit,
                                threshold)
    item['Title'] = title
    item["SourceIds"] = ",".join(
        sorted([str(ret_this.get("id")), str(ret_last.get("id"))]))
    content = content_format.format(self.secu_addr, quarter_info, self.secu_addr,
                                    quarter_info, this_operating_revenue,
                                    r_threshold, this_net_profit, threshold,
                                    this_basic_EPS, last_net_profit,
                                    last_basic_EPS)
    item['Content'] = content

    # Fetch the price change percentage (ChangePercActual) from stk_quot_idx,
    # joined on InnerCode.
    sql = '''SELECT Date, InnerCode, ChangePercActual FROM stk_quot_idx
             WHERE InnerCode={} ORDER BY Date DESC LIMIT 1;'''.format(self.inner_code)
    dc_client = self._init_pool(self.dc_cfg)
    _ret = dc_client.select_one(sql)
    dc_client.dispose()
    if _ret:
        logger.debug(_ret)
        change_percactual = _ret.get("ChangePercActual")
        item['ChangePercActual'] = change_percactual
    logger.info("\n" + pprint.pformat(item))

    # Check whether the record already exists and how far it deviates from the
    # stored values.
    self._target_init()
    self.check_exist_and_deviation(item, self.target_client)
def inner(cursor):
    commands = db_script_function(cursor)
    function_name = db_script_function.__qualname__
    try:
        for command in commands:
            cursor.execute(command)
    except Exception as e:
        logger.exception("Error in %s: %s", function_name, e)
        return -1
    else:
        logger.info("%s run successfully", function_name)
        return 0
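# inner() closes over db_script_function, so it is presumably the body of a
# decorator that turns a function returning SQL command strings into one that
# executes them against a cursor. A hedged sketch of that presumed wrapper;
# run_db_script and archive_old_orders are illustrative names, not from the source.
def run_db_script(db_script_function):
    def inner(cursor):
        commands = db_script_function(cursor)
        function_name = db_script_function.__qualname__
        try:
            for command in commands:
                cursor.execute(command)
        except Exception as e:
            logger.exception("Error in %s: %s", function_name, e)
            return -1
        else:
            logger.info("%s run successfully", function_name)
            return 0
    return inner

@run_db_script
def archive_old_orders(cursor):
    # only returns the SQL; inner() performs the execution and logging
    return ("""DELETE FROM orders WHERE order_date < '2016-01-01' """, )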
def handle_stream(self, stream, address):
    """ A new client connection has arrived """
    if not self.is_sync:
        self.connections += 1
        ClientConnection(stream, self, address)
        self.status.inc(self.status_fields[1])
        self.status.inc(self.status_fields[2])
        logger.info("Client[%s] connected successfully." % id(stream))
    else:
        logger.warning("Refusing connection: server is in sync mode.")
def summon(data_fun, user, enemy):
    data = data_fun(user)
    player = copy.copy(user)
    player.hand_card = copy.copy(player.hand_card)
    logger.info("%s summons %s" % (player.name, data.name))
    player.hand_card.remove(data)
    player.remained_crystal -= data.cost
    monster = Monster(data, data.name, data.hp, 'normal', True)
    # apply the card's on-summon (bc) effects
    for fun in data.bc_funs:
        monster, player, enemy = fun(monster, player, enemy)
    player.monsters = copy.copy(player.monsters)
    player.monsters.append(monster)
    return player, enemy
def move_data_archives(cursor):
    commands = (
        """INSERT INTO archive_orders SELECT * FROM orders WHERE order_date < '2016-01-01' """,
        """DELETE FROM orders WHERE order_date < '2016-01-01' """,
    )
    try:
        for command in commands:
            cursor.execute(command)
    except Exception as e:
        logger.exception("Error in move_data_archives: %s", e)
        return -1
    else:
        logger.info("move_data_archives run successfully")
        return 0
def close_stream(self, hashid):
    """ A client closed its connection """
    self.connections -= 1  # the original decremented by 0, a no-op bug
    try:
        del self.slave_clients[hashid]
        logger.warning("Slave server[%s] leaving." % hashid)
    except KeyError:
        pass
    self.status.dec(self.status_fields[1])
    logger.info("Client[%s] connection is closed." % hashid)
def do(self, who, what, message):
    if not who.name and what != 'handshake':
        raise GameError('Handshake first')
    not_your_turn = (
        self.ovrs.game_started
        and self.ovrs.current_player is not who
        and what not in ['handshake', 'gameStart']  # was a single 'handshake, gameStart' string
    )
    if not_your_turn:
        raise GameError('playr what r u doing playr staph')
    handler = self.EVENTS.get(what)
    if not handler:
        raise GameError('Invalid event type')
    logger.info('%s sent %s' % (who.id, what))
    handler(who, message)
def execute(self, cmd_id):
    """ Parse the data and dispatch the operation command """
    if cmd_id in self.CMD_MAPPING:
        logger.info("Command is: %d:%s" % (cmd_id, self.CMD_MAPPING[cmd_id]))
        # if self._server.slave and cmd_id in SLAVE_SYNC_SEND_CMDS:
        #     # slaves must not accept write commands
        #     return 1, -999
        # else:
        code, data = getattr(self, self.CMD_MAPPING[cmd_id])()
        return code, data
    else:
        # unknown command id
        logger.warning("Unknown command: %d" % cmd_id)
        return None, None
def attack(source_fun, target_fun, user, enemy):
    source = source_fun(user)
    target = target_fun(enemy)
    if not (isinstance(source, Monster) and isinstance(target, (Monster, Player))):
        raise NotImplementedError("%s %s" % (type(source), type(target)))
    if (source.status != 'normal') or (target.status != 'normal'):
        raise NotImplementedError("%s %s" % (source.status, target.status))

    new_source = copy.copy(source)
    if hasattr(target, 'monster_data'):
        new_source.hp -= target.monster_data.attack
    if new_source.hp <= 0:
        new_source.status = 'dead'
        logger.info(source.name + " dead")
    new_source.attacked = True

    new_user = copy.copy(user)
    new_user.monsters = [monster if monster is not source else new_source
                         for monster in user.monsters
                         if (monster is not source) or (new_source.status != 'dead')]

    new_target = copy.copy(target)
    new_target.hp -= source.monster_data.attack
    if new_target.hp <= 0:
        new_target.status = 'dead'
        logger.info(new_target.name + " dead")

    if isinstance(new_target, Player):
        new_enemy = new_target
    else:
        new_enemy = copy.copy(enemy)
        new_enemy.monsters = [monster if monster is not target else new_target
                              for monster in enemy.monsters
                              if (monster is not target) or (new_target.status != 'dead')]

    logger.info("%s attacks %s; %s blood is %s, %s blood is %s" %
                (source.name, target.name, source.name, new_source.hp,
                 target.name, new_target.hp))
    return new_user, new_enemy
def check_states(self):
    now = datetime.utcnow()
    for beacon, data in self.beacons.items():
        do_update = False
        state = data.get('state')
        if data.get('datetime') and (now - data.get('datetime')).total_seconds() < 120:
            if state != 'home':
                state = 'home'
                do_update = True
        else:
            if state != 'not_home':
                state = 'not_home'
                do_update = True
        if (now - data.get('last_pub', now)).total_seconds() > 120:
            do_update = True
        if do_update:
            self.update_state(beacon, state)
            data['last_pub'] = datetime.utcnow()
            data['state'] = state
    logger.info('Checked states for {}'.format(self.beacons.items()))
def add_slave(self, client):
    """ Add a slave server to the slave list """
    self._server.slave_clients[self._id] = client
    logger.info('Add slave client[%s] success.' % self._id)
def update_state(self, beacon, state):
    if self.mqtt_client:
        topic = self.topic_id + '%s' % beacon + '/state'
        self.mqtt_client.publish(topic, state)
        logger.info("Updated state for: {} to: {}".format(beacon, state))
def add_known_beacon(self, mac_addr):
    # open in text append mode; the original used "ab" but wrote str objects,
    # which raises TypeError on Python 3
    with open("known_devices", "a") as file:
        file.write(mac_addr)
        file.write('\n')
    logger.info('Added device to known_devices: {}'.format(mac_addr))
def handle(self, environ, start_response):
    logger.info('Using overseer %s' % id(self))
    socket = environ["wsgi.websocket"]
    logger.debug(socket.__dict__)
    player = Player(socket, start_response)
    enter_teh_infiniteh_loopah = True

    if self.game_started:
        logger.info('%s tried to connect, but game has already started' % player.id)
        logger.info('Delegating %s to new overseer...' % player.id)
        self.lcg.new_overseer()
        return self.lcg.redirect_to_overseer(environ, start_response)

    logger.info('%s connected' % player.id)
    while enter_teh_infiniteh_loopah:
        try:
            line = socket.receive()
        except socketerror:
            break
        if not line:
            break
        line = line.strip()
        if not line:
            break
        logger.debug('%s -> %s' % (player.id, line))
        try:
            parsed = json.loads(line)
        except ValueError:
            player.exception('What the hell are you sending to me?')
            continue
        try:
            self.delegate(player, parsed)
        except (GameError, ServerError) as e:
            player.exception(e)
            logger.info('%s raised %s' % (player.id, e))
            continue

    self.remove_player(player)
    try:
        socket.close()
    except socketerror:
        # whatever, I no longer care about this socket
        pass
    logger.info('%s disconnected' % player.id)