def _get_all_rel_tables(self, tb_id, result, tb_map, dep=0, index=0):
    """Collect every table that (transitively) depends on ``tb_id``.

    :param tb_id: id of the table whose dependents are collected
    :param result: output list; dependent-table rows are appended in traversal order
    :param tb_map: tables on the current recursion path, used to detect
        circular dependencies and prevent infinite recursion
    :param dep: current recursion depth
    :param index: running index assigned to each dependent table so the
        result can be ordered by dependency (keeps merge order correct)
    :return: the next unused index value
    """
    if tb_id in tb_map:
        raise Exception("there is a cycle in the relation graph")
    tb_map[tb_id] = dep
    share_model = Share()
    shares = share_model.get({Share.TB_ID: tb_id}, (Share.SH_ID, ))
    sh_ids = [s[Share.SH_ID] for s in shares]
    del share_model
    # A share of this table counts as the table itself when looking up
    # reverse dependencies.
    tb_ids = [tb_id] + sh_ids
    rel_tables = self.get_rel_tables(tb_ids)
    # Number every direct dependent before recursing so siblings sort
    # before their own dependents.
    for rel_tb in rel_tables:
        rel_tb['id'] = index
        index += 1
    dep += 1
    for rel_table in rel_tables:
        rel_tb_id = rel_table[Relation.TB_ID]
        result.append(rel_table)
        index = self._get_all_rel_tables(rel_tb_id, result, tb_map, dep, index)
    # Leaving this branch of the traversal: tb_id may legitimately appear
    # again on a different (non-cyclic) path.
    tb_map.pop(tb_id)
    return index
def add_with_share(self, params):
    """Create a field and propagate it to every share of the owning table.

    :param params: field-creation parameters; must contain ``self.TB_ID``
    :return: the id of the newly created field
    """
    field_id = self.create(params)
    tb_or_share_id = params[self.TB_ID]
    share_model = Share()
    shares = share_model.get({Share.TB_ID: tb_or_share_id},
                             (Share.SH_ID, Share.COL_FILTER))
    for share in shares:
        col_filter = json.loads(share[Share.COL_FILTER])
        col_filter.append(field_id)
        # BUG FIX: the original mutated col_filter but never wrote it back,
        # so shares silently missed the new field (the sibling Field
        # implementation persists it via share_model.update).
        share_model.update(share[Share.SH_ID],
                           {Share.COL_FILTER: col_filter})
    return field_id
def signal_handler(signal, frame):
    """Shut the application down cleanly on an OS signal.

    Order matters: persist metadata state first, stop the background
    services, wipe the scratch directory, then stop the API and exit.
    """
    Metadata.save_state()
    Metadata.stop()
    Database.stop()
    Share.stop()
    # remove scratch files left over from this run
    if os.path.exists("tmp"):
        shutil.rmtree("tmp")
    API.stop()
    sys.exit(0)
def __init__(self, info_filename):
    """Load bot configuration from *info_filename* and connect to IRC.

    Exits the process when the file is missing or is not valid JSON.
    """
    try:
        # Use a context manager so the file handle is always closed
        # (the original leaked it via json.load(open(...))).
        with open(info_filename, "r") as info_file:
            info = json.load(info_file)
        self.share = Share(info)
        self.irc = IRC(self.share.HOST, self.share.PORT)
    except Exception:
        # Narrowed from a bare except so Ctrl-C still interrupts startup.
        print("\"info\" file not found or syntax error")
        traceback.print_exc()
        sys.exit(1)
def update(self, tb_id, params, update_share_version=True, is_del=False):
    """Update a table's row and, on a version bump, cascade the new
    version (and recomputed data counts) to every share of the table.

    :param tb_id: id of the table to update
    :param params: column/value pairs to write
    :param update_share_version: when True and params bumps TB.VERSION,
        also bump each share's version and refresh its data count
    :param is_del: passed through as the 'is_del' condition of the update
    :return: result of the underlying ``_update`` call
    """
    if TB.VERSION in params and update_share_version:
        # Bump the version of every share that depends on this table.
        share_model = Share()
        shares = share_model.get(
            {Share.TB_ID: tb_id},
            (Share.SH_ID, Share.VERSION, Share.ROW_FILTER, Share.DEP_ID))
        if shares:
            cond = (VField.FIELD_ID, VField.TITLE, VField.AGGREGATOR)
            table_vfield = VField().get(
                {
                    VField.TB_ID: tb_id,
                    VField.FLAG: VField.TABLE_VFIELD_FLAG
                }, cond)
            tb_info = self.get_one({TB.TB_ID: tb_id}, (TB.STORAGE_ID, ))
            storage_id = tb_info[TB.STORAGE_ID]
            # TODO: depending on Mobius directly here is poor design.
            mobius = Mobius()
            for share in shares:
                # Treat a missing version as 0 before incrementing.
                share[Share.VERSION] = 0 if share[
                    Share.VERSION] is None else share[Share.VERSION]
                update_info = {Share.VERSION: share[Share.VERSION] + 1}
                # Refresh the share's row count.
                if share[Share.
                         ROW_FILTER] == '[]' and not share[Share.DEP_ID]:
                    # Unfiltered top-level share: same count as the table.
                    if TB.DATA_COUNT in params:
                        update_info[Share.DATA_COUNT] = params[
                            TB.DATA_COUNT]
                else:
                    if TB.DATA_COUNT in params and params[
                            TB.DATA_COUNT] == 0:
                        # Empty table -> every filtered share is empty too.
                        update_info[Share.DATA_COUNT] = 0
                    else:
                        # Combine the row filters of this share and all of
                        # its ancestor shares, then count matching rows.
                        dep_list = [share]
                        self.find_parent_sh(shares, share[Share.SH_ID],
                                            dep_list)
                        row_filter = []
                        for sh in dep_list:
                            row_filter += json.loads(sh[Share.ROW_FILTER])
                        expand_filter = expand_vfield(
                            row_filter, table_vfield)
                        where = ' and '.join(expand_filter)
                        if not expand_filter and TB.DATA_COUNT in params:
                            # No effective filter: fall back to the
                            # table's own count.
                            update_info[Share.DATA_COUNT] = params[
                                TB.DATA_COUNT]
                        else:
                            try:
                                count_data = mobius.query(
                                    'select count(1) as count from %s where %s'
                                    % (storage_id, where))
                                update_info[Share.DATA_COUNT] = count_data[
                                    'data'][0][0]
                            except Exception as e:
                                # Best-effort: keep the version bump even
                                # if the count query fails.
                                self.logger.error(e)
                share_model.update(share[Share.SH_ID], update_info)
    return self._update({TB.TB_ID: tb_id, 'is_del': is_del}, params)
def test_bruteforce_complex_float(self):
    """Bruteforce over nine float-priced shares picks the known optimum."""
    self.assertEqual(self.wallet.max_budget, 500)
    specs = (
        ("Share - DUPH", "100.01", "12.25"),
        ("Share - GTAN", "26.04", "38.06"),
        ("Share - USUF", "9.25", "27.69"),
        ("Share - CFOZ", "10.64", "38.21"),
        ("Share - QLRX", "50.72", "27.47"),
        ("Share - HKFP", "230.97", "19.66"),
        ("Share - PPPH", "24.06", "38.2"),
        ("Share - HLJY", "78.98", "5.54"),
        ("Share - CTCR", "160.6", "12.4"),
    )
    available_shares = [Share(name, price, profit) for name, price, profit in specs]
    bruteforce(self.wallet, available_shares)
    # Expected_folder values come from the output of utils/knapsack_solver_google.py
    # used with roundings of this test values (the scripts only accepts integers as input)
    # Script results:
    #   Total value (total_profit) = 98
    #   Total weight (total_cost)  : 493
    #   Packed items (indexes in available_shares)   : [1, 4, 5, 6, 8]
    #   Packed_weights (prices in available_shares)  : [26, 51, 231, 24, 161]
    expected_folder = {
        "Share - GTAN": 26.04,
        "Share - QLRX": 50.72,
        "Share - HKFP": 230.97,
        "Share - PPPH": 24.06,
        "Share - CTCR": 160.6,
    }
    self.assertEqual(dict(self.wallet.folder), expected_folder)
    self.assertEqual(self.wallet.total_cost, 492.39)
    self.assertEqual(self.wallet.total_profit, 98.35)
def test_bruteforce_simple(self):
    """With a 500 budget, all three cheap integer shares are affordable."""
    self.assertEqual(self.wallet.max_budget, 500)
    specs = (("Share-1", "1", "1"), ("Share-2", "10", "1"), ("Share-3", "100", "1"))
    available_shares = [Share(*spec) for spec in specs]
    bruteforce(self.wallet, available_shares)
    self.assertEqual(self.wallet.folder,
                     {"Share-1": 1, "Share-2": 10, "Share-3": 100})
    self.assertEqual(self.wallet.total_cost, 111)
    self.assertEqual(self.wallet.total_profit, 1.11)
def test_bruteforce_simple_float(self):
    """With a 500 budget, all three float-priced shares are affordable."""
    self.assertEqual(self.wallet.max_budget, 500)
    specs = (("Share-1", "0.5", "1.5"),
             ("Share-2", "10.22", "12.3"),
             ("Share-3", "100.3", "22"))
    available_shares = [Share(*spec) for spec in specs]
    bruteforce(self.wallet, available_shares)
    self.assertEqual(self.wallet.folder,
                     {"Share-1": 0.5, "Share-2": 10.22, "Share-3": 100.3})
    self.assertEqual(self.wallet.total_cost, 111.02)
    self.assertEqual(self.wallet.total_profit, 23.34)
def test_optimized_simple_float(self):
    """Optimized (cents-based) algorithm matches bruteforce on float prices."""
    self.wallet = Wallet(optimized=True, nb_available_shares=3)
    self.assertEqual(self.wallet.max_budget, 50000)
    specs = (("Share-1", "0.5", "1.5"),
             ("Share-2", "10.22", "12.3"),
             ("Share-3", "100.3", "22"))
    available_shares = [Share(name, price, profit, cents=True)
                        for name, price, profit in specs]
    optimized(self.wallet, available_shares)
    self.assertEqual(self.wallet.folder,
                     {"Share-1": 0.5, "Share-2": 10.22, "Share-3": 100.3})
    self.assertEqual(self.wallet.total_cost, 111.02)
    self.assertEqual(self.wallet.total_profit, 23.34)
def test_optimized_simple(self):
    """Optimized (cents-based) algorithm matches bruteforce on integer prices."""
    self.wallet = Wallet(optimized=True, nb_available_shares=3)
    self.assertEqual(self.wallet.max_budget, 50000)
    specs = (("Share-1", "1", "1"), ("Share-2", "10", "1"), ("Share-3", "100", "1"))
    available_shares = [Share(name, price, profit, cents=True)
                        for name, price, profit in specs]
    optimized(self.wallet, available_shares)
    self.assertEqual(self.wallet.folder,
                     {"Share-1": 1, "Share-2": 10, "Share-3": 100})
    self.assertEqual(self.wallet.total_cost, 111)
    self.assertEqual(self.wallet.total_profit, 1.11)
def submit(self, worker_name, job_id, extranonce2, ntime, nonce):
    """Handle the miner's "submit" RPC.

    Always returns True — rejecting would only confuse miners. Valid
    shares are persisted and credited to the worker; invalid ones are
    merely logged (stale vs genuinely bad).
    """
    share = Share(extranonce2, nonce, ntime, job_context=self.job_context)
    if share.valid():
        self.log.debug("valid share {share} from {log_source} ", share=share)
        ShareDB().save(share)
        self.worker.submit()
        return True
    # Invalid: a mismatched job id means the share is merely stale.
    if job_id != str(self.job_id):
        self.log.debug("stale share {share} from {log_source} [my jobid {mine} vs his {his}",
                       share=share,
                       his=job_id,
                       mine=self.job_id)
    else:
        self.log.warn("invalid share {share} from {log_source}", share=share)
    return True
def __init__(self): print("Initializing Location") # Services initialisieren servicesDict = self.readJsonFile(self.SERVICEPATH) for service in servicesDict: self.services[service] = Service(service, servicesDict[service]["category"], servicesDict[service]["gpioPin"]) # Reservierungen initialisieren‚ if not self.services == {}: reservationDict = self.readJsonFile(self.RESERVATIONPATH) for reservation in reservationDict: self.reservations[reservation] = Reservation( reservation, reservationDict[reservation], self.services[reservationDict[reservation]["serviceID"]]) # Shares initialisieren if not self.reservations == {}: sharesDict = self.readJsonFile(self.SHAREPATH) for share in sharesDict: self.shares[share] = Share( share, sharesDict[share], self.reservations[sharesDict[share]["reservationID"]])
def share_from_share_key(self, share_key, password=None, debug=False):
    """Instantiate a Share object from a share key string.

    :param share_key: string reference to a share created by another user
    :param password: password for the share, if one is required
    :param debug: passed through to the underlying call
    :return: the Share identified by the key
    :raises SharePasswordError: when a password is required/incorrect
    """
    # Imported lazily, presumably to avoid a circular import — confirm.
    from share import Share
    from errors import SharePasswordError
    try:
        return Share.share_from_share_key(self.rest_interface, share_key, password, debug)
    except SharePasswordError, e:
        # Re-raise with the same details so callers get a fully
        # populated error object. (Python 2 except syntax.)
        raise SharePasswordError(e.request, e.response, e.message, e.INTERNAL_CODE)
def generate_shares(self):
    """Process to generate shares at random intervals.

    A simulation coroutine: each iteration either sleeps out the rest of
    the run (when the share limit is reached) or waits a random interval
    and emits one Share, which may also be a block. Every emitted share
    is wrapped in a ShareMessage, recorded in the local DAG, and sent.
    """
    block_probability = self.block_probability
    while True:
        # wait for next share
        limit = self.shares_limit
        if limit != -1 and self.seq_no >= limit:
            # Limit reached: idle for the remaining run time instead of
            # producing more shares.
            yield self.env.timeout(self.run_time)
        else:
            yield self.env.timeout(self.get_next_share_time())
            share = Share(
                source=self.name,
                heads=self.heads(),
                env=self.env,
                seq_no=self.seq_no,
                # a share is a block with probability block_probability
                is_block=get_random(period=1) < block_probability,
            )
            self.seq_no += 1
            if share.is_block:
                self.num_blocks += 1
            msg = ShareMessage(share=share)
            # record locally before broadcasting
            self.add_to_dag(msg.share.hash, msg.share.heads)
            self.send(msg)
            self.shares_sent.append(msg.share.hash)
            self.num_shares_sent += 1
            self.handle_block_found(msg)
def _create_share(self, shnum, bucket, server, dyhb_rtt):
    """Build a Share for ``shnum``, reusing (or creating and caching) the
    CommonShare that all servers' copies of that share number pool into.

    :param shnum: share number within the file
    :param bucket: server-side bucket holding the share data
    :param server: the server the bucket lives on
    :param dyhb_rtt: round-trip time of the DYHB query, for download stats
    :return: a new Share wired to the cached CommonShare
    """
    if shnum in self._commonshares:
        cs = self._commonshares[shnum]
    else:
        numsegs, authoritative = self.node.get_num_segments()
        cs = CommonShare(numsegs, self._si_prefix, shnum,
                         self._node_logparent)
        if authoritative:
            cs.set_authoritative_num_segments(numsegs)
        # Share._get_satisfaction is responsible for updating
        # CommonShare.set_numsegs after we know the UEB. Alternatives:
        #  1: d = self.node.get_num_segments()
        #     d.addCallback(cs.got_numsegs)
        #   the problem is that the OneShotObserverList I was using
        #   inserts an eventual-send between _get_satisfaction's
        #   _satisfy_UEB and _satisfy_block_hash_tree, and the
        #   CommonShare didn't get the num_segs message before
        #   being asked to set block hash values. To resolve this
        #   would require an immediate ObserverList instead of
        #   an eventual-send -based one
        #  2: break _get_satisfaction into Deferred-attached pieces.
        #     Yuck.
        self._commonshares[shnum] = cs
    s = Share(bucket, server, self.verifycap, cs, self.node,
              self._download_status, shnum, dyhb_rtt,
              self._node_logparent)
    return s
def __init__(self):
    """Initialise a User entity with its default attribute set."""
    super(User, self).__init__()
    defaults = (
        ("data", {}),
        ("priority", Priority()),
        ("share", Share()),
        ("role", None),
        ("enabled", False),
    )
    for key, value in defaults:
        self.set(key, value)
def __init__(self):
    """Initialise a Project entity with its default attribute set."""
    super(Project, self).__init__()
    defaults = (
        ("data", {}),
        ("users", {}),
        ("share", Share()),
        ("quota", Quota()),
        ("TTL", 0.0),
        ("enabled", False),
    )
    for key, value in defaults:
        self.set(key, value)
def delete_with_share(self, tb_id, field_id):
    """Delete a field, and also remove it from the column filter of
    every share of the table.

    :param tb_id: id of the table the field belongs to
    :param field_id: id of the field to delete
    :return: result of the underlying delete
    """
    result = self._delete({Field.TB_ID: tb_id, Field.FIELD_ID: field_id})
    if result:
        share_model = Share()
        shares = share_model.get({Share.TB_ID: tb_id},
                                 (Share.SH_ID, Share.COL_FILTER))
        for share_info in shares:
            # COL_FILTER is stored as a JSON string; decode, drop the
            # field, and write back.
            share_info[Share.COL_FILTER] = json.loads(
                share_info[Share.COL_FILTER])
            if field_id in share_info[Share.COL_FILTER]:
                share_info[Share.COL_FILTER].remove(field_id)
                # NOTE(review): the value written back here is a Python
                # list, not a JSON string — presumably _update serialises
                # it; confirm against the Share model.
                share_model._update(
                    {Share.SH_ID: share_info[Share.SH_ID]},
                    {Share.COL_FILTER: share_info[Share.COL_FILTER]})
    return result
def process(blob):
    """Deserialize a share blob, validate it, and reduce its block hash
    to (kind, top-n-bits-as-8-bytes); returns None for invalid blocks.

    NOTE(review): ``i`` and ``n`` are free variables taken from the
    enclosing scope (loop index and bit count) — confirm they are bound
    where this function is defined.
    """
    s = Share.unserialize(blob)
    if not s.valid():
        print("Invalid block {}! bad hash".format(i))
        return None
    # interpret the little-endian hash as an integer; a valid hash must
    # fit in 224 bits
    as_number = int.from_bytes(s.block_hash(), byteorder="little")
    assert as_number < (1 << 224)
    # keep only the top n bits of the 224-bit value
    top = as_number >> (224 - n)
    return (s.kind, top.to_bytes(8, byteorder='little'))
def get_cascade_update_plan(self, tb_id_list=None):
    """Given one or more tb_ids, return the tb_id execution order for a
    cascade update, based on the cascade-dependency graph.

    Unlike get_all_rel_tables, the returned list is already ordered and
    deduplicated.

    :param tb_id_list: ids of the tables whose dependents need updating
    :return: ordered, deduplicated list of dependent tb_ids
    """
    if not tb_id_list:
        return []
    # BUG FIX: the original declared tb_id_list=[] and mutated it with
    # +=, leaking ids across calls through the shared default (and
    # mutating the caller's list). Work on a private copy instead.
    pending = list(tb_id_list)
    return_list = []
    # fold in all shares of the source tables
    share_model = Share()
    shares = share_model.get({Share.TB_ID: pending}, (Share.SH_ID, ))
    pending += [s[Share.SH_ID] for s in shares]
    # Remember the first layer: these ids are not 'view' type tables and
    # must not appear in the final plan.
    origin_tb_id_list = copy.deepcopy(pending)
    # Breadth-first walk with dedup; when a table appears more than once
    # keep the later occurrence so dependency order stays correct.
    while pending:
        for tb_id in pending:
            if tb_id in return_list:
                return_list.remove(tb_id)
            return_list.append(tb_id)
        share_tb_list = [
            share[Share.SH_ID] for share in share_model.get(
                {Share.TB_ID: pending}, [Share.SH_ID])
        ]
        tb_ids = list(set(pending + share_tb_list))
        pending = list(
            set(tmp_tb[Relation.TB_ID]
                for tmp_tb in self.get_rel_tables(tb_ids)))
    return [r for r in return_list if r not in origin_tb_id_list]
def cascade_delete(self, tb_id):
    """Delete a table together with every table that depends on it,
    including merge rules, shares, and relation records.

    :param tb_id: id of the root table to delete
    """
    relation_model = Relation()
    share_model = Share()
    gen_model = Gen()
    rel_tbs = [tb_id]
    # all dependent tables must be deleted too
    rel_tb_infos = []
    relation_model.get_all_rel_tables(tb_id, rel_tb_infos)
    for tb_info in rel_tb_infos:
        rel_tbs.append(tb_info['tb_id'])
    # NOTE(review): the loop variable below shadows the tb_id parameter.
    for tb_id in rel_tbs:
        # delete the data table
        self.delete(tb_id)
        # delete its merge (gen) rules
        gen_model._delete({Gen.TB_ID: tb_id})
        # delete its shares
        share_model.delete_tb_share(tb_id)
        # delete its dependency relations
        relation_model.delete_rel_info(tb_id)
    self.logger.warn('delete tb: %s' % ','.join(rel_tbs))
def del_rel_tables(self, tb_id):
    """Recursively delete all relation records around a table — both the
    records where it is the dependency and its shares — then recurse into
    every table that depended on it. Currently unused.

    :param tb_id: id of the table whose relation info is removed
    """
    self._delete({Relation.DEP_ID: tb_id})
    # delete the table's shares
    Share()._delete({Share.TB_ID: tb_id})
    for rel_table in self.get_rel_tables(tb_id):
        tb_id = rel_table[Relation.TB_ID]
        self.del_rel_tables(tb_id)
def get_all_dependencies(self, tb_id):
    """Return every (transitive) dependency of a table, deduplicated by
    'tb_id' while preserving discovery order.

    :param tb_id: id of the table whose dependencies are collected
    :return: list of dependency records, first occurrence of each tb_id
    """
    share_model = Share()
    result = []
    self.get_dependencies(share_model, tb_id, result)
    # Dedup in O(n) with a seen-set; the original rescanned the
    # accumulated list for every element (O(n^2)). Order and output are
    # unchanged: the first record per tb_id is kept.
    seen = set()
    distinct_result = []
    for r in result:
        if r['tb_id'] not in seen:
            seen.add(r['tb_id'])
            distinct_result.append(r)
    return distinct_result
def get_tb(self, tb_id, cols=(), user_id=None, groups=[]): """ 获取表、分配表的基本信息,返回字段列表等信息,默认返回表的所有信息 如果需要获取表的使用者创建的计算字段信息,必须传user_id :param tb_id: :param cols: :param user_id: :return: """ # 如果是sh_开头的tb_id一定是分享表id,分享表在使用上和普通的表是一样的,只是多出了过滤条件信息 # todo 需要对删除虚拟列的Aggregator if tb_id.find('sh_') == 0: sh_id = tb_id share_model = Share() share_info = share_model.get_one(sh_id) if not share_info: self.logger.warn('分享表不存在:' + sh_id) return None tb_id = share_info[Share.TB_ID] tb_info = self.get_tb_info(tb_id, user_id, cols, groups=groups) if not tb_info: self.logger.warn('分享表对应的数据表不存在:sh_id:%s, tb_id:%s' % (sh_id, tb_id)) return None tb_info[Share.ROW_FILTER] = share_info[Share.ROW_FILTER] tb_info[Share.IS_FIXED] = share_info[Share.IS_FIXED] # 使用share的version替换tb的version,因为share的过滤条件有可能有修改,需要单独记录版本号 tb_info[TB.VERSION] = share_info[Share.VERSION] tb_info[TB.DATA_COUNT] = share_info[Share.DATA_COUNT] return tb_info else: tb_info = self.get_tb_info(tb_id, user_id, cols) if not tb_info: return None # fields = tb_info[TB.FIELDS] # self.change_aggregator(fields) return tb_info
def add_with_share(self, params):
    """Create a field and append its id to the column filter of every
    share of the owning table.

    :param params: must contain Field.NAME and Field.TYPE; FIELD_ID is
        generated when absent, and a NULL data type is coerced to string
    :return: the field id on success, None on validation/creation failure
    """
    required_params = (Field.NAME, Field.TYPE)
    for rp in required_params:
        if rp not in params:
            self.logger.warn('缺失字段:%s' % rp)
            return None
    if not params.get(Field.FIELD_ID, ''):
        params[Field.FIELD_ID] = create_field_id()
    # columns with no detectable type are stored as strings
    if params[Field.TYPE] == Field.NULL_DATA_TYPE:
        params[Field.TYPE] = Field.STRING_TYPE
    fid = params[Field.FIELD_ID]
    if self._create(params):
        tb_id = params[Field.TB_ID]
        share_model = Share()
        shares = share_model.get({Share.TB_ID: tb_id},
                                 (Share.SH_ID, Share.COL_FILTER))
        # expose the new field through every existing share
        for share in shares:
            col_filter = json.loads(share[Share.COL_FILTER])
            col_filter.append(fid)
            share_model.update(share[Share.SH_ID],
                               {Share.COL_FILTER: col_filter})
        return fid
    else:
        return None
def get_tb_info(self, tb_id, user_id, cols=(), filter_delete=True, groups=[]):
    """Fetch a table's info including its field list.

    The field list contains fields of all kinds; computed (virtual)
    fields are marked by a 'flag' key.

    :param tb_id: table id
    :param user_id: extra user whose own computed fields are appended
        (the consumer of a shared table)
    :param cols: columns of the table row to fetch (empty = all)
    :param filter_delete: skip soft-deleted rows when True
    :param groups: group context used to resolve the share chain
    :return: table info dict with TB.FIELDS populated, or None
    """
    fields = list(cols)
    # these columns are always needed below, so force-include them
    append_fields = [TB.TB_ID, TB.OWNER, TB.VERSION]
    if fields:
        for f in append_fields:
            if f not in fields:
                fields.append(f)
    tb_info = self._get_one({TB.TB_ID: tb_id}, fields, filter_delete)
    if not tb_info:
        return None
    field_conds = (Field.FIELD_ID, Field.NAME, Field.TITLE, Field.TYPE,
                   Field.SEQ_NO, Field.UNIQ_INDEX, Field.REMARK)
    real_fields = Field().get_list(tb_info[TB.TB_ID], field_conds)
    vfield_conds = (VField.FIELD_ID, VField.NAME, VField.TITLE,
                    VField.TYPE, VField.AGGREGATOR, VField.OWNER,
                    VField.SEQ_NO, VField.PARAM, VField.FLAG, VField.TB_ID,
                    VField.CTIME, VField.REMARK)
    # When a specific user is given, that user's computed fields must be
    # included as well.
    # update BDP-3378: after repeated sharing, the last user can see the
    # computed fields created by all earlier users in the chain.
    user_info = tb_info[TB.OWNER]
    if user_id and user_id != tb_info[TB.OWNER]:
        user_info = Share().get_share_chain(tb_id, user_id, groups)
        # user_info = [tb_info[TB.OWNER], user_id]
    virtual_fields = VField().get_list(tb_info[TB.TB_ID], user_info,
                                       vfield_conds)
    for vf in virtual_fields:
        vf[VField.RAW_FORMULA] = expand_vfield(vf[VField.AGGREGATOR],
                                               virtual_fields)
    # append the computed fields
    tb_info[TB.FIELDS] = real_fields + virtual_fields
    return tb_info
def get_shares_from_file(self, file_path, cents=False) -> list:
    """Parse shares from *file_path*, dropping every share whose name
    appears more than once in the file."""
    self._reset_parser(file_path)
    parsed_shares = []
    seen_names = []
    for row in self._reader:
        if self._should_parse_row(row):
            seen_names.append(row[0])
            parsed_shares.append(Share(row[0], row[1], row[2], cents))
    # keep only names that occur exactly once
    occurrences = Counter(seen_names)
    return [share for share in parsed_shares
            if occurrences[share.name] == 1]
def retrieve_share(self, share_key, password=None, debug=False):
    """Instantiate a Share object from a string key.

    Will throw an exception if password is needed but not supplied.

    :note: Cannot use shares created in other applications.
    :param share_key: String reference to a share created by another user.
    :param password: Password for the share. Optional, but will throw an
        exception if a password is required and not provided.
    :param debug: True if successful, exception otherwise.
    :return: Share specified by the share key.
    :rtype: Share object.
    """
    # Imported lazily, presumably to avoid a circular import — confirm.
    from share import Share
    from errors import SharePasswordError
    try:
        # NOTE(review): this calls the private _share_from_share_key;
        # a sibling implementation uses the public name — confirm which
        # is current.
        return Share._share_from_share_key(self.rest_interface, share_key,
                                           password, debug)
    except SharePasswordError, e:
        # Re-raise with the same details so callers get a fully
        # populated error object. (Python 2 except syntax.)
        raise SharePasswordError(e.request, e.response, e.message, e.INTERNAL_CODE)
def get_share(self):
    """Prompt the user for a shared code until a valid one is entered.

    :return: a Share built from the accepted code
    """
    while True:
        print("\nEnter " + colored("Shared Code", "grey", "on_yellow") +
              colored(" {}", "blue").format(len(self.shares) + 1) +
              " and press ENTER")
        share_input = input("Shared Code: ")
        # empty input: just re-prompt
        if not share_input:
            continue
        share = Share(share_input, self.batch)
        # Share leaves .code falsy when the input failed validation
        if not share.code:
            print(colored("Invalid Shared Code", "red"))
            print(
                colored(
                    " - The code must consist of numbers and letters A thru F.\n"
                    " - The code must be 86 characters long.\n"
                    " - If a letter or number is unreadable, enter your best guess,\n"
                    "   or a zero, instead of that letter or number.", "cyan"))
            print("Try again...\n")
            continue
        else:
            return share
from setting import user_list_path, token_list from status import Status from blog import Blog from share import Share from profile import Profile from album import Album from photo import Photo from voice import Voice __all__ = ['Crawl'] crawl_list = [ Status(), Blog(), Share(), Profile(), ] class Crawl(object): def _get_all_user(self): for root, dirs, files in os.walk(user_list_path): return [f[:f.rfind('.')] for f in files] def update(self, user_list=None, force=False, token_list=token_list): if user_list is None: user_list = self._get_all_user() for user in user_list: for crawl_item in crawl_list: if not crawl_item.update(token_list, user, force): if not token_list:
# Startup sequence (Python 2): apply configuration overrides, start the
# core services, then either serve the API or idle forever.
if Config._config is not None:
    if "api" in Config._config:
        run_api = Config._config["api"]
    if "webui" in Config._config:
        API.ui = Config._config["webui"]
    # enabling the web UI implies running the API server
    if API.ui:
        run_api = True
    if "scrape" in Config._config:
        run_metadata = Config._config["scrape"]
    if "host" in Config._config:
        API.host = Config._config["host"]
    if "port" in Config._config:
        API.port = Config._config["port"]
# start from a clean scratch directory
if os.path.exists("tmp"):
    shutil.rmtree("tmp")
Database.start()
Share.start()
if run_metadata:
    Metadata.start()
if run_api:
    print "Serving Web UI/RESTful API on http://%s:%i" % (API.host, API.port)
    API.start()
else:
    # no API: keep the process alive for the background services
    while True:
        time.sleep(100)
def __init__(self, ticker):
    """Construct by delegating directly to the Share base class.

    :param ticker: ticker symbol forwarded to Share.__init__
    """
    Share.__init__(self, ticker)
class Bot:
    """Minimal IRC bot: connects, dispatches server commands to
    handle_<CMD> methods, and runs pluggable modules against PRIVMSGs."""

    def __init__(self, info_filename):
        """Load configuration JSON and open the IRC connection; exits the
        process when the file is missing or malformed."""
        try:
            # context manager: the original leaked the file handle via
            # json.load(open(...))
            with open(info_filename, "r") as info_file:
                info = json.load(info_file)
            self.share = Share(info)
            self.irc = IRC(self.share.HOST, self.share.PORT)
        except Exception:
            print("\"info\" file not found or syntax error")
            traceback.print_exc()
            sys.exit(1)

    def connect(self):
        """Connect to the server by sending to IRC the required messages
        to initialize a connection."""
        self.sendcmd(("NICK", self.share.NICK))
        self.sendcmd(("USER", self.share.IDENT, "0", "*"), self.share.REALNAME)

    def run(self):
        """Main loop: poll the server, dispatch messages, and flush
        module responses from the shared queue."""
        while True:
            # Sleep so we don't eat the CPU alive
            time.sleep(0.05)
            # Handle module commands
            server_msg = self.irc.getmsg()
            if server_msg:
                try:
                    self.handle(server_msg)
                except:
                    traceback.print_exc()
            # Process responses from modules
            while not self.share.empty_queue():
                response = self.share.get_queue()
                self.sendcmd(response[0], response[1])

    def send(self, string):
        """Write one raw IRC line (CRLF-terminated)."""
        self.irc.write(string + "\r\n")

    def sendcmd(self, cmd, text=None):
        """Send a command tuple, optionally with a trailing text part,
        truncated to the 510-byte IRC line limit."""
        temp = ' '.join(cmd)
        if text:
            temp = "{} :{}".format(temp, text)[:510]
        self.send(temp)

    def sendmsg(self, msg, string):
        """Reply to a Message; private messages are redirected back to
        their sender."""
        if self.share.NICK == msg.TO[1]:
            msg.TO[1] = msg.FROM[1]
        # BUG FIX: the original called bare sendcmd(...), a NameError.
        self.sendcmd(("PRIVMSG", msg.TO[1]), string)

    def runmodules(self, msg):
        """Try each loaded module against *msg*: command-prefix modules
        fire on a matching prefix, regex modules on a pattern match."""
        for mod in self.share.get_modulelist():
            moduleClass = getattr(mod, "Module")
            modcmd = None
            modregex = None
            try:
                modcmd = moduleClass.cmd
            except:
                pass
            try:
                modregex = moduleClass.regex
            except:
                pass
            if modcmd:
                if modcmd == msg.MSG[:len(moduleClass.cmd)]:
                    try:
                        # give the module its own copy, minus the prefix
                        n_msg = copy.deepcopy(msg)
                        n_msg.MSG = msg.MSG[len(modcmd):]
                        thread = moduleClass(n_msg, self.share)
                        thread.start()
                    except:
                        self.sendcmd(("PRIVMSG", msg.TO[1]),
                                     "MODULE {} FAILED".format(moduleClass))
                        traceback.print_exc()
            elif modregex:
                if re.search(modregex, msg.MSG):
                    try:
                        n_msg = copy.deepcopy(msg)
                        thread = moduleClass(n_msg, self.share)
                        thread.start()
                    except:
                        # BUG FIX: the original called self.sendmsg with a
                        # tuple where a Message is expected; use sendcmd
                        # like the command branch above.
                        self.sendcmd(("PRIVMSG", msg.TO[1]),
                                     "MODULE {} FAILED".format(moduleClass))
                        traceback.print_exc()

    def handle(self, server_msg):
        """Dispatch a parsed server message to handle_<command>."""
        try:
            getattr(self, "handle_{}".format(server_msg[1]))(server_msg)
        except:
            print("CMD {} - NOT IMPLEMENTED".format(server_msg[1]))
            # print("prefix: {}\nparams: {}\ntrailing: {}".format(server_msg[0], server_msg[2], server_msg[3]))
            # traceback.print_exc()

    # PING - Play PING PONG with the server
    def handle_PING(self, server_msg):
        self.sendcmd(("PONG",), server_msg[3])

    # RPL_ENDOFMOTD / ERR_NOMOTD - Finish joining server here
    def handle_376(self, server_msg):
        self.handle_422(server_msg)

    def handle_422(self, server_msg):
        extras = self.share.EXTRAS
        for e in extras:
            self.send(e)
        channels = self.share.CHANNELS.split(",")
        for c in channels:
            self.sendcmd(("JOIN",), c)

    # INVITE - Accept all channel invites automatically
    def handle_INVITE(self, server_msg):
        print(server_msg)
        self.sendcmd(("JOIN",), server_msg[3])

    # PRIVMSG - Any sort of message
    def handle_PRIVMSG(self, server_msg):
        print(server_msg)
        try:
            msg = Message(server_msg)
            self.runmodules(msg)
        except:
            traceback.print_exc()