def load_task_class(self):
    """Scan the project's ``src`` tree and register multi-task classes.

    A module qualifies when its source text contains the marker
    ``##multi_task`` and does not contain ``##hahaha``.  Every class in a
    qualifying module that subclasses ``TaskMultiTemplate`` (excluding the
    template itself) is instantiated and registered in ``self.task_dict``
    keyed by its task name, together with a per-task ``Logger`` and
    ``TaskRedis``.

    Reads:  self.log_level, self.is_debug, self.redis_host,
            self.redis_port, self.redis_timeout
    Writes: self.task_dict
    """
    # Hoist loop-invariant path computations out of the walk.
    src_root = relative_project_path('src')
    prefix_len = len(relative_project_path('src/'))
    for parent, _dirnames, filenames in os.walk(src_root):
        for filename in filenames:
            if not filename.endswith('.py'):
                continue
            with open(os.path.join(parent, filename)) as f:
                code = f.read()
            if ('##multi_task' not in code) or ('##hahaha' in code):
                continue
            # Derive the dotted module path from the file location.
            # NOTE(review): '/' assumes a POSIX separator; use os.sep if
            # this must also run on Windows.
            package_name = parent[prefix_len:].replace('/', '.')
            # Strip only the trailing '.py' — the original cut at the
            # FIRST dot, which mangled dotted file names ('a.b.py' -> 'a').
            module_name = package_name + '.' + filename[:-3]
            try:
                module = importlib.import_module(module_name)
            except Exception:
                # Best-effort discovery: a module that fails to import is
                # skipped rather than aborting the scan.  Narrowed from a
                # bare 'except' so KeyboardInterrupt/SystemExit propagate.
                continue
            for class_name in dir(module):
                try:
                    taskclass = getattr(module, class_name)
                    # isinstance(..., type) guard: issubclass() raises
                    # TypeError on non-class module attributes.
                    if (isinstance(taskclass, type)
                            and issubclass(taskclass, TaskMultiTemplate)
                            and taskclass is not TaskMultiTemplate):
                        print('add multi task:',
                              module_name + '.' + class_name)
                        run_instance = taskclass()
                        self.task_dict[run_instance.task_name()] = {
                            'run_instance': run_instance,
                            'logger': Logger(self.log_level,
                                             run_instance.task_name(),
                                             self.is_debug),
                            'recv_task_redis': TaskRedis(
                                run_instance.task_name(),
                                self.redis_host,
                                self.redis_port,
                                self.redis_timeout),
                        }
                except Exception:
                    # Skip attributes that cannot be registered; narrowed
                    # from a bare 'except'.
                    pass
def __init__(self):
    """Initialize the consumer thread from ``python.ini``.

    Reads kafka/hbase endpoints and topic names from the config file, then
    creates a JSON-serializing ``KafkaProducer`` and a manually-committed
    ``KafkaConsumer`` subscribed to ``self.consumer_topic``.
    """
    self.logger = Logger(Logger.INFO, 'myclass', True)
    threading.Thread.__init__(self)
    # NOTE(review): the Logger instance is *called* before '.info' here and
    # at the end of this method — confirm Logger defines __call__;
    # otherwise this should read 'self.logger.info(...)'.
    self.logger().info("=====开始初始化=====")
    # Module-level 'data' shared with the polling job started at import.
    self.data = data
    cf = configparser.ConfigParser()
    cf.read("python.ini")
    self.kafka_host = cf.get("kafka", "host")
    self.kafka_port = cf.getint("kafka", "port")
    self.hbase_host = cf.get("hbase", "host")
    self.hbase_port = cf.getint("hbase", "port")
    self.consumer_topic = cf.get("kafka", "consumer_topic")
    self.producer_topic = cf.get("kafka", "producer_topic")
    # Fixed: these were Python 2 'print' statements — a SyntaxError on
    # Python 3, which this file otherwise targets (it uses 'configparser').
    print(self.kafka_port)
    print(self.kafka_host)
    bootstrap = '{kafka_host}:{kafka_port}'.format(
        kafka_host=self.kafka_host, kafka_port=self.kafka_port)
    self.producer = KafkaProducer(
        bootstrap_servers=[bootstrap],
        value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    self.consumer = KafkaConsumer(
        self.consumer_topic,
        group_id='mygroup1',
        # Offsets are committed explicitly by the caller.
        enable_auto_commit=False,
        bootstrap_servers=[bootstrap],
        # Heartbeat tuning: if the broker receives no heartbeat within
        # session_timeout_ms it evicts this consumer and rebalances the
        # group (see kafka-python consumer docs).
        session_timeout_ms=6000,
        heartbeat_interval_ms=2000)
    self.logger().info("====初始化完成====")
def __init__(self, task_name, model_class, model_config, redis_host,
             redis_port, redis_timeout, cuda, gpu_mem, allow_growth,
             log_level, is_debug):
    """Set up one model task: redis channel, data converters, TF session.

    Args:
        task_name: unique identifier for this task.
        model_class: model implementation; instantiated with
            ``(sess, task_redis, model_config)``.
        model_config: opaque config forwarded to ``model_class``.
        redis_host / redis_port / redis_timeout: ``TaskRedis`` settings.
        cuda: GPU id (int or str), or ``None`` to run without a TF session.
        gpu_mem / allow_growth: ``TFSession`` memory options.
        log_level / is_debug: ``Logger`` options.
    """
    # Bug fix: the original ran 'cuda = str(cuda)' BEFORE testing it, so
    # str(None) == 'None' made the 'cuda is not None' check always true —
    # the CPU branch (self.sess = None) was unreachable and task_uname
    # ended in the literal text 'None'.  Decide on the raw value instead.
    use_gpu = cuda is not None
    cuda = str(cuda) if use_gpu else ''
    self.task_name = task_name
    # Unique name per (task, gpu); unchanged for callers passing a GPU id.
    self.task_uname = task_name + cuda
    self.task_redis = TaskRedis(task_name, redis_host, redis_port,
                                redis_timeout)
    self.task_redis.upload_data_convert(model_class)
    # create_data_convert() returns converter *classes*; instantiate both.
    self.input_convert, self.output_convert = \
        self.task_redis.create_data_convert()
    self.input_convert = self.input_convert()
    self.output_convert = self.output_convert()
    if use_gpu:
        self.sess = TFSession(cuda, gpu_mem, allow_growth).get_sess()
    else:
        self.sess = None
    self.logger = Logger(log_level, self.task_uname, is_debug)
    self.model_class_instance = model_class(self.sess, self.task_redis,
                                            model_config)
    # Bound method the scheduler invokes to run this task.
    self.task_run = self.model_class_instance.run
    self.kill_last_task()
return aaaa # print(12) t = MyClass() t.start() # print(11) cf = configparser.ConfigParser() cf.read("python.ini") redis_server = cf.get("register", "address") host = cf.get("mysql", "host").encode() port = int(cf.get("mysql", "port")) user = cf.get("mysql", "user").encode() passwd = cf.get("mysql", "passwd").encode() db = cf.get("mysql", "db").encode() charset = cf.get("mysql", "charset").encode() minute = int(cf.get("mysql", "minute")) Polling(data, minute, host, port, user, passwd, db, charset).run() logger = Logger(Logger.INFO, 'example', True) # 参数:日志级别 日志文件名 日志是否控制台输出 # print(redis_server) # print(type(redis_server)) server = ThreadpoolServer(redis_server, [ExampleSvr], logger, 'JSON', 30, 30, 1) # 消息系统地址 服务列表 日志 交互协议 线程数量 server.start()