def __init__(self, topic='covid_kor', consumer_group='my-group', host="localhost", port="9092"):
    """Connect a KafkaConsumer to the given broker and subscribe to *topic*.

    Kafka Consumer options (not all):
    - bootstrap_servers (default port 9092): list of brokers (host:port);
      listing every node is not required.
    - auto_offset_reset (default 'latest'): where to start reading when no
      committed offset exists ('earliest' or 'latest').
    - enable_auto_commit (default True): commit offsets periodically in the
      background.
    - value_deserializer (default None): callable that deserializes each raw
      message; must mirror the producer's serializer or decoding fails.

    :param topic: [str] topic to subscribe to
    :param consumer_group: [str] consumer group id used for offset tracking
    :param host: [str] broker host
    :param port: [str] broker port
    """
    try:
        self.consumer = KafkaConsumer(
            f"{topic}",
            bootstrap_servers=[f"{host}:{port}"],
            auto_offset_reset="latest",
            enable_auto_commit=True,
            group_id=consumer_group,
            value_deserializer=lambda x: loads(x.decode("utf-8")),
            consumer_timeout_ms=1000 * 60 * 5,  # stop iterating after 5 min of silence
        )
    except KafkaError as e:
        # Fixed typo: was "KafkaConsuemr".
        Log.e(f"KafkaConsumer fail. {e}")
    else:
        # Only report success when the constructor did not raise.
        Log.i("KafkaConsumer connect.")
def delete_stream_rules(self, rule_ids):
    """Delete the given stream rules from the account.

    :param rule_ids: [list[str]] ids of the rules to delete; no-op when empty
    :return: -
    """
    try:
        if len(rule_ids) > 0:
            response = self.api.request("tweets/search/stream/rules",
                                        {"delete": {
                                            "ids": rule_ids
                                        }})
            Log.i(
                f"[{response.status_code}] RULES DELETED: {json.dumps(response.json())}"
            )
            if response.status_code != 200:
                raise Exception(response.text)
    except TwitterRequestError as e:
        # str() is required: " ".join on a list containing the exception
        # object itself raised TypeError in the original.
        msg_list = ["RequestError:", str(e)]
        for msg in iter(e):
            msg_list.append(str(msg))
        err_msg = " ".join(msg_list)
        Log.e(err_msg)
    except TwitterConnectionError as e:
        Log.e(f"ConnectionError: {e}")
        self.prompt_reconnect_msg(2)
    except Exception as e:
        Log.e(f"BaseException: {e}")
        self.prompt_reconnect_msg(2)
def th_reqEsp(delay, id):
    """Polling thread: periodically push each ESP's displayed message to it over HTTP.

    :param delay: [int|float] seconds to sleep between polling rounds
    :param id: unused thread identifier (kept for the _thread start API)
    """
    global esp_id_ip
    global esp_subscribed
    global esp_messages_lora
    global esp_messages_displayed
    while True:
        # Snapshot the items: removeEsp() mutates esp_id_ip, and mutating a
        # dict while iterating it raises RuntimeError.
        for espid, espip in list(esp_id_ip.items()):
            Log.i("Envoi de la req pour espid = " + espid + ", ip = " + espip)
            wCli = MicroWebCli("http://" + espip + "/cm")
            wCli.QueryParams['message'] = str(
                esp_messages_displayed.get(espid))
            print('GET %s' % wCli.URL)
            try:
                wCli.OpenRequest()
                buf = memoryview(bytearray(1024))
                resp = wCli.GetResponse()
                if resp.IsSuccess():
                    while not resp.IsClosed():
                        x = resp.ReadContentInto(buf)
                        if x < len(buf):
                            buf = buf[:x]
                else:
                    print('GET return %d code (%s)' %
                          (resp.GetStatusCode(), resp.GetStatusMessage()))
                    removeEsp(espid)
            except Exception:
                # Was a bare except; Exception keeps KeyboardInterrupt and
                # SystemExit propagating. Any HTTP failure drops the ESP.
                removeEsp(espid)
        time.sleep(delay)
def add_stream_rules(self, request_query):
    """Register a single filtering rule on the account.

    :param request_query: [str] rule value written in the stream-rule syntax
    :return: -
    """
    try:
        payload = {"add": [{"value": request_query}]}
        response = self.api.request("tweets/search/stream/rules", payload)
        Log.i(f"[{response.status_code}] RULE ADDED: {response.text}")
        if response.status_code != 201:
            raise Exception(response.text)
    except TwitterRequestError as e:
        err_msg = "RequestError: " + "|".join(msg for msg in iter(e))
        Log.e(err_msg)
    except TwitterConnectionError as e:
        Log.e(f"ConnectionError: {e}")
        self.prompt_reconnect_msg(2)
    except Exception as e:
        Log.e(f"BaseException: {e}")
        self.prompt_reconnect_msg(2)
def _callback(message):
    # Handle an incoming LoRa payload: de-duplicate by sequence number and
    # apply any per-ESP message updates it carries.
    Log.i("_callback")
    global seq_num
    global esp_messages_lora
    global esp_messages_displayed
    global reqLora
    global reqNextLora
    # messageReceived is set by the RX event handler; ignore stray calls.
    if (not messageReceived):
        return
    message = message.decode()
    Log.i("message decode = " + message)
    parsed = ujson.loads(message)
    sendToMonitors(message, "received")
    # 's' is the sender's sequence number; only advance on the expected one.
    if seq_num == 0 and parsed['s'] != 0:
        return
    elif seq_num == 0 and parsed['s'] == 0:
        seq_num = seq_num + 1
    elif parsed['s'] == seq_num:
        # Expected packet: rotate the pending request queues.
        reqLora.clear()
        seq_num = parsed['s'] + 1
        reqLora = reqNextLora.copy()
        reqNextLora.clear()
    if 'm' in parsed:
        # 'm' carries per-ESP updates: [{'id': ..., 'mes': ...}, ...]
        for m in parsed['m']:
            mId = str(m.get("id"))
            mMes = str(m.get("mes"))
            # Locally-edited ESPs win over remote updates.
            if mId and mMes and not mId in esp_local_changed:
                if ((not mId in esp_messages_lora)
                        or esp_messages_lora[mId] != mMes):
                    esp_messages_lora[mId] = mMes
                    esp_messages_displayed[mId] = mMes
    # NOTE(review): original indentation was lost; this clear() is assumed to
    # run once per callback (not per update) — confirm against the source.
    esp_local_changed.clear()
def get(self, request, week):
    """Render the weekly pick sheet: every user's picks plus tie breaker for *week*.

    Users without a TieBreaker row are logged and omitted from the sheet.
    """
    games = Game.objects.filter(week=week).order_by('datetime')
    users = User.objects.all().order_by('last_name')
    data = {
        'users': [],
        'games': []
    }
    for game in games:
        data['games'].append(game)
    for user in users:
        Log.i("Looking at user: %s %s" % (user.first_name, user.last_name))
        try:
            user_data = {
                'name': '%s %s' % (user.first_name, user.last_name),
                'user_picks': [],
                'tie_breaker': TieBreaker.objects.get(week=week, user=user).points
            }
            for game in games:
                user_data['user_picks'].append(Pick.objects.get(user=user, game=game))
            data['users'].append(user_data)
        except TieBreaker.DoesNotExist:
            Log.i('%s %s Does not have Tie Breaker' % (user.first_name, user.last_name))
    kwargs = {
        'this_week': get_closest_game_by_date(timezone.now()).week,
        'week': week,
        'data': data
    }
    # Guard: games can be empty for an unknown week; games[0] raised IndexError.
    if games and timezone.now() > games[0].datetime:
        kwargs['disabled'] = True
    return render(request, 'teamtrack/see.html', kwargs)
def get_stream_rules(self):
    """List the stream rules registered on the account.

    :return: [list[str]] ids of registered rules; empty list on failure
        (the original returned None on error paths, breaking len()/iteration
        in callers such as delete_stream_rules)
    """
    rule_ids = []
    try:
        response = self.api.request("tweets/search/stream/rules",
                                    method_override="GET")
        Log.i(f"[{response.status_code}] RULES: {response.text}")
        if response.status_code != 200:
            raise Exception(response.text)
        else:
            for item in response:
                if "id" in item:
                    rule_ids.append(item["id"])
                else:
                    # Non-rule payload (e.g. meta) — just log it.
                    Log.i(json.dumps(item, ensure_ascii=False))
    except TwitterRequestError as e:
        # str() is required: " ".join on a list containing the exception
        # object itself raised TypeError in the original.
        msg_list = ["RequestError:", str(e)]
        for msg in iter(e):
            msg_list.append(str(msg))
        Log.e(" ".join(msg_list))
    except TwitterConnectionError as e:
        Log.e(f"ConnectionError: {e}")
        self.prompt_reconnect_msg(2)
    except Exception as e:
        Log.e(f"BaseException: {e}")
        self.prompt_reconnect_msg(2)
    return rule_ids
def __init__(self):
    """Record the launch time, reset the daily counter, and open a Kafka producer."""
    now = datetime.utcnow()
    self.init_hms = [now.hour, now.minute, now.second]
    self.daily_cnt = 0
    self.producer = kafka_producer.Producer('localhost', '9093')
    Log.i("DataGenerator on.")
def prompt_reconnect_msg(self, wait_secs):
    """Log a notice, then block for *wait_secs* seconds before reconnecting.

    Twitter API has usage rate limit (Per 15m)
    - 50 connect
    - 450 adding, deleting, listing
    :param wait_secs: seconds to wait
    :return: -
    """
    notice = f"wait {wait_secs}secs for reconnecting.."
    Log.i(notice)
    time.sleep(wait_secs)
def _join():
    """Join the LoRa network via OTAA, blinking yellow until connected."""
    global lopy_connected
    if lora.has_joined():
        return
    Log.i("Connecting Lora...")
    lopy_connected = False
    Led.blink_yellow()
    lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
    # Poll until the join completes; join() above does not block.
    while not lora.has_joined():
        time.sleep(2.5)
    lopy_connected = True
    Led.blink_green()
    Log.i("Connected")
def prompt_reconnect_msg(self, wait_secs):
    """Print a notice, then wait for the given number of seconds.

    A minimum wait of 2-3 seconds is judged sufficient to comply with the
    usage policy below.
    Twitter API has usage rate limit (Per 15m)
    - 50 connect
    - 450 adding, deleting, listing

    :param wait_secs: [int] seconds to wait
    :return: -
    """
    Log.i("wait " + str(wait_secs) + "secs for reconnecting..")
    time.sleep(wait_secs)
def handlerFuncGetDisplays(httpClient, httpResponse):
    """Reply with a JSON array describing every display and its current message."""
    entries = []
    for espid, espmes in esp_messages_displayed.items():
        # Escape backslashes and quotes: raw concatenation produced invalid
        # JSON whenever a message or id contained '"' or '\'.
        mes = espmes.replace('\\', '\\\\').replace('"', '\\"')
        eid = espid.replace('\\', '\\\\').replace('"', '\\"')
        entries.append('{"message": "' + mes + '",'
                       '"name": "Afficheur-' + eid + '",'
                       '"espId": "' + eid + '"}')
    # join replaces the original trailing-comma trimming.
    response = "[" + ",".join(entries) + "]"
    Log.i("response = " + response)
    httpResponse.WriteResponseOk(headers=None,
                                 contentType="application/json",
                                 contentCharset="UTF-8",
                                 content=response)
def send(message):
    """Send *message* over LoRa, retrying up to 3 times until an ack arrives."""
    sendToMonitors(message, "sent")
    Log.i("Sending : " + message)
    global messageReceived
    Led.blink_purple()
    _join()
    messageReceived = False
    # messageReceived is flipped by the LoRa RX callback; up to 3 attempts.
    for _ in range(3):
        if messageReceived:
            break
        socketLora.send(message.encode())
        time.sleep(20)
    Led.blink_green()
    Log.i("Message sent")
def __init__(self):
    """Load broker settings from the Kafka config file and open a producer."""
    now = datetime.utcnow()
    self.init_hms = [now.hour, now.minute, now.second]
    self.daily_cnt = 0
    # Config path is fixed here rather than taken from the CLI.
    self.config = Config('configs/kafka_config.conf')
    self.host = self.config.load_config('BROKERS', 'host2')
    self.port = self.config.load_config('BROKERS', 'port_default')
    self.producer = kafka_producer.Producer(self.host, self.port)
    Log.i("DataGenerator on.")
def open_url(url):
    '''Open *url* in Chrome and, on Windows, return a Chrome window wrapper.

    :param url: URL to open
    :return: Chrome wrapper on win32; implicitly None on darwin
        (other platforms are not handled and also return None)
    '''
    import time
    chrome_path = Chrome._get_browser_path()
    if sys.platform == 'win32':
        import win32process, win32event, win32gui
        Log.i('Chrome', chrome_path)
        # Dedicated profile dir keeps the session off the user's profile.
        cmdline = [
            '"%s"' % chrome_path, url, "--user-data-dir=remote-profile_1113"
        ]  # , '--disk-cache-dir="%sqt4a_cache"' % chrome_path[:-10]
        processinfo = win32process.CreateProcess(
            None, ' '.join(cmdline), None, None, 0, 0, None, None,
            win32process.STARTUPINFO())
        # Wait until the process accepts input (10s cap), then give the
        # window time to reach the foreground.
        win32event.WaitForInputIdle(processinfo[0], 10000)
        time.sleep(2)
        # NOTE(review): assumes the foreground window is the Chrome just
        # launched — racy if another window steals focus; confirm.
        return Chrome(win32gui.GetForegroundWindow())
    elif sys.platform == 'darwin':
        import subprocess
        # macOS: fire-and-forget launch; no wrapper is returned.
        subprocess.Popen([chrome_path, url])
def handlerFuncPost(httpClient, httpResponse):
    """Subscribe an ESP: remember its id/ip and seed its default message."""
    global esp_subscribed
    global esp_messages_lora
    global esp_messages_displayed
    global esp_id_ip
    params = httpClient.GetRequestQueryParams()
    # Guard clause: no espid parameter means the request is rejected.
    if "espid" not in params:
        httpResponse.WriteResponseForbidden()
        return
    espid = params["espid"]
    Log.i("new sub espId : " + espid)
    if espid not in esp_subscribed:
        esp_subscribed.append(espid)
    if espid not in esp_messages_displayed:
        # A new display starts out showing its own id.
        esp_messages_lora[espid] = espid
        esp_messages_displayed[espid] = espid
    esp_id_ip[espid] = httpClient.GetIPAddr()
    httpResponse.WriteResponseOk(headers=None,
                                 contentType="text/plain",
                                 contentCharset="UTF-8",
                                 content="Subscribed")
def __init__(self, host="localhost", port="9092"): """ Kafka Producer Option (not all) - bootstrap_servers(default=9092): 브로커(host:port)를 리스트로 나열, 노드 전부를 쓸 필요는 없음 - acks(default=1): 0, 서버로부터 어떠한 ack도 기다리지 않음. 처리량 증가하지만 유실율도 증가 1, 리더의 기록을 확인 후 ack 받음. 모든 팔로워에 대해서 확인하지는 않음 'all', 모든 ISR(리더의 모든 팔로워)의 기록을 확인 후 ack 받음. 무손실 보장 - compression_type(default=None): 데이터를 압축하여 보낼 포멧 (‘gzip’, ‘snappy’, ‘lz4’, or None) - value_serializer(default=None): 유저가 보내려는 msg를 byte의 형태로 변환할 함수(callable). 여기서는 변환 후 인코딩 """ try: self.producer = KafkaProducer( api_version=(2, 7, 0), bootstrap_servers=[f"{host}:{port}"], acks=-1, compression_type="gzip", value_serializer=lambda x: x.encode("utf-8"), batch_size=1024*64, linger_ms=10, ) except KafkaError as e: Log.e(f"KafkaProducer fail. {e}") Log.i("KafkaProducer connect.")
def handlerFuncEditSsid(httpClient, httpResponse, routeArgs):
    """Rename the access-point SSID, persist it, and restart the WLAN AP."""
    global lopy_ssid
    global wlan
    lopy_ssid = config.WIFI_SSID_PREFIX + routeArgs["ssid"]
    Log.i("ssid changed : " + lopy_ssid)
    httpResponse.WriteResponseOk(headers=None,
                                 contentType="text/plain",
                                 contentCharset="UTF-8",
                                 content="SSID renamed.")
    try:
        # 'with' closes the file even if the write fails; the original
        # leaked the handle on a write error and used a bare except.
        with open(config.CONFIGURATION_FILES_DIR + '/ssid', 'w+') as ssid_file:
            ssid_file.write(lopy_ssid)
    except Exception:
        Log.i("Cant save the new ssid")
    # Restart the AP with the new SSID.
    wlan.deinit()
    wlan = WLAN(mode=WLAN.AP,
                ssid=lopy_ssid,
                auth=(WLAN.WPA2, config.WIFI_PASS),
                channel=11,
                antenna=WLAN.INT_ANT)
    wlan.ifconfig(id=1,
                  config=(config.API_HOST, '255.255.255.0', '10.42.31.1',
                          '8.8.8.8'))
def _lora_callback(trigger):
    # LoRa event dispatcher: consumes RX packets and re-joins on TX failure.
    Log.i("_lora_callback")
    global messageReceived
    events = lora.events()
    if (events & LoRa.RX_PACKET_EVENT):
        messageReceived = True
        socketLora.setblocking(True)
        Log.i("LoRa.RX_PACKET_EVENT")
        Led.blink_blue()
        data = socketLora.recv(256)
        # NOTE(review): setblocking(True) again — likely meant
        # setblocking(False) to restore non-blocking mode; confirm.
        socketLora.setblocking(True)
        _callback(data)
    # if(events & LoRa.TX_PACKET_EVENT):
    #     Log.i("LoRa.TX_PACKET_EVENT")
    if (events & LoRa.TX_FAILED_EVENT):
        Log.i("LoRa.TX_FAILED_EVENT")
        _join()
def _wlan_callback(trigger):
    """WLAN event callback: logs which trigger fired."""
    Log.i("_wlan_callback")
    # str() both parts: concatenating a type/object to a str raised TypeError.
    Log.i("trigger type = " + str(type(trigger)))
    Log.i("trigger = " + str(trigger))
from logger import Log
from led import Led
import machine
import network
import time
import _thread
import config
import ujson
import pycom
from network import Server
import ubinascii
import socket
from microWebCli import MicroWebCli
import os
Log.i("LoPy launched")
Led.blink_red()
# Give the board time to settle before configuring radios.
time.sleep(10)

################################# INIT #####################################
# Default SSID: configured prefix + last 5 hex chars of the WLAN MAC.
lopy_ssid = config.WIFI_SSID_PREFIX + \
    ubinascii.hexlify(network.WLAN().mac()[
        1], ':').decode().replace(":", "")[-5:]
if not config.CONFIGURATION_FILES_DIR in os.listdir():
    os.mkdir(config.CONFIGURATION_FILES_DIR)
try:
    # A previously saved SSID overrides the MAC-derived default.
    ssid_file = open(config.CONFIGURATION_FILES_DIR + '/ssid', 'r')
    lopy_ssid = ssid_file.read()
    ssid_file.close()
except:
    # NOTE(review): bare except, and this 'w+' handle is never written or
    # closed here — presumably it only creates the file; confirm whether
    # later code uses it, otherwise close it immediately.
    ssid_file = open(config.CONFIGURATION_FILES_DIR + '/ssid', 'w+')
def start_stream(self, producer, topic):
    """Receive filtered streaming data per the rules registered on the account.

    While data flows normally this stays inside the loop until an error or
    termination; depending on the error cause it retries the connection so
    the server stays up. Independently of the rules, the request query lists
    the fields wanted from each tweet. Acts as the Kafka producer side:
    each received record is forwarded to the broker behind *producer*.

    :param producer: [kafka.producer] Kafka producer object
    :param topic: [str] broker topic the data is forwarded to
    :return: -
    """
    total_cnt = 0
    while True:
        try:
            response = self.api.request(
                "tweets/search/stream",
                {
                    "expansions": self.expansions,
                    "media.fields": self.media_fields,
                    "tweet.fields": self.tweet_fields,
                    "user.fields": self.user_fields,
                    "place.fields": self.place_fields,
                },
            )
            Log.i(f"[{response.status_code}] START...")
            Log.i(response.get_quota())  # check the API connect quota
            if (response.status_code != 200
                    and response.status_code != 400
                    and response.status_code != 429):
                # Distinguish by response code so each error cause gets its
                # own handling below.
                raise Exception(response)
            with open(self.output_file_name, "a",
                      encoding="utf-8") as output_file, open(
                          "logs/data_count.txt", "a") as cnt_file:
                # data_count.txt: counts records so data loss can be spotted.
                # Line format: [time received] tweet creation time
                # (count since file open / count since program start)
                print(f"[{datetime.datetime.now()}] file re-open",
                      file=cnt_file)
                for no, item in enumerate(response):
                    self.check_error_response(item)
                    data = json.dumps(item, ensure_ascii=False)
                    print(data, file=output_file, flush=True)
                    producer.send_data(topic=topic, data=data)
                    print(
                        f"[{datetime.datetime.now()}] {item['data']['created_at']} ({no} / {total_cnt})",
                        file=cnt_file,
                        flush=True,
                    )
                    total_cnt += 1
        except TwitterRequestError as e:  # ConnectionException will be caught here
            msg_list = []
            for msg in iter(e):
                msg_list.append(msg)
            err_msg = "RequestError: " + "|".join(msg_list)
            Log.e(err_msg)
            if e.status_code >= 500:
                self.prompt_reconnect_msg(3)
            elif e.status_code == 429:
                # Rate-limited: the 15-minute window needs a longer pause.
                self.prompt_reconnect_msg(63)
            else:
                exit()
        except TwitterConnectionError as e:
            Log.e(f"ConnectionError: {e}")
            self.prompt_reconnect_msg(3)
        except Exception as e:
            Log.e(f"Exception: {e}")
            self.prompt_reconnect_msg(3)
def start_stream(self, producer, topic):
    """Receive filtered streaming data and forward each record to Kafka.

    Loops forever, reconnecting according to the error cause; a background
    job prints record counts every 5 seconds so data loss can be spotted.

    :param producer: [kafka.producer] Kafka producer object
    :param topic: [str] broker topic the data is forwarded to
    :return: -
    """
    # Mutable list so the scheduled job can update the value in place.
    total_cnt = [0]
    while True:
        try:
            response = self.api.request(
                "tweets/search/stream",
                {
                    "expansions": self.expansions,
                    "media.fields": self.media_fields,
                    "tweet.fields": self.tweet_fields,
                    "user.fields": self.user_fields,
                    "place.fields": self.place_fields,
                },
            )
            Log.i(f"[{response.status_code}] START...")
            Log.i(response.get_quota())  # check the API connect quota
            # Every non-200 status is handled by the except blocks below;
            # the original had two redundant branches raising identically.
            if response.status_code != 200:
                raise Exception(response)
            with open(self.output_file_name, "a",
                      encoding="utf-8") as output_file, open(
                          "../logs/data_count.txt", "a") as cnt_file:
                print(f"[{datetime.datetime.now()}] file re-open",
                      file=cnt_file)
                scheduler = BackgroundScheduler()  # runs the 5s count job
                # Mutable so print_periodically can reset it every 5s.
                batch_cnt = [0]
                scheduler.add_job(
                    self.print_periodically,
                    "cron",
                    second="*/5",
                    args=[batch_cnt, total_cnt, cnt_file],
                )
                scheduler.start()
                try:
                    for item in response:
                        self.check_error_response(item)
                        data = json.dumps(item, ensure_ascii=False)
                        print(data, file=output_file, flush=True)
                        producer.send_data(topic=topic, data=data)
                        batch_cnt[0] += 1
                        total_cnt[0] += 1
                finally:
                    # The original leaked one scheduler (and its thread)
                    # per reconnect; always stop it before looping again.
                    scheduler.shutdown(wait=False)
        except TwitterRequestError as e:
            # str() the parts: " ".join raised TypeError on the exception
            # object the original put in the list.
            msg_list = ["RequestError:", str(e)]
            for msg in iter(e):
                msg_list.append(str(msg))
            Log.e(" ".join(msg_list))
            if e.status_code >= 500:
                self.prompt_reconnect_msg(2)
            elif e.status_code == 429:
                # Rate-limited: wait out the 15-minute window slice.
                self.prompt_reconnect_msg(60)
            else:
                exit()
        except TwitterConnectionError as e:
            Log.e(f"ConnectionError: {e}")
            self.prompt_reconnect_msg(2)
        except Exception as e:
            Log.e(f"Exception: {e}")
            self.prompt_reconnect_msg(2)