def main_handler(event, context):
    """Cloud-function entry point: run the miHoYo BBS check-in for every configured account.

    Reads the accounts from the COOKIE environment variable (multiple
    accounts separated by '#', e.g. 1#2#3#4).  Github Actions users set it
    under Settings -> Secrets with Name=COOKIE, Value=<the obtained value>.

    :param event:   invocation event payload (unused here).
    :param context: invocation context (unused here).

    Calls ``exit(-1)`` if any account fails to sign in.
    """
    log.info('任务开始')
    notify = Notify()
    msg_list = []
    ret = success_num = fail_num = 0
    # ============= miHoYo BBS COOKIE ============
    # Single env lookup with a default replaces the original
    # "check then index" double lookup — same resulting value.
    COOKIE = os.environ.get('COOKIE', '')
    # NOTE: an empty COOKIE still split()s to [''], i.e. one bogus account,
    # which will then fail and be reported — original behavior, kept as-is.
    cookie_list = COOKIE.split('#')
    log.info(f'检测到共配置了 {len(cookie_list)} 个帐号')
    for i, cookie in enumerate(cookie_list):
        log.info(f'准备为 NO.{i + 1} 账号签到...')
        try:
            msg = f' NO.{i + 1} 账号:{Sign(cookie).run()}'
            success_num += 1
        except Exception as e:
            msg = f' NO.{i + 1} 账号:\n {e}'
            fail_num += 1
            log.error(msg)
            ret = -1
        # Both branches appended the same msg; do it once here.
        msg_list.append(msg)
    notify.send(status=f'成功: {success_num} | 失败: {fail_num}', msg=msg_list)
    if ret != 0:
        log.error('异常退出')
        exit(ret)
    log.info('任务结束')
def main(datapath):
    """Generic entrypoint for testing.

    Loads the GitHub feed config from ``<datapath>/github.yml``, collects the
    orgs whose feeds changed within the look-back window, renders the bot
    message template for them and posts it to the configured Gitter channel.

    :param datapath: directory containing ``github.yml``.
    """
    notify = Notify(config.gitter_token, config.gitter_community)
    go_back = changed_after_days()
    ghf = GithubFeeds(config.github_token, datapath + '/github.yml', go_back)
    updates = ghf.update()
    # Keep only the orgs that actually had updates
    # (comprehension replaces the manual .keys()/append loop).
    results = [org for org, changed in updates.items() if changed]
    if results:
        message_template = Template(bot_message)
        message = message_template.render(data=results)
        print(message)
        notify.send(config.gitter_channel, message)
def monitor(self):
    """
    @Purpose: Main monitoring routine.  Loops forever: first checks that the
              environment database (EnvDB) is reachable, then compares each
              monitored probe's latest value against its allowed range and
              emails a notification on every out-of-range / back-in-range
              transition.
    """
    # NOTE(review): Python 2 syntax (print statements, `except X, e`).
    # NOTE(review): there is no sleep() anywhere in this loop, so it polls
    # the database as fast as it can — confirm whether a per-iteration
    # delay was intended.
    while True:
        ##### Checking envdb availability #####
        try:
            envdb = EnvDB()
            self.isEnvDBAlive = True
        except Exception, e:
            # First consecutive failure only flips the liveness flag; the
            # second consecutive failure sends the "database down" email,
            # at most once (sentDownMsg is presumably reset elsewhere).
            if self.isEnvDBAlive:
                self.isEnvDBAlive = False
            else:
                if not self.sentDownMsg:
                    ################ EMAIL DOWN MSG ###############
                    print self.__DATABASE_DOWN_SUBJECT__
                    print self.__DATABASE_DOWN_MSG__ % self.__getTimeString__()
                    notifyObj = Notify(notifyList, subject=self.__DATABASE_DOWN_SUBJECT__)
                    notifyObj.send(self.__DATABASE_DOWN_MSG__ % self.__getTimeString__())
                    ###############################################
                    self.sentDownMsg = True
        ######
        # NOTE(review): if EnvDB() failed on the very first iteration,
        # `envdb` is unbound below; the resulting NameError is silently
        # absorbed by the except at the bottom.
        try:
            for probe in self.monitorList:
                latestValue = envdb.getLatestProbeValue(probe)
                # probeRange is the (low, high) allowed bounds for this probe.
                probeRange = self.__PROBE_RANGE__[probe]
                if self.monitorValidMap[probe] and (latestValue > probeRange[1] or latestValue < probeRange[0]):
                    # Probe just left its allowed range: mark invalid and email once.
                    self.monitorValidMap[probe] = False
                    ############### EMAIL PROBE OUT OF RANGE MSG ############
                    print self.__PROBE_OUT_OF_RANGE_SUBJECT__ % probe
                    print self.__PROBE_OUT_OF_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue)
                    notifyObj = Notify(notifyList, subject=self.__PROBE_OUT_OF_RANGE_SUBJECT__ % probe)
                    notifyObj.send(self.__PROBE_OUT_OF_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue))
                    #########################################################
                elif (not self.monitorValidMap[probe]) and \
                        (latestValue >= probeRange[0]-self.__PROBE_THRESHOLD__ and latestValue <= probeRange[1]-self.__PROBE_THRESHOLD__):
                    # Probe came back inside the threshold-adjusted range.
                    # NOTE(review): this hysteresis band looks asymmetric —
                    # the threshold is SUBTRACTED from BOTH bounds, which
                    # widens the low side and narrows the high side; the low
                    # bound was probably meant to be probeRange[0]+threshold.
                    # Confirm before changing.
                    self.monitorValidMap[probe] = True
                    ############### EMAIL PROBE WITHIN RANGE MSG ############
                    print self.__PROBE_COME_BACK_WITH_IN_RANGE_SUBJECT__ % probe
                    print self.__PROBE_COME_BACK_WITH_IN_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue)
                    notifyObj = Notify(notifyList, subject=self.__PROBE_COME_BACK_WITH_IN_RANGE_SUBJECT__ % probe)
                    notifyObj.send(self.__PROBE_COME_BACK_WITH_IN_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue))
                    #########################################################
        except Exception, e:
            # NOTE(review): traceback.print_exc() already prints the
            # traceback and returns None, so this line also prints "None".
            print traceback.print_exc()
"""miHoYo BBS COOKIE :param COOKIE: 米游社的COOKIE.多个账号的COOKIE值之间用 # 号隔开,例如: 1#2#3#4 """ # Github Actions用户请到Repo的Settings->Secrets里设置变量,变量名字必须与上述参数变量名字完全一致,否则无效!!! # Name=<变量名字>,Value=<获取的值> COOKIE = '' if os.environ.get('COOKIE', '') != '': COOKIE = os.environ['COOKIE'] cookie_list = COOKIE.split('#') log.info(f'检测到共配置了 {len(cookie_list)} 个帐号') for i in range(len(cookie_list)): log.info(f'准备为 NO.{i + 1} 账号签到...') try: msg = f' NO.{i + 1} 账号:{Sign(cookie_list[i]).run()}' msg_list.append(msg) success_num = success_num + 1 except Exception as e: msg = f' NO.{i + 1} 账号:\n {e}' msg_list.append(msg) fail_num = fail_num + 1 log.error(msg) ret = -1 continue notify.send(status=f'成功: {success_num} | 失败: {fail_num}', msg=msg_list) if ret != 0: log.error('异常退出') exit(ret) log.info('任务结束')
# NOTE(review): fragment — starts mid-function.  The condition guarding the
# log.error/raise below, plus `OS_COOKIE`, `notify`, `msg_list`, `ret`,
# `success_num`, `fail_num`, `log` and `Sign`, are all outside this view.
log.error(
    "Cookie not set properly, please read the documentation on how to set and format your cookie in Github Secrets."
)
raise Exception("Cookie failure")
cookie_list = OS_COOKIE.split('#')
log.info(f'Number of account cookies read: {len(cookie_list)}')
for i in range(len(cookie_list)):
    log.info(f'Preparing NO.{i + 1} Account Check-In...')
    try:
        # NOTE(review): ltoken/uid are parsed but never used below —
        # presumably these act as an early cookie-format check (an
        # IndexError here lands in the except branch).  Confirm.
        ltoken = cookie_list[i].split('ltoken=')[1].split(';')[0]
        uid = cookie_list[i].split('account_id=')[1].split(';')[0]
        msg = f' NO.{i + 1} Account:{Sign(cookie_list[i]).run()}'
        msg_list.append(msg)
        success_num = success_num + 1
    except Exception as e:
        msg = f' NO.{i + 1} Account:\n {e}'
        msg_list.append(msg)
        fail_num = fail_num + 1
        log.error(msg)
        ret = -1
        continue
notify.send(
    status=
    f'\n -Number of successful sign-ins: {success_num} \n -Number of failed sign-ins: {fail_num}',
    msg=msg_list)
if ret != 0:
    log.error('program terminated with errors')
    exit(ret)
log.info('exit success')
# Github Actions用户请到Repo的Settings->Secrets里设置变量,变量名字必须与上述参数变量名字完全一致,否则无效!!! # Name=<变量名字>,Value=<获取的值> OS_COOKIE = '' if os.environ.get('OS_COOKIE', '') != '': OS_COOKIE = os.environ['OS_COOKIE'] cookie_list = OS_COOKIE.split('#') log.info(f'Total de compte à redeem : {len(cookie_list)}.') for i in range(len(cookie_list)): log.info(f'Compte n°{i + 1} en cours...') try: ltoken = cookie_list[i].split('ltoken=')[1].split(';')[0] uid = cookie_list[i].split('account_id=')[1].split(';')[0] msg = f'Compte n°{i + 1} :{Sign(cookie_list[i]).run()}' msg_list.append(msg) success_num = success_num + 1 except Exception as e: msg = f'Compte n°{i + 1} :\n {e}' msg_list.append(msg) fail_num = fail_num + 1 log.error(msg) ret = -1 continue notify.send(status=f'Succès: {success_num} | Echec: {fail_num}', msg=msg_list) if ret != 0: log.error('ERREUR') exit(ret) log.info('Fin normale du traitement.')
def monitor(self):
    """
    @Purpose: Main monitoring routine.  Loops forever: first checks that the
              environment database (EnvDB) is reachable, then compares each
              monitored probe's latest value against its allowed range and
              emails a notification on every out-of-range / back-in-range
              transition.
    """
    # NOTE(review): Python 2 syntax (print statements, `except X, e`).
    # NOTE(review): no sleep() in this loop — it polls the database as fast
    # as it can; confirm whether a per-iteration delay was intended.
    while True:
        ##### Checking envdb availability #####
        try:
            envdb = EnvDB()
            self.isEnvDBAlive = True
        except Exception, e:
            # First consecutive failure only flips the liveness flag; the
            # second consecutive failure sends the "database down" email,
            # at most once (sentDownMsg is presumably reset elsewhere).
            if self.isEnvDBAlive:
                self.isEnvDBAlive = False
            else:
                if not self.sentDownMsg:
                    ################ EMAIL DOWN MSG ###############
                    print self.__DATABASE_DOWN_SUBJECT__
                    print self.__DATABASE_DOWN_MSG__ % self.__getTimeString__()
                    notifyObj = Notify(notifyList, subject=self.__DATABASE_DOWN_SUBJECT__)
                    notifyObj.send(self.__DATABASE_DOWN_MSG__ % self.__getTimeString__())
                    ###############################################
                    self.sentDownMsg = True
        ######
        # NOTE(review): if EnvDB() failed on the very first iteration,
        # `envdb` is unbound below; the resulting NameError is silently
        # absorbed by the except at the bottom.
        try:
            for probe in self.monitorList:
                latestValue = envdb.getLatestProbeValue(probe)
                # probeRange is the (low, high) allowed bounds for this probe.
                probeRange = self.__PROBE_RANGE__[probe]
                if self.monitorValidMap[probe] and (latestValue > probeRange[1] or latestValue < probeRange[0]):
                    # Probe just left its allowed range: mark invalid and email once.
                    self.monitorValidMap[probe] = False
                    ############### EMAIL PROBE OUT OF RANGE MSG ############
                    print self.__PROBE_OUT_OF_RANGE_SUBJECT__ % probe
                    print self.__PROBE_OUT_OF_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue)
                    notifyObj = Notify(notifyList, subject=self.__PROBE_OUT_OF_RANGE_SUBJECT__ % probe)
                    notifyObj.send(self.__PROBE_OUT_OF_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue))
                    #########################################################
                elif (not self.monitorValidMap[probe]) and \
                        (latestValue >= probeRange[0]-self.__PROBE_THRESHOLD__ and latestValue <= probeRange[1]-self.__PROBE_THRESHOLD__):
                    # Probe came back inside the threshold-adjusted range.
                    # NOTE(review): this hysteresis band looks asymmetric —
                    # the threshold is SUBTRACTED from BOTH bounds, which
                    # widens the low side and narrows the high side; the low
                    # bound was probably meant to be probeRange[0]+threshold.
                    # Confirm before changing.
                    self.monitorValidMap[probe] = True
                    ############### EMAIL PROBE WITHIN RANGE MSG ############
                    print self.__PROBE_COME_BACK_WITH_IN_RANGE_SUBJECT__ % probe
                    print self.__PROBE_COME_BACK_WITH_IN_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue)
                    notifyObj = Notify(notifyList, subject=self.__PROBE_COME_BACK_WITH_IN_RANGE_SUBJECT__ % probe)
                    notifyObj.send(self.__PROBE_COME_BACK_WITH_IN_RANGE_MSG__ % (self.__getTimeString__(), probe, latestValue))
                    #########################################################
        except Exception, e:
            # NOTE(review): traceback.print_exc() already prints the
            # traceback and returns None, so this line also prints "None".
            print traceback.print_exc()
# Github Actions用户请到Repo的Settings->Secrets里设置变量,变量名字必须与上述参数变量名字完全一致,否则无效!!! # Name=<变量名字>,Value=<获取的值> OS_COOKIE = '' if os.environ.get('OS_COOKIE', '') != '': OS_COOKIE = os.environ['OS_COOKIE'] cookie_list = OS_COOKIE.split('#') log.info(f'检测到共配置了 {len(cookie_list)} 个帐号') for i in range(len(cookie_list)): log.info(f'准备为 NO.{i + 1} 账号签到...') try: ltoken = cookie_list[i].split('ltoken=')[1].split(';')[0] uid = cookie_list[i].split('account_id=')[1].split(';')[0] msg = f'Account NO.{i + 1}:{Sign(cookie_list[i]).run()}' msg_list.append(msg) success_num = success_num + 1 except Exception as e: msg = f'Account NO.{i + 1}:\n {e}' msg_list.append(msg) fail_num = fail_num + 1 log.error(msg) ret = -1 continue notify.send(status=f'Success: {success_num} | Failed: {fail_num}', msg=msg_list) if ret != 0: log.error('异常退出') exit(ret) log.info('任务结束')
# NOTE(review): fragment — `list_path`, `collection_name`, `env_path`,
# `output_dir`, `Common`, `Notify`, and the continuation after the trailing
# try (its except/remainder) are all outside this view.
with open(list_path, 'r') as f:
    reader = csv.reader(f)
    # Skip the CSV header row.
    header = next(reader)
    count = 0
    for row in reader:
        # First column is the manifest URL/identifier.
        manifest = row[0]
        # Progress to stdout every 20 rows...
        if count % 20 == 0:
            print(str(count) + "\t" + manifest)
        # ...and a Notify ping every 500 rows.
        if count % 500 == 0:
            Notify.send("dwn\t" + collection_name + "\t" + str(count), env_path)
        count += 1
        try:
            output_path = output_dir + "/" + Common.getId(
                manifest) + ".json"
        except Exception as e:
            print(e)
        # NOTE(review): if Common.getId raised above, `output_path` here is
        # stale (previous iteration) or unbound (first iteration) — looks
        # like a latent bug; a `continue` in the except was probably
        # intended.  Confirm.
        if not os.path.exists(output_path):
            # Throttle before each download.
            sleep(0.5)
            try:
                Common.download(manifest, output_path)
def start(self):
    """Process one rancher webhook event.

    Ignores pings and non-service resources.  For a service that became
    'active' or 'removed': notifies about the stack change, then rebuilds
    the utility stack's external load-balancer entries from every active
    stack's services and applies them.

    Raises:
        Exception: if the utility stack's external load balancer cannot
            be found.
        requests.HTTPError: if a rancher API call fails.
    """
    # ignore pings
    if self.event['name'] == 'ping':
        return
    # ignore all resources other than services.
    if self.event['resourceType'] != 'service':
        return
    # for services, we only care if the status has become active, or removed
    state = self.event['data']['resource']['state']
    if state != 'active' and state != 'removed':
        return
    log.info('Detected a change in rancher services. Begin processing.')
    log.info(self._raw)
    # Shared request options for both rancher API calls below.
    auth = (self.access_key, self.secret_key)
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    # get the current event's stack information
    r = requests.get(self.event['data']['resource']['links']['environment'],
                     auth=auth, headers=headers)
    r.raise_for_status()
    service_stack_response = r.json()
    try:
        notify = Notify(service_stack_response,
                        'started' if state == 'active' else 'stopped')
        notify.send()
    except Exception:
        # Was a bare `except:` — that also swallowed SystemExit and
        # KeyboardInterrupt and hid the traceback.  Notification stays
        # best-effort, but now only Exception is caught and the traceback
        # is logged.
        log.exception('An error occured while trying to notify stack change')
    # list of running stacks, called environments in api
    r = requests.get(self.api_endpoint + '/environments',
                     auth=auth, headers=headers)
    r.raise_for_status()
    stacks_response = r.json()
    loadbalancer_entries = []
    loadbalancer_service = None
    log.info(' -- Finding all Stacks')
    for stack in stacks_response['data']:
        stack_name = stack['name']
        # make sure the stack/environment is active
        if stack['state'] != 'active':
            log.info(' -- -- Ignoring {0} stack because it\'s not active'.format(stack_name))
            continue
        if stack_name == 'utility':
            loadbalancer_service = self.get_utility_loadbalancer(stack)
        depot_services = self.get_stack_services(stack)
        for service in depot_services:
            # Per-service override of the backend port via a label.
            port = service['launchConfig'].get('labels', {}).get('depot.lb.port', '80')
            loadbalancer_entries.append({
                'serviceId': service['id'],
                'ports': [
                    stack_name + '.' + self.domain + ':'
                    + self.external_loadbalancer_http_port + '=' + port
                ]
            })
    if loadbalancer_service is None:
        raise Exception(
            'Could not find the Utility stack external load balancer. This should never happen'
        )
    log.info(' -- Setting loadbalancer entries:')
    log.info(loadbalancer_entries)
    self.set_loadbalancer_links(loadbalancer_service, loadbalancer_entries)
    log.info('Finished processing')
def stream_file(self, fn):
    """Stream the gcode file *fn* to the serial port, line by line.

    Generator-based coroutine (pre-async/await style, uses ``yield from``).
    Handles pause/abort requests, manual tool change (M6 -> M400 -> M600
    state machine), optional wait-on-M0, and two flow-control modes:
    ping-pong (wait for an 'ok' per line via an asyncio.Event) or counted
    acks (okcnt as an integer).  Returns True on success, False if aborted
    or an exception occurred.
    """
    self.log.info('Comms: Streaming file {} to port'.format(fn))
    self.is_streaming = True
    self.abort_stream = False
    self.pause_stream = False  # start out not paused
    self.last_tool = None
    if self.ping_pong:
        # Event is set by each incoming 'ok'; we wait on it per line.
        self.okcnt = asyncio.Event()
    else:
        # Plain counter of acks received; compared to linecnt at the end.
        self.okcnt = 0
    f = None
    success = False
    linecnt = 0
    # 0 = normal, 1 = tool change seen (inject M400), 2 = M400 acked (inject M600).
    tool_change_state = 0
    try:
        f = yield from aiofiles.open(fn, mode='r')
        while True:
            if tool_change_state == 0:
                # needed to do it this way as the Event did not seem to work:
                # it would pause but not unpause.
                # TODO maybe use Future here to wait for unpause:
                # create future when pause, then yield from it here, then delete it
                if self.pause_stream:
                    if self.ping_pong:
                        # we need to ignore any ok from command while we are paused
                        self.okcnt = None
                    # wait until pause is released
                    while self.pause_stream:
                        yield from asyncio.sleep(1)
                        if self.progress:
                            self.progress(linecnt)
                        if self.abort_stream:
                            break
                    # recreate okcnt
                    if self.ping_pong:
                        self.okcnt = asyncio.Event()
                # read next line
                line = yield from f.readline()
                if not line:
                    # EOF
                    break
                if self.abort_stream:
                    break
                l = line.strip()
                # Skip blank lines and full-line comments.
                if len(l) == 0 or l.startswith(';'):
                    continue
                if l.startswith('(MSG'):
                    # (MSG ...) lines are displayed, not sent.
                    self.app.main_window.async_display(l)
                    continue
                if l.startswith('(NOTIFY'):
                    # (NOTIFY ...) lines trigger an external notification.
                    Notify.send(l)
                    continue
                if l.startswith('('):
                    # Any other parenthesised comment is dropped.
                    continue
                if l.startswith('T'):
                    # Remember the last tool selection for the change prompt.
                    self.last_tool = l
                if self.app.manual_tool_change:
                    # handle tool change M6 or M06
                    if l == "M6" or l == "M06" or "M6 " in l or "M06 " in l or l.endswith("M6"):
                        tool_change_state = 1
                if self.app.wait_on_m0:
                    # handle M0 if required
                    if l == "M0" or l == "M00":
                        # we basically wait for the continue dialog to be dismissed
                        self.app.main_window.m0_dlg()
                        self.m0 = asyncio.Event()
                        yield from self.m0.wait()
                        self.m0 = None
                        continue
            if self.abort_stream:
                break
            # handle manual tool change
            if self.app.manual_tool_change and tool_change_state > 0:
                if tool_change_state == 1:
                    # we insert an M400 so we can wait for the last command to
                    # actually execute and complete
                    line = "M400\n"
                    tool_change_state = 2
                elif tool_change_state == 2:
                    # we got the M400 so queue is empty, so we send a suspend
                    # and tell upstream
                    line = "M600\n"
                    # we need to pause the stream here immediately, but the
                    # real _stream_pause will be called by suspend
                    self.pause_stream = True  # we don't normally set this directly
                    self.app.main_window.tool_change_prompt(
                        "{} - {}".format(l, self.last_tool))
                    tool_change_state = 0
            # send the line
            if self.ping_pong and self.okcnt is not None:
                # clear the event, which will be set by an incoming ok
                self.okcnt.clear()
            self._write(line)
            # wait for ok from that command (I'd prefer to interleave with the
            # file read but it is too complex)
            if self.ping_pong and self.okcnt is not None:
                try:
                    yield from self.okcnt.wait()
                except Exception:
                    # Wait was cancelled (e.g. stream torn down) — stop streaming.
                    self.log.debug('Comms: okcnt wait cancelled')
                    break
            # when streaming we need to yield until the flow control is dealt with
            if self.proto._connection_lost:
                # Yield to the event loop so connection_lost() may be
                # called.  Without this, _drain_helper() would return
                # immediately, and code that calls
                #   write(...); yield from drain()
                # in a loop would never call connection_lost(), so it
                # would not see an error when the socket is closed.
                yield
            if self.abort_stream:
                break
            # if the buffers are full then wait until we can send some more
            yield from self.proto._drain_helper()
            if self.abort_stream:
                break
            # we only count lines that start with GMXY
            if l[0] in "GMXY":
                linecnt += 1
                if self.progress and linecnt % 10 == 0:
                    # update every 10 lines
                    if self.ping_pong:
                        # number of lines sent
                        self.progress(linecnt)
                    else:
                        # number of lines ok'd
                        self.progress(self.okcnt)
        success = not self.abort_stream
    except Exception as err:
        self.log.error("Comms: Stream file exception: {}".format(err))
    finally:
        if f:
            yield from f.close()
        if self.abort_stream:
            # Drop anything queued and send ctrl-X (abort) to the device.
            if self.proto:
                self.proto.flush_queue()
            self._write('\x18')
        if success and not self.ping_pong:
            self.log.debug('Comms: Waiting for okcnt to catch up: {} vs {}'.format(self.okcnt, linecnt))
            # we have to wait for all lines to be ack'd
            while self.okcnt < linecnt:
                if self.progress:
                    self.progress(self.okcnt)
                if self.abort_stream:
                    success = False
                    break
                yield from asyncio.sleep(1)
        # Reset streaming state regardless of outcome.
        self.file_streamer = None
        self.progress = None
        self.okcnt = None
        self.is_streaming = False
        self.do_query = False
        # notify upstream that we are done
        self.app.main_window.stream_finished(success)
        self.log.info('Comms: Streaming complete: {}'.format(success))
    return success
def start(self):
    """Process one rancher webhook event.

    Ignores pings and non-service resources.  For a service that became
    'active' or 'removed': notifies about the stack change, then rebuilds
    the utility stack's external load-balancer entries from every active
    stack's services and applies them.

    Raises:
        Exception: if the utility stack's external load balancer cannot
            be found.
        requests.HTTPError: if a rancher API call fails.
    """
    # ignore pings
    if self.event['name'] == 'ping':
        return
    # ignore all resources other than services.
    if self.event['resourceType'] != 'service':
        return
    # for services, we only care if the status has become active, or removed
    state = self.event['data']['resource']['state']
    if state != 'active' and state != 'removed':
        return
    log.info('Detected a change in rancher services. Begin processing.')
    log.info(self._raw)
    # Shared request options for both rancher API calls below.
    auth = (self.access_key, self.secret_key)
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    # get the current event's stack information
    r = requests.get(self.event['data']['resource']['links']['environment'],
                     auth=auth, headers=headers)
    r.raise_for_status()
    service_stack_response = r.json()
    try:
        notify = Notify(service_stack_response,
                        'started' if state == 'active' else 'stopped')
        notify.send()
    except Exception:
        # Was a bare `except:` — that also swallowed SystemExit and
        # KeyboardInterrupt and hid the traceback.  Notification stays
        # best-effort, but now only Exception is caught and the traceback
        # is logged.
        log.exception('An error occured while trying to notify stack change')
    # list of running stacks, called environments in api
    r = requests.get(self.api_endpoint + '/environments',
                     auth=auth, headers=headers)
    r.raise_for_status()
    stacks_response = r.json()
    loadbalancer_entries = []
    loadbalancer_service = None
    log.info(' -- Finding all Stacks')
    for stack in stacks_response['data']:
        stack_name = stack['name']
        # make sure the stack/environment is active
        if stack['state'] != 'active':
            log.info(' -- -- Ignoring {0} stack because it\'s not active'.format(stack_name))
            continue
        if stack_name == 'utility':
            loadbalancer_service = self.get_utility_loadbalancer(stack)
        depot_services = self.get_stack_services(stack)
        for service in depot_services:
            # Per-service override of the backend port via a label.
            port = service['launchConfig'].get('labels', {}).get('depot.lb.port', '80')
            loadbalancer_entries.append({
                'serviceId': service['id'],
                'ports': [
                    stack_name + '.' + self.domain + ':'
                    + self.external_loadbalancer_http_port + '=' + port
                ]
            })
    if loadbalancer_service is None:
        raise Exception(
            'Could not find the Utility stack external load balancer. This should never happen'
        )
    log.info(' -- Setting loadbalancer entries:')
    log.info(loadbalancer_entries)
    self.set_loadbalancer_links(loadbalancer_service, loadbalancer_entries)
    log.info('Finished processing')