def start_svg_auto_updater():
    # block until the wall clock reads second 59
    second = dt.datetime.now().second + 1
    while second != 60:
        time.sleep(1)
        second = dt.datetime.now().second + 1
    print("Seconds synced")

    # block until the minute within the current 10-minute block is 3 or 8
    minute = dt.datetime.now().minute % 10 + 1
    while minute != 4 and minute != 9:
        time.sleep(60)
        minute = dt.datetime.now().minute % 10 + 1
    print("Minutes synced\nStarting update thread...")

    # refresh the SVG now and then every 5 minutes
    timeinterval.start(5 * 60 * 1000, update_svg)
    update_svg()
def __init__(self, *secrets, **kwargs):
    self.age = kwargs.get('age', 60)
    for secret in secrets:
        self.secrets.append(decode_secret(secret))
    # run self._clean every `age` seconds (interval is given in milliseconds)
    timeinterval.start(self.age * 1000, self._clean)
async def new_coming(self, event):
    async def call():
        print("websocket connected and asking for new messages every 100 seconds!")
        imbox = Imbox('imap.gmail.com', username='******', password='******',
                      ssl=True, ssl_context=None, starttls=False)
        last_message = MessageBlock.objects.last()
        ll = LastMessage.objects.all()[0]
        if last_message is not None:
            ll.date = last_message.date
            ll.save()
        last_message_date = ll.date
        print(last_message_date, "last message time from inside the consumer")
        all_inbox_messages = imbox.messages(date__gt=last_message_date)
        new_messages = []
        instance_new_messages = []
        for uid, message in all_inbox_messages:
            d = message.date.replace('(GMT)', '').strip()
            date = datetime.strptime(d, '%a, %d %b %Y %H:%M:%S %z')
            if date > last_message_date:
                # uid = message.message_id.replace('<', '').replace('@mail.gmail.com>', '')
                subject = message.subject
                my_attachments = message.attachments
                print('***************** Only the incoming message, we do not want the earlier ones ************************')
                cc = message.body['html'][0]
                print(cc.partition("<br>"))
                print('*****************************************')
                instance = cc.partition('<br>')
                only_coming_message = instance[0]
                from_name = message.sent_from[0]['name']
                from_email = message.sent_from[0]['email']
                to_name = message.sent_to[0]['name']
                to_email = message.sent_to[0]['email']
                body_plain = message.body['plain'][0]
                body_html = only_coming_message
                incoming = Message(
                    message_type='incoming',
                    uid=uid,
                    subject=subject,
                    name=from_name,
                    email=from_email,
                    body_plain=body_plain,
                    body_html=body_html,
                    date=date,
                )
                # incoming.mail_attachments.add()
                incoming.save()
                new_messages.append(incoming)
                instance_new_messages.append(incoming)
                print('new mail saved to the db and appended to the new_mails array')

        all_message_blocks = MessageBlock.objects.all()
        message_blocks = [[e.email, e.subject] for e in all_message_blocks]
        print(message_blocks, "query check")

        # drop replies to existing message blocks, keeping only brand-new threads
        for message in new_messages:
            for message_block in all_message_blocks:
                if message_block.messages.filter(subject='Re:{}'.format(message.subject),
                                                 email=message.email).exists():
                    print('a reply message arrived')
                    instance_new_messages = [x for x in instance_new_messages if x != message]
        print(instance_new_messages, 'messages that do not belong to any message block')

        new_blocks = [
            MessageBlock(
                subject=nw_blk.subject,
                name=nw_blk.name,
                email=nw_blk.email,
                date=nw_blk.date
            )
            for nw_blk in instance_new_messages
        ]
        print(new_blocks, "list of new blocks")
        for i in range(len(new_blocks)):
            new_blocks[i].save()
            new_blocks[i].messages.add(instance_new_messages[i])

        await self.send(text_data=json.dumps({
            'message': 'message'
        }))

        # reply_array = []
        # # my_incoming = []
        # # all_incoming = []
        # for mail in new_coming_message:
        #     reply_persons = mail.reply_persons.all()
        #     reply_p = serializers.serialize('json', reply_persons)
        #     reply_array.append(reply_p)
        #     for attach in my_attachments:
        #         print('#######################################')
        #         print(io.BytesIO(attach['content']))
        #         # atch = attachments(attach['filename'], attach['content'])
        #         # atch.save()
        #     # if person in reply_persons:
        #     #     my_incoming.append([mail, reply_persons])
        #     # all_incoming.append([mail, reply_persons])
        # # new_msg = serializers.serialize('json', new_coming_message)
        # reply_json = json.dumps(reply_array)
        # data = {'reply': reply_json, 'new_msg': new_msg}
        # return JsonResponse(data)

    # run the poll once now, then schedule it every 100 seconds
    await call()
    timeinterval.start(100000, call)
def watchForButtonPress(self):
    # poll the buttons every 50 ms and keep the returned timer handle
    self.timer = timeinterval.start(50, self.buttonsPressed)
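The examples above all share the same call shape. Below is a minimal standalone sketch of that pattern, assuming only what the snippets themselves show: timeinterval.start(interval_ms, callback, *args) starts a repeating timer, forwards extra positional arguments to the callback, and returns a handle. The tick() callback, the "demo" argument, and the 2-second interval are illustrative placeholders, not taken from any of the projects above.

import time

import timeinterval


def tick(label):
    # called on every interval; `label` is the extra argument given to start()
    print("tick:", label)


# interval is in milliseconds; extra positional args are forwarded to the callback
timer = timeinterval.start(2000, tick, "demo")

# give the timer a chance to fire a few times
time.sleep(10)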
def start(self, test=True):
    """
    Startup script for webhook routing.

    Called from agent start
    """
    cherrypy.log = logger.CherryLog()
    cherrypy.config.update({
        'log.screen': False,
        'log.access_file': '',
        'log.error_file': ''
    })
    cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'custom': {
                '()': 'server.logger.Logger'
            }
        },
        'handlers': {
            'console': {
                'level': 'INFO',
                'class': 'server.logger.Logger',
                'formatter': 'custom',
                'stream': 'ext://sys.stdout'
            }
        },
        'loggers': {
            '': {
                'handlers': ['console'],
                'level': 'INFO'
            },
            'cherrypy.access': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
            'cherrypy.error': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
        }
    })

    # lots of legacy stuff here which isn't used
    defaults = {
        'deploy_ver': 0,  # usable for deployment tools
        'server': {
            'route_base': '/api/v1',
            'port': 64000,
            'host': '0.0.0.0'
        },
        'heartbeat': 10,
        'status_report': 3600,  # every hour
        'requestid': True,
        'refresh_maps': 300,
        'cache': {
            'housekeeper': 60,
            'policies': 300,
            'sessions': 300,
            'groups': 300
        },
        'auth': {
            'expires': 300
        }
    }

    cfgin = None

    # try docker secrets
    if os.path.exists("/run/secrets/SERVER_CONFIG"):
        with open("/run/secrets/SERVER_CONFIG") as infile:
            cfgin = infile.read()

    # try environ
    if not cfgin:
        cfgin = os.environ.get('SERVER_CONFIG')

    if cfgin:
        try:
            cfgin = json2data(base64.b64decode(cfgin))
        except:  # pylint: disable=bare-except
            try:
                cfgin = json2data(cfgin)
            except Exception as err:  # pylint: disable=broad-except
                traceback.print_exc()
                logger.abort("Cannot process SERVER_CONFIG: " + str(err) +
                             " from " + cfgin)
        conf = Dict(dictlib.union(defaults, cfgin))
    else:
        logger.log("Unable to find configuration, using defaults!")
        conf = Dict(defaults)

    # cherry py global
    cherry_conf = {
        'server.socket_port': 64000,
        'server.socket_host': '0.0.0.0'
    }

    if dictlib.dig_get(conf, 'server.port'):  # .get('port'):
        cherry_conf['server.socket_port'] = int(conf.server.port)
    if dictlib.dig_get(conf, 'server.host'):  # .get('host'):
        cherry_conf['server.socket_host'] = conf.server.host

    # if production mode
    if test:
        logger.log("Test mode enabled", type="notice")
        conf['test_mode'] = True
    else:
        cherry_conf['environment'] = 'production'
        conf['test_mode'] = False

    sys.stdout.flush()
    cherrypy.config.update(cherry_conf)
    cherrypy.config.update({'engine.autoreload.on': False})
    self.conf = conf

    sys.path.append('.')

    # # eventually
    # for mod in self.endpoint_names:
    #     self.add_endpoint(mod)
    # hack for now
    # from . import polyform as polyform
    from server.endpoints import polyform
    self.add_endpoint('polyform', polyform)

    # startup cleaning interval
    def housekeeper(server):
        for endpoint in server.endpoints:
            try:
                endpoint.handler.housekeeper(server)
            except:  # pylint: disable=bare-except
                traceback.print_exc()

    timeinterval.start(conf.auth.expires * 1000, housekeeper, self)

    # mount routes
    cherrypy.tree.mount(http.Health(server=self),
                        conf.server.route_base + "/health",
                        self.endpoint_conf)

    int_mon = cherrypy.process.plugins.Monitor(cherrypy.engine,
                                               self.monitor,
                                               frequency=conf.heartbeat / 2)
    int_mon.start()

    # whew, now start the server
    logger.log("Base path={}".format(conf.server.route_base), type="notice")
    cherrypy.engine.start()
    cherrypy.engine.block()
def start_housekeeper(self, interval):
    """startup the housekeeper interval"""
    timeinterval.start(interval * 1000, self._clean)
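The housekeeper snippets above only show the scheduling line. Here is a minimal sketch of the kind of object they imply, assuming a simple in-memory cache: the ExpiringCache name, its dict layout, and the set() helper are hypothetical; only the start_housekeeper()/_clean() scheduling pattern mirrors the examples.

import time

import timeinterval


class ExpiringCache:
    """Hypothetical cache whose entries expire after `age` seconds."""

    def __init__(self, age=60):
        self.age = age
        self.data = {}  # key -> (value, stored_at)

    def set(self, key, value):
        self.data[key] = (value, time.time())

    def _clean(self):
        # drop anything stored more than `age` seconds ago
        cutoff = time.time() - self.age
        for key in [k for k, (_, stamp) in self.data.items() if stamp < cutoff]:
            del self.data[key]

    def start_housekeeper(self, interval):
        # same pattern as the example above: seconds in, milliseconds out
        timeinterval.start(interval * 1000, self._clean)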
def start(self, test=True):
    """
    Startup script for webhook routing.

    Called from agent start
    """
    cherrypy.log = CherryLog()
    cherrypy.config.update({
        'log.screen': False,
        'log.access_file': '',
        'log.error_file': ''
    })
    cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'custom': {
                '()': 'rfxengine.server.cherry.Logger'
            }
        },
        'handlers': {
            'console': {
                'level': 'INFO',
                'class': 'rfxengine.server.cherry.Logger',  # logging.StreamHandler
                'formatter': 'custom',
                'stream': 'ext://sys.stdout'
            }
        },
        'loggers': {
            '': {
                'handlers': ['console'],
                'level': 'INFO'
            },
            'cherrypy.access': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
            'cherrypy.error': {
                'handlers': ['console'],
                'level': 'INFO',
                'propagate': False
            },
        }
    })

    defaults = {
        'deploy_ver': 0,  # usable for deployment tools
        'server': {
            'route_base': '/api/v1',
            'port': 54000,
            'host': '0.0.0.0'
        },
        'heartbeat': 10,
        'status_report': 3600,  # every hour
        'requestid': False,
        'refresh_maps': 300,
        'cache': {
            'housekeeper': 60,
            'policies': 300,
            'sessions': 300,
            'groups': 300
        },
        'crypto': {  # pylint: disable=bad-continuation
            # '000': {  # dd if=/dev...
            #     'key': "",
            #     'default': True,
            # }
        },
        'db': {
            'database': 'reflex_engine',
            'user': '******'
        },
        'auth': {
            'expires': 300
        }
    }

    cfgin = None

    # try docker secrets
    if os.path.exists("/run/secrets/REFLEX_ENGINE_CONFIG"):
        with open("/run/secrets/REFLEX_ENGINE_CONFIG") as infile:
            cfgin = infile.read()

    # try environ
    if not cfgin:
        cfgin = os.environ.get('REFLEX_ENGINE_CONFIG')

    if cfgin:
        try:
            cfgin = json2data(base64.b64decode(cfgin))
        except:  # pylint: disable=bare-except
            try:
                cfgin = json2data(cfgin)
            except Exception as err:  # pylint: disable=broad-except
                traceback.print_exc()
                self.ABORT("Cannot process REFLEX_ENGINE_CONFIG: " + str(err) +
                           " from " + cfgin)
        conf = dictlib.Obj(dictlib.union(defaults, cfgin))
    else:
        self.NOTIFY("Unable to find configuration, using defaults!")
        conf = dictlib.Obj(defaults)

    # cherry py global
    cherry_conf = {
        'server.socket_port': 9000,
        'server.socket_host': '0.0.0.0'
    }

    if dictlib.dig_get(conf, 'server.port'):  # .get('port'):
        cherry_conf['server.socket_port'] = int(conf.server.port)
    if dictlib.dig_get(conf, 'server.host'):  # .get('host'):
        cherry_conf['server.socket_host'] = conf.server.host

    # if production mode
    if test:
        log("Test mode enabled", type="notice")
        conf['test_mode'] = True
    else:
        cherry_conf['environment'] = 'production'
        conf['test_mode'] = False

    # db connection
    self.dbm = mxsql.Master(config=conf.db, base=self, crypto=conf.get('crypto'))

    # configure the cache
    self.dbm.cache = rfxengine.memstate.Cache(**conf.cache.__export__())
    self.dbm.cache.start_housekeeper(conf.cache.housekeeper)

    # schema
    schema = dbo.Schema(master=self.dbm)
    schema.initialize(verbose=False, reset=False)

    sys.stdout.flush()
    cherrypy.config.update(cherry_conf)
    endpoint_conf = {
        '/': {
            'response.headers.server': "stack",
            'tools.secureheaders.on': True,
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'request.method_with_bodies': ('PUT', 'POST', 'PATCH'),
        }
    }
    cherrypy.config.update({'engine.autoreload.on': False})
    self.conf = conf

    # startup cleaning interval
    def clean_keys(dbm):
        """periodically called to purge expired auth keys from db"""
        dbo.AuthSession(master=dbm).clean_keys()

    timeinterval.start(conf.auth.expires * 1000, clean_keys, self.dbm)

    # recheck policymaps every so often
    def check_policymaps(dbm):
        """
        periodically remap policy maps, in case somebody was fidgeting
        where they shouldn't be
        """
        dbo.Policyscope(master=dbm).remap_all()

    timeinterval.start(conf.refresh_maps * 1000, check_policymaps, self.dbm)

    # mount routes
    cherrypy.tree.mount(endpoints.Health(conf, server=self),
                        conf.server.route_base + "/health",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Token(conf, server=self),
                        conf.server.route_base + "/token",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="config"),
                        conf.server.route_base + "/config",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="service"),
                        conf.server.route_base + "/service",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="pipeline"),
                        conf.server.route_base + "/pipeline",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="instance"),
                        conf.server.route_base + "/instance",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="build"),
                        conf.server.route_base + "/build",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="group"),
                        conf.server.route_base + "/group",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="apikey"),
                        conf.server.route_base + "/apikey",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="policy"),
                        conf.server.route_base + "/policy",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="policyscope"),
                        conf.server.route_base + "/policyscope",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.Object(conf, server=self, obj="state"),
                        conf.server.route_base + "/state",
                        endpoint_conf)
    cherrypy.tree.mount(endpoints.InstancePing(conf, server=self),
                        conf.server.route_base + "/instance-ping",
                        endpoint_conf)
    # cherrypy.tree.mount(endpoints.Compose(conf, server=self),
    #                     conf.server.route_base + "/compose",
    #                     endpoint_conf)

    # setup our heartbeat monitor
    int_mon = cherrypy.process.plugins.Monitor(cherrypy.engine,
                                               self.monitor,
                                               frequency=conf.heartbeat / 2)
    int_mon.start()

    log("Base path={}".format(conf.server.route_base), type="notice")
    cherrypy.engine.start()
    cherrypy.engine.block()