def update_character_sheet(key_id, vcode, mask, character_id):
    data = eveapi.character_sheet(key_id, vcode, mask, character_id)
    # Fudge the cached_until timer because it always returns ~30 seconds, and we
    # don't care to update that often
    data.cached_until = data.cached_until + datetime.timedelta(minutes=30)
    db.save_character_sheet(data)
    cache.remove("character-sheet:%s" % character_id)
    cache.remove("character-skills:%s" % character_id)
    stat = {'cached_until': data.cached_until,
            'response_code': 200,
            'response_error': '',
            'character_id': character_id,
            'api_method': 'CharacterSheet',
            'ignored': False,
            'key_id': key_id}
    db.save_update_status(stat)

    # Handle clone status alert
    alerts = db.get_alert('CLONE_CAPACITY', character_id)
    skillpoints = sum(int(skill.skillpoints) for skill in data.skills.rows)
    for alert in alerts:
        cooldown = datetime.datetime.utcnow() + datetime.timedelta(minutes=alert.interval)
        remaining = int(data.cloneskillpoints) - skillpoints
        # option_1_value is stored as a string (cf. the int() casts in the
        # sibling updaters), so cast before comparing.
        if remaining < int(alert.option_1_value):
            mail.send_alert(alert.user_id, alert, remaining)
            db.update_alert(alert.alert_type_id, alert.user_id,
                            alert.character_id, cooldown)

def update_character_queue(key_id, vcode, mask, character_id):
    data = eveapi.skill_queue(key_id, vcode, mask, character_id)
    db.save_skill_queue(character_id, data.skillqueue)
    cache.remove("character-queue:%s" % character_id)
    stat = {'cached_until': data.cached_until,
            'response_code': 200,
            'response_error': '',
            'character_id': character_id,
            'api_method': 'SkillQueue',
            'ignored': False,
            'key_id': key_id}
    db.save_update_status(stat)

    # Handle queue length alert
    alerts = db.get_alert('QUEUE_TIME', character_id)
    if len(data.skillqueue.rows) == 0:
        return
    for alert in alerts:
        cooldown = datetime.datetime.utcnow() + datetime.timedelta(minutes=alert.interval)
        last_skill = data.skillqueue.rows[-1]
        if last_skill.endtime != '':
            end_time = datetime.datetime.strptime(last_skill.endtime, '%Y-%m-%d %H:%M:%S')
            if end_time - datetime.timedelta(hours=int(alert.option_1_value)) < datetime.datetime.utcnow():
                mail.send_alert(alert.user_id, alert, end_time)
                db.update_alert(alert.alert_type_id, alert.user_id,
                                alert.character_id, cooldown)

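# Both updaters above persist a cached_until timestamp; a plausible driver is
# a loop that re-polls a character only once that timestamp has passed. The
# `keys` mapping and db.get_cached_until() below are hypothetical stand-ins
# for the project's real key storage and status query -- a sketch, not the
# original scheduler:
def poll_expired(keys, interval=60):
    """keys maps character_id -> (key_id, vcode, mask) -- assumed shape."""
    while True:
        now = datetime.datetime.utcnow()
        for character_id, (key_id, vcode, mask) in keys.items():
            if db.get_cached_until(character_id) < now:  #hypothetical helper
                update_character_sheet(key_id, vcode, mask, character_id)
                update_character_queue(key_id, vcode, mask, character_id)
        time.sleep(interval)
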
def on_message(client, userdata, msg):
    payload = json.loads(msg.payload)
    sensor_type = payload.get("type")
    min_value, max_value = payload.get("range")
    value = payload.get("value")
    print("Data received: " + str(msg.payload))
    # A chained comparison handles floats and includes max_value; the original
    # `value not in range(min_value, max_value)` only worked for ints and
    # excluded the upper bound.
    if not min_value <= value <= max_value:
        print("Value of " + sensor_type + " is out of range. Value: " + str(value))
        # sender, receiver, and the SMTP settings are assumed to be
        # module-level configuration defined elsewhere in the script.
        send_alert(sender, receiver, login_pass, smtpserver, smtpport)

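# The (client, userdata, msg) signature matches paho-mqtt's on_message
# callback; a minimal sketch of wiring it up (the broker host/port and the
# topic are placeholders, not taken from the original script):
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("sensors/telemetry")  #hypothetical topic
client.loop_forever()  # blocks, dispatching each publish to on_message
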
def notify(self):
    logging.warning("State: NOTIFY")
    self.kinect.set_led(LED_RED)
    try:
        mail.send_alert("Motion detected")
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit.
        logging.exception("Alert failed!")
        return self.alarm
    for _ in range(NOTIFY_TIMEOUT):
        time.sleep(1)
        if self.new_state:
            return
    return self.alarm

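# notify() above returns the next state to run (self.alarm) or a bare None
# when self.new_state was set externally; one compatible driver loop could
# look like this (the idle start state and the new_state hand-off are guesses
# about the surrounding class, not taken from it):
def run(machine):
    state = machine.idle  #hypothetical initial state
    while state is not None:
        next_state = state()
        if next_state is None and machine.new_state:
            # Hand off to the externally requested state and clear the flag.
            next_state, machine.new_state = machine.new_state, None
        state = next_state
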
def co2():
    message = request.get_json()
    co2 = int(message["value"])
    uuid = message["id"]
    database.update_co2(uuid, co2)
    if co2 > 1000:
        mail_list = database.get_mail_addr(uuid)
        for adr in mail_list:
            mail.send_alert(adr)
        line_ids = database.get_Line_id(uuid)
        for line_id in line_ids:
            print(line_id, type(line_id))
            try:
                line_bot_api.push_message(
                    line_id,
                    # "CO2 has built up. Please ventilate immediately."
                    TextMessage(text="CO2が充満しております。至急換気をお願いします。"))
            except Exception:
                print("fail line id-->", line_id)
    return Response("OK", status=200)

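# This handler's use of request.get_json() and Response matches Flask; a
# minimal sketch of registering it as a route (the URL path, port, and the
# "room-42"/1250 sample payload are assumptions for illustration):
from flask import Flask, Response, request

app = Flask(__name__)
app.add_url_rule("/co2", view_func=co2, methods=["POST"])

if __name__ == "__main__":
    # e.g. curl -X POST -H 'Content-Type: application/json' \
    #      -d '{"id": "room-42", "value": 1250}' http://localhost:5000/co2
    app.run(port=5000)
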
def update_api_key(key_id, vcode, character_id):
    mask, characters, expires = eveapi.key_info(key_id, vcode)
    # Fudge the cached_until timer. key_info() returns a tuple, so track the
    # expiry in a local rather than on a nonexistent `data` object.
    cached_until = datetime.datetime.utcnow() + datetime.timedelta(days=2)
    #db.update_api_key(key_id, vcode, mask, expires)
    stat = {'cached_until': cached_until.replace(tzinfo=FixedOffsetTimezone(0)),
            'response_code': 200,
            'response_error': '',
            'character_id': character_id,
            'api_method': 'APIKeyInfo',
            'ignored': False,
            'key_id': key_id}
    db.save_update_status(stat)

    # Handle key expiry alert
    alerts = db.get_alert('API_KEY', character_id)
    for alert in alerts:
        cooldown = datetime.datetime.utcnow() + datetime.timedelta(minutes=alert.interval)
        if expires - datetime.timedelta(days=int(alert.option_1_value)) < datetime.datetime.utcnow():
            mail.send_alert(alert.user_id, alert, None)
            db.update_alert(alert.alert_type_id, alert.user_id,
                            alert.character_id, cooldown)

def post(self):
    """
    Handles an HTTP POST request.
    """
    _logger.info("Received an HTTP POST request for '%(path)s' from %(address)s" % {
        'path': self.request.path,
        'address': self.request.remote_ip,
    })
    try:
        _logger.debug("Processing request...")
        output = self._post()
    except filesystem.Error:
        summary = "Filesystem error; exception details follow:\n" + traceback.format_exc()
        _logger.critical(summary)
        mail.send_alert(summary)
        self.send_error(500, premature_termination=False)
    except pymongo.errors.AutoReconnect:
        summary = "Database unavailable; exception details follow:\n" + traceback.format_exc()
        _logger.error(summary)
        mail.send_alert(summary)
        self.send_error(503, premature_termination=False)
    except PrematureTermination as e:
        #Raised when an error code is sent from a handler.
        _logger.debug(str(e))
    except Exception:
        summary = "Unknown error; exception details follow:\n" + traceback.format_exc()
        _logger.error(summary)
        mail.send_alert(summary)
        self.send_error(500, premature_termination=False)
    else:
        _logger.debug("Responding to request...")
        try:
            if output is not None:
                self.write(output)
            self.finish()
        except Exception:
            _logger.error("Unknown error when writing response; exception details follow:\n" + traceback.format_exc())

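# self.request.remote_ip, send_error(), and finish() indicate this post() is
# a tornado.web.RequestHandler method. A minimal sketch of mounting it (the
# handler class, URL, and port below are assumptions for illustration):
import tornado.ioloop
import tornado.web

class UploadHandler(tornado.web.RequestHandler):
    def _post(self):
        return {"status": "ok"}  #hypothetical stand-in for the real worker

UploadHandler.post = post  # attach the module-level post() defined above

application = tornado.web.Application([(r"/upload", UploadHandler)])
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
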
def _retrieve(server, uid, read_key, content):
    target_path = "%(base)s%(host)s_%(port)i%(sep)s" % {
        'base': CONFIG.storage_path,
        'host': server['host'],
        'port': server['port'],
        'sep': os.path.sep,
    }
    try:
        if not os.path.isdir(target_path):
            _logger.info("Creating directory %(path)s..." % {
                'path': target_path,
            })
            try:
                os.makedirs(target_path, 0700)
            except OSError as e:
                if e.errno == 17: #EEXIST: lost a creation race
                    _logger.debug("Directory %(path)s already exists" % {
                        'path': target_path,
                    })
                else:
                    raise

        contentfile = "%(path)s%(name)s" % {
            'path': target_path,
            'name': uid,
        }
        metafile = "%(contentfile)s%(ext)s" % {
            'contentfile': contentfile,
            'ext': _EXTENSION_METADATA,
        }

        _cache_lock.acquire()
        try:
            #If either file is missing from the cache, fetch both.
            for filename in (contentfile, metafile):
                if not os.path.isfile(filename):
                    _cache_lock.release()
                    _download(server, uid, read_key, contentfile, metafile)
                    _cache_lock.acquire()
                    break

            mf = open(metafile, 'rb')
            meta = json.loads(mf.read())
            mf.close()

            if meta['keys']['read'] == read_key:
                if content:
                    cf = open(contentfile, 'rb')
                    content = cf.read()
                    cf.close()
                    return (content, meta)
                else:
                    return meta
            else:
                raise PermissionsError("Invalid read key provided for '%(uid)s'" % {
                    'uid': uid,
                })
        finally:
            try:
                _cache_lock.release()
            except Exception:
                #Lock already released
                pass
    except media_storage.NotAuthorisedError:
        raise PermissionsError("Invalid read key provided for '%(uid)s'" % {
            'uid': uid,
        })
    except (OSError, IOError):
        summary = "Unable to access files on disk; stack trace follows:\n" + traceback.format_exc()
        _logger.critical(summary)
        mail.send_alert(summary)
        return None

def add_entity(server, path, meta):
    """
    Copies the file at the given path to the appropriate local path and adds
    a '.meta' file.

    On completion, the entity is added to the runtime upload pool.

    The entity is '.part'-cycled.
    """
    target_path = "%(base)s%(host)s_%(port)i_%(flags)s%(sep)s" % {
        'base': CONFIG.storage_path,
        'host': server['host'],
        'port': server['port'],
        'flags': (
            (server['ssl'] and 's' or '') +
            (server['srv'] and 'S' or '')
        ),
        'sep': os.path.sep,
    }
    try:
        if not os.path.isdir(target_path):
            _logger.info("Creating directory %(path)s..." % {
                'path': target_path,
            })
            try:
                os.makedirs(target_path, 0700)
            except OSError as e:
                if e.errno == 17: #EEXIST: lost a creation race
                    _logger.debug("Directory %(path)s already exists" % {
                        'path': target_path,
                    })
                else:
                    raise

        permfile = "%(path)s%(name)s" % {
            'path': target_path,
            'name': meta['uid'],
        }
        tempfile = "%(permfile)s%(ext)s" % {
            'permfile': permfile,
            'ext': _EXTENSION_PARTIAL,
        }

        _logger.debug("Copying data from %(source)s to %(destination)s..." % {
            'source': path,
            'destination': tempfile,
        })
        shutil.copyfile(path, tempfile)

        metafile = "%(permfile)s%(ext)s" % {
            'permfile': permfile,
            'ext': _EXTENSION_METADATA,
        }
        _logger.debug("Writing metadata to %(destination)s..." % {
            'destination': metafile,
        })
        metafile_fp = open(metafile, 'wb')
        metafile_fp.write(json.dumps(meta))
        metafile_fp.close()

        #Atomically promote the '.part' file to its permanent name.
        _logger.debug("Renaming data from %(source)s to %(destination)s..." % {
            'source': tempfile,
            'destination': permfile,
        })
        os.rename(tempfile, permfile)
    except (OSError, IOError):
        summary = "Unable to write files to disk; stack trace follows:\n" + traceback.format_exc()
        _logger.critical(summary)
        mail.send_alert(summary)
    else:
        _pool.put((
            (server['host'].encode('utf-8'), server['port'], server['ssl'], server['srv']),
            permfile.encode('utf-8'),
            metafile.encode('utf-8'),
        ))
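
# Illustrative call only: the server record and metadata dict below are
# invented, but their fields mirror what add_entity() actually reads above.
server = {
    'host': 'media01.example.com',  #hypothetical storage node
    'port': 8080,
    'ssl': False,
    'srv': False,
}
meta = {'uid': 'f3a1c9', 'keys': {'read': 'rkey', 'write': 'wkey'}}  #hypothetical
add_entity(server, '/tmp/incoming/f3a1c9.jpg', meta)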