def __init__(self, **desc):
    """
    A Domain partitioned into evenly-spaced Duration ranges.

    Either explicit `partitions` are supplied (not implemented yet), or
    `min`, `max` and `interval` must all be given, from which the
    partitions are generated.
    """
    Domain.__init__(self, **desc)
    self.type = "duration"
    self.NULL = Null
    # normalize the boundary parameters to Duration values
    self.min = Duration(self.min)
    self.max = Duration(self.max)
    self.interval = Duration(self.interval)

    if self.partitions:
        # IGNORE THE min, max, interval
        if not self.key:
            Log.error("Must have a key value")
        Log.error("not implemented yet")
        # VERIFY PARTITIONS DO NOT OVERLAP
        return

    if not all([self.min, self.max, self.interval]):
        Log.error("Can not handle missing parameter")

    self.key = "min"
    # build one part per interval step in [min, max)
    parts = []
    for index, low in enumerate(Duration.range(self.min, self.max, self.interval)):
        parts.append({
            "min": low,
            "max": low + self.interval,
            "dataIndex": index
        })
    self.partitions = wrap(parts)
def __sub__(self, other):
    """
    Subtract from this Date: a Date/datetime yields the Duration between
    them; anything else is treated as a duration and shifts this Date back.
    """
    # NOTE: `== None` is deliberate — the project's Null compares equal to None
    if other == None:
        return None
    if isinstance(other, datetime):
        # plain datetime: wrap it first so both sides are unix seconds
        return Duration(self.unix - Date(other).unix)
    if isinstance(other, Date):
        return Duration(self.unix - other.unix)
    # duration-like operand: subtracting is adding the negation
    return self.add(-other)
def __init__(self, from_address, to_address, subject, host, username, password, port=465, use_ssl=1, log_type="email", max_interval=HOUR, settings=None):
    """
    SEND WARNINGS AND ERRORS VIA EMAIL

    settings = {
        "log_type":"email",
        "from_address": "*****@*****.**",
        "to_address": "*****@*****.**",
        "subject": "Problem in Pulse Logger",
        "host": "mail.mozilla.com",
        "port": 465,
        "username": "******",
        "password": "******",
        "use_ssl": 1
    }
    """
    # fixed typo in the failure message ("Expecing" -> "Expecting");
    # NOTE(review): `assert` is stripped under `python -O` — consider Log.error instead
    assert settings.log_type == "email", "Expecting settings to be of type 'email'"
    self.settings = settings
    self.accumulation = []          # messages buffered until the next send
    self.next_send = Date.now() + MINUTE
    self.locker = Lock()            # guards accumulation/next_send
    # normalize max_interval to a Duration
    self.settings.max_interval = Duration(settings.max_interval)
def simple_date(sign, dig, type, floor):
    """
    Build a Date from a datetime keyword (e.g. "today"); a sign or digit
    multiplier is an error, and an optional floor rounds the result down.
    """
    if dig or sign:
        from pyLibrary.debugs.logs import Log
        Log.error("can not accept a multiplier on a datetime")
    result = Date(type)
    if floor:
        # round down to the given duration boundary
        result = result.floor(Duration(floor))
    return result
def parse_time_expression(value):
    """
    Parse a time expression such as "today-2day" or "now|hour" into a
    Date or Duration.  Terms are a leading value followed by any number
    of +term / -term continuations; "unit|floor" floors a datetime term.
    """
    def simple_date(sign, dig, type, floor):
        # datetime keywords may not carry a sign or digit multiplier
        if dig or sign:
            from pyLibrary.debugs.logs import Log
            Log.error("can not accept a multiplier on a datetime")
        if floor:
            # round the datetime down to the given duration boundary
            return Date(type).floor(Duration(floor))
        else:
            return Date(type)

    # split into first term plus trailing signed terms
    terms = re.match(r'(\d*[|\w]+)\s*([+-]\s*\d*[|\w]+)*', value).groups()
    sign, dig, type = re.match(r'([+-]?)\s*(\d*)([|\w]+)', terms[0]).groups()
    if "|" in type:
        # "unit|floor" — remember the floor unit separately
        type, floor = type.split("|")
    else:
        floor = None

    if type in MILLI_VALUES.keys():
        # known duration unit (e.g. "2day") — the whole term is a Duration
        value = Duration(dig + type)
    else:
        # otherwise a datetime keyword (e.g. "today")
        value = simple_date(sign, dig, type, floor)

    for term in terms[1:]:
        if not term:
            continue
        sign, dig, type = re.match(r'([+-])\s*(\d*)([|\w]+)', term).groups()
        if "|" in type:
            type, floor = type.split("|")
        else:
            floor = None

        # dispatch + / - to the matching dunder on the accumulated value
        op = {"+": "__add__", "-": "__sub__"}[sign]
        if type in MILLI_VALUES.keys():
            if floor:
                from pyLibrary.debugs.logs import Log
                Log.error("floor (|) of duration not accepted")
            value = value.__getattribute__(op)(Duration(dig + type))
        else:
            value = value.__getattribute__(op)(simple_date(sign, dig, type, floor))

    return value
def get_instance_metadata(timeout=None):
    """
    Fetch EC2 instance metadata via boto, with dashes in the keys
    replaced by underscores.  `timeout` may be a number of seconds or
    anything Duration() accepts; defaults to 5 seconds.
    """
    if not isinstance(timeout, (int, float)):
        # non-numeric timeout (e.g. duration string) -> seconds
        timeout = Duration(timeout).seconds

    raw = boto_utils.get_instance_metadata(
        timeout=coalesce(timeout, 5),
        num_retries=2
    )
    output = wrap({key.replace("-", "_"): value for key, value in raw.items()})
    return output
def __init__(self, till=None, timeout=None, seconds=None):
    """
    A Signal that fires at a given moment.

    :param till: absolute time (anything Date() accepts) to fire at
    :param timeout: duration from now (anything Duration() accepts)
    :param seconds: plain number of seconds from now
    """
    global next_ping
    Signal.__init__(self, "a timeout")
    # pick the first provided parameter, converted to unix seconds
    # NOTE(review): if all three are None, timeout stays None and the
    # min() below will fail — presumably callers always pass one; verify
    if till != None:
        timeout = Date(till).unix
    elif timeout != None:
        timeout = _time() + Duration(timeout).seconds
    elif seconds != None:
        timeout = _time() + seconds

    with Till.locker:
        # wake the ping thread no later than this new deadline
        next_ping = min(next_ping, timeout)
        Till.all_timers.append((timeout, self))
def __init__(self, rollover_field, rollover_interval, rollover_max, queue_size=10000, batch_size=5000, settings=None):
    """
    :param rollover_field: the FIELD with a timestamp to use for determining which index to push to
    :param rollover_interval: duration between roll-over to new index
    :param rollover_max: remove old indexes, do not add old records
    :param queue_size: number of documents to queue in memory
    :param batch_size: number of documents to push at once
    :param settings: plus additional ES settings
    :return:
    """
    self.settings = settings
    # compile the timestamp accessor once
    self.rollover_field = jx.get(rollover_field)
    # normalize the intervals to Durations, mirroring them back onto settings
    interval = Duration(settings.rollover_interval)
    maximum = Duration(settings.rollover_max)
    self.rollover_interval = self.settings.rollover_interval = interval
    self.rollover_max = self.settings.rollover_max = maximum
    self.known_queues = {}  # MAP DATE TO INDEX
    self.cluster = elasticsearch.Cluster(self.settings)
def __init__(self, host, index, type="log", max_size=1000, batch_size=100, settings=None):
    """
    settings ARE FOR THE ELASTICSEARCH INDEX

    Buffers log documents in a queue and pushes them to Elasticsearch
    from a background thread.
    """
    # round-trip through JSON so SCHEMA is expanded into leaf form
    self.es = Cluster(settings).get_or_create_index(
        schema=convert.json2value(convert.value2json(SCHEMA), leaves=True),
        limit_replicas=True,
        tjson=True,
        settings=settings
    )
    self.batch_size = batch_size
    self.es.add_alias(coalesce(settings.alias, settings.index))
    # silent queue: logging about the log queue would recurse
    self.queue = Queue("debug logs to es", max=max_size, silent=True)
    # default retry policy: 3 attempts, one minute apart
    self.es.settings.retry.times = coalesce(self.es.settings.retry.times, 3)
    self.es.settings.retry.sleep = Duration(coalesce(self.es.settings.retry.sleep, MINUTE))
    # background thread drains self.queue into the index
    Thread.run("add debug logs to es", self._insert_loop)
def __init__(self, repo=None, timeout=30 * SECOND, settings=None):
    """
    Changeset accessor; optionally backed by an Elasticsearch cache.

    :param repo: ES settings for the cache index; None disables caching
    :param timeout: request timeout (anything Duration() accepts)
    :param settings: additional settings for this instance
    """
    self.settings = settings
    self.timeout = Duration(timeout)
    self.branches = self.get_branches()

    # NOTE: `== None` is the project convention (Null compares equal to None)
    if repo == None:
        return

    # set up the ES cache index
    self.es = elasticsearch.Cluster(settings=repo).get_or_create_index(
        settings=repo)
    self.es.add_alias()
    self.es.set_refresh_interval(seconds=1)

    # TO ESTABLISH DATA — seed one known mozilla-inbound changeset
    self.es.add({
        "id": "b3649fd5cd7a-mozilla-inbound",
        "value": {
            "index": 247152,
            "branch": {
                "name": "mozilla-inbound"
            },
            "locale": DEFAULT_LOCALE,
            "changeset": {
                "id": "b3649fd5cd7a76506d2cf04f45e39cbc972fb553",
                "id12": "b3649fd5cd7a",
                "author": "Ryan VanderMeulen <*****@*****.**>",
                "description": "Backed out changeset 7d0d8d304cd8 (bug 1171357) for bustage.",
                "date": 1433429100,
                "files": ["gfx/thebes/gfxTextRun.cpp"]
            },
            "push": {
                "id": 60618,
                "user": "******",
                "date": 1433429138
            },
            "parents": ["7d0d8d304cd871f657effcc2d21d4eae5155fd1b"],
            "children": ["411a9af141781c3c8fa883287966a4af348dbca8"]
        }
    })
    self.es.flush()
    self.current_push = None
def __init__(self, **desc):
    """
    A Domain partitioned into evenly-spaced Duration ranges.

    NOTE(review): this appears to duplicate an identical __init__
    elsewhere in the codebase — consider consolidating.
    """
    Domain.__init__(self, **desc)
    self.type = "duration"
    self.NULL = Null
    # normalize the boundary parameters to Duration values
    self.min = Duration(self.min)
    self.max = Duration(self.max)
    self.interval = Duration(self.interval)
    if self.partitions:
        # IGNORE THE min, max, interval
        if not self.key:
            Log.error("Must have a key value")
        Log.error("not implemented yet")
        # VERIFY PARTITIONS DO NOT OVERLAP
        return
    elif not all([self.min, self.max, self.interval]):
        Log.error("Can not handle missing parameter")
    self.key = "min"
    # one part per interval step in [min, max)
    self.partitions = wrap([
        {"min": v, "max": v + self.interval, "dataIndex": i}
        for i, v in enumerate(Duration.range(self.min, self.max, self.interval))
    ])
def duration(self):
    """
    Elapsed time as a Duration: the recorded interval once the timer has
    ended, otherwise a live reading against the clock.
    """
    if self.end:
        return Duration(self.interval)
    # still running — measure from start to right now
    return Duration(clock() - self.start)
def duration(self):
    """Return the recorded interval, wrapped as a Duration."""
    return Duration(self.interval)