Example #1
    def merge(self, other):
        if other.total_buy_month == 0:
            return

        if other.qq != self.qq and other.qq not in self.game_qqs:
            self.game_qqs.append(other.qq)

        for qq in other.game_qqs:
            if qq not in self.game_qqs:
                self.game_qqs.append(qq)

        self.total_buy_month += other.total_buy_month

        records = [*self.buy_records, *other.buy_records]  # type: List[BuyRecord]
        records.sort(key=lambda br: br.buy_at)

        # Recalculate the duration
        expired_at = parse_time(records[0].buy_at)
        for record in records:
            now = parse_time(record.buy_at)
            if now > expired_at:
                # Already expired; restart from this purchase's time
                start_time = now
            else:
                # Renewal; stack on top of the previous expiry
                start_time = expired_at

            expired_at = start_time + record.buy_month * timedelta(days=31)

        self.expire_at = format_time(expired_at)
        self.buy_records = records
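The loop above encodes the merge rule for stacking purchases: records are replayed in order of purchase time; a record bought before the running expiry extends it, while one bought after it restarts the clock. A minimal standalone sketch of just that rule, with plain (buy_at, buy_month) tuples standing in for the project's BuyRecord/parse_time helpers:

from datetime import datetime, timedelta

def stack_expiry(buy_records):
    # buy_records: list of (buy_at: datetime, buy_month: int), sorted by buy_at
    expired_at = buy_records[0][0]
    for buy_at, buy_month in buy_records:
        # expired: restart from this purchase; otherwise extend the current expiry
        start_time = buy_at if buy_at > expired_at else expired_at
        expired_at = start_time + buy_month * timedelta(days=31)
    return expired_at

# Two overlapping one-month purchases stack to roughly two months:
print(stack_expiry([(datetime(2021, 1, 1), 1), (datetime(2021, 1, 15), 1)]))  # 2021-03-04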
Example #2
def make_ffmpeg_args(in_file: str, out_file: str, from_time: str,
                     to_time: str) -> t.Tuple[str, ...]:
    args = DEFAULT_ARGS.copy()

    from_time = parse_time(from_time)
    from_time_formatted = format_time(from_time)
    args[10] = f'-ss {from_time_formatted}'

    args[20] = f'-i "{in_file}"'

    try:
        to_time = parse_time(to_time)
    except ValueError:
        pass
    else:
        duration_formatted = format_timedelta(get_duration(from_time, to_time))
        args[30] = f'-to {duration_formatted}'

    _, in_file_name = split_path(in_file)
    in_file_extension = get_extension(in_file_name)
    args[50] = f'"{add_extension(out_file, extension=in_file_extension)}"'

    sorted_args = tuple(v for _, v in sorted(args.items()))

    return sorted_args
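The integer keys here (10, 20, 30, 50) act as ordering slots: DEFAULT_ARGS is evidently a dict keyed by position, so optional pieces like the -to flag can be inserted or omitted independently, and the final sorted() restores command-line order. A minimal sketch of the idea (the concrete DEFAULT_ARGS contents below are an assumption, not the project's actual defaults):

# Hypothetical positional defaults; the gaps leave room for optional flags.
DEFAULT_ARGS = {0: 'ffmpeg', 40: '-c copy'}

args = DEFAULT_ARGS.copy()
args[10] = '-ss 00:01:00'
args[20] = '-i "in.mp4"'
args[50] = '"out.mp4"'
print(' '.join(v for _, v in sorted(args.items())))
# ffmpeg -ss 00:01:00 -i "in.mp4" -c copy "out.mp4"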
Example #3
def _day_dict_from_csv_to_day_record(d, year, month):
    day = date(year=year, month=month, day=int(d['day']))
    return DayRecord(
        day=day,
        day_type=d['day_type'],
        checkin=parse_time(d['checkin'], day),
        checkout=parse_time(d['checkout'], day),
    )
Example #4
File: bot.py Project: Jaryt/TicketBot
def process_ticket_list(tickets):
    ticket_list = ""
    body = '{months}{days} since we replied to `{link}`\n'
    suppressed = 0

    for ticket_id in tickets:
        ticket = tickets[ticket_id]

        days = ticket['delta']['days']
        months = ticket['delta']['months']
        month_str = ""
        day_str = ""

        if months > 0:
            month_str = multi(" month", months)

        if days > 0:
            if months > 0:
                day_str = " and "
            day_str += multi(" day", days)

        if manager_run and str(ticket['group_id']) == group_id:
            manager_body = '{months}{days} since we replied to `{link}`. *Assignee was first notified {first_days} ago*\n'

            if 'first_notify' in ticket:
                first_notify = util.parse_time(ticket['first_notify'])
                diff = relativedelta(today, first_notify)
                first_day_str = multi(" day", diff.days)

                if diff.days >= notify_after_days or diff.months > 0:
                    link = zd.get_zendesk_url() + tickets_url + ticket_id
                    message = manager_body.format(months=month_str, days=day_str, link=link, first_days=first_day_str)

                    ticket_list += message
        else:
            if layover_days > 0 and 'last_notify' in ticket:
                last_notify = util.parse_time(ticket['last_notify'])
                diff = relativedelta(today, last_notify)

                if diff.days < layover_days and diff.months == 0:
                    suppressed += 1
                    continue

            if 'first_notify' not in ticket:
                ticket['first_notify'] = util.to_string(today)

            ticket['last_notify'] = util.to_string(today)

            link = zd.get_zendesk_url() + tickets_url + ticket_id
            ticket_list += body.format(months=month_str, days=day_str, link=link)

    if ticket_list and suppressed > 0:
        ticket_list += "_Plus " + str(suppressed) + " tickets suppressed._\n"

    return ticket_list
Example #5
def dict_to_day_record(d):
    if d['Data'] == 'Total':
        return None

    day = datetime.strptime(d['Data'], '%Y/%m/%d').date()
    return DayRecord(
        day=day,
        day_type='NOR',
        checkin=parse_time(d['Check-In'], day),
        checkout=parse_time(d['Check-Out'], day),
    )
Example #6
    def __init__(self):
        if os.path.isfile(CONFIG_FILENAME):
            with open(CONFIG_FILENAME) as f:
                config_dict = json.load(f)
            super(Config, self).__init__(config_dict)
        else:
            raise FileNotFoundError(CONFIG_FILENAME)

        # Default values.
        self.options = [option for option in self.options if option[1]]
        self.sleep_duration = self.sleep_duration or 1.0
        self.start_time = parse_time(self.start_time)
        self.end_time = parse_time(self.end_time)
Example #7
    def __init__(self, elem, svg, target_id=""):
        self.elem = elem
        self.begin = parse_time(elem.attrib["begin"])
        self.dur = parse_time(elem.attrib["dur"])
        self.fill = self.elem.attrib.get("fill", "")

        if HREF_TAG in elem.attrib and elem.attrib[HREF_TAG].startswith("#"):
            # take everything after the leading "#" (lstrip would drop repeats)
            self.target_id = elem.attrib[HREF_TAG][1:]
        else:
            self.target_id = target_id

        self.target = xpath_id(svg, self.target_id)
        self.target_attrib = deepcopy(self.target.attrib)
Example #8
    def will_expire_in_days(self, days: int) -> bool:
        if run_from_src():
            # Running from source is not restricted
            return False

        return datetime.now() + timedelta(days=days) > parse_time(self.expire_at)
Example #9
async def command_timer(line, message, meta, reng):
    if meta['len'] != 1:
        return '**[Error]** This command cannot be used with other commands in the same message.'

    args = line.split(maxsplit=2)

    if len(args) < 2:
        return '**[Usage]** !timer <time> [message]'
    delay = util.parse_time(args[1])
    if delay <= 0 or delay > 604800:
        return f'**[Error]** Arg 1 ({args[1]}) must represent time between 1 second and 1 week.'

    desc = args[2] if len(args) == 3 else 'Timer is done'
    now = time.time()
    task = reng.client.loop.create_task(
        timer(message.channel.id, message.id, message.author.mention, desc,
              delay, now, reng))
    timer_dict[message.id] = {
        'task': task,
        'user_id': message.author.id,
        'message_id': message.id,
        'channel_id': message.channel.id,
        'message': desc,
        'set_on': now,
        'end_on': now + delay
    }
    await message.add_reaction(emoji.X)
    return f'Timer has been set. Edit, delete, or {emoji.X} the original message to cancel.'
Example #10
    async def remind(self, ctx, time, *, reminder):
        reminder = reminder.strip()
        if len(reminder) > 512:
            await ctx.send(embed=util.error_embed(
                "Maximum reminder length is 512 characters",
                "Foolish user error"))
            return
        extra_data = {
            "author_id": ctx.author.id,
            "channel_id": ctx.message.channel.id,
            "message_id": ctx.message.id,
            "guild_id": ctx.message.guild and ctx.message.guild.id,
            "original_time_spec": time
        }
        tz = await util.get_user_timezone(ctx)
        try:
            now = datetime.now(tz=timezone.utc)
            time = util.parse_time(time, tz)
        except Exception:
            await ctx.send(embed=util.error_embed(
                "Invalid time (wrong format/too large months or years)"))
            return
        utc_time, local_time = util.in_timezone(time, tz)
        id = (await self.bot.database.execute_insert(
            "INSERT INTO reminders (remind_timestamp, created_timestamp, reminder, expired, extra) VALUES (?, ?, ?, ?, ?)",
            (utc_time.timestamp(), now.timestamp(), reminder, 0,
             util.json_encode(extra_data))))["last_insert_rowid()"]
        await self.bot.database.commit()
        await ctx.send(
            f"Reminder scheduled for {util.format_time(local_time)} ({util.format_timedelta(now, utc_time)})."
        )
        self.insert_reminder(id, utc_time.timestamp())
Example #11
async def remind(ctx, time, *, reminder):
    reminder = reminder.strip()
    if len(reminder) > 512:
        await ctx.send(embed=util.error_embed(
            "Maximum reminder length is 512 characters",
            "Foolish user error"))
        return
    extra_data = {
        "author_id": ctx.author.id,
        "channel_id": ctx.message.channel.id,
        "message_id": ctx.message.id,
        "guild_id": ctx.message.guild and ctx.message.guild.id,
        "original_time_spec": time
    }
    try:
        now = datetime.now(tz=timezone.utc)
        time = util.parse_time(time)
    except Exception:
        await ctx.send(embed=util.error_embed(
            "Invalid time (wrong format/too large/non-integer months or years)"
        ))
        return
    await bot.database.execute(
        "INSERT INTO reminders (remind_timestamp, created_timestamp, reminder, expired, extra) VALUES (?, ?, ?, ?, ?)",
        (time.timestamp(), now.timestamp(), reminder, 0,
         util.json_encode(extra_data)))
    await bot.database.commit()
    await ctx.send(
        f"Reminder scheduled for {util.format_time(time)} ({util.format_timedelta(now, time)})."
    )
Example #12
    def need_show(self) -> bool:
        key = self.get_first_run_key()

        # Check whether the notice has expired
        if get_now() > parse_time(self.expire_at):
            return False

        # Check whether the version requirement is met
        if self.show_only_before_version != "" and not version_less(
                now_version, self.show_only_before_version):
            return False

        # Decide based on the show type
        if self.show_type == NoticeShowType.ONCE:
            return is_first_run(key)
        elif self.show_type == NoticeShowType.DAILY:
            return is_daily_first_run(key)
        elif self.show_type == NoticeShowType.WEEKLY:
            return is_weekly_first_run(key)
        elif self.show_type == NoticeShowType.MONTHLY:
            return is_monthly_first_run(key)
        elif self.show_type == NoticeShowType.ALWAYS:
            return True
        elif self.show_type == NoticeShowType.DEPRECATED:
            return False
        else:
            return False
Example #13
    def is_active(self):
        if run_from_src():
            # Running from source is not restricted
            return True

        now = datetime.now()
        return now <= parse_time(self.expire_at)
Example #14
    def logs(self, start_time, end_time):
        start, end = util.parse_time(start_time, end_time)
        url = self.api_endpoint + 'services/{0}/{1}/logs?start_time={2}&end_time={3}'.format(
            self.namespace, self.name, start, end)
        r = requests.get(url, headers=self.headers)
        util.check_response(r)
        return r.text
Example #15
def cmd_play(bot, user, text, command, parameter):
    global log

    params = parameter.split()
    index = -1
    start_at = 0
    if len(params) > 0:
        if params[0].isdigit() and 1 <= int(params[0]) <= len(var.playlist):
            index = int(params[0])
        else:
            bot.send_msg(tr('invalid_index', index=parameter), text)
            return

        if len(params) > 1:
            try:
                start_at = util.parse_time(params[1])
            except ValueError:
                bot.send_msg(tr('bad_parameter', command=command), text)
                return

    if len(var.playlist) > 0:
        if index != -1:
            bot.play(index - 1, start_at)

        elif bot.is_pause:
            bot.resume()
        else:
            bot.send_msg(var.playlist.current_item().format_current_playing(),
                         text)
    else:
        bot.is_pause = False
        bot.send_msg(tr('queue_empty'), text)
Example #16
def load_from_line(line):
    parts = line.rstrip().split("\t")
    ID = int(parts[0])
    cnt = int(parts[1])
    st = parts[2]
    ed = parts[3]
    st_time = parse_time(st) if st != "None" else None
    ed_time = parse_time(ed) if ed != "None" else None
    entry = StatEntry(ID, cnt, st_time, ed_time)
    return entry
Example #17
File: dao.py Project: zz0809001/djc_helper
    def will_expire_in_days(self, days: int, bypass_run_from_src=True) -> bool:
        from util import parse_time, run_from_src

        if run_from_src() and bypass_run_from_src:
            # Running from source is not restricted
            return False

        return datetime.now() + timedelta(days=days) > parse_time(self.expire_at)
Example #18
    def remaining_time(self):
        now = datetime.now()
        expire_at = parse_time(self.expire_at)

        if now < expire_at:
            return expire_at - now
        else:
            return timedelta()
Example #19
def has_buy_in_an_hour(qq):
    db = load_db()

    if key_buy_time not in db:
        return False

    buy_time = db[key_buy_time].get(str(qq), "2021-01-01 00:00:00")

    return parse_time(buy_time) >= datetime.now() - timedelta(hours=1)
Example #20
def has_buy_recently(qq, delta):
    db = load_db()

    if key_buy_time not in db:
        return False

    buy_time = db[key_buy_time].get(str(qq), "2021-01-01 00:00:00")

    return parse_time(buy_time) >= datetime.now() - delta
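Note that has_buy_in_an_hour in Example #19 is just this function specialized to a one-hour window, so the two could share one implementation:

from datetime import timedelta

def has_buy_in_an_hour(qq):
    return has_buy_recently(qq, timedelta(hours=1))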
Example #21
def should_backoff(ctx, managed_object: typing.Union[client.V1Deployment, client.V1DaemonSet]) -> bool:
    if ctx.obj['restart_label'] in managed_object.metadata.annotations.keys():
        last_restarted = parse_time(managed_object.metadata.annotations[ctx.obj['restart_label']])
        backoff_period = parse_duration(ctx.obj['backoff_period'])

        if datetime.utcnow() < last_restarted + backoff_period:
            return True

    return False
Example #22
def process(filename, outfilename, value_stats):
    stats = {}
    logging.info('process [%s]' % (os.path.basename(filename)))
    cnt = 0
    for line in open(filename, 'r'):
        cnt += 1
        if cnt % 1000000 == 0:
            logging.info("\t %d lines" % cnt)
        parts = extractor.parse_line(line)

        ID = get_id(parts[0])
        if ID is None:
            continue

        rtype = parts[2]
        if rtype not in stats:
            stats[rtype] = Stat(rtype)
        stat = stats[rtype]

        time = parse_time(parts[1])
        stat.add_entry(ID, time, parts[3].strip())

        values = parts[3].split("|&|")
        if len(values) >= 1:
            time = parse_time(values[0])
            if time is not None:
                stat.add_entry(ID, time)
    if outfilename is not None:
        with open(outfilename, 'w') as outf:
            for rtype in sorted(stats.keys()):
                stats[rtype].write_to_local(outf)

    if value_stats is not None:
        for rtype in stats.keys():
            stat = stats[rtype]
            total = stat.nrow()
            value_cnts = stat.get_value_stat()
            for i in range(len(value_cnts)):
                key = rtype + "#" + str(i)
                rate = round(value_cnts[i] / (total + 0.0), 3)
                value = (rate, value_cnts[i])
                value_stats[key] = value
Example #23
    def logs(self, start_time, end_time):
        start, end = util.parse_time(start_time, end_time)
        api_endpoint, token, _ = auth.load_token()
        url = api_endpoint + 'services/{0}/{1}/instances/{2}/logs?start_time={3}&end_time={4}'.format(
            self.service.namespace, self.service.name, self.uuid, start, end)
        headers = auth.build_headers(token)
        r = requests.get(url, headers=headers)
        util.check_response(r)
        return r.text
Example #24
    def add_all_timestamps(self, smil):

        for par_elem in xpath_default(smil, ".//i:par"):
            # Sentinel values; any real clip time narrows them.
            begin = float("inf")
            end = -1
            for audio_elem in xpath_default(par_elem, ".//i:audio"):
                clip_begin = parse_time(audio_elem.attrib["clipBegin"])
                clip_end = parse_time(audio_elem.attrib["clipEnd"])
                begin = min(clip_begin, begin)
                end = max(clip_end, end)
            for text_elem in xpath_default(par_elem, ".//i:text"):
                src = text_elem.attrib["src"]
                target_id = src.split("#")[-1]
                found = self.addTimestamp(target_id, begin, end)
                if not found:
                    logging.warning(f"SMIL file references an element {target_id} that does not exist in the TEI file")

        self.addMissingTimestamps()
Example #25
def read_stop_times_table(csv_path):
    # returns trip_id -> [(stop_id, sequence, arrival_seconds, departure_seconds), ...]
    ret = defaultdict(list)

    with open(csv_path) as csv_file:
        reader = csv.reader(csv_file)

        header = make_index_map(next(reader))
        for row in reader:
            trip_id = row[header["trip_id"]]
            stop_id = row[header["stop_id"]]
            arrival_seconds = parse_time(row[header["arrival_time"]])
            departure_seconds = parse_time(row[header["departure_time"]])
            sequence = int(row[header["stop_sequence"]])

            m = ret[trip_id]
            if any(existing[0] == stop_id for existing in m):
                raise Exception("Stop id %s specified twice for a given trip %s" % (stop_id, trip_id))
            tup = stop_id, sequence, arrival_seconds, departure_seconds
            m.append(tup)
    return ret
Example #26
File: push.py Project: mote/dviz
  def single_add(self, timestamp_str, timems, series, value, user_id,
                 user_secret):
    if not util.has_value(timestamp_str):
      if util.has_value(timems):
        time_seconds = float(timems) / 1000
        timestamp = util.from_seconds(time_seconds)
      else:
        timestamp = datetime.datetime.now()
    else:
      timestamp = util.parse_time(timestamp_str)
    data.add(name=series, value=value, timestamp=timestamp, user_id=user_id,
             secret=user_secret)
    return 'Added: %s, %s, %s\n' % (series, value, timestamp)
Example #27
def parse_tickets(tickets_json):
    for ticket in tickets_json['tickets']:
        assignee_id = str(ticket['assignee_id'])
        ticket_id = str(ticket['id'])
        created_at = util.parse_time(ticket['created_at'])
        tickets[ticket_id] = {'assignee_id': assignee_id, 'created_at': created_at, 'comments': {
            'public': {}, 'private': {}}, 'group_id': ticket['group_id']}
        agents = users['agents']

        if assignee_id in agents:
            agents[assignee_id]['assigned'].append(ticket_id)
        else:
            agents[assignee_id] = {
                'name': None, 'email': None, 'assigned': [ticket_id], 'commented': {}}
Example #28
def load_ticket_replies():
    rs = (grequests.get(url + comments_hook.format(ticket_id=ticket),
                        auth=(user + '/token', token)) for ticket in tickets)

    for response in grequests.map(rs):
        ticket_id = parse.parse(url + comments_hook, response.url)['ticket_id']
        ticket = tickets[ticket_id]
        comment_json = response.json()

        for comment in comment_json['comments']:
            author_id = str(comment['author_id'])
            is_public = 'public' if comment['public'] else 'private'
            ticket_vis = ticket['comments'][is_public]
            created_at = util.parse_time(comment['created_at'])
            comment_data = {'created_at': created_at, 'body': comment['body']}
            agents = users['agents']
            unknown_users = users['unknown']

            if author_id in agents:
                agent = agents[author_id]
                if ticket_id not in agent:
                    agent_comments = agent['commented']

                    if ticket_id in agent_comments:
                        agent_comments[ticket_id] = agent_comments[ticket_id] + 1
                    else:
                        agent_comments[ticket_id] = 1
            else:
                if author_id in unknown_users:
                    unknown_user = unknown_users[author_id]

                    if ticket_id in unknown_user:
                        unknown_user[ticket_id] = unknown_user[ticket_id] + 1
                    else:
                        unknown_user[ticket_id] = 1
                else:
                    unknown_users[author_id] = {ticket_id: 1}

            if author_id in ticket_vis:
                author_data = ticket_vis[author_id]
                last = ticket_vis[author_id]['last']

                if created_at > last['created_at']:
                    author_data['all'].append(last)
                    author_data['last'] = comment_data
                else:
                    author_data['all'].append(comment_data)
            else:
                ticket_vis[author_id] = {'last': comment_data, 'all': []}
Example #29
def adjust_timing(smil, smil_dir, begin_percent=0.2, end_percent=0.6):
    audio_library = AudioLibrary()
    for par_elem in xpath_default(smil, ".//i:par"):
        clip_src = ""
        for audio_elem in xpath_default(par_elem, ".//i:audio"):
            clip_src = audio_elem.attrib["src"]
            begin = parse_time(audio_elem.attrib["clipBegin"])
            end = parse_time(audio_elem.attrib["clipEnd"])

            # get the waveform of the clip
            audio_path = os.path.join(smil_dir, clip_src)
            waveform, sr = audio_library.get_clip(audio_path, begin, end)

            # get new begin and end timestamps as percentages of amplitude
            begin_frame_offset, end_frame_offset = get_timestamps_by_percentage(
                waveform, [begin_percent, end_percent])
            new_begin = begin + begin_frame_offset / sr
            new_end = begin + end_frame_offset / sr

            # change the attributes in the SMIL element
            audio_elem.attrib["clipBegin"] = "{:.2f}".format(new_begin)
            audio_elem.attrib["clipEnd"] = "{:.2f}".format(new_end)

    return smil
Example #30
    def get_time_position(self, t):
        """ Returns how far along in this animation element is time t, as a
            fraction. For example, if begin=2s and dur=10s, and we're
            at 7s, the result would be 0.5.

            This is for determining the interpolation of values, so it takes
            into account things like repeatCount="infinite" and fill="freeze".
            E.g. if repeatCount="infinite" in the above situation and we're at
            17s, the result would still be 0.5 because that's how far into the
            second repeat of the animation we are.

            If time t is outside of the animation's duration (including repeats),
            the result is -1.  (The exception to this is fill="freeze", which
            means that the last value of the animation remains as-is, e.g. the
            rectangle, after moving, stays moved instead of reappearing where
            it first started.  If time t is after an animation that's frozen,
            then the return value is always 1.0.)
        """

        time_since_begin = t - self.begin
        if time_since_begin < 0 or self.dur <= 0.0:
            return -1

        cycles_since_begin = math.floor(time_since_begin / self.dur) + 1

        if "repeatCount" not in self.elem.attrib and \
                "repeatDur" not in self.elem.attrib and \
                time_since_begin > self.dur:
            return 1.0 if self.fill == "freeze" else -1

        if "repeatCount" in self.elem.attrib and self.elem.attrib[
                "repeatCount"] != "indefinite":
            repeat_count = float(self.elem.attrib["repeatCount"])
            if cycles_since_begin > repeat_count:
                return 1.0 if self.fill == "freeze" else -1

        if "repeatDur" in self.elem.attrib and self.elem.attrib[
                "repeatDur"] != "indefinite":
            repeat_dur = parse_time(self.elem.attrib["repeatDur"])
            if time_since_begin >= repeat_dur:
                if self.fill == "freeze":
                    return ((repeat_dur - self.begin) % self.dur) / self.dur
                return -1

        time_position = (time_since_begin % self.dur) / self.dur
        return time_position
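A quick sanity check of the docstring's example, assuming the method above lives on an animation-element class, here called AnimElem (the snippet does not show the class name, and the stub below carries only the fields the method reads):

import types

fake_elem = types.SimpleNamespace(attrib={"repeatCount": "indefinite"})
anim = types.SimpleNamespace(begin=2.0, dur=10.0, fill="", elem=fake_elem)

print(AnimElem.get_time_position(anim, 7))   # 0.5, halfway through the first cycle
print(AnimElem.get_time_position(anim, 17))  # 0.5 again, halfway into the second repeat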
Example #31
def get_msg(r):
    msg = dict(
        id=r[0],
        datetime=r[1],
        text=r[2],
        sender=r[3],
        media=r[4],
        **json.loads(r[5]),
    )
    if len(r) > 6:
        msg['dialog'] = r[6]
    for field in DATETIME_FIELDS:
        if field not in msg:
            continue
        tz_field = msg[field]
        if isinstance(tz_field, str):
            msg[field] = parse_time(tz_field)
    return {k: v for k, v in msg.items() if v}  # get rid of falsey values
Example #32
def parse(path):
    print("reading routes...")
    routes = read_map(os.path.join(path, "routes.txt"), "route_id")
    print("reading stops...")
    stops = read_map(os.path.join(path, "stops.txt"), "stop_id")
    print("reading trips...")
    trips = read_map(os.path.join(path, "trips.txt"), "trip_id")
    print("reading calendar...")
    calendar = read_map(os.path.join(path, "calendar.txt"), "service_id")

    

    print("reticulating splines...")
    # mapping of (stop, direction) to list of (start_time, increment, count)
    schedule = defaultdict(Schedule)

    with open(os.path.join(path, "stop_times.txt")) as f:
        reader = csv.DictReader(f)

        for row in reader:
            trip = row["trip_id"]
            key = trips[trip]["trip_headsign"], trips[trip]["route_id"], trips[trip]["service_id"]

            sched = schedule[key]
            sched.trip = trip

            arrival_time = parse_time(row["arrival_time"])
            sched.add_time(arrival_time, row["stop_id"])
        
    # mapping route -> direction -> service -> sched
    ret_with_service = defaultdict(lambda: defaultdict(dict))

    for key, sched in schedule.items():
        direction, route, service = key

        ret_with_service[route][direction][service] = sched

        sched.compress()

    #ret = convert_service_to_weekdays(ret_with_service, calendar)

    return ret_with_service, trips, calendar
Example #33
def demo():
    logger.info(color("bold_yellow") + "Trying to launch the updater and waiting for it to finish. If there is a new version, it will kill this process, download the update files, and then restart the process... (please wait a moment)")

    dlc_path = os.path.realpath("auto_updater.py")
    p = subprocess.Popen(
        [
            dlc_path,
            "--pid",
            str(os.getpid()),
            "--version",
            str(now_version),
            "--cwd",
            os.getcwd(),
            "--exe_name",
            os.path.realpath("DNF蚊子腿小助手.exe"),
        ],
        cwd="utils",
        shell=True,
        creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.DETACHED_PROCESS,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    p.wait()

    if p.returncode != 0:
        last_modify_time = parse_timestamp(os.stat(dlc_path).st_mtime)
        logger.error(f"The DLC failed with error code {p.returncode}; the DLC was last modified at {last_modify_time}")

        uploader = Uploader()
        netdisk_latest_dlc_info = uploader.find_latest_dlc_version()
        latest_version_time = parse_time(netdisk_latest_dlc_info.time)

        if latest_version_time > last_modify_time:
            logger.info(
                f"The latest DLC on the netdisk was uploaded around {latest_version_time}, which is after the current copy's last modification time {last_modify_time}; it may already fix this problem, so the DLC will be updated to the latest version"
            )
            uploader.download_file(netdisk_latest_dlc_info, "utils")
        else:
            logger.warning(f"The latest DLC on the netdisk was uploaded around {latest_version_time}, which is before the current copy's last modification time {last_modify_time}; please wait patiently for a new release that fixes this problem~")
Example #34
def convert_msg_to_utc(from_tz):
    tz = pytz.timezone(from_tz)
    utc = pytz.utc
    for dialog_id, desc in store.dialog_names.items():
        known = store.known_messages(dialog_id)
        for msg_id, msg in known.items():
            for field in ('datetime', 'edit_date'):
                if msg.get(field):
                    # convert from the source timezone (from_tz) to UTC
                    tz_field = msg[field]
                    if isinstance(tz_field, str):
                        tz_field = parse_time(tz_field)
                    elif isinstance(tz_field, float):
                        tz_field = datetime.datetime.fromtimestamp(tz_field)
                    if not tz_field.tzinfo:
                        utc_date = tz_field.replace(tzinfo=tz).astimezone(utc)
                        msg[field] = utc_date

            store.add_msg(
                dialog_id, msg
            )

        print(desc)
Example #35
    def _parse(self, ses, opt, sniff=0):

        # init
        self.chunk_init()
        pt = util.parse_time()

        # process the file
        for line in util.file_progress(ses, self.fn, sniff):

            # match line
            line = line.strip()
            m = rec.match(line)
            if m:

                # process time_key
                time = m.group(time_key)
                if time:
                    for chunk in self.chunk_emit(flush=False):
                        yield chunk
                    self.chunk_extend()
                    self.chunk[time_key][-1] = time
                    self.last_time = time

                # process each data_key
                for data_key in rec.groupindex:
                    if data_key != time_key:
                        data = m.group(data_key)
                        if data is not None:
                            if self.chunk[data_key][-1] is not None:
                                self.chunk_extend()
                                self.chunk[time_key][-1] = self.last_time
                            self.chunk[data_key][-1] = data

        # finish up
        for chunk in self.chunk_emit(flush=True):
            yield chunk
Example #37
File: lunchbot.py Project: rpearl/lunchbot
    def add_timer(self, channel, user, message):
        """ Add a timer"""
        when, sep, msg = message.partition(' ')
        delta = parse_time(when)
        if delta is None:
            return "'%s' does not parse as a time" % (when,)

        if msg in self.timers[user]:
            return "there is already a timer for %s" % (msg,)

        end = "timer for " + choice(["%s is done", "%s is ready", "%s is finished"]) % msg
        try:
            end_time = datetime.now() + delta
        except Exception:
            return "Invalid date. Stop that."
        end_ts = time.mktime(end_time.timetuple())

        def response():
            self.reply(channel, user, end)
            del self.timers[user][msg]

        cb = self.io_loop.add_timeout(end_ts, response)
        self.timers[user][msg] = (cb, end_ts)
        return "okay! starting %s timer for %s." % (format_timedelta(delta), msg)
Example #39
def process(series, fn, opt):

    # to track metrics present in the data but not processed by any series
    unrecognized = set()

    # xxx does time parsing belong here or in the parse routines?
    pt = util.parse_time()

    # process all chunk that we are sent
    while True:

        try:

            # get our next input
            chunk = yield
                    
            def process_series(s, data_key):
                tz = chunk.tz if hasattr(chunk, 'tz') else s.tz
                time_key = s.time_key # e.g. 'serverStatus.localTime'
                if data_key in chunk and time_key in chunk:
                    ts = chunk[time_key]
                    if isinstance(ts[0], str):
                        for i, t in enumerate(ts):
                            ts[i] = pt.parse_time(t, opt, tz)
                    if ts[0]/s.time_scale > opt.before or ts[-1]/s.time_scale < opt.after:
                        return
                    for i, (t, d) in enumerate(zip(ts, chunk[data_key])):
                        t = t / s.time_scale
                        if t>=opt.after and t<=opt.before:
                            def get_field(key):
                                try: return chunk[key][i]
                                except IndexError: return None
                            if d != None:
                                s.data_point(t, d, get_field, None, opt)

            # send each series our data points
            for s in series:
                if s.special:
                    s.special(chunk)
                if s.split_on_key_match:
                    for data_key in chunk:
                        if data_key==s.time_key:
                            continue
                        m = s.split_on_key_match_re.match(data_key)
                        if m:
                            description = m.groupdict()
                            ss = s.get_split(data_key, description)
                            process_series(ss, data_key)
                else:
                    process_series(s, s.data_key)

            # track what we have used
            unrecognized.update(chunk.keys())

        except GeneratorExit:
            break

        except Exception as e:
            traceback.print_exc()
            raise Exception('error while processing ' + fn + ': ' + str(e))

    # compute and print unrecognized metrics
    ignore = re.compile(
        '^serverStatus.(repl|start|end)|'
        '^replSetGetStatus|slot_closure_rate'
    )
    for s in series:
        unrecognized.discard(s.data_key)
        unrecognized.discard(s.time_key)
    unrecognized = [x for x in unrecognized if not ignore.match(str(x))]
    is_str = lambda x: isinstance(x, str)
    unrecognized = [x for x in unrecognized if x in chunk and not is_str(chunk[x][0])]
    if unrecognized:
        util.msg('unrecognized metrics:')
        for u in sorted(unrecognized):
            util.msg('   ', u)
Example #40
def load_options(rule, conf, args=None):
    """ Converts time objects, sets defaults, and validates some settings.

    :param rule: A dictionary of parsed YAML from a rule config file.
    :param conf: The global configuration dictionary, used for populating defaults.
    """

    try:
        rule_schema.validate(rule)
    except jsonschema.ValidationError as e:
        raise EAException("Invalid Rule: %s\n%s" % (rule.get('name'), e))

    try:
        # Set all time based parameters
        if 'timeframe' in rule:
            rule['timeframe'] = datetime.timedelta(**rule['timeframe'])
        if 'realert' in rule:
            rule['realert'] = datetime.timedelta(**rule['realert'])
        else:
            rule['realert'] = datetime.timedelta(minutes=1)
        if 'aggregation' in rule and not rule['aggregation'].get('schedule'):
            rule['aggregation'] = datetime.timedelta(**rule['aggregation'])
        if 'query_delay' in rule:
            rule['query_delay'] = datetime.timedelta(**rule['query_delay'])
        if 'buffer_time' in rule:
            rule['buffer_time'] = datetime.timedelta(**rule['buffer_time'])
        if 'exponential_realert' in rule:
            rule['exponential_realert'] = datetime.timedelta(**rule['exponential_realert'])
        if 'kibana4_start_timedelta' in rule:
            rule['kibana4_start_timedelta'] = datetime.timedelta(**rule['kibana4_start_timedelta'])
        if 'kibana4_end_timedelta' in rule:
            rule['kibana4_end_timedelta'] = datetime.timedelta(**rule['kibana4_end_timedelta'])
        if 'start_time' in rule:
            rule['start_time'] = parse_time(rule['start_time'])
        if 'end_time' in rule:
            rule['end_time'] = parse_time(rule['end_time'])
    except (KeyError, TypeError) as e:
        raise EAException('Invalid time format used: %s' % (e))

    # Set defaults
    rule.setdefault('realert', datetime.timedelta(seconds=0))
    rule.setdefault('aggregation', datetime.timedelta(seconds=0))
    rule.setdefault('query_delay', datetime.timedelta(seconds=0))
    rule.setdefault('timestamp_field', '@timestamp')
    rule.setdefault('filter', [])
    rule.setdefault('timestamp_type', 'iso')
    rule.setdefault('_source_enabled', True)
    rule.setdefault('use_local_time', True)
    rule.setdefault('es_port', conf.get('es_port'))
    rule.setdefault('es_host', conf.get('es_host'))
    rule.setdefault('es_username', conf.get('es_username'))
    rule.setdefault('es_password', conf.get('es_password'))
    rule.setdefault('max_query_size', conf.get('max_query_size'))
    rule.setdefault('es_conn_timeout', conf.get('es_conn_timeout'))
    rule.setdefault('description', "")

    # Set elasticsearch options from global config
    if 'es_url_prefix' in conf:
        rule.setdefault('es_url_prefix', conf.get('es_url_prefix'))
    if 'use_ssl' in conf:
        rule.setdefault('use_ssl', conf.get('use_ssl'))

    # Set timestamp_type conversion function, used when generating queries and processing hits
    rule['timestamp_type'] = rule['timestamp_type'].strip().lower()
    if rule['timestamp_type'] == 'iso':
        rule['ts_to_dt'] = ts_to_dt
        rule['dt_to_ts'] = dt_to_ts
    elif rule['timestamp_type'] == 'unix':
        rule['ts_to_dt'] = unix_to_dt
        rule['dt_to_ts'] = dt_to_unix
    elif rule['timestamp_type'] == 'unix_ms':
        rule['ts_to_dt'] = unixms_to_dt
        rule['dt_to_ts'] = dt_to_unixms
    else:
        raise EAException('timestamp_type must be one of iso, unix, or unix_ms')

    # Set email options from global config
    rule.setdefault('smtp_host', conf.get('smtp_host', 'localhost'))
    rule.setdefault('from_addr', conf.get('from_addr', 'ElastAlert'))
    if 'smtp_port' in conf:
        rule.setdefault('smtp_port', conf.get('smtp_port'))
    if 'smtp_ssl' in conf:
        rule.setdefault('smtp_ssl', conf.get('smtp_ssl'))
    if 'smtp_auth_file' in conf:
        rule.setdefault('smtp_auth_file', conf.get('smtp_auth_file'))
    if 'email_reply_to' in conf:
        rule.setdefault('email_reply_to', conf['email_reply_to'])

    # Set slack options from global config
    rule.setdefault('slack_webhook_url', conf.get('slack_webhook_url'))

    # Make sure we have required options
    if required_locals - frozenset(rule.keys()):
        raise EAException('Missing required option(s): %s' % (', '.join(required_locals - frozenset(rule.keys()))))

    if 'include' in rule and not isinstance(rule['include'], list):
        raise EAException('include option must be a list')

    if isinstance(rule.get('query_key'), list):
        rule['compound_query_key'] = rule['query_key']
        rule['query_key'] = ','.join(rule['query_key'])

    # Add QK, CK and timestamp to include
    include = rule.get('include', ['*'])
    if 'query_key' in rule:
        include.append(rule['query_key'])
    if 'compound_query_key' in rule:
        include += rule['compound_query_key']
    if 'compare_key' in rule:
        include.append(rule['compare_key'])
    if 'top_count_keys' in rule:
        include += rule['top_count_keys']
    include.append(rule['timestamp_field'])
    rule['include'] = list(set(include))

    # Change top_count_keys to .raw
    if 'top_count_keys' in rule and rule.get('raw_count_keys', True):
        keys = rule.get('top_count_keys')
        rule['top_count_keys'] = [key + '.raw' if not key.endswith('.raw') else key for key in keys]

    # Check that generate_kibana_url is compatible with the filters
    if rule.get('generate_kibana_link'):
        for es_filter in rule.get('filter'):
            if es_filter:
                if 'not' in es_filter:
                    es_filter = es_filter['not']
                if 'query' in es_filter:
                    es_filter = es_filter['query']
                if next(iter(es_filter)) not in ('term', 'query_string', 'range'):
                    raise EAException('generate_kibana_link is incompatible with filters other than term, query_string and range. '
                                      'Consider creating a dashboard and using use_kibana_dashboard instead.')

    # Check that doc_type is provided if use_count/terms_query
    if rule.get('use_count_query') or rule.get('use_terms_query'):
        if 'doc_type' not in rule:
            raise EAException('doc_type must be specified.')

    # Check that query_key is set if use_terms_query
    if rule.get('use_terms_query'):
        if 'query_key' not in rule:
            raise EAException('query_key must be specified with use_terms_query')

    # Warn if use_strf_index is used with %y, %M or %D
    # (%y = short year, %M = minutes, %D = full date)
    if rule.get('use_strftime_index'):
        for token in ['%y', '%M', '%D']:
            if token in rule.get('index'):
                logging.warning('Did you mean to use %s in the index? '
                                'The index will be formatted like %s' % (token,
                                                                         datetime.datetime.now().strftime(rule.get('index'))))
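As a small illustration of the time-option handling at the top of load_options (a minimal sketch; schema validation and the surrounding ElastAlert machinery are omitted, and the rule values are hypothetical):

import datetime

# A hypothetical rule fragment as parsed from YAML:
rule = {'timeframe': {'hours': 1}, 'realert': {'minutes': 5}}

# load_options expands each mapping with datetime.timedelta(**...):
rule['timeframe'] = datetime.timedelta(**rule['timeframe'])
rule['realert'] = datetime.timedelta(**rule['realert'])
print(rule['timeframe'], rule['realert'])  # 1:00:00 0:05:00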
Example #41
def morning():
    time = request.form["time"]
    alarm.set_alarm(util.parse_time(time))
    return jsonify({"alarms": alarm.get_alarms()})