def apply_fudge_factor(fudge_factor):
    """
    Sleep for a random fraction of the requested scheduling fudge factor.

    :param fudge_factor: The maximum number of seconds to sleep (a number).

    Previous implementations of the fudge factor interrupt used UNIX signals
    (specifically ``SIGUSR1``) but the use of this signal turned out to be
    sensitive to awkward race conditions and it wasn't very cross platform,
    so now the creation of a regular file is used to interrupt the fudge
    factor.
    """
    if not fudge_factor:
        return
    timer = Timer()
    logger.debug("Calculating fudge factor based on user defined maximum (%s) ..",
                 format_timespan(fudge_factor))
    sleep_target = fudge_factor * random.random()
    logger.info("Sleeping for %s because of user defined fudge factor ..",
                format_timespan(sleep_target))
    interrupt_file = get_lock_path(INTERRUPT_FILE)
    while timer.elapsed_time < sleep_target:
        # The existence of this file is the cross-platform interrupt signal.
        if os.path.isfile(interrupt_file):
            logger.info("Fudge factor sleep was interrupted! (%s exists)", interrupt_file)
            break
        # Sleep at most one second at a time so interrupts are noticed quickly.
        remaining = min(1, sleep_target - timer.elapsed_time)
        if remaining > 0:
            time.sleep(remaining)
    else:
        # Only reached when the loop wasn't interrupted (no break).
        logger.info("Finished sleeping because of fudge factor (took %s).", timer)
def healthcheck():
    """
    Collect basic health information about the service process and its host.

    :returns: A dict with a static status string plus human readable values
              for free disk space, system uptime and process uptime.
    """
    import psutil
    import humanfriendly
    now = time.time()
    # BUG FIX: the original called os.getgid() (the *group* ID, not a process
    # ID) and bound it to an unused `pid` variable; that misleading line has
    # been removed.
    ppid = os.getppid()
    # The *parent* PID is inspected on purpose: under a prefork server such as
    # gunicorn the long-lived process of interest is the master, not the
    # worker handling this request (per the original author's note).
    current_process = psutil.Process(pid=ppid)
    process_uptime_delta = now - current_process.create_time()
    process_uptime_human = humanfriendly.format_timespan(process_uptime_delta)
    system_uptime_delta = now - psutil.boot_time()
    system_uptime_human = humanfriendly.format_timespan(system_uptime_delta)
    # Renamed from `free_memory`: psutil.disk_usage('/') reports *disk* space.
    free_disk_space = psutil.disk_usage('/').free
    free_disk_space_human = humanfriendly.format_size(free_disk_space)
    return {
        'status': 'Operational',
        'free_disk_space': free_disk_space_human,
        'system_uptime': system_uptime_human,
        'process_uptime': process_uptime_human,
    }
def batch_stats(self, request, batch_id):
    """
    Render per-user work statistics for one Batch.

    :param request: the current HTTP request.
    :param batch_id: primary key of the Batch to report on.
    :returns: rendered ``batch_stats.html`` page, or a redirect to the batch
              changelist (with an error message) when the Batch is missing.
    """
    try:
        batch = Batch.objects.get(id=batch_id)
    except ObjectDoesNotExist:
        messages.error(request, 'Cannot find Batch with ID {}'.format(batch_id))
        return redirect(reverse('turkle_admin:turkle_batch_changelist'))
    stats_users = []
    for user in batch.users_that_completed_tasks().order_by('username'):
        assignments = batch.assignments_completed_by(user)
        if assignments:
            # Most recent completion for this user (ordered by updated_at).
            last_finished_time = assignments.order_by('updated_at').last().updated_at
            mean_work_time = '%.2f' % float(statistics.mean(
                [ta.work_time_in_seconds() for ta in assignments]))
            median_work_time = '{}s'.format(int(statistics.median(
                [ta.work_time_in_seconds() for ta in assignments])))
        else:
            last_finished_time = 'N/A'
            mean_work_time = 'N/A'
            median_work_time = 'N/A'
        stats_users.append({
            'username': user.username,
            'full_name': user.get_full_name(),
            'assignments_completed': batch.total_assignments_completed_by(user),
            'mean_work_time': mean_work_time,
            'median_work_time': median_work_time,
            'last_finished_time': last_finished_time,
        })
    # Batch-wide first/last completion timestamps.
    finished_assignments = batch.finished_task_assignments().order_by('updated_at')
    if finished_assignments:
        first_finished_time = finished_assignments.first().updated_at
        last_finished_time = finished_assignments.last().updated_at
    else:
        first_finished_time = 'N/A'
        last_finished_time = 'N/A'
    return render(request, 'admin/turkle/batch_stats.html', {
        'batch': batch,
        'batch_total_work_time': humanfriendly.format_timespan(
            batch.total_work_time_in_seconds(), max_units=6),
        'batch_mean_work_time': humanfriendly.format_timespan(
            batch.mean_work_time_in_seconds(), max_units=6),
        'batch_median_work_time': humanfriendly.format_timespan(
            batch.median_work_time_in_seconds(), max_units=6),
        'first_finished_time': first_finished_time,
        'last_finished_time': last_finished_time,
        'stats_users': stats_users,
    })
def log_duration(task_desc):
    """
    Generator-based helper that logs how long the wrapped work took.

    :param task_desc: Human readable description of the task (a string).

    NOTE(review): presumably used via ``contextlib.contextmanager`` (the
    decorator is not visible in this chunk) — confirm at the definition site.
    """
    start_time = time.time()
    try:
        yield
    finally:
        # BUG FIX: wrapped in try/finally so the duration is logged even when
        # the code inside the `with` block raises an exception.
        duration_sec = time.time() - start_time
        duration_string = humanfriendly.format_timespan(duration_sec)
        logger.debug("Finished {}. Took {} ({:.2f} sec)".format(task_desc, duration_string, duration_sec))
def remove_older_than(directory):
    """Removes files older than number of seconds given in spec expiry

    :param directory: object exposing ``spec`` (a dict that must contain
        ``cutoff``, an integer number of seconds or a
        :class:`datetime.timedelta`) and ``contents`` (iterable of paths).
    :raises UnregulatableError: when ``cutoff`` is missing or not convertible.
    """
    try:
        delta = directory.spec['cutoff']
    except KeyError:
        raise UnregulatableError('Must specify cutoff')
    if isinstance(delta, timedelta):
        delta = delta.total_seconds()
    try:
        delta = int(delta)
    except (ValueError, TypeError):
        # BUG FIX: int(None) or int(<arbitrary object>) raises TypeError, not
        # ValueError; previously such values escaped as an unrelated error
        # instead of the intended UnregulatableError.
        raise UnregulatableError('cutoff must be int or timedelta!')
    now = time.time()
    cutoff = now - delta
    logger.debug(
        'Remove files that are %s old. Date: %s',
        humanfriendly.format_timespan(delta),
        datetime.fromtimestamp(cutoff)
    )
    # Delete every entry whose modification time predates the cutoff.
    contents = directory.contents
    for f in contents:
        if os.stat(f).st_mtime < cutoff:
            os.remove(f)
def data_by_user(members, start_date, end_date):
    """
    Aggregate time-tracking records per user.

    :param members: iterable of usernames to report on.
    :param start_date: include records starting at or after this date.
    :param end_date: include records ending at or before this date.
    :returns: dict mapping each username to its tickets, total time spent
              (seconds and human readable), per-ticket time and comments.
    """
    qs = Record.objects.filter(username__in=members,
                               start_date__gte=start_date,
                               end_date__lte=end_date)
    data_by_user = {}
    for u in members:
        data_by_user[u] = {
            'tickets': set(),
            'time_spent_sec': 0,
            'time_by_ticket': defaultdict(int),
            'comments_by_ticket': defaultdict(list)
        }
    for rec in qs.all():
        spent_sec = (rec.end_date - rec.start_date).total_seconds()
        x = data_by_user[rec.username]
        x['tickets'].add(rec.ticket)
        x['time_spent_sec'] += spent_sec
        x['time_by_ticket'][rec.ticket] += spent_sec
        x['comments_by_ticket'][rec.ticket].append(rec.comment)
    for x in data_by_user.values():
        # BUG FIX: the original did `x['time_spent_sec'] += spent_sec` here,
        # adding the *last* record's duration to every user's total (and
        # raising NameError when the queryset was empty). Totals are already
        # fully accumulated in the loop above.
        x['time_spent'] = (
            humanfriendly.format_timespan(x['time_spent_sec']))
    return data_by_user
def __str__(self):
    """Render a human readable key/value summary of the backup run."""
    elapsed = humanfriendly.format_timespan(self.etime - self.stime)
    rows = [
        ("Execution Time", elapsed),
        ("Execution Error", str(self.error)),
        ("Report(To)", self.mailer),
    ]
    return formatkv(rows, title="SITEBACKUP")
def main():
    """
    Estimate how long a full DynamoDB table scan will take and print it.

    Reads the human friendly ``size`` and ``time_span`` command line
    arguments, converts the table size into read capacity units and divides
    by the available read rate.
    """
    args = parser.parse_args()
    size_kb = humanfriendly.parse_size(args.size) / 1024
    # NOTE(review): parse_timespan() returns a number of seconds; the name
    # suggests it is being reused as "read capacity units per second" —
    # confirm that this is the intended meaning of --time-span.
    read_units_per_second = humanfriendly.parse_timespan(args.time_span)
    # Scan operation is eventually consistent, which is half the cost (hence /2 )
    # Each 4kb consumes one capacity unit.
    read_capacity_units_needed = size_kb / 2 / 4
    time_needed = read_capacity_units_needed / read_units_per_second
    # FIX: use print as a function call; with a single argument this behaves
    # identically on Python 2 while also being valid Python 3 (the original
    # `print '...'` statement is a syntax error on Python 3).
    print('It will take (roughly): {0}'.format(humanfriendly.format_timespan(time_needed)))
def clean_text(self):
    """
    Check that the user hasn't uploaded too many pastes
    """
    limit_reached = Limiter.is_limit_reached(self.request, Limiter.PASTE_UPLOAD)
    if limit_reached:
        action_limit = Limiter.get_action_limit(self.request, Limiter.PASTE_UPLOAD)
        period = format_timespan(settings.MAX_PASTE_UPLOADS_PERIOD)
        # Reject the submission with a user-facing rate-limit message.
        raise forms.ValidationError(
            "You can only upload %s pastes every %s." % (action_limit, period)
        )
    return self.cleaned_data.get("text")
def detect( self, in_img_c ):
    """
    Run the TensorFlow detector on one image and return thresholded detections.

    :param in_img_c: input image container (provides ``.height()``,
        ``.width()`` and ``.image()``).
    :returns: a ``DetectedObjectSet`` of bounding boxes with confidence scores.
    """
    import tensorflow as tf
    import humanfriendly
    image_height = in_img_c.height(); image_width = in_img_c.width()
    if (self.norm_image_type and self.norm_image_type != "none"):
        print("Normalizing input image")
        # Normalize the (assumed 16-bit) image and replicate it to three
        # channels so it matches the RGB input the detection graph expects.
        in_img = in_img_c.image().asarray().astype("uint16")
        bottom, top = self.get_scaling_values(self.norm_image_type, in_img, image_height)
        in_img = self.lin_normalize_image(in_img, bottom, top)
        in_img = np.tile(in_img, (1,1,3))
    else:
        in_img = np.array(get_pil_image(in_img_c.image()).convert("RGB"))
    start_time = time.time()
    boxes, scores, classes = self.generate_detection(self.detection_graph, in_img)
    elapsed = time.time() - start_time
    print("Done running detector in {}".format(humanfriendly.format_timespan(elapsed)))
    good_boxes = []
    detections = DetectedObjectSet()
    for i in range(0, len(scores)):
        if(scores[i] >= self.confidence_thresh):
            bbox = boxes[i]
            good_boxes.append(bbox)
            # Boxes come back as relative [top, left, bottom, right]
            # coordinates; scale them to absolute pixel coordinates.
            top_rel = bbox[0]
            left_rel = bbox[1]
            bottom_rel = bbox[2]
            right_rel = bbox[3]
            xmin = left_rel * image_width
            ymin = top_rel * image_height
            xmax = right_rel * image_width
            ymax = bottom_rel * image_height
            dot = DetectedObjectType(self.category_name, scores[i])
            obj = DetectedObject(BoundingBox(xmin, ymin, xmax, ymax), scores[i], dot)
            detections.add(obj)
    print("Detected {}".format(len(good_boxes)))
    return detections
def _can_run_next_block(self) -> bool:
    """
    Decide whether another benchmarking block may be run.

    :returns: False when the estimated time for the next round would overrun
              the configured end time, or when the maximum number of block
              runs has been reached; True otherwise (always True in
              standalone mode).
    """
    if not in_standalone_mode:
        estimated_time = self.stats_helper.estimate_time_for_next_round(
            self.run_block_size, all=self.block_run_count < self.min_runs)
        to_bench_count = len(self.stats_helper.get_program_ids_to_bench())
        if round(time.time() + estimated_time) > self.end_time:
            # FIX: corrected grammar in the log message ("Ran to long" ->
            # "Ran too long").
            logging.warning("Ran too long ({}) and is therefore now aborted. "
                            "{} program blocks should've been benchmarked again."
                            .format(humanfriendly.format_timespan(time.time() + estimated_time - self.start_time),
                                    to_bench_count))
            return False
        if self.block_run_count >= self.max_runs and self.block_run_count >= self.min_runs:
            # FIX: corrected grammar ("to often" -> "too often").
            logging.warning("Benchmarked program blocks too often and aborted therefore now.")
            return False
    return True
def __init__(self, command, timeout):
    """
    Initialize a :class:`CommandTimedOut` object.

    :param command: The command that timed out (an
                    :class:`~executor.ExternalCommand` object).
    :param timeout: The timeout that was exceeded (a number).
    """
    # Build the human readable error message up front, then delegate to the
    # parent constructor.
    message = format(
        "External command exceeded timeout of %s: %s",
        format_timespan(timeout),
        quote(command.command_line),
    )
    super(CommandTimedOut, self).__init__(command=command, error_message=message)
def data_by_ticket(ticket, start_date=None, end_date=None):
    """
    Aggregate total time spent and the set of users for a single ticket.

    :param ticket: the ticket identifier to aggregate records for.
    :param start_date: optional lower bound on record start dates.
    :param end_date: optional upper bound on record end dates.
    :returns: dict with total seconds, a human readable timespan and the
              sorted list of usernames involved.
    """
    result = {'time_spent_sec': 0, 'users': set()}
    records = Record.objects.filter(ticket=ticket)
    # Both bounds must be supplied for the date filter to apply.
    if start_date and end_date:
        records = records.filter(start_date__gte=start_date, end_date__lte=end_date)
    for record in records.all():
        seconds = (record.end_date - record.start_date).total_seconds()
        result['time_spent_sec'] += seconds
        result['users'].add(record.username)
    result['time_spent'] = humanfriendly.format_timespan(result['time_spent_sec'])
    result['users'] = sorted(result['users'])
    return result
def report_metrics(manager):
    """Create a textual summary of Apache web server metrics."""
    lines = ["Server metrics:"]
    byte_metrics = ('total_traffic', 'bytes_per_second', 'bytes_per_request')
    for name, value in sorted(manager.server_metrics.items()):
        # Pick a human readable rendering based on the metric's kind.
        if name in byte_metrics:
            value = format_size(value)
        elif name == 'cpu_load':
            value = '%.1f%%' % value
        elif name == 'uptime':
            value = format_timespan(value)
        # Turn snake_case metric names into capitalized labels.
        label = ' '.join(name.split('_'))
        label = label[0].upper() + label[1:]
        lines.append(" - %s: %s" % (label, value))
    if manager.wsgi_process_groups:
        main_label = "main Apache workers"
    else:
        main_label = "Apache workers"
    report_memory_usage(lines, main_label, manager.memory_usage)
    for group_name, memory_usage in sorted(manager.wsgi_process_groups.items()):
        report_memory_usage(lines, "WSGI process group '%s'" % group_name, memory_usage)
    return lines
def wait_until_connected(self):
    """
    Wait until connections are being accepted.

    :raises: :exc:`TimeoutError` when the SSH server isn't fast enough to initialize.
    """
    timer = Timer()
    with Spinner(timer=timer) as spinner:
        while True:
            if self.is_connected:
                break
            # Give up once the configured timeout has elapsed.
            if timer.elapsed_time > self.wait_timeout:
                raise TimeoutError(format(
                    "Failed to establish connection to %s within configured timeout of %s!",
                    self, format_timespan(self.wait_timeout),
                ))
            spinner.step(label="Waiting for %s to accept connections" % self)
            spinner.sleep()
    logger.debug("Waited %s for %s to accept connections.", timer, self)
def search(artist, title, duration=None):
    """
    Search YouTube for videos matching the given artist and title.

    :param artist: artist name (a string).
    :param title: track title (a string).
    :param duration: optional target duration in seconds; when given, result
        links are ordered by how closely the video duration matches it.
    :returns: a list of YouTube watch URLs (empty on any failure).
    """
    try:
        string = ' '.join([artist, title])
        # Step 1: search endpoint -> up to 10 candidate video IDs.
        parsingChannelUrl = "https://www.googleapis.com/youtube/v3/search"
        parsingChannelHeader = {'cache-control': "no-cache"}
        parsingChannelQueryString = {"part": "id,snippet", "maxResults": "10",
                                     "key": DEVELOPER_KEY, "type": "video", "q": string,
                                     "safeSearch": "none",
                                     "videoDuration": youtube_duration(duration)}
        parsingChannel = None
        resp = requests.get(parsingChannelUrl, headers=parsingChannelHeader,
                            params=parsingChannelQueryString)
        parsingChannel = resp.json()
        parsingChannelItems = parsingChannel.get("items")
        if parsingChannelItems is None or not parsingChannelItems:
            return []
        VideoIds = ",".join(str(x.get("id").get("videoId")) for x in parsingChannelItems)
        # Step 2: videos endpoint -> contentDetails (includes ISO-8601 duration).
        parsingVideoUrl = "https://www.googleapis.com/youtube/v3/videos"
        parsingVideoHeader = {'cache-control': "no-cache"}
        parsingVideoQueryString = {"part": 'id,snippet,contentDetails', "id": VideoIds,
                                   "key": DEVELOPER_KEY}
        parsingVideo = None
        resp = requests.get(parsingVideoUrl, headers=parsingVideoHeader,
                            params=parsingVideoQueryString)
        parsingVideo = resp.json()
        results = parsingVideo.get("items")
        if results is None:
            return []
        for r in results:
            logger.info('{} {}'.format(r["snippet"]["title"],
                                       format_timespan(isodate.parse_duration(r["contentDetails"]["duration"]).total_seconds())))
        if duration:
            # Rank candidates by absolute difference from the target duration.
            mapping = {"https://www.youtube.com/watch?v=" + r["id"]:
                       abs(isodate.parse_duration(r["contentDetails"]["duration"]).total_seconds() - duration)
                       for r in results}
            links = sorted(mapping, key=mapping.get)
            return links
        return ["https://www.youtube.com/watch?v=" + r["id"] for r in results]
    except Exception as e:
        # Best effort: any API or parsing failure yields an empty result list.
        logger.info(type(e))
        logger.info('Cannot find video for artist: %s title: %s duration: %s',
                    artist, title, duration)
        return []
def test_cli(self): """Test the command line interface.""" # Test that the usage message is printed by default. returncode, output = main() assert 'Usage:' in output # Test that the usage message can be requested explicitly. returncode, output = main('--help') assert 'Usage:' in output # Test handling of invalid command line options. returncode, output = main('--unsupported-option') assert returncode != 0 # Test `humanfriendly --format-number'. returncode, output = main('--format-number=1234567') assert output.strip() == '1,234,567' # Test `humanfriendly --format-size'. random_byte_count = random.randint(1024, 1024 * 1024) returncode, output = main('--format-size=%i' % random_byte_count) assert output.strip() == humanfriendly.format_size(random_byte_count) # Test `humanfriendly --format-table'. returncode, output = main('--format-table', '--delimiter=\t', input='1\t2\t3\n4\t5\t6\n7\t8\t9') assert output.strip() == dedent(''' ------------- | 1 | 2 | 3 | | 4 | 5 | 6 | | 7 | 8 | 9 | ------------- ''').strip() # Test `humanfriendly --format-timespan'. random_timespan = random.randint(5, 600) returncode, output = main('--format-timespan=%i' % random_timespan) assert output.strip() == humanfriendly.format_timespan(random_timespan) # Test `humanfriendly --parse-size'. returncode, output = main('--parse-size=5 KB') assert int(output) == humanfriendly.parse_size('5 KB') # Test `humanfriendly --run-command'. returncode, output = main('--run-command', 'bash', '-c', 'sleep 2 && exit 42') assert returncode == 42
def show_duration(seconds, unit='seconds'):
    """Instead of just showing the integer number of seconds
    we display it nicely like::

        125 seconds <span>(2 minutes, 5 seconds)</span>

    If we can't do it, just return as is.
    """
    template = engines['backend'].from_string(
        '{{ seconds_str }} {{ unit }} '
        '{% if seconds > 60 %}'
        '<span class="humanized" title="{{ seconds_str }} {{ unit }}">'
        '({{ humanized }})</span>'
        '{% endif %}'
    )
    try:
        seconds = int(seconds)
    except (ValueError, TypeError):
        # `seconds` isn't a number (ValueError) or is None (TypeError).
        # Return it unchanged and deliberately NOT marked safe, so a value
        # like '<script>' is auto-escaped by the template engine downstream.
        return seconds
    context = {
        'seconds_str': format(seconds, ','),
        'seconds': seconds,
        'unit': unit,
        'humanized': humanfriendly.format_timespan(seconds),
    }
    return mark_safe(template.render(context).strip())
def smart_update(self, *args, **kw):
    """
    Update the system's package lists (switching mirrors if necessary).

    :param args: Command line arguments to ``apt-get update``
                 (zero or more strings).
    :param max_attempts: The maximum number of attempts at successfully
                         updating the system's package lists (an integer,
                         defaults to 10).
    :param switch_mirrors: :data:`True` if we're allowed to switch mirrors
                           on 'hash sum mismatch' errors, :data:`False`
                           otherwise.
    :raises: If updating of the package lists fails 10 consecutive times
             (`max_attempts`) an exception is raised.

    While :func:`dumb_update()` simply runs ``apt-get update`` the
    :func:`smart_update()` function works quite differently:

    - First the system's package lists are updated using
      :func:`dumb_update()`. If this is successful we're done.
    - If the update fails we check the command's output for the phrase
      'hash sum mismatch'. If we find this phrase we assume that the
      current mirror is faulty and switch to another one.
    - Failing ``apt-get update`` runs are retried up to `max_attempts`.
    """
    backoff_time = 10
    max_attempts = kw.get('max_attempts', 10)
    switch_mirrors = kw.get('switch_mirrors', True)
    for i in range(1, max_attempts + 1):
        with CaptureOutput() as session:
            try:
                self.dumb_update(*args)
                return
            except Exception:
                if i < max_attempts:
                    output = session.get_text()
                    # Check for EOL releases. This somewhat peculiar way of
                    # checking is meant to ignore 404 responses from
                    # `secondary package mirrors' like PPAs. If the output
                    # of `apt-get update' implies that the release is EOL
                    # we need to verify our assumption.
                    if any(self.current_mirror in line and u'404' in line.split() for line in output.splitlines()):
                        logger.warning("%s may be EOL, checking ..", self.release)
                        if self.release_is_eol:
                            if switch_mirrors:
                                logger.warning("Switching to old releases mirror because %s is EOL ..",
                                               self.release)
                                self.change_mirror(self.old_releases_url, update=False)
                                continue
                            else:
                                raise Exception(compact("""
                                    Failed to update package lists because it looks like
                                    the current release (%s) is end of life but I'm not
                                    allowed to switch mirrors! (there's no point in
                                    retrying so I'm not going to)
                                """, self.distribution_codename))
                    # Check for `hash sum mismatch' errors.
                    if switch_mirrors and u'hash sum mismatch' in output.lower():
                        logger.warning("Detected 'hash sum mismatch' failure, switching to other mirror ..")
                        self.ignore_mirror(self.current_mirror)
                        self.change_mirror(update=False)
                    else:
                        logger.warning("Retrying after `apt-get update' failed (%i/%i) ..", i, max_attempts)
                    # Deal with unidentified (but hopefully transient) failures by retrying but backing off
                    # to give the environment (network connection, mirror state, etc.) time to stabilize.
                    logger.info("Sleeping for %s before retrying update ..", format_timespan(backoff_time))
                    time.sleep(backoff_time)
                    if backoff_time <= 120:
                        backoff_time *= 2
                    else:
                        backoff_time += backoff_time / 3
    raise Exception("Failed to update package lists %i consecutive times?!" % max_attempts)
def repr_effects(g):
    # Print one human readable line per queued effect (Python 2 print syntax).
    # Each effect is a (timestamp, add_flag, m) tuple; the timespan shown is
    # the age of the effect relative to now.
    # NOTE(review): `m` exposes a `.velocity` attribute — presumably a note /
    # MIDI-message-like object; confirm against the definition of g.effects.
    for t, act, m in g.effects:
        print '{0} {1} at {2}'.format(
            'Add' if act else 'Remove',
            m.velocity,
            format_timespan(time.time() - t))
def lineBot(op): try: if op.type == 0: return if op.type == 5: ge = ("u66d4c27e8f45f025cf5774883b67ddc1") contact = cl.getContact(op.param1) print("[ ADDNEWFRIEND ] 通知添加好友 名字: " + contact.displayName) cl.sendMessage( ge, "《好友通知》\n》新增好友:" + contact.displayName + "\n》好友Mid:\n" + op.param1) cl.findAndAddContactsByMid(op.param1) cl.sendMessage( op.param1, "哈囉{}~要跟莉姆露成為好朋友哦>///<".format(str(contact.displayName))) cl.sendMessage(op.param1, "!使用前請至主頁貼文詳讀使用說明!") cl.sendMessage(op.param1, "如果有其他疑問可以使用《Creator》指令私訊主人\n(主頁有說明之問題不予回應)") if op.type == 11: group = cl.getGroup(op.param1) contact = cl.getContact(op.param2) if op.type == 13: ge = ("u66d4c27e8f45f025cf5774883b67ddc1") contact1 = cl.getContact(op.param2) contact2 = cl.getContact(op.param3) group = cl.getGroup(op.param1) print("[ JOIN ] 莉姆露收到群組邀請: " + str(group.name) + "\n邀請的人: " + contact1.displayName + "\n被邀請的人" + contact2.displayName) if settings["autoJoin"] == True: if op.param2 in settings['blacklist']: if op.param3 in admin: print("[ BLACKJOIN ]黑單邀請加入群組: " + str(group.name)) cl.sendMessage( ge, "《黑單使用者邀請》" + "\n》群組名稱:" + str(group.name) + "\n" + op.param1 + "\n》邀請者名稱:" + contact1.displayName + "\n》邀請者MID:\n" + op.param2 + "\n》被邀請者名稱:" + contact2.displayName + "\n》被邀請者mid:\n" + op.param3) cl.acceptGroupInvitation(op.param1) cl.sendMessage(op.param1, "《黑單使用者》") time.sleep(0.5) cl.leaveGroup(op.param1) else: pass else: if op.param3 in admin: print("[ NEWJOIN ]使用者邀請加入群組: " + str(group.name)) cl.sendMessage( ge, "《普通使用者邀請》" + "\n》群組名稱:" + str(group.name) + "\n" + op.param1 + "\n》邀請者名稱:" + contact1.displayName + "\n》邀請者MID:\n" + op.param2 + "\n》被邀請者名稱:" + contact2.displayName + "\n》被邀請者mid:\n" + op.param3) cl.acceptGroupInvitation(op.param1) time.sleep(0.5) if op.param2 in master: cl.sendMessage( op.param1, "《作成者 " + contact1.displayName + " 邀請》") else: cl.sendMessage( op.param1, "《使用者 " + contact1.displayName + " 邀請》") else: pass else: if op.param2 in master: cl.acceptGroupInvitation(op.param1) time.sleep(0.5) 
cl.sendMessage(op.param1, "《作成者 " + contact1.displayName + " 邀請》") elif op.type == 19: ge = ("u66d4c27e8f45f025cf5774883b67ddc1") contact1 = cl.getContact(op.param2) group = cl.getGroup(op.param1) contact2 = cl.getContact(op.param3) print("[ KICK ]有人把人踢出群組 群組名稱: " + str(group.name) + "\n" + op.param1 + "\n踢人的人: " + contact1.displayName + "\nMid: " + contact1.mid + "\n被踢的人" + contact2.displayName + "\nMid:" + contact2.mid) cl.sendMessage( ge, "《踢出群組》" + "\n》群組名稱:" + str(group.name) + "\n》踢人的人: " + contact1.displayName + "\n》Mid: " + contact1.mid + "\n》被踢的人" + contact2.displayName + "\n》Mid: " + contact2.mid) try: if op.param3 not in admin or master: arrData = "" text = "%s " % ('#') arr = [] mention = "@x " slen = str(len(text)) elen = str(len(text) + len(mention) - 1) arrData = {'S': slen, 'E': elen, 'M': op.param3} arr.append(arrData) text += mention + '掰掰QAO/' cl.sendMessage(op.param1, text, { 'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}') }, 0) except: settings["blacklist"][op.param2] = True cl.sendMessage( op.param2, "《BLACK》\n不好意思,您違反了使用規定\n因此被莉姆露列為黑單\n無法再使用任何指令功能\n詳情請看主頁公告" ) cl.sendMessage( ge, "《黑單通知》" + "\n》顯示名稱:" + contact1.displayName + "\n》黑單者MID:\n" + op.param2) print("《黑單通知》" + "顯示名稱:" + contact1.displayName + "》黑單者MID:" + op.param2) if op.type == 24: cl.leaveRoom(op.param1) if op.type == 26 or op.type == 25: msg = op.message text = msg.text msg_id = msg.id receiver = msg.to sender = msg._from if msg.toType == 0: if sender != cl.profile.mid: to = sender else: to = receiver else: to = receiver if sender in master or admin: if "KICK " in msg.text: key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] targets = [] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: if target in master: pass else: try: cl.kickoutFromGroup(to, [target]) except: pass elif "MJOIN " in msg.text: midd = msg.text.replace("MJOIN ", "") cl.findAndAddContactsByMid(midd) cl.inviteIntoGroup(msg.to, [midd]) elif "MIDK:" in msg.text: midd 
= text.replace("MIDK:", "") cl.kickoutFromGroup(to, [midd]) elif "NAMEK " in msg.text: _name = text.replace("NAMEK ", "") gs = cl.getGroup(to) targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: pass else: for target in targets: if target in master: pass else: try: cl.kickoutFromGroup(to, [target]) except: pass elif "CLEANK " in msg.text: vkick0 = msg.text.replace("CLEANK ", "") vkick1 = vkick0.rstrip() vkick2 = vkick1.replace("@", "") vkick3 = vkick2.rstrip() _name = vkick3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: pass else: for target in targets: try: cl.kickoutFromGroup(msg.to, [target]) cl.findAndAddContactsByMid(target) cl.inviteIntoGroup(msg.to, [target]) cl.cancelGroupInvitation(msg.to, [target]) except: pass elif "RK " in msg.text: Ri0 = text.replace("RK ", "") Ri1 = Ri0.rstrip() Ri2 = Ri1.replace("@", "") Ri3 = Ri2.rstrip() _name = Ri3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: pass else: for target in targets: if target in admin: pass else: try: cl.kickoutFromGroup(to, [target]) cl.findAndAddContactsByMid(target) cl.inviteIntoGroup(to, [target]) except: pass elif msg.text.lower().startswith("KICKF:"): list_ = msg.text.split(":") gid = list_[1] mid = list_[2] try: cl.kickoutFromGroup(gid, [mid]) return cl.sendMessage(to, "《踢出人選完成》") except: pass elif msg.text in ["SET"]: try: ret_ = "《設定》" if settings["reread"] == True: ret_ += "\n查詢收回 🆗" else: ret_ += "\n查詢收回 🈲" if settings["autoJoin"] == True: ret_ += "\n自動加入群組 🆗" else: ret_ += "\n自動加入群組 🈲" if settings["autoLeave"] == True: ret_ += "\n自動離開副本 🆗" else: ret_ += "\n自動離開副本 🈲" cl.sendMessage(to, str(ret_)) except Exception as e: cl.sendMessage(msg.to, str(e)) elif msg.text in ["RIMURUREBOT"]: cl.sendMessage(to, "《莉姆露重啟~》") restartBot() elif msg.text in ["AUTOJOIN On"]: settings["autoJoin"] = True 
cl.sendMessage(to, "《莉姆露自己加入群組》") elif msg.text in ["AUTOJOIN Off"]: settings["autoJoin"] = False cl.sendMessage(to, "《由主人決定加入群組》") elif msg.text in ["LEAVE On"]: settings["autoLeave"] = True cl.sendMessage(to, "《莉姆露會離開副本》") elif msg.text in ["LEAVE Off"]: settings["autoLeave"] = False cl.sendMessage(to, "《莉姆露會留在副本》") elif msg.text in ["REREAD On"]: settings["reread"] = True cl.sendMessage(to, "《查詢收回開啟》") elif msg.text in ["REREAD Off"]: settings["reread"] = False cl.sendMessage(to, "《查詢收回關閉》") elif msg.text in ["GRL"]: groups = cl.groups ret_ = "《莉姆露的群組》" no = 0 + 1 for gid in groups: group = cl.getGroup(gid) ret_ += "\n☆ {}. {} | {}".format( str(no), str(group.name), str(len(group.members))) no += 1 ret_ += "\n《總共 {} 個》".format(str(len(groups))) cl.sendMessage(to, str(ret_)) elif "JBLACK @" in msg.text: if msg.toType == 2: print("[ JBAN ] 成功") key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] targets = [] for x in key["MENTIONEES"]: targets.append(x["M"]) if targets == []: pass else: for target in targets: try: settings["blacklist"][target] = True cl.sendMessage(to, "《加入黑名單》") cl.sendMessage( target, "《BLACK》\n不好意思,您違反了使用規定\n因此被莉姆露列為黑單\n無法再使用任何指令功能\n詳情請看主頁公告" ) except: pass elif "JMBLACK " in msg.text: mmid = msg.text.replace("JMBLACK ", "") print("[ JMBAN ] 成功") try: settings["blacklist"][mmid] = True cl.sendMessage(to, "《加入黑名單》") cl.sendMessage( mmid, "《BLACK》\n不好意思,您違反了使用規定\n因此被莉姆露列為黑單\n無法再使用任何指令功能\n詳情請看主頁公告" ) except: pass elif "MBK " in msg.text: mmid = msg.text.replace("MBK ", "") print("[ JMBAN ] 成功") try: settings["blacklist"][mmid] = True cl.sendMessage(to, "《加入黑名單》") except: pass elif msg.text in ["CLEAR BLACKLIST"]: for mi_d in settings["blacklist"]: settings["blacklist"] = {} cl.sendMessage(to, "《清空黑名單》") elif "UMBLACK " in msg.text: mmid = msg.text.replace("UMBLACK ", "") print("[ UMBAN ] 成功") try: del settings["blacklist"][mmid] cl.sendMessage(to, "《解除黑名單》") cl.sendMessage(mmid, "《UBLACK》\n解除黑名單") except: pass elif "UBLACK @" in 
msg.text: if msg.toType == 2: print("[ UBAN ] 成功") key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] targets = [] for x in key["MENTIONEES"]: targets.append(x["M"]) if targets == []: pass else: for target in targets: try: del settings["blacklist"][target] cl.sendMessage(to, "《解除黑名單》") cl.sendMessage(target, "《UBLACK》\n解除黑名單") except: pass elif msg.text in ["BLACKLIST"]: if settings["blacklist"] == {}: cl.sendMessage(to, "《沒有黑名單》") else: mc = "《黑名單列表》" for mi_d in settings["blacklist"]: mc += "\n》" + cl.getContact(mi_d).displayName cl.sendMessage(to, mc) elif msg.text in ["BLACKMID"]: if settings["blacklist"] == {}: cl.sendMessage(to, "《沒有黑名單》") else: mc = "《黑名單列表》" for mi_d in settings["blacklist"]: mc += "\n》" + mi_d cl.sendMessage(to, mc) elif msg.text in ["KBLACK"]: if msg.toType == 2: group = cl.getGroup(to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in settings["blacklist"]: matched_list += filter(lambda str: str == tag, gMembMids) if matched_list == []: print("1") cl.sendMessage(to, "《沒有黑名單》") return for jj in matched_list: cl.kickoutFromGroup(to, [jj]) cl.sendMessage(to, "《黑名單已清除》") elif msg.text in ["KALLBLACK"]: gid = cl.getGroupIdsJoined() group = cl.getGroup(to) gMembMids = [contact.mid for contact in group.members] ban_list = [] for tag in settings["blacklist"]: ban_list += filter(lambda str: str == tag, gMembMids) if ban_list == []: cl.sendMessage(to, "《沒有黑名單》") else: for i in gid: for jj in ban_list: cl.kickoutFromGroup(i, [jj]) cl.sendMessage(i, "《剔除所有群組黑單人選》") elif "Friendbc:" in msg.text: bctxt = text.replace("Friendbc:", "") t = cl.getAllContactIds() for manusia in t: cl.sendMessage(manusia, (bctxt)) elif "Groupsbc:" in msg.text: bctxt = text.replace("Groupsbc:", "") n = cl.getGroupIdsJoined() for manusia in n: cl.sendMessage(manusia, (bctxt)) elif "LEAVE:" in msg.text: try: leave = text.replace("LEAVE:", "") cl.leaveGroup(leave) cl.sendMessage(to, "《退出群組完成》") except: pass elif 
msg.text.lower().startswith("mid "): if 'MENTION' in msg.contentMetadata.keys() != None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval( msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) ret_ = "《MID》" for ls in lists: ret_ += "\n" + "\n" + ls cl.sendMessage(msg.to, str(ret_)) elif "USER " in msg.text: mmid = msg.text.replace("user ", "") cl.sendContact(to, mmid) if op.type == 25 or op.type == 26: msg = op.message text = msg.text msg_id = msg.id receiver = msg.to sender = msg._from if msg.toType == 0: if sender != cl.profile.mid: to = sender else: to = receiver else: to = receiver if msg.contentType == 0: if text is None: return if not sender in settings['blacklist']: if sender in prevents['limit']: if msg.text in prevents['limit'][sender]['text']: if prevents['limit'][sender]['text'][ msg.text] >= 3: prevents['limit'][sender]['text'][ 'react'] = False else: prevents['limit'][sender]['text'][ msg.text] += 1 prevents['limit'][sender]['text'][ 'react'] = True else: try: del prevents['limit'][sender]['text'] except: pass prevents['limit'][sender]['text'] = {} prevents['limit'][sender]['text'][msg.text] = 1 prevents['limit'][sender]['text']['react'] = True else: prevents['limit'][sender] = {} prevents['limit'][sender]['stick'] = {} prevents['limit'][sender]['text'] = {} prevents['limit'][sender]['text'][msg.text] = 1 prevents['limit'][sender]['text']['react'] = True if sender not in master: if prevents['limit'][sender]['text']['react'] == False: return if msg.text in ["help", "Help", "HELP"]: if sender in master or admin: ret_ = "《BLACKLIST》\n《BLACKMID》\n《JMBLACK 》\n《MBK 》\n《JBLACK @》\n《UMBLACK 》\n《UBLACK @》\n《KBLACK》\n《KALLBLACK》\n《GRL》\n《REREAD On/Off》\n《LEAVE On/Off》\n《AUTOJOIN On/Off》\n《LEAVE:》\n《KICKF:》\n《USER》\n《SET》\n《CLEANK @》 \n《NAMEK @》\n《MJOIN @》\n《KICK @》\n《MIDK:》\n《RK @》" cl.sendMessage(to, str(ret_)) else: helpMessage = 
helpmessage() cl.sendMessage(to, str(helpMessage)) elif msg.text in ["Creator", "creator"]: cl.sendContact(to, "u66d4c27e8f45f025cf5774883b67ddc1") elif text.lower() == '@bye': if msg.toType == 2: ge = ("u66d4c27e8f45f025cf5774883b67ddc1") ginfo = cl.getGroup(to) try: cl.sendMessage(to, "大家不喜歡莉姆露了嗎QAQ") time.sleep(1) cl.leaveGroup(to) except: pass elif text.lower() == 'runtime': timeNow = time.time() runtime = timeNow - botStart runtime = format_timespan(runtime) cl.sendMessage(to, "《機器運行時間 {}》".format(str(runtime))) elif text.lower() == 'time': tz = pytz.timezone("Asia/Taipei") timeNow = datetime.now(tz=tz) day = [ "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" ] hari = [ "星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六" ] bulan = [ "Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember" ] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k - 1] readTime = "《現在時間/GMT+8》\n" + timeNow.strftime( '%Y') + "年" + bln + "月" + timeNow.strftime( '%d' ) + "日\n" + hasil + "\n" + timeNow.strftime( '%H:%M:%S') cl.sendMessage(msg.to, readTime) elif msg.text in ["SR", "Setread"]: cl.sendMessage(msg.to, "《已讀設置》") try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] except: pass now2 = datetime.now() wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.strftime( now2, "%H:%M") wait2['ROM'][msg.to] = {} elif msg.text in ["LR", "Lookread"]: if msg.to in wait2['readPoint']: if wait2["ROM"][msg.to].items() == []: chiya = "" else: chiya = "" for rom in wait2["ROM"][msg.to].items(): chiya += rom[1] + "\n" cl.sendMessage( msg.to, "《已讀的人》%s\n[%s]" % (wait2['readMember'][msg.to], setTime[msg.to])) else: cl.sendMessage(msg.to, "《還沒設定已讀點哦¨》") elif text.lower() == 'me': sendMessageWithMention(to, sender) 
cl.sendContact(to, sender) elif text.lower() == 'myname': me = cl.getContact(sender) cl.sendMessage(msg.to, "《顯示名稱》\n" + me.displayName) elif text.lower() == 'mybio': me = cl.getContact(sender) cl.sendMessage(msg.to, "《狀態消息》\n" + me.statusMessage) elif text.lower() == 'mypicture': me = cl.getContact(sender) cl.sendImageWithURL( msg.to, "http://dl.profile.line-cdn.net/" + me.pictureStatus) elif text.lower() == 'myvideoprofile': me = cl.getContact(sender) cl.sendVideoWithURL( msg.to, "http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp") elif text.lower() == 'mycover': me = cl.getContact(sender) cover = cl.getProfileCoverURL(sender) cl.sendImageWithURL(msg.to, cover) elif text.lower() == 'mymid': cl.sendMessage(msg.to, sender) elif msg.text.lower().startswith("picture "): if 'MENTION' in msg.contentMetadata.keys() != None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval( msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: path = "http://dl.profile.line-cdn.net/" + cl.getContact( ls).pictureStatus cl.sendImageWithURL(msg.to, str(path)) elif msg.text in ["CANCEL"]: if msg.toType == 2: X = cl.getGroup(msg.to) if X.invitee is not None: gInviMids = (contact.mid for contact in X.invitee) ginfo = cl.getGroup(msg.to) sinvitee = str(len(ginfo.invitee)) start = time.time() for cancelmod in gInviMids: cl.cancelGroupInvitation(msg.to, [cancelmod]) elapsed_time = time.time() - start cl.sendMessage(to, "《已取消所有邀請》") else: cl.sendMessage(to, "《沒有邀請可以取消》") elif msg.text in ["speed", "Speed", "SPEED"]: time0 = timeit.timeit( '"-".join(str(n) for n in range(100))', number=10000) str1 = str(time0) start = time.time() cl.sendMessage(to, '《處理速度》\n' + str1 + '秒') elapsed_time = time.time() - start cl.sendMessage( to, '《指令反應》\n' + format(str(elapsed_time)) + '秒') elif msg.text in ["About", "about", "ABOUT"]: try: arr = [] owner = 
"u66d4c27e8f45f025cf5774883b67ddc1" creator = cl.getContact(owner) contact = cl.getContact(clMID) grouplist = cl.getGroupIdsJoined() contactlist = cl.getAllContactIds() blockedlist = cl.getBlockedContactIds() ret_ = "《關於自己》" ret_ += "\n版本 : v9.0" ret_ += "\n名稱 : {}".format(contact.displayName) ret_ += "\n群組 : {}".format(str(len(grouplist))) ret_ += "\n好友 : {}".format(str(len(contactlist))) cl.sendMessage(to, str(ret_)) except Exception as e: cl.sendMessage(msg.to, str(e)) elif msg.text in ["Gurl", "gurl", "GURL"]: if msg.toType == 2: group = cl.getGroup(to) if group.preventedJoinByTicket == False: ticket = cl.reissueGroupTicket(to) cl.sendMessage( to, "《群組網址》\nhttps://line.me/R/ti/g/{}".format( str(ticket))) else: cl.sendMessage( to, "《群組網址》\nhttps://line.me/R/ti/g/{}".format( str(ticket))) elif msg.text in ["URL On"]: if msg.toType == 2: G = cl.getGroup(to) if G.preventedJoinByTicket == False: cl.sendMessage(to, "《群組網址已開啟》") else: G.preventedJoinByTicket = False cl.updateGroup(G) cl.sendMessage(to, "《成功開啟群組網址》") elif msg.text in ["URL Off"]: if msg.toType == 2: G = cl.getGroup(to) if G.preventedJoinByTicket == True: cl.sendMessage(to, "《群組網址已關閉》") else: G.preventedJoinByTicket = True cl.updateGroup(G) cl.sendMessage(to, "《成功關閉群組網址》") elif msg.text in ["Ginfo", "ginfo", "GINFO"]: group = cl.getGroup(to) try: gCreator = group.creator.displayName except: gCreator = "未找到" if group.invitee is None: gPending = "0" else: gPending = str(len(group.invitee)) if group.preventedJoinByTicket == True: gQr = "關閉" gTicket = "https://line.me/R/ti/g/{}".format( str(cl.reissueGroupTicket(group.id))) else: gQr = "開啟" gTicket = "https://line.me/R/ti/g/{}".format( str(cl.reissueGroupTicket(group.id))) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus ret_ = "《群組資料》" ret_ += "\n群組名稱 : {}".format(str(group.name)) ret_ += "\n群組ID : {}".format(group.id) ret_ += "\n群組作者 : {}".format(str(gCreator)) ret_ += "\n成員數量 : {}".format(str(len(group.members))) ret_ += "\n邀請數量 : 
{}".format(gPending) ret_ += "\n網址狀態 : {}".format(gQr) ret_ += "\n群組網址 : {}".format(gTicket) cl.sendMessage(to, str(ret_)) cl.sendImageWithURL(to, path) elif msg.text in ["Tagall", "tagall", "TAGALL"]: group = cl.getGroup(msg.to) nama = [contact.mid for contact in group.members] k = len(nama) // 100 for a in range(k + 1): txt = u'' s = 0 b = [] for i in group.members[a * 100:(a + 1) * 100]: b.append({ "S": str(s), "E": str(s + 6), "M": i.mid }) s += 7 txt += u'@Alin \n' cl.sendMessage(to, text=txt, contentMetadata={ u'MENTION': json.dumps({'MENTIONEES': b}) }, contentType=0) cl.sendMessage( to, "《總共 {} 個成員》".format(str(len(nama)))) if op.type == 26: msg = op.message text = msg.text msg_id = msg.id receiver = msg.to sender = msg._from if msg.toType == 0: if sender != cl.profile.mid: to = sender else: to = receiver else: to = receiver if msg.contentType == 16: try: msg.contentType = 0 f_mid = msg.contentMetadata["postEndUrl"].split("userMid=") s_mid = f_mid[1].split("&") mid = s_mid[0] try: arrData = "" text = "%s " % ("《文章作者》\n") arr = [] mention = "@x " slen = str(len(text)) elen = str(len(text) + len(mention) - 1) arrData = {'S': slen, 'E': elen, 'M': mid} arr.append(arrData) text += mention + "\n《文章預覽》\n" + msg.contentMetadata[ "text"] + "\n《文章網址》\n " + msg.contentMetadata[ "postEndUrl"] cl.sendMessage( msg.to, text, { 'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}') }, 0) except Exception as error: print(error) except: msg.contentType = 0 ret_ = "《文章預覽》\n" + msg.contentMetadata["text"] cl.sendMessage(msg.to, ret_) if op.type == 26: try: msg = op.message if settings["reread"] == True: if msg.toType == 0: cl.log("[%s]" % (msg._from) + msg.text) else: cl.log("[%s]" % (msg.to) + msg.text) if msg.contentType == 0: msg_dict[msg.id] = { "text": msg.text, "from": msg._from, "createdTime": msg.createdTime } else: pass except Exception as e: print(e) if op.type == 60: if op.param2 in settings['blacklist']: cl.sendMessage(op.param1, "《!黑名單使用者加入!》") else: if 
op.param2 not in admin: try: arrData = "" text = "%s " % ('#') arr = [] mention = "@x " slen = str(len(text)) elen = str(len(text) + len(mention) - 1) arrData = {'S': slen, 'E': elen, 'M': op.param2} arr.append(arrData) text += mention + '莉姆露歡迎你的加入~' cl.sendMessage( op.param1, text, { 'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}') }, 0) except Exception as error: print(error) if op.type == 65: try: ge = ("u66d4c27e8f45f025cf5774883b67ddc1") at = op.param1 msg_id = op.param2 if settings["reread"] == True: if msg_dict[msg_id]["from"] in settings["blacklist"]: pass else: if msg_id in msg_dict: if msg_dict[msg_id]["from"] not in bl: print(msg_dict[msg_id]["from"]) arrData = "" text = "%s " % ("《莉姆露看到有人收回訊息》\n") arr = [] mention = "@x " slen = str(len(text)) elen = str(len(text) + len(mention) - 1) arrData = { 'S': slen, 'E': elen, 'M': msg_dict[msg_id]["from"] } arr.append(arrData) text += mention + "\n" + msg_dict[msg_id][ "text"] cl.sendMessage( op.param1, text, { 'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}') }, 0) del msg_dict[msg_id] else: pass except Exception as e: print(e) if op.type == 55: try: if op.param1 in wait2['readPoint']: Name = cl.getContact(op.param2).displayName if Name in wait2['readMember'][op.param1]: pass else: wait2['readMember'][op.param1] += "\n☆" + Name wait2['ROM'][op.param1][op.param2] = "☆" + Name else: pass except: pass except Exception as error: logError(error)
def _do_write_events(self, events, storage_uri=None):
    """Send an iterable of events as a series of batch operations.

    For each event that carries a file entry, the target upload URI is
    substituted into the event and the file stream is uploaded through the
    shared ``file_upload_pool``. Events whose upload raised are excluded;
    the remaining events are sent to the server in a single batched request.

    :param events: list or tuple of :class:`MetricsEventAdapter` instances.
    :param storage_uri: optional storage URI override; falls back to
        ``self._storage_uri`` when falsy.
    :return: response of the batched send, or ``None`` when no event was
        eligible for sending.

    note: metric send does not raise on error
    """
    assert isinstance(events, (list, tuple))
    assert all(isinstance(x, MetricsEventAdapter) for x in events)

    # def event_key(ev):
    #     return (ev.metric, ev.variant)
    #
    # events = sorted(events, key=event_key)
    # multiple_events_for = [k for k, v in groupby(events, key=event_key) if len(list(v)) > 1]
    # if multiple_events_for:
    #     log.warning(
    #         'More than one metrics event sent for these metric/variant combinations in a report: %s' %
    #         ', '.join('%s/%s' % k for k in multiple_events_for))

    storage_uri = storage_uri or self._storage_uri
    # NOTE(review): 'now' is never read in the visible body — confirm before removing.
    now = time()

    def update_and_get_file_entry(ev):
        # Resolve the event's upload target and write it into the event;
        # returns the file entry only when there is an actual stream to upload.
        entry = ev.get_file_entry()
        kwargs = {}
        if entry:
            key, url = ev.get_target_full_upload_uri(
                storage_uri, self.storage_key_prefix)
            kwargs[entry.key_prop] = key
            kwargs[entry.url_prop] = url
            if not entry.stream:
                # if entry has no stream, we won't upload it
                entry = None
            else:
                if not hasattr(entry.stream, 'read'):
                    raise ValueError('Invalid file object %s' % entry.stream)
                entry.url = url
        ev.update(task=self._task_id, **kwargs)
        return entry

    # prepare event needing file upload
    entries = []
    for ev in events:
        try:
            e = update_and_get_file_entry(ev)
            if e:
                entries.append(e)
        except Exception as ex:
            # best-effort: a failed preparation only logs a warning,
            # the remaining events are still processed
            log.warning(str(ex))

    # upload the needed files
    if entries:
        # upload files
        def upload(e):
            # Runs on the shared upload pool; failures are recorded on the
            # entry (set_exception) so the event is excluded from the batch.
            upload_uri = e.upload_uri or storage_uri
            try:
                storage = self._get_storage(upload_uri)
                url = storage.upload_from_stream(e.stream, e.url)
                e.event.update(url=url)
            except Exception as exp:
                log.debug("Failed uploading to {} ({})".format(
                    upload_uri if upload_uri else "(Could not calculate upload uri)",
                    exp,
                ))
                e.set_exception(exp)
        res = file_upload_pool.map_async(upload, entries)
        # block until all uploads in this report have finished
        res.wait()
        # remember the last time we uploaded a file
        self._file_upload_time = time()

    # Warn when file-related events keep arriving but no upload has happened
    # for longer than the configured starvation threshold.
    t_f, t_u, t_ref = \
        (self._file_related_event_time, self._file_upload_time,
         self._file_upload_starvation_warning_sec)
    if t_f and t_u and t_ref and (t_f - t_u) > t_ref:
        log.warning(
            'Possible metrics file upload starvation: files were not uploaded for %s' %
            format_timespan(t_ref))

    # send the events in a batched request
    good_events = [ev for ev in events if ev.upload_exception is None]
    error_events = [ev for ev in events if ev.upload_exception is not None]
    if error_events:
        log.error(
            "Not uploading {}/{} events because the data upload failed".
            format(
                len(error_events),
                len(events),
            ))
    if good_events:
        batched_requests = [
            api_events.AddRequest(event=ev.get_api_event())
            for ev in good_events
        ]
        req = api_events.AddBatchRequest(requests=batched_requests)
        return self.send(req, raise_on_errors=False)
    return None
def schedule_suite(self):
    """
    Schedule the suite-run. Returns the number of jobs scheduled.

    Builds the job matrix for the suite, applies the configured filters,
    optionally backtracks through parent commits when ``--newest`` is used
    and packages are missing, then schedules the surviving jobs.
    """
    name = self.name
    # Resolve the target architecture: explicit --arch wins, otherwise
    # derive it from the configured machine type.
    if self.args.arch:
        arch = self.args.arch
        log.debug("Using '%s' as an arch" % arch)
    else:
        arch = util.get_arch(self.base_config.machine_type)
    suite_name = self.base_config.suite
    # Suite fragments live under <repo>/<relpath>/suites/<suite with ':' as '/'>
    suite_path = os.path.normpath(
        os.path.join(
            self.suite_repo_path,
            self.args.suite_relpath,
            'suites',
            self.base_config.suite.replace(':', '/'),
        ))
    log.debug('Suite %s in %s' % (suite_name, suite_path))
    configs = build_matrix(suite_path,
                           subset=self.args.subset,
                           seed=self.args.seed)
    log.info('Suite %s in %s generated %d jobs (not yet filtered)' % (
        suite_name, suite_path, len(configs)))

    if self.args.dry_run:
        log.debug("Base job config:\n%s" % self.base_config)

    # create, but do not write, the temp file here, so it can be
    # added to the args in collect_jobs, but not filled until
    # any backtracking is done
    base_yaml_path = NamedTemporaryFile(prefix='schedule_suite_',
                                        delete=False).name
    self.base_yaml_paths.insert(0, base_yaml_path)

    # compute job limit in respect of --sleep-before-teardown
    job_limit = self.args.limit or 0
    sleep_before_teardown = int(self.args.sleep_before_teardown or 0)
    if sleep_before_teardown:
        if job_limit == 0:
            log.warning('The --sleep-before-teardown option was provided: '
                        'only 1 job will be scheduled. '
                        'Use --limit to run more jobs')
            # give user a moment to read this warning
            time.sleep(5)
            job_limit = 1
        elif self.args.non_interactive:
            log.warning('The --sleep-before-teardown option is active. '
                        'There will be a maximum {} jobs running '
                        'which will fall asleep for {}'.format(
                            job_limit,
                            format_timespan(sleep_before_teardown)))
        elif job_limit > 4:
            # Interactive confirmation: holding many machines asleep is
            # expensive, so require an explicit 'y' before continuing.
            are_you_insane = (
                'There are {total} configs and {maximum} job limit is used. '
                'Do you really want to lock all machines needed for '
                'this run for {that_long}? (y/N):'.format(
                    that_long=format_timespan(sleep_before_teardown),
                    total=len(configs),
                    maximum=job_limit))
            while True:
                insane = (input(are_you_insane) or 'n').lower()
                if insane == 'y':
                    break
                elif insane == 'n':
                    exit(0)

    # if newest, do this until there are no missing packages
    # if not, do it once
    backtrack = 0
    limit = self.args.newest
    while backtrack <= limit:
        jobs_missing_packages, jobs_to_schedule = \
            self.collect_jobs(arch,
                              util.filter_configs(configs,
                                                  filter_in=self.args.filter_in,
                                                  filter_out=self.args.filter_out,
                                                  filter_all=self.args.filter_all,
                                                  filter_fragments=self.args.filter_fragments,
                                                  suite_name=suite_name),
                              self.args.newest, job_limit)
        if jobs_missing_packages and self.args.newest:
            # Packages are missing for this sha1: walk back to the parent
            # commit and rebuild the base config against it.
            new_sha1 = \
                util.find_git_parent('ceph', self.base_config.sha1)
            if new_sha1 is None:
                util.schedule_fail('Backtrack for --newest failed', name)
            # rebuild the base config to resubstitute sha1
            self.config_input['ceph_hash'] = new_sha1
            self.base_config = self.build_base_config()
            backtrack += 1
            continue
        if backtrack:
            log.info("--newest supplied, backtracked %d commits to %s" %
                     (backtrack, self.base_config.sha1))
        break
    else:
        # while/else: only reached when the loop exhausted all backtracks
        # without breaking, i.e. --newest never found a usable commit.
        if self.args.newest:
            util.schedule_fail(
                'Exceeded %d backtracks; raise --newest value' % limit,
                name,
            )

    if self.args.dry_run:
        log.debug("Base job config:\n%s" % self.base_config)

    # Now that backtracking is settled, fill the temp file created above.
    with open(base_yaml_path, 'w+b') as base_yaml:
        base_yaml.write(str(self.base_config).encode())

    if jobs_to_schedule:
        self.write_rerun_memo()

    self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

    os.remove(base_yaml_path)

    count = len(jobs_to_schedule)
    missing_count = len(jobs_missing_packages)
    log.info('Suite %s in %s scheduled %d jobs.' %
             (suite_name, suite_path, count))
    log.info('%d/%d jobs were filtered out.',
             (len(configs) - count),
             len(configs))
    if missing_count:
        # NOTE(review): log.warn is a deprecated alias of log.warning.
        log.warn('Scheduled %d/%d jobs that are missing packages!',
                 missing_count, count)
    return count
def bot(op):
    """Handle a single LINE operation event.

    Dispatches on ``op.type``: auto-accepts group invitations (13),
    optionally kicks non-admin joiners (17), and processes text commands
    from owners/admins in received messages (25/26). All state lives in
    module-level globals (``cl``, ``settings``, ``admin``, ``bots``,
    ``klist``, ``botStart``); errors are swallowed and printed.
    """
    try:
        if op.type == 0:
            return
        if op.type == 13:
            # auto-accept any group invitation addressed to the bot
            cl.acceptGroupInvitation(op.param1)
        if op.type == 17:
            # a member joined a group; kick them unless they are an admin
            # and the "joinkick" feature is enabled
            if op.param2 not in admin:
                if settings["joinkick"] == True:
                    threading.Thread(target=cl.kickoutFromGroup,
                                     args=(op.param1, [op.param2],)).start()
        if op.type == 26 or op.type == 25:
            # 25/26: message sent/received
            msg = op.message
            text = msg.text
            msg_id = msg.id
            receiver = msg.to
            sender = msg._from
            # Pick the reply target: for 1:1 chats answer the peer,
            # for groups answer the group itself.
            if msg.toType == 0:
                if sender != cl.profile.mid:
                    to = sender
                else:
                    to = receiver
            elif msg.toType == 2:
                to = receiver
            # NOTE(review): if text is None this raises AttributeError
            # (caught by the outer except) rather than returning — the
            # None check below never runs first; confirm intent.
            if text.lower() is None:
                return
            if msg.contentType == 0:
                if text is None:
                    return
                # Commands below are restricted to owners and admins.
                if sender in settings["Owner"] or sender in admin:
                    if op.message.text.lower().startswith(".t ") and "MENTION" in op.message.contentMetadata:
                        # kick every mentioned user, round-robining over bots
                        # NOTE(review): eval() on message metadata is unsafe;
                        # ast.literal_eval would be the safer equivalent.
                        if settings["kick"] == True:
                            key = eval(op.message.contentMetadata["MENTION"])
                            key["MENTIONEES"][0]["M"]
                            n = 0
                            for x in key["MENTIONEES"]:
                                if n == len(bots):
                                    n = 0
                                threading.Thread(target=Kick, args=(n, op.message.to, [x["M"]],)).start()
                                n += 1
                            return
                        else:
                            cl.sendMessage(op.message.to, "你的專武已關閉,機器不會踢人")
                    elif op.message.text.lower().startswith(".k ") and "MENTION" in op.message.contentMetadata:
                        # identical behavior to ".t " (duplicate command alias)
                        if settings["kick"] == True:
                            key = eval(op.message.contentMetadata["MENTION"])
                            key["MENTIONEES"][0]["M"]
                            n = 0
                            for x in key["MENTIONEES"]:
                                if n == len(bots):
                                    n = 0
                                threading.Thread(target=Kick, args=(n, op.message.to, [x["M"]],)).start()
                                n += 1
                            return
                        else:
                            cl.sendMessage(op.message.to, "你的專武已關閉,機器不會踢人")
                    elif "sp" == op.message.text.lower():
                        # crude speed test: time the dispatch of a send call
                        t1 = time.time()
                        threading.Thread(target=cl.sendMessage, args=(op.message.to, "速度測試",)).start()
                        t2 = time.time() - t1
                        time.sleep(1)
                        return cl.sendMessage(op.message.to, "速度偵測為\n%s 秒" % t2)
                    elif op.message.text.lower().startswith("加入 ") and "MENTION" in op.message.contentMetadata:
                        # add every mentioned user to the kick list
                        key = eval(op.message.contentMetadata["MENTION"])
                        key["MENTIONEES"][0]["M"]
                        for x in key["MENTIONEES"]:
                            klist.append(x["M"])
                        return cl.sendMessage(op.message.to, "完成")
                    elif "列表踢" == op.message.text.lower():
                        # kick everyone currently on the kick list
                        if settings["kick"] == True:
                            n = 0
                            for x in klist:
                                if n == len(bots):
                                    n = 0
                                threading.Thread(target=Kick, args=(n, op.message.to, [x],)).start()
                                n += 1
                            return
                        else:
                            cl.sendMessage(op.message.to, "你的專武已關閉,機器不會踢人")
                    elif op.message.text.lower().startswith("刪除 "):
                        # remove kick-list entries by 1-based index
                        input1 = op.message.text.replace("刪除 ", "")
                        sep = input1.split(" ")
                        for x in sep:
                            try:
                                kill = klist[(int(x) - 1)]
                                klist.remove(kill)
                                cl.sendMessage(op.message.to, "完成")
                            except:
                                cl.sendMessage(op.message.to, "無效範圍刪除失敗")
                    elif op.message.text.lower().startswith("op ") and "MENTION" in op.message.contentMetadata:
                        # grant owner rights to every mentioned user
                        key = eval(op.message.contentMetadata["MENTION"])
                        key["MENTIONEES"][0]["M"]
                        targets = []
                        for x in key["MENTIONEES"]:
                            targets.append(x["M"])
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                try:
                                    settings["Owner"][target] = True
                                    backupData()
                                    cl.sendMessage(op.message.to, "已加入權限")
                                except:
                                    pass
                    elif op.message.text.lower().startswith("del ") and "MENTION" in op.message.contentMetadata:
                        # revoke owner rights from every mentioned user
                        key = eval(op.message.contentMetadata["MENTION"])
                        key["MENTIONEES"][0]["M"]
                        targets = []
                        for x in key["MENTIONEES"]:
                            targets.append(x["M"])
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                try:
                                    del settings["Owner"][target]
                                    backupData()
                                    cl.sendMessage(op.message.to, "已刪除權限")
                                except:
                                    pass
                    elif "權限列表" == op.message.text:
                        # list all owners by display name
                        if settings["Owner"] == {}:
                            cl.sendMessage(to=op.message.to, text="列表沒人")
                        else:
                            no = 1
                            rlist = "╭───「 admin List 」"
                            for x in settings["Owner"]:
                                rlist += "\n├≽{}. {}".format(str(no), cl.getContact(x).displayName)
                                no += 1
                            rlist += "\n╰───「 admin {} Members 」".format(len(settings["Owner"]))
                            cl.sendMessage(to=op.message.to, text=str(rlist))
                    elif op.message.text.lower().startswith("刪除權限 "):
                        # remove owners by index
                        # NOTE(review): settings["Owner"] is used as a dict
                        # elsewhere; indexing it with int(x)-1 and calling
                        # .remove() will raise and always hit the except
                        # branch — confirm whether this branch ever worked.
                        input1 = op.message.text.replace("刪除權限 ", "")
                        sep = input1.split(" ")
                        for x in sep:
                            try:
                                a = settings["Owner"]
                                kill = settings["Owner"][(int(x) - 1)]
                                a.remove(kill)
                                cl.sendMessage(op.message.to, "完成")
                            except:
                                cl.sendMessage(op.message.to, "無效範圍刪除失敗")
                    elif "踢人列表" == op.message.text:
                        # list all kick-list members by display name
                        if klist == []:
                            cl.sendMessage(to=op.message.to, text="列表沒人")
                        else:
                            no = 1
                            rlist = "╭───「 kill List 」"
                            for x in klist:
                                rlist += "\n├≽{}. {}".format(str(no), cl.getContact(x).displayName)
                                no += 1
                            rlist += "\n╰───「 kill {} Members 」".format(len(klist))
                            cl.sendMessage(to=op.message.to, text=str(rlist))
                    elif "runtime" == op.message.text:
                        # report elapsed time since the bot started
                        timeNow = time.time()
                        runtime = timeNow - botStart
                        runtime = format_timespan(runtime)
                        cl.sendMessage(op.message.to, "機器運行時間 {}".format(str(runtime)))
                    elif "進群踢人開" == op.message.text:
                        # enable join-kick and persist settings to temp.json
                        settings["joinkick"] = True
                        with open('temp.json', 'w') as fp:
                            json.dump(settings, fp, sort_keys=True, indent=4)
                        cl.sendMessage(to=op.message.to, text="已開啟")
                    elif "進群踢人關" == op.message.text:
                        # disable join-kick and persist settings
                        settings["joinkick"] = False
                        with open('temp.json', 'w') as fp:
                            json.dump(settings, fp, sort_keys=True, indent=4)
                        cl.sendMessage(to=op.message.to, text="已關閉")
                    elif "專武開" == op.message.text:
                        # enable the kick feature and persist settings
                        settings["kick"] = True
                        with open('temp.json', 'w') as fp:
                            json.dump(settings, fp, sort_keys=True, indent=4)
                        cl.sendMessage(to=op.message.to, text="已開啟")
                    elif "專武關" == op.message.text:
                        # disable the kick feature and persist settings
                        settings["kick"] = False
                        with open('temp.json', 'w') as fp:
                            json.dump(settings, fp, sort_keys=True, indent=4)
                        cl.sendMessage(to=op.message.to, text="已關閉")
                    elif "清空列表" == op.message.text:
                        # clear the kick list
                        klist.clear()
                        cl.sendMessage(to=op.message.to, text="清除快取完成")
                    elif op.message.text.lower() == ".set":
                        # report current feature toggles
                        # NOTE(review): t0-t3 are computed but never used.
                        t0 = time.time()
                        t1 = time.time() - t0
                        t2 = time.time()
                        t3 = time.time() - t2
                        time.sleep(0.5)
                        ret_ = "偵測訊息\n"
                        # NOTE(review): compares against 0 here but True
                        # elsewhere; False == 0 in Python so it works, yet
                        # the inconsistency is worth normalizing.
                        if settings["kick"] == 0:
                            ret_ += "專武已關閉\n"
                        else:
                            ret_ += "專武已開啟\n"
                        if settings["joinkick"] == True:
                            ret_ += "進群踢已開啟\n"
                        else:
                            ret_ += "進群踢已關閉\n"
                        ret_ += "終端登入的次數 {}\n".format(len(bots))
                        ret_ += "登入版本 測試版專武"
                        cl.sendMessage(op.message.to, str(ret_))
                        return
    except Exception as e:
        print(e)
def test_format_timespan(self):
    """Test :func:`humanfriendly.format_timespan()`."""
    minute = 60
    hour = minute * 60
    day = hour * 24
    week = day * 7
    year = week * 52
    # Table of (expected text, number of seconds, keyword arguments),
    # checked in order against format_timespan().
    cases = [
        ('1 millisecond', 0.001, dict(detailed=True)),
        ('500 milliseconds', 0.5, dict(detailed=True)),
        ('0.5 seconds', 0.5, dict(detailed=False)),
        ('0 seconds', 0, {}),
        ('0.54 seconds', 0.54321, {}),
        ('1 second', 1, {}),
        ('3.14 seconds', math.pi, {}),
        ('1 minute', minute, {}),
        ('1 minute and 20 seconds', 80, {}),
        ('2 minutes', minute * 2, {}),
        ('1 hour', hour, {}),
        ('2 hours', hour * 2, {}),
        ('1 day', day, {}),
        ('2 days', day * 2, {}),
        ('1 week', week, {}),
        ('2 weeks', week * 2, {}),
        ('1 year', year, {}),
        ('2 years', year * 2, {}),
        ('6 years, 5 weeks, 4 days, 3 hours, 2 minutes and 500 milliseconds',
         year * 6 + week * 5 + day * 4 + hour * 3 + minute * 2 + 0.5,
         dict(detailed=True)),
        ('1 year, 2 weeks and 3 days',
         year + week * 2 + day * 3 + hour * 12, {}),
    ]
    for expected, seconds, kwargs in cases:
        self.assertEqual(expected,
                         humanfriendly.format_timespan(seconds, **kwargs))
async def on_member_join(self, member):
    """Handle a member joining a guild.

    Performs, in order: invite-usage detection (premium guilds only),
    optional global-ban enforcement, the configured join greeting,
    a moderation-log embed, and auto-decancer/auto-dehoist nickname fixes.
    """
    premium = self.bot.premium_guilds
    usedinvite = None
    if not member.bot:
        if member.guild.id in premium:
            # Compare cached invites against a fresh load; the invite whose
            # value changed is presumably the one that was used.
            # NOTE(review): the loop keeps the LAST differing invite if
            # several changed — confirm that is acceptable.
            before = await self.bot.get_invites(member.guild.id)
            after = await self.bot.load_invites(member.guild.id)
            for inv in before:
                a = after.get(inv, False)
                b = before[inv]
                if b != a:
                    usedinvite = inv
        if usedinvite:
            self.bot.dispatch('invite_join', member, usedinvite)
        if not usedinvite and 'PUBLIC' in member.guild.features:
            # Public guilds can be joined without an invite (discovery/lurking)
            usedinvite = 'Joined without an invite (Lurking/Server Discovery)'
    conf = self.bot.get_config(member.guild)
    logch = conf.get('log.moderation')
    if conf.get('mod.globalbans'):
        # Enforce the KSoft global ban list; any API failure is ignored.
        try:
            banned = await self.bot.ksoft.bans_check(member.id)
            if banned:
                try:
                    await member.guild.ban(
                        member,
                        reason=f'{member} was found on global ban list')
                    self.recentgban.append(
                        f'{member.id}-{member.guild.id}')
                    if logch:
                        embed = discord.Embed(
                            color=discord.Color.red(),
                            timestamp=datetime.datetime.now(
                                datetime.timezone.utc),
                            description=f'**{member.mention} was banned**')
                        embed.set_author(name=member, icon_url=str(
                            member.avatar_url_as(
                                static_format='png', size=2048)))
                        embed.add_field(
                            name='Reason',
                            value=f'{member} was found on global ban list',
                            inline=False)
                        embed.set_footer(text=f"Member ID: {member.id}")
                        try:
                            return await logch.send(embed=embed)
                        except Exception:
                            pass
                except discord.HTTPException:
                    # ban failed (e.g. missing permissions) — stop here
                    return
        except Exception:
            pass
    if conf.get('greet.joinmsg'):
        # Send the configured greeting with template variables substituted.
        joinchan = conf.get('greet.joinchannel')
        joinmsg = conf.get('greet.joinmsg')
        vars = {
            '{user.mention}': member.mention,
            '{user}': str(member),
            '{user.name}': member.name,
            '{user.discrim}': member.discriminator,
            '{server}': str(member.guild),
            '{guild}': str(member.guild),
            '{count}': str(member.guild.member_count),
            '@everyone': u'@\u200beveryone',  # Nobody needs @everyone in a join/leave message
            '@here': u'@\u200bhere'
        }
        if joinchan and joinmsg:
            message = joinmsg
            for var, value in vars.items():
                message = message.replace(var, value)
            await joinchan.send(message)
    if logch:
        # Moderation-log embed describing the join.
        embed = discord.Embed(
            title='Member Joined',
            url='https://i.giphy.com/media/Nx0rz3jtxtEre/giphy.gif',
            color=discord.Color.green(),
            timestamp=datetime.datetime.now(datetime.timezone.utc))
        embed.set_author(name=f'{member}', icon_url=str(
            member.avatar_url_as(static_format='png', size=2048)))
        # NOTE(review): mixes naive utcnow() with member.created_at —
        # works only while created_at is naive UTC; confirm library version.
        embed.add_field(
            name='Account Created',
            value=humanfriendly.format_timespan(
                datetime.datetime.utcnow() - member.created_at) + ' ago',
            inline=False)
        if usedinvite and member.guild.id in premium:
            embed.add_field(name='Invite Used', value=usedinvite,
                            inline=False)
        if member.guild.id not in premium and randint(0, 100) == 69:  # Nice
            # Occasional upsell for non-premium guilds (~1% of joins).
            embed.add_field(
                name='Want to see what invite they used?',
                value=
                'Fire Premium allows you to do that and more.\n[Learn More](https://gaminggeek.dev/premium)',
                inline=False)
        if member.bot:
            # For bots, find who added them via the audit log.
            try:
                async for e in member.guild.audit_logs(
                        action=discord.AuditLogAction.bot_add, limit=10):
                    if e.target.id == member.id:
                        embed.add_field(name='Invited By',
                                        value=f'{e.user} ({e.user.id})',
                                        inline=False)
                        break
            except Exception as e:
                pass
        embed.set_footer(text=f'User ID: {member.id}')
        try:
            await logch.send(embed=embed)
        except Exception:
            pass
    try:
        # Nickname hygiene: replace undesirable names with a neutral one.
        badname = conf.get(
            'utils.badname') or f'John Doe {member.discriminator}'
        if conf.get(
                'mod.autodecancer'
        ) and member.guild.me.guild_permissions.manage_nicknames:
            if not self.bot.isascii(
                    member.name.replace('‘', '\'').replace(
                        '“', '"').replace(
                            '“', '"')):  # fix weird mobile characters
                return await member.edit(
                    nick=badname,
                    reason=
                    f'Name changed due to auto-decancer. The name contains non-ascii characters'
                )
        if conf.get(
                'mod.autodehoist'
        ) and member.guild.me.guild_permissions.manage_nicknames:
            if self.bot.ishoisted(member.name):
                return await member.edit(
                    nick=badname,
                    reason=
                    f'Name changed due to auto-dehoist. The name starts with a hoisted character'
                )
    except Exception:
        pass
def see_time(note):
    """Print *note* followed by the detailed time elapsed since ``start``."""
    elapsed = time.perf_counter() - start
    print(note, hf.format_timespan(elapsed, detailed=True))
minutesLater="180" #get current time for passing to functions today = datetime.datetime.now() #manually override time for testing with below, adjust time as needed #today = datetime.datetime.strptime("2018-04-13 17:40:00","%Y-%m-%d %H:%M:%S") #check if there is new train data getTrainData() #figure out stuff servicelist=getTodayServiceIdList(today) tripslist=getValidTrips(servicelist,destStations) stopTimes=getStopTimes(tripslist,pickupStops) nextTrains=getUpCommingTrains(stopTimes,minutesLater,today) #print(nextTrains) #pprint(nextTrains) dak=[] nownow = datetime.datetime.now() for train in nextTrains: timeTilTrain=train['depart_time']-nownow print(train['stop_id']," in ",round(timeTilTrain.total_seconds()/60)," minutes",nownow.strftime('%H%M%S'),train['depart_time'].strftime('%H%M%S')) dak.append({'value':train['stop_id']+' in '+humanfriendly.format_timespan(round(timeTilTrain.total_seconds()),max_units=2),'title':'Trains to Millennium Station','subtitle':"schd: "+str(train['depart_time'].strftime("%H:%M %p"))}) dak_json=json.dumps(dak) #pprint(dak) #pprint(dak_json) print(json.dumps(dak, indent=4, sort_keys=True))
def build():
    """Entry point for the `build` command.

    Parses the build configuration from the command line, validates the
    Docker environment (including Windows-specific kernel/DLL/image-size
    checks), then builds or pulls the chain of UE4 container images
    (prerequisites -> source -> engine -> minimal -> full) inside a
    temporary build context. Exits the process with a non-zero status on
    any configuration or build error.
    """
    # Create our logger to generate coloured output on stderr
    logger = Logger(prefix='[{} build] '.format(sys.argv[0]))

    # Register our supported command-line arguments
    parser = argparse.ArgumentParser(prog='{} build'.format(sys.argv[0]))
    BuildConfiguration.addArguments(parser)

    # If no command-line arguments were supplied, display the help message and exit
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)

    # Parse the supplied command-line arguments
    try:
        config = BuildConfiguration(parser, sys.argv[1:])
    except RuntimeError as e:
        logger.error('Error: {}'.format(e))
        sys.exit(1)

    # Verify that Docker is installed
    if DockerUtils.installed() == False:
        logger.error('Error: could not detect Docker version. Please ensure Docker is installed.')
        sys.exit(1)

    # Create an auto-deleting temporary directory to hold our build context
    with tempfile.TemporaryDirectory() as tempDir:

        # Copy our Dockerfiles to the temporary directory
        contextOrig = join(os.path.dirname(os.path.abspath(__file__)), 'dockerfiles')
        contextRoot = join(tempDir, 'dockerfiles')
        shutil.copytree(contextOrig, contextRoot)

        # Create the builder instance to build the Docker images
        builder = ImageBuilder(contextRoot, config.containerPlatform, logger)

        # Determine if we are building a custom version of UE4
        if config.custom == True:
            logger.info('CUSTOM ENGINE BUILD:', False)
            logger.info('Custom name: ' + config.release, False)
            logger.info('Repository: ' + config.repository, False)
            logger.info('Branch/tag: ' + config.branch + '\n', False)

        # Determine if we are building Windows or Linux containers
        if config.containerPlatform == 'windows':

            # Provide the user with feedback so they are aware of the Windows-specific values being used
            logger.info('WINDOWS CONTAINER SETTINGS', False)
            logger.info('Isolation mode: {}'.format(config.isolation), False)
            logger.info('Base OS image tag: {} (host OS is {})'.format(config.basetag, WindowsUtils.systemStringShort()), False)
            logger.info('Memory limit: {}'.format('No limit' if config.memLimit is None else '{:.2f}GB'.format(config.memLimit)), False)
            logger.info('Detected max image size: {:.0f}GB'.format(DockerUtils.maxsize()), False)
            logger.info('Directory to copy DLLs from: {}\n'.format(config.dlldir), False)

            # Verify that the user is not attempting to build images with a newer kernel version than the host OS
            if WindowsUtils.isNewerBaseTag(config.hostBasetag, config.basetag):
                logger.error('Error: cannot build container images with a newer kernel version than that of the host OS!')
                sys.exit(1)

            # Check if the user is building a different kernel version to the host OS but is still copying DLLs from System32
            differentKernels = WindowsUtils.isInsiderPreview() or config.basetag != config.hostBasetag
            if config.pullPrerequisites == False and differentKernels == True and config.dlldir == config.defaultDllDir:
                logger.error('Error: building images with a different kernel version than the host,', False)
                logger.error('but a custom DLL directory has not specified via the `-dlldir=DIR` arg.', False)
                logger.error('The DLL files will be the incorrect version and the container OS will', False)
                logger.error('refuse to load them, preventing the built Engine from running correctly.', False)
                sys.exit(1)

            # Attempt to copy the required DLL files from the host system if we are building the prerequisites image
            if config.pullPrerequisites == False:
                for dll in WindowsUtils.requiredHostDlls(config.basetag):
                    shutil.copy2(join(config.dlldir, dll), join(builder.context('ue4-build-prerequisites'), dll))

            # Ensure the Docker daemon is configured correctly
            requiredLimit = WindowsUtils.requiredSizeLimit()
            if DockerUtils.maxsize() < requiredLimit:
                logger.error('SETUP REQUIRED:')
                logger.error('The max image size for Windows containers must be set to at least {}GB.'.format(requiredLimit))
                logger.error('See the Microsoft documentation for configuration instructions:')
                logger.error('https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container#step-4-expand-maximum-container-disk-size')
                logger.error('Under Windows Server, the command `{} setup` can be used to automatically configure the system.'.format(sys.argv[0]))
                sys.exit(1)

        elif config.containerPlatform == 'linux':

            # Determine if we are building CUDA-enabled container images
            capabilities = 'CUDA {} + OpenGL'.format(config.cuda) if config.cuda is not None else 'OpenGL'
            logger.info('LINUX CONTAINER SETTINGS', False)
            logger.info('Building GPU-enabled images compatible with NVIDIA Docker ({} support).\n'.format(capabilities), False)

        # Report which Engine components are being excluded (if any)
        logger.info('GENERAL SETTINGS', False)
        if len(config.excludedComponents) > 0:
            logger.info('Excluding the following Engine components:', False)
            for component in config.describeExcludedComponents():
                logger.info('- {}'.format(component), False)
        else:
            logger.info('Not excluding any Engine components.', False)

        # Determine if we are performing a dry run
        if config.dryRun == True:

            # Don't bother prompting the user for any credentials
            logger.info('Performing a dry run, `docker build` commands will be printed and not executed.', False)
            username = ''
            password = ''

        else:

            # Retrieve the Git username and password from the user
            print('\nRetrieving the Git credentials that will be used to clone the UE4 repo')
            username = _getUsername(config.args)
            password = _getPassword(config.args)
            print()

        # Start the HTTP credential endpoint as a child process and wait for it to start
        endpoint = CredentialEndpoint(username, password)
        endpoint.start()

        try:

            # Keep track of our starting time
            startTime = time.time()

            # Compute the build options for the UE4 build prerequisites image
            # (This is the only image that does not use any user-supplied tag suffix, since the tag always reflects any customisations)
            prereqsArgs = ['--build-arg', 'BASEIMAGE=' + config.baseImage]
            if config.containerPlatform == 'windows':
                prereqsArgs = prereqsArgs + ['--build-arg', 'HOST_VERSION=' + WindowsUtils.getWindowsBuild()]

            # Build or pull the UE4 build prerequisites image
            if config.pullPrerequisites == True:
                builder.pull('adamrehn/ue4-build-prerequisites:{}'.format(config.prereqsTag), config.rebuild, config.dryRun)
            else:
                builder.build('adamrehn/ue4-build-prerequisites', [config.prereqsTag], config.platformArgs + prereqsArgs, config.rebuild, config.dryRun)

            # Build the UE4 source image
            mainTags = ['{}{}-{}'.format(config.release, config.suffix, config.prereqsTag), config.release + config.suffix]
            prereqConsumerArgs = ['--build-arg', 'PREREQS_TAG={}'.format(config.prereqsTag)]
            ue4SourceArgs = prereqConsumerArgs + [
                '--build-arg', 'GIT_REPO={}'.format(config.repository),
                '--build-arg', 'GIT_BRANCH={}'.format(config.branch)
            ]
            builder.build('ue4-source', mainTags, config.platformArgs + ue4SourceArgs + endpoint.args(), config.rebuild, config.dryRun)

            # Build the UE4 Engine source build image, unless requested otherwise by the user
            ue4BuildArgs = prereqConsumerArgs + [
                '--build-arg', 'TAG={}'.format(mainTags[1]),
                '--build-arg', 'NAMESPACE={}'.format(GlobalConfiguration.getTagNamespace())
            ]
            if config.noEngine == False:
                builder.build('ue4-engine', mainTags, config.platformArgs + ue4BuildArgs, config.rebuild, config.dryRun)
            else:
                logger.info('User specified `--no-engine`, skipping ue4-engine image build.')

            # Build the minimal UE4 CI image, unless requested otherwise by the user
            buildUe4Minimal = config.noMinimal == False
            if buildUe4Minimal == True:
                builder.build('ue4-minimal', mainTags, config.platformArgs + config.exclusionFlags + ue4BuildArgs, config.rebuild, config.dryRun)
            else:
                logger.info('User specified `--no-minimal`, skipping ue4-minimal image build.')

            # Build the full UE4 CI image, unless requested otherwise by the user
            # (ue4-full depends on ue4-minimal, so it is skipped when minimal is)
            buildUe4Full = buildUe4Minimal == True and config.noFull == False
            if buildUe4Full == True:
                builder.build('ue4-full', mainTags, config.platformArgs + ue4BuildArgs, config.rebuild, config.dryRun)
            else:
                logger.info('Not building ue4-minimal or user specified `--no-full`, skipping ue4-full image build.')

            # Report the total execution time
            endTime = time.time()
            logger.action('Total execution time: {}'.format(humanfriendly.format_timespan(endTime - startTime)))

            # Stop the HTTP server
            endpoint.stop()

        except Exception as e:

            # One of the images failed to build
            logger.error('Error: {}'.format(e))
            endpoint.stop()
            sys.exit(1)
def print_formatted_timespan(value):
    """Convert *value* (a number of seconds) to a human readable timespan and print it."""
    seconds = float(value)
    output(format_timespan(seconds))
def __exit__(self, type_, value_, traceback_):
    """Stop timing, record the elapsed wall-clock time and optionally report it."""
    now = time.time()
    self.elapsed = now - self._tstart
    if not self._verbose:
        return
    print('Elapsed time: %s.' % hf.format_timespan(self.elapsed))
def run(
    cls,
    model: AbsESPnetModel,
    optimizers: Sequence[torch.optim.Optimizer],
    schedulers: Sequence[Optional[AbsScheduler]],
    train_iter_factory: AbsIterFactory,
    valid_iter_factory: AbsIterFactory,
    plot_attention_iter_factory: Optional[AbsIterFactory],
    reporter: Reporter,
    output_dir: Path,
    max_epoch: int,
    seed: int,
    patience: Optional[int],
    keep_nbest_models: int,
    early_stopping_criterion: Sequence[str],
    best_model_criterion: Sequence[Sequence[str]],
    val_scheduler_criterion: Sequence[str],
    trainer_options,
    distributed_option: DistributedOption,
) -> None:
    """Perform training. This method performs the main process of training.

    Runs the epoch loop from the epoch after the reporter's current one up to
    ``max_epoch``. Each iteration trains and validates for one epoch, steps the
    epoch-level LR schedulers, and (on the non-distributed case or on
    distributed rank 0) plots attentions, logs/plots metrics, saves
    ``checkpoint.pth`` and ``{epoch}epoch.pth``, maintains ``latest.pth`` and
    per-criterion ``{phase}.{key}.best.pth`` symlinks, and prunes model files
    outside the n-best set. The loop stops early if every step of an epoch
    produced invalid gradients or if early stopping triggers.
    """
    assert check_argument_types()
    # NOTE(kamo): Don't check the type more strictly as far trainer_options
    assert is_dataclass(trainer_options), type(trainer_options)

    # NOTE(kamo): trainer_options doesn't always have "train_dtype"
    # Apex is only needed when train_dtype names one of amp's opt levels.
    use_apex = getattr(trainer_options, "train_dtype", "") in (
        "O0",
        "O1",
        "O2",
        "O3",
    )
    if use_apex:
        try:
            from apex import amp
        except ImportError:
            # NOTE(review): only logs; a missing apex will still raise
            # NameError later when amp.state_dict() is called at checkpoint
            # time — confirm this is intentional.
            logging.error(
                f"You need to install apex. "
                f"See https://github.com/NVIDIA/apex#linux"
            )

    # Resume from the epoch after the one recorded in the reporter state.
    start_epoch = reporter.get_epoch() + 1
    if start_epoch == max_epoch + 1:
        logging.warning(
            f"The training has already reached at max_epoch: {start_epoch}"
        )

    # Wrap the model for the selected parallelism mode; dp_model is used for
    # train/validate, while the bare `model` is used for saving and plotting.
    if distributed_option.distributed:
        # Use torch DDP instead of apex DDP
        # https://github.com/NVIDIA/apex/issues/494
        dp_model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=(
                # Perform multi-Process with multi-GPUs
                [torch.cuda.current_device()]
                if distributed_option.ngpu == 1
                # Perform single-Process with multi-GPUs
                else None
            ),
            output_device=(
                torch.cuda.current_device()
                if distributed_option.ngpu == 1
                else None
            ),
        )
    elif distributed_option.ngpu > 1:
        # apex.amp supports DataParallel now.
        dp_model = torch.nn.parallel.DataParallel(
            model,
            device_ids=list(range(distributed_option.ngpu)),
        )
    else:
        # NOTE(kamo): DataParallel also should work with ngpu=1,
        # but for debuggability it's better to keep this block.
        dp_model = model

    # Tensorboard writing (and all artifact output below) happens only on the
    # non-distributed case or on distributed rank 0.
    if not distributed_option.distributed or distributed_option.dist_rank == 0:
        summary_writer = SummaryWriter(str(output_dir / "tensorboard"))
    else:
        summary_writer = None

    start_time = time.perf_counter()
    for iepoch in range(start_epoch, max_epoch + 1):
        if iepoch != start_epoch:
            # ETA = average wall time per finished epoch * remaining epochs.
            logging.info(
                "{}/{}epoch started. Estimated time to finish: {}".format(
                    iepoch,
                    max_epoch,
                    humanfriendly.format_timespan(
                        (time.perf_counter() - start_time)
                        / (iepoch - start_epoch)
                        * (max_epoch - iepoch + 1)
                    ),
                )
            )
        else:
            logging.info(f"{iepoch}/{max_epoch}epoch started")
        # Reseed per epoch so shuffling etc. is reproducible on resume.
        set_all_random_seed(seed + iepoch)
        reporter.set_epoch(iepoch)
        # 1. Train and validation for one-epoch
        with reporter.observe("train") as sub_reporter:
            all_steps_are_invalid = cls.train_one_epoch(
                model=dp_model,
                optimizers=optimizers,
                schedulers=schedulers,
                iterator=train_iter_factory.build_iter(iepoch),
                reporter=sub_reporter,
                options=trainer_options,
            )
        with reporter.observe("valid") as sub_reporter:
            cls.validate_one_epoch(
                model=dp_model,
                iterator=valid_iter_factory.build_iter(iepoch),
                reporter=sub_reporter,
                options=trainer_options,
            )
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            # att_plot doesn't support distributed
            if plot_attention_iter_factory is not None:
                with reporter.observe("att_plot") as sub_reporter:
                    cls.plot_attention(
                        model=model,
                        output_dir=output_dir / "att_ws",
                        summary_writer=summary_writer,
                        iterator=plot_attention_iter_factory.build_iter(iepoch),
                        reporter=sub_reporter,
                        options=trainer_options,
                    )
        # 2. LR Scheduler step
        for scheduler in schedulers:
            # Validation-driven schedulers receive the monitored metric;
            # plain epoch schedulers just step.
            if isinstance(scheduler, AbsValEpochStepScheduler):
                scheduler.step(reporter.get_value(*val_scheduler_criterion))
            elif isinstance(scheduler, AbsEpochStepScheduler):
                scheduler.step()

        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            # 3. Report the results
            logging.info(reporter.log_message())
            reporter.matplotlib_plot(output_dir / "images")
            reporter.tensorboard_add_scalar(summary_writer)

            # 4. Save/Update the checkpoint
            torch.save(
                {
                    "model": model.state_dict(),
                    "reporter": reporter.state_dict(),
                    "optimizers": [o.state_dict() for o in optimizers],
                    "schedulers": [
                        s.state_dict() if s is not None else None
                        for s in schedulers
                    ],
                    "amp": amp.state_dict() if use_apex else None,
                },
                output_dir / "checkpoint.pth",
            )

            # 5. Save the model and update the link to the best model
            torch.save(model.state_dict(), output_dir / f"{iepoch}epoch.pth")
            # Creates a sym link latest.pth -> {iepoch}epoch.pth
            p = output_dir / f"latest.pth"
            if p.is_symlink() or p.exists():
                p.unlink()
            p.symlink_to(f"{iepoch}epoch.pth")

            _improved = []
            for _phase, k, _mode in best_model_criterion:
                # e.g. _phase, k, _mode = "train", "loss", "min"
                if reporter.has(_phase, k):
                    best_epoch = reporter.get_best_epoch(_phase, k, _mode)
                    # Creates sym links if it's the best result
                    if best_epoch == iepoch:
                        p = output_dir / f"{_phase}.{k}.best.pth"
                        if p.is_symlink() or p.exists():
                            p.unlink()
                        p.symlink_to(f"{iepoch}epoch.pth")
                        _improved.append(f"{_phase}.{k}")
            if len(_improved) == 0:
                logging.info(f"There are no improvements in this epoch")
            else:
                logging.info(
                    f"The best model has been updated: " + ", ".join(_improved)
                )

            # 6. Remove the model files excluding n-best epoch and latest epoch
            _removed = []
            # Get the union set of the n-best among multiple criterion
            nbests = set().union(
                *[
                    set(reporter.sort_epochs(ph, k, m)[:keep_nbest_models])
                    for ph, k, m in best_model_criterion
                    if reporter.has(ph, k)
                ]
            )
            for e in range(1, iepoch):
                p = output_dir / f"{e}epoch.pth"
                if p.exists() and e not in nbests:
                    p.unlink()
                    _removed.append(str(p))
            if len(_removed) != 0:
                logging.info(
                    f"The model files were removed: " + ", ".join(_removed)
                )

        # 7. If any updating haven't happened, stops the training
        if all_steps_are_invalid:
            logging.warning(
                f"The gradients at all steps are invalid in this epoch. "
                f"Something seems wrong. This training was stopped at {iepoch}epoch"
            )
            break

        # 8. Check early stopping
        if patience is not None:
            if reporter.check_early_stopping(patience, *early_stopping_criterion):
                break
    else:
        # for-else: reached only when the loop completed without `break`.
        logging.info(f"The training was finished at {max_epoch} epochs ")
def make_results_table(gt_bbox_dir, pred_bbox_dir, outputFile, gt_format='xywh_pix', pred_format='y1x1y2x2_pix'):
    '''
    Creates dataframe from directories and format for ground truth boxes and predicted boxes.

    Args:
        gt_bbox_dir: directory of ground truth bounding boxes
        pred_bbox_dir: directory of predicted bounding boxes
        outputFile : filepath to .csv results table to write (results_{set}_{model}.csv)
        gt_format (Optional): format of ground truth bounding boxes 'y1x1y2x2_pix' or 'xywh_pix'. Default is 'xywh_pix'
        pred_format (Optional): format of predicted bounding boxes 'y1x1y2x2_pix' or 'xywh_pix'. Default is 'y1x1y2x2_pix'

    Returns:
        dataframe with cols:
        ['img_id','gt_bbox','gt_format','pred_bbox','pred_format','confidence','gt_size','iou']
    '''
    print('Loading GT & predicted bboxes...')
    startTime = time.time()

    # Collect file stems (the 4-character '.txt' extension is stripped from
    # each *filename* here); the stems double as image ids.
    pred_bbox_fn = [fn[:-4] for fn in os.listdir(pred_bbox_dir)]
    gt_bbox_fn = [fn[:-4] for fn in os.listdir(gt_bbox_dir)]

    elapsed = time.time() - startTime
    print("Finished loaded {} GT bboxes and {} predicted bboxes in {}".format(
        len(gt_bbox_fn), len(pred_bbox_fn), humanfriendly.format_timespan(elapsed)))

    # Dictionaries keyed by image id: ground-truth bbox, predicted bbox, confidence
    ground_truth_dict = {}
    for fn in gt_bbox_fn:
        bbox = parse_txt(os.path.join(gt_bbox_dir, fn + '.txt'), gt_format,
                         'ground_truth')
        ground_truth_dict[fn] = bbox

    predicted_dict = {}
    conf_dict = {}
    for fn in pred_bbox_fn:
        bbox, conf = parse_txt(os.path.join(pred_bbox_dir, fn + '.txt'),
                               pred_format, 'predicted')
        predicted_dict[fn] = bbox
        conf_dict[fn] = conf

    # Build dataframe of image, ground_truth bboxes, predicted bboxes, and confidence
    print('Creating results table with ...')
    # BUG FIX: previously this was gt_bbox_fn[:-4], which sliced the *list*
    # (the per-filename extension strip already happened above) and silently
    # dropped the last four images from the results table.
    data = {'img_id': gt_bbox_fn}
    detect_df = pd.DataFrame(data, columns=['img_id'])

    print('... GT bboxes')
    detect_df['gt_bbox'] = detect_df['img_id'].map(
        pd.Series(ground_truth_dict))
    detect_df['gt_format'] = gt_format

    print('... predicted bboxes')
    detect_df['pred_bbox'] = detect_df['img_id'].map(pd.Series(predicted_dict))
    detect_df['pred_format'] = pred_format

    print('... confidence')
    detect_df['confidence'] = detect_df['img_id'].map(pd.Series(conf_dict))

    # Add ground_truth bbox size (pixel area) to dataframe.
    # NOTE(review): area is computed corner-style as (b[2]-b[0])*(b[3]-b[1]);
    # confirm this matches boxes supplied in 'xywh_pix' format.
    print('... Bbox size')
    detect_df['gt_size'] = detect_df.apply(
        lambda row: None if row.gt_bbox is None else int(
            (row.gt_bbox[2] - row.gt_bbox[0]) *
            (row.gt_bbox[3] - row.gt_bbox[1])),
        axis=1)

    # Add IoU to dataframe; 'not_a_dam' images have no ground truth, so no IoU
    print('... IoU')
    iou_dict = {}
    for fn in predicted_dict:
        if 'not_a_dam' not in fn:
            iou = calc_IoU(ground_truth_dict[fn], predicted_dict[fn],
                           gt_format, pred_format)
            iou_dict[fn] = iou
        else:
            iou_dict[fn] = None
    detect_df['iou'] = detect_df['img_id'].map(pd.Series(iou_dict))
    print('... Done \n')

    # Index by img_id and persist as CSV
    detect_df = detect_df.set_index('img_id')
    print('Writing CSV ...')
    detect_df.to_csv(outputFile)
    print('... Done')
    return detect_df
def process_images(db_path, output_dir, image_base_dir, options=None):
    """
    Writes images and html to output_dir to visualize the annotations in the
    json file db_path.

    db_path can also be a previously-loaded database (a dict in COCO-style
    format with 'annotations', 'images', and 'categories' keys).

    Returns the html filename and the database:
    return htmlOutputFile,image_db
    """
    if options is None:
        options = DbVizOptions()
    print(options.__dict__)

    os.makedirs(os.path.join(output_dir, 'rendered_images'), exist_ok=True)
    assert (os.path.isdir(image_base_dir))

    # Load the database from disk, or accept an already-loaded dict
    if isinstance(db_path, str):
        assert (os.path.isfile(db_path))
        print('Loading database from {}...'.format(db_path))
        image_db = json.load(open(db_path))
        print('...done')
    elif isinstance(db_path, dict):
        print('Using previously-loaded DB')
        image_db = db_path
    else:
        raise ValueError('Illegal dictionary or filename')

    annotations = image_db['annotations']
    images = image_db['images']
    categories = image_db['categories']

    # Optionally remove all images without bounding boxes, *before* sampling
    if options.trim_to_images_with_bboxes:
        bHasBbox = [False] * len(annotations)
        for iAnn, ann in enumerate(annotations):
            if 'bbox' in ann:
                assert isinstance(ann['bbox'], list)
                bHasBbox[iAnn] = True
        annotationsWithBboxes = list(compress(annotations, bHasBbox))
        imageIDsWithBboxes = [x['image_id'] for x in annotationsWithBboxes]
        imageIDsWithBboxes = set(imageIDsWithBboxes)
        bImageHasBbox = [False] * len(images)
        for iImage, image in enumerate(images):
            imageID = image['id']
            if imageID in imageIDsWithBboxes:
                bImageHasBbox[iImage] = True
        imagesWithBboxes = list(compress(images, bImageHasBbox))
        images = imagesWithBboxes

    # Optionally remove images with specific labels, *before* sampling
    if options.classes_to_exclude is not None:
        print('Indexing database')
        indexed_db = IndexedJsonDb(image_db)
        bValidClass = [True] * len(images)
        for iImage, image in enumerate(images):
            classes = indexed_db.get_classes_for_image(image)
            for excludedClass in options.classes_to_exclude:
                if excludedClass in classes:
                    bValidClass[iImage] = False
                    break
        imagesWithValidClasses = list(compress(images, bValidClass))
        images = imagesWithValidClasses

    # Put the annotations in a dataframe so we can select all annotations for a given image
    print('Creating data frames')
    df_anno = pd.DataFrame(annotations)
    df_img = pd.DataFrame(images)

    # Construct label map (category id -> category name)
    label_map = {}
    for cat in categories:
        label_map[int(cat['id'])] = cat['name']

    # Take a sample of images
    if options.num_to_visualize is not None:
        df_img = df_img.sample(n=options.num_to_visualize,
                               random_state=options.random_seed)

    images_html = []

    # Set of dicts representing inputs to render_db_bounding_boxes:
    #
    # bboxes, boxClasses, image_path
    rendering_info = []

    print('Preparing rendering list')
    # iImage = 0
    for iImage in tqdm(range(len(df_img))):

        img_id = df_img.iloc[iImage]['id']
        img_relative_path = df_img.iloc[iImage]['file_name']
        img_path = os.path.join(
            image_base_dir,
            image_filename_to_path(img_relative_path, image_base_dir))

        # all annotations on this image
        annos_i = df_anno.loc[df_anno['image_id'] == img_id, :]

        bboxes = []
        boxClasses = []

        # All the class labels we've seen for this image (with or without bboxes)
        imageCategories = set()

        annotationLevelForImage = ''

        # Iterate over annotations for this image
        # iAnn = 0; anno = annos_i.iloc[iAnn]
        for iAnn, anno in annos_i.iterrows():

            if 'sequence_level_annotation' in anno:
                bSequenceLevelAnnotation = anno['sequence_level_annotation']
                if bSequenceLevelAnnotation:
                    annLevel = 'sequence'
                else:
                    annLevel = 'image'
                if annotationLevelForImage == '':
                    annotationLevelForImage = annLevel
                elif annotationLevelForImage != annLevel:
                    annotationLevelForImage = 'mixed'

            categoryID = anno['category_id']
            categoryName = label_map[categoryID]
            if options.add_search_links:
                categoryName = categoryName.replace('"', '')
                categoryName = '<a href="https://www.bing.com/images/search?q={}">{}</a>'.format(
                    categoryName, categoryName)
            imageCategories.add(categoryName)

            if 'bbox' in anno:
                bbox = anno['bbox']
                # pandas fills missing 'bbox' cells with NaN (a float)
                if isinstance(bbox, float):
                    assert math.isnan(
                        bbox
                    ), "I shouldn't see a bbox that's neither a box nor NaN"
                    continue
                bboxes.append(bbox)
                boxClasses.append(anno['category_id'])

        imageClasses = ', '.join(imageCategories)

        file_name = '{}_gtbbox.jpg'.format(img_id.lower().split('.jpg')[0])
        file_name = file_name.replace('/', '~')

        rendering_info.append({
            'bboxes': bboxes,
            'boxClasses': boxClasses,
            'img_path': img_path,
            'output_file_name': file_name
        })

        labelLevelString = ''
        if len(annotationLevelForImage) > 0:
            labelLevelString = ' (annotation level: {})'.format(
                annotationLevelForImage)

        # We're adding html for an image before we render it, so it's possible this image will
        # fail to render. For applications where this script is being used to debug a database
        # (the common case?), this is useful behavior, for other applications, this is annoying.
        #
        # TODO: optionally write html only for images where rendering succeeded
        images_html.append({
            'filename': '{}/{}'.format('rendered_images', file_name),
            'title': '{}<br/>{}, number of boxes: {}, class labels: {}{}'.format(
                img_relative_path, img_id, len(bboxes), imageClasses, labelLevelString),
            'textStyle': 'font-family:verdana,arial,calibri;font-size:80%;text-align:left;margin-top:20;margin-bottom:5'
        })

    # ...for each image

    def render_image_info(rendering_info):
        """Render one image (with its boxes) to the rendered_images folder; best-effort."""
        img_path = rendering_info['img_path']
        bboxes = rendering_info['bboxes']
        bboxClasses = rendering_info['boxClasses']
        output_file_name = rendering_info['output_file_name']

        if not os.path.exists(img_path):
            print('Image {} cannot be found'.format(img_path))
            return

        try:
            original_image = vis_utils.open_image(img_path)
            original_size = original_image.size
            image = vis_utils.resize_image(original_image,
                                           options.viz_size[0],
                                           options.viz_size[1])
        except Exception as e:
            print('Image {} failed to open. Error: {}'.format(img_path, e))
            return

        vis_utils.render_db_bounding_boxes(boxes=bboxes,
                                           classes=bboxClasses,
                                           image=image,
                                           original_size=original_size,
                                           label_map=label_map)
        image.save(
            os.path.join(output_dir, 'rendered_images', output_file_name))

    # ...def render_image_info

    print('Rendering images')
    start_time = time.time()
    if options.parallelize_rendering:
        if options.parallelize_rendering_n_cores is None:
            pool = ThreadPool()
        else:
            print('Rendering images with {} workers'.format(
                options.parallelize_rendering_n_cores))
            pool = ThreadPool(options.parallelize_rendering_n_cores)
        # BUG FIX: imap returns a lazy iterator; the previous code created a
        # tqdm over it without consuming it, so we never waited for rendering
        # to finish (elapsed time was meaningless, and pool worker threads
        # could be torn down before all images were written). list(...) drains
        # the iterator and blocks until every image has been rendered.
        list(tqdm(pool.imap(render_image_info, rendering_info),
                  total=len(rendering_info)))
    else:
        for file_info in tqdm(rendering_info):
            render_image_info(file_info)
    elapsed = time.time() - start_time
    print('Rendered {} images in {}'.format(
        len(rendering_info), humanfriendly.format_timespan(elapsed)))

    if options.sort_by_filename:
        images_html = sorted(images_html, key=lambda x: x['filename'])

    htmlOutputFile = os.path.join(output_dir, 'index.html')

    htmlOptions = options.htmlOptions
    if isinstance(db_path, str):
        htmlOptions[
            'headerHtml'] = '<h1>Sample annotations from {}</h1>'.format(
                db_path)
    else:
        htmlOptions['headerHtml'] = '<h1>Sample annotations</h1>'

    write_html_image_list(filename=htmlOutputFile,
                          images=images_html,
                          options=htmlOptions)

    print('Visualized {} images, wrote results to {}'.format(
        len(images_html), htmlOutputFile))

    return htmlOutputFile, image_db
def test_format_timespan(self):
    """Test :func:`humanfriendly.format_timespan()`."""
    minute = 60
    hour = minute * 60
    day = hour * 24
    week = day * 7
    year = week * 52
    # Table of (expected, timespan, keyword arguments) cases, checked in order.
    cases = [
        ('1 millisecond', 0.001, dict(detailed=True)),
        ('500 milliseconds', 0.5, dict(detailed=True)),
        ('0.5 seconds', 0.5, dict(detailed=False)),
        ('0 seconds', 0, {}),
        ('0.54 seconds', 0.54321, {}),
        ('1 second', 1, {}),
        ('3.14 seconds', math.pi, {}),
        ('1 minute', minute, {}),
        ('1 minute and 20 seconds', 80, {}),
        ('2 minutes', minute * 2, {}),
        ('1 hour', hour, {}),
        ('2 hours', hour * 2, {}),
        ('1 day', day, {}),
        ('2 days', day * 2, {}),
        ('1 week', week, {}),
        ('2 weeks', week * 2, {}),
        ('1 year', year, {}),
        ('2 years', year * 2, {}),
        ('6 years, 5 weeks, 4 days, 3 hours, 2 minutes and 500 milliseconds',
         year * 6 + week * 5 + day * 4 + hour * 3 + minute * 2 + 0.5,
         dict(detailed=True)),
        ('1 year, 2 weeks and 3 days',
         year + week * 2 + day * 3 + hour * 12, {}),
        # Make sure milliseconds are never shown separately when detailed=False.
        # https://github.com/xolox/python-humanfriendly/issues/10
        ('1 minute, 1 second and 100 milliseconds', 61.10, dict(detailed=True)),
        ('1 minute and 1.1 second', 61.10, dict(detailed=False)),
        # Test for loss of precision as reported in issue 11:
        # https://github.com/xolox/python-humanfriendly/issues/11
        ('1 minute and 0.3 seconds', 60.300, {}),
        ('5 minutes and 0.3 seconds', 300.300, {}),
        ('1 second and 15 milliseconds', 1.015, dict(detailed=True)),
        ('10 seconds and 15 milliseconds', 10.015, dict(detailed=True)),
    ]
    for expected, timespan, kwargs in cases:
        assert expected == humanfriendly.format_timespan(timespan, **kwargs)
) parser.add_argument("--backup", action='store_true', help="keep a backup of the original .env file") parser.add_argument("--netloc", type=str, help="netloc (public dns. e.g., example.com)") parser.add_argument( "--ec2", action='store_true', help= "if run on an AWS EC2 instance, automatically detect the public DNS of the instance and use that for the netloc" ) parser.add_argument("--ssl", action='store_true', help="use SSL (i.e., https:// and wss://)") parser.add_argument("--debug", action='store_true', help="output debugging info") global args args = parser.parse_args() if args.debug: logger.setLevel(logging.DEBUG) logger.debug('debug mode is on') else: logger.setLevel(logging.INFO) main(args) total_end = timer() logger.info('all finished. total time: {}'.format( format_timespan(total_end - total_start)))
def lineBot(op): try: if op.type == 0: print ("[ 0 ] END OF OPERATION") return if op.type == 5: print ("[ 5 ] NOTIFIED AUTOBLOCK") if settings["autoAdd"] == True: nadya.findAndAddContactsByMid(op.param1) if(settings["addmsg"]in[""," ","\n",None]): pass else: nadya.sendMessage(op.param1,nadya.getContact(op.param2).displayName + str(settings["addmsg"])) if op.type == 13: print ("[ 13 ] NOTIFIED INVITE GROUP") group = nadya.getGroup(op.param1) if settings["autoJoin"] == True: nadya.acceptGroupInvitation(op.param1) if op.type == 15: if wait["bcommentOn"] and "bcomment" in wait: h = nadya.getContact(op.param2) nadya.sendMessage(op.param1,nadya.getContact(op.param2).displayName + "\n\n" + str(wait["bcomment"])) nadya.sendImageWithUrl(op.param1, "http://dl.profile.line-cdn.net/" + h.pictureStatus) if op.type == 17: if wait["acommentOn"] and "acomment" in wait: cnt = nadya.getContact(op.param2) h = nadya.getContact(op.param2) nadya.sendMessage(op.param1,cnt.displayName + "\n\n" + str(wait["acomment"])) nadya.sendImageWithUrl(op.param1, "http://dl.profile.line-cdn.net/" + h.pictureStatus) if op.type == 24: print ("[ 24 ] NOTIFIED LEAVE ROOM") if settings["autoLeave"] == True: nadya.leaveRoom(op.param1) if op.type == 25: print ("[ 25 ] SEND MESSAGE") msg = op.message text = msg.text msg_id = msg.id receiver = msg.to sender = msg._from if msg.toType == 0: if sender != nadya.profile.mid: to = sender else: to = receiver else: to = receiver if msg.contentType == 0: if text is None: return #==============================================================================# #==============================================================================# if op.type == 26: print ("[ 26 ] RECEIVE MESSAGE") msg = op.message text = msg.text msg_id = msg.id receiver = msg.to sender = msg._from if msg.toType == 0: if sender != nadya.profile.mid: to = sender else: to = receiver else: to = receiver if settings["autoRead"] == True: nadya.sendChatChecked(to, msg_id) if to in read["readPoint"]: 
if sender not in read["ROM"][to]: read["ROM"][to][sender] = True if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True: text = msg.text if text is not None: nadya.sendMessage(msg.to,text) #==============================================================================# if msg.text in ["คำสั่ง","Help","help"]: helpMessage = helpmessage() nadya.sendMessage(to, str(helpMessage)) if text.lower() == 'คำสั่ง2': if msg._from in admin: helpTextToSpeech = helptexttospeech() nadya.sendMessage(to, str(helpTextToSpeech)) if text.lower() == 'คำสั่ง3': if msg._from in superadmin: helpTranslate = helptranslate() nadya.sendMessage(to, str(helpTranslate)) #==============================================================================# #===================SUPERADMIN================================================# if "!ชื่อ: " in msg.text.lower(): if msg._from in superadmin: spl = re.split("!ชื่อ: ",msg.text,flags=re.IGNORECASE) if spl[0] == "": prof = nadya.getProfile() prof.displayName = spl[1] nadya.updateProfile(prof) nadya.sendMessage(to, "เปลี่ยนชื่อสำเร็จแล้ว(。-`ω´-)") if "!ตัส: " in msg.text.lower(): if msg._from in superadmin: spl = re.split("!ตัส: ",msg.text,flags=re.IGNORECASE) if spl[0] == "": prof = nadya.getProfile() prof.statusMessage = spl[1] nadya.updateProfile(prof) nadya.sendMessage(to, "เปลี่ยนตัสสำเร็จแล้ว(。-`ω´-)") if text.lower() == "!เปลี่ยนรูป": if msg._from in superadmin: settings["changePictureProfile"] = True nadya.sendMessage(to, "ส่งรูปมา(。-`ω´-)") if text.lower() == "!เปลี่ยนกลุ่ม": if msg._from in superadmin: settings["changeGroupPicture"] = True nadya.sendMessage(to, "ส่งรูปมา(。-`ω´-)") #===================SUPERADMIN================================================# #==============================================================================# if msg.text.lower().startswith(".เตะ "): if msg._from in admin: targets = [] key = eval(msg.contentMetadata["MENTION"]) 
key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: nadya.kickoutFromGroup(msg.to,[target]) except: nadya.sendText(msg.to,"Error") if ".ยกเลิก" == msg.text.lower(): if msg._from in admin: if msg.toType == 2: group = nadya.getGroup(msg.to) gMembMids = [contact.mid for contact in group.invitee] for _mid in gMembMids: nadya.cancelGroupInvitation(msg.to,[_mid]) nadya.sendMessage(to,"ยกเลิกค้างเชิญเสร็จสิ้น(。-`ω´-)") if text.lower() == '.ลบรัน': if msg._from in admin: gid = nadya.getGroupIdsInvited() start = time.time() for i in gid: nadya.rejectGroupInvitation(i) elapsed_time = time.time() - start nadya.sendMessage(to, "กำลังดำเนินการ(。-`ω´-)") nadya.sendMessage(to, "เวลาที่ใช้: %sวินาที(。-`ω´-)" % (elapsed_time)) if ".โทร" in msg.text.lower(): if msg._from in admin: if msg.toType == 2: sep = text.split(" ") strnum = text.replace(sep[0] + " ","") num = int(strnum) nadya.sendMessage(to, "เชิญเข้าร่วมการโทร(。-`ω´-)") for var in range(0,num): group = nadya.getGroup(to) members = [mem.mid for mem in group.members] nadya.acquireGroupCallRoute(to) nadya.inviteIntoGroupCall(to, contactIds=members) if text.lower() == '.ออก': if msg._from in admin: if msg.toType == 2: ginfo = nadya.getGroup(to) try: nadya.sendMessage(to, "บอทออกเรียบร้อย(。-`ω´-)") nadya.leaveGroup(to) except: pass if "ดึง" in msg.text: if msg._from in admin: key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] targets = [] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: nadya.findAndAddContactsByMid(target) nadya.inviteIntoGroup(msg.to,[target]) nadya.sendMessage(msg.to,"เชิญเสร็จสิ้น(。-`ω´-)") except: pass #==============================================================================# if text.lower() == 'เชคค่า': try: ret_ = "╔════════════" if settings["autoAdd"] == True: ret_ += "\n║ ระบบแอดออโต้ ✔" else: ret_ += "\n║ ระบบแอดออโต้ ✘" if settings["autoJoin"] == True: ret_ += "\n║ ระบบเข้ากลุ่มออโต้ ✔" else: 
ret_ += "\n║ ระบบเข้ากลุ่มออโต้ ✘" if settings["autoLeave"] == True: ret_ += "\n║ ระบบออกกลุ่มออโต้ ✔" else: ret_ += "\n║ ระบบออกกลุ่มออโต้ ✘" if wait["acommentOn"] == True: ret_ += "\n║ ระบบตอนรับคนเข้ากลุ่ม ✔" else: ret_ += "\n║ ระบบตอนรับคนเข้ากลุ่ม ✘" if wait["bcommentOn"] == True: ret_ += "\n║ ระบบตอนรับคนออกกลุ่ม ✔" else: ret_ += "\n║ ระบบตอนรับคนออกกลุ่ม ✘" ret_ += "\n╚════════════" nadya.sendMessage(to, str(ret_)) except Exception as e: nadya.sendMessage(msg.to, str(e)) if text.lower() == 'youtube': sep = text.split(" ") search = text.replace(sep[0] + " ","") params = {"search_query": search} with requests.session() as web: web.headers["User-Agent"] = random.choice(settings["userAgent"]) r = web.get("https://www.youtube.com/results", params = params) soup = BeautifulSoup(r.content, "html.parser") ret_ = "╔══[ผลการค้นหา]" datas = [] for data in soup.select(".yt-lockup-title > a[title]"): if "&lists" not in data["href"]: datas.append(data) for data in datas: ret_ += "\n╠══[ {} ]".format(str(data["title"])) ret_ += "\n╠ https://www.youtube.com{}".format(str(data["href"])) ret_ += "\n╚══[ จำนวนที่ค้นพบ {} ]".format(len(datas)) nadya.sendMessage(to, str(ret_)) if "google " in msg.text.lower(): spl = re.split("google ",msg.text,flags=re.IGNORECASE) if spl[0] == "": if spl[1] != "": try: if msg.toType != 0: nadya.sendMessage(msg.to,"กำลังรับข้อมูล กรุณารอสักครู่..") else: nadya.sendMessage(msg.from_,"กำลังรับข้อมูล กรุณารอสักครู่..") resp = BeautifulSoup(requests.get("https://www.google.co.th/search",params={"q":spl[1],"gl":"th"}).content,"html.parser") text = "ผลการค้นหาจาก Google:\n\n" for el in resp.findAll("h3",attrs={"class":"r"}): try: tmp = el.a["class"] continue except: pass try: if el.a["href"].startswith("/search?q="): continue except: continue text += el.a.text+"\n" text += str(el.a["href"][7:]).split("&sa=U")[0]+"\n\n" text = text[:-2] if msg.toType != 0: nadya.sendMessage(msg.to,str(text)) else: nadya.sendMessage(msg.from_,str(text)) except Exception 
as e: if "ทีมงาน" == msg.text.lower(): msg.contentType = 13 nadya.sendMessage(to, "✍️ ᴛ⃢ᴇ⃢ᴀ⃢ᴍ⃢ 🔝ͲᎻᎬᖴ͙͛Ꮮ͙͛ᗩ͙͛ᔑ͙͛Ꮋ͙ ̾⚡") nadya.sendContact(to, "u07fb5496b409998a4f1f0af307d2c6e9") nadya.sendContact(to, "ua11927d673a2ae7bab9c737e4bd206d2") if msg.text in ["Me","me","คท",".คท","!me","!Me",".me",".Me"]: nadya.sendMessage(receiver, None, contentMetadata={'mid': sender}, contentType=13) if msg.text in ["Speed","speed","Sp","sp",".Sp",".sp",".Speed",".speed","\Sp","\sp","\speed","\Speed","สปีด",".สปีด"]: start = time.time() nadya.sendMessage(to, "การตอบสนองของบอท(。-`ω´-)") elapsed_time = time.time() - start nadya.sendMessage(msg.to, "[ %s Seconds ]\n[ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]") if msg.text in ["ออน",".ออน","\ออน",".uptime",".Uptime"]: timeNow = time.time() runtime = timeNow - botStart runtime = format_timespan(runtime) nadya.sendMessage(to, "ระยะเวลาการทำงานของบอท(。-`ω´-)\n{}".format(str(runtime))) if msg.text in ["Tag","tagall","แทค","แทก","Tagall","tag","แท็ค","แท็ก"]: group = nadya.getGroup(msg.to) nama = [contact.mid for contact in group.members] k = len(nama)//100 for a in range(k+1): txt = u'' s=0 b=[] for i in group.members[a*100 : (a+1)*100]: b.append({"S":str(s), "E" :str(s+6), "M":i.mid}) s += 7 txt += u'@Alin \n' nadya.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0) nadya.sendMessage(to, "จำนวนสมาชิก {} คน(。-`ω´-)".format(str(len(nama)))) if msg.text.lower().startswith("คท "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: contact = nadya.getContact(ls) mi_d = contact.mid nadya.sendContact(msg.to, mi_d) if msg.text.lower().startswith("มิด "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = 
re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) ret_ = "" for ls in lists: ret_ += ls nadya.sendMessage(msg.to, str(ret_)) if msg.text.lower().startswith("ชื่อ "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: contact = nadya.getContact(ls) nadya.sendMessage(msg.to, contact.displayName) if msg.text.lower().startswith("ตัส "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: contact = nadya.getContact(ls) nadya.sendMessage(msg.to, contact.statusMessage) if msg.text.lower().startswith("ดิส "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: path = "http://dl.profile.line-cdn.net/" + nadya.getContact(ls).pictureStatus nadya.sendImageWithURL(msg.to, str(path)) if msg.text.lower().startswith("ดิสวีดีโอ "): if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: path = "http://dl.profile.line-cdn.net/" + nadya.getContact(ls).pictureStatus + "/vp" 
nadya.sendImageWithURL(msg.to, str(path)) if msg.text.lower().startswith("ดิสปก "): if line != None: if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] lists = [] for mention in mentionees: if mention["M"] not in lists: lists.append(mention["M"]) for ls in lists: path = "http://dl.profile.line-cdn.net/" + nadya.getProfileCoverURL(ls) nadya.sendImageWithURL(msg.to, str(path)) if text.lower() == '!แทค': gs = nadya.getGroup(to) targets = [] for g in gs.members: if g.displayName in "": targets.append(g.mid) if targets == []: nadya.sendMessage(to, "ไม่มีคนใส่ชื่อร่องหน(。-`ω´-)") else: mc = "" for target in targets: mc += sendMessageWithMention(to,target) + "\n" nadya.sendMessage(to, mc) if text.lower() == '!มิด': gs = nadya.getGroup(to) lists = [] for g in gs.members: if g.displayName in "": lists.append(g.mid) if lists == []: nadya.sendMessage(to, "ไม่มีคนใส่ชื่อร่องหน(。-`ω´-)") else: mc = "" for mi_d in lists: mc += "->" + mi_d + "\n" nadya.sendMessage(to,mc) if text.lower() == '!คท': gs = nadya.getGroup(to) lists = [] for g in gs.members: if g.displayName in "": lists.append(g.mid) if lists == []: nadya.sendMessage(to, "ไม่มีคนใส่ชื่อร่องหน(。-`ω´-)") else: for ls in lists: contact = nadya.getContact(ls) mi_d = contact.mid nadya.sendContact(to, mi_d) #==============================================================================# if text.lower() == 'เชคแอด': group = nadya.getGroup(to) GS = group.creator.mid nadya.sendContact(to, GS) if text.lower() == 'ไอดีกลุ่ม': gid = nadya.getGroup(to) nadya.sendMessage(to, "\n" + gid.id) if text.lower() == 'รูปกลุ่ม': group = nadya.getGroup(to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus nadya.sendImageWithURL(to, path) if text.lower() == 'ชื่อกลุ่ม': gid = nadya.getGroup(to) nadya.sendMessage(to, "\n" + gid.name) if text.lower() == 'รายชื่อสมาชิก': if msg.toType == 2: group = 
nadya.getGroup(to) ret_ = "╔══[ รายชื่อสมชิกกลุ่ม ]" no = 0 + 1 for mem in group.members: ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName)) no += 1 ret_ += "\n╚══[ จำนวนสมาชิก {} คน(。-`ω´-) ]".format(str(len(group.members))) nadya.sendMessage(to, str(ret_)) if text.lower() == '.รายชื่อกลุ่ม': if msg._from in admin: groups = nadya.groups ret_ = "╔══[ รายชื่อกลุ่ม ]" no = 0 + 1 for gid in groups: group = nadya.getGroup(gid) ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members))) no += 1 ret_ += "\n╚══[ จำนวนกลุ่ม {} กลุ่ม(。-`ω´-)]".format(str(len(groups))) nadya.sendMessage(to, str(ret_)) if text.lower() == 'ขอลิ้งกลุ่ม': if msg.toType == 2: group = nadya.getGroup(to) if group.preventedJoinByTicket == False: ticket = nadya.reissueGroupTicket(to) nadya.sendMessage(to, "https://line.me/R/ti/g/{}".format(str(ticket))) else: nadya.sendMessage(to, "กรุณาเปิดลิ้งกลุ่มก่อนลงคำสั่งนี้ด้วยครับ(。-`ω´-)".format(str(settings["keyCommand"]))) if text.lower() == 'ลิ้งกลุ่ม on': if msg.toType == 2: group = nadya.getGroup(to) if group.preventedJoinByTicket == False: nadya.sendMessage(to, "ลิ้งกลุ่มเปิดอยู่แล้ว(。-`ω´-)") else: group.preventedJoinByTicket = False nadya.updateGroup(group) nadya.sendMessage(to, "เปิดลิ้งกลุ่มเรียบร้อย(。-`ω´-)") if text.lower() == 'ลิ้งกลุ่ม off': if msg.toType == 2: group = nadya.getGroup(to) if group.preventedJoinByTicket == True: nadya.sendMessage(to, "ลิ้งกลุ่มปิดอยู่แล้ว(。-`ω´-)") else: group.preventedJoinByTicket = True nadya.updateGroup(group) nadya.sendMessage(to, "ลิ้งกลุ่มปิดเรียบร้อย(。-`ω´-)") #=================THEFLASH====================================================# if text.lower() == '.แอดออโต้ on': if msg._from in admin: settings["autoAdd"] = True nadya.sendMessage(to, "เปิดระบบออโต้บล็อค(。-`ω´-)") if text.lower() == '.แอดออโต้ off': if msg._from in admin: settings["autoAdd"] = False nadya.sendMessage(to, "ปิดระบบออโต้บล็อค(。-`ω´-)") if text.lower() == '.เข้ากลุ่ม on': if msg._from in admin: 
settings["autoJoin"] = True nadya.sendMessage(to, "เปิดระบบเข้ากลุ่มออโต้(。-`ω´-)") if text.lower() == '.เข้ากลุ่ม off': if msg._from in admin: settings["autoJoin"] = False nadya.sendMessage(to, "ปิดระบบเข้ากลุ่มออโต้(。-`ω´-)") if text.lower() == '.ออกกลุ่ม on': if msg._from in admin: settings["autoLeave"] = True nadya.sendMessage(to, "เปิดระบบออกกลุ่มออโต้(。-`ω´-)") if text.lower() == '.ออกกลุ่ม off': if msg._from in admin: settings["autoLeave"] = False nadya.sendMessage(to, "ปิดระบบออกกลุ่มออโต้(。-`ω´-)") if msg.text.lower() == '.ตอนรับเข้า on': if msg._from in admin: wait['acommentOn'] = True nadya.sendMessage(msg.to,"เปิดระบบตอนรับสมาชิกเข้ากลุ่ม(。-`ω´-)") if msg.text.lower() == '.ตอนรับเข้า off': if msg._from in admin: wait['acommentOn'] = False nadya.sendMessage(msg.to,"ปิดระบบตอนรับสมาชิกเข้ากลุ่ม(。-`ω´-)") if msg.text.lower() == '.ตอนรับออก on': if msg._from in admin: wait["bcommentOn"] = True nadya.sendMessage(msg.to,"เปิดระบบตอนรับสมาชิกออกกลุ่ม(。-`ω´-)") if msg.text.lower() == '.ตอนรับออก off': if msg._from in admin: wait['bcommentOn'] = False nadya.sendMessage(msg.to,"ปิดระบบตอนรับสมาชิกออกกลุ่ม(。-`ω´-)") if ".ตั้งแอด: " in msg.text: if msg._from in admin: settings["addmsg"] = msg.text.replace(".ตั้งแอด: ","") nadya.sendMessage(msg.to,"ตั้งค่าข้อความแอดเสร็จสิ้น(。-`ω´-)") if ".ตั้งเข้า:" in msg.text.lower(): if msg._from in admin: c = msg.text.replace(".ตั้งเข้า:","") if c in [""," ","\n",None]: nadya.sendMessage(msg.to,"เกิดข้อผิดพลาด(。-`ω´-)") else: wait["acomment"] = c nadya.sendMessage(msg.to,"ตั้งค่าข้อความตอนรับเสร็จสิ้น(。-`ω´-)") if ".ตั้งออก:" in msg.text.lower(): if msg._from in admin: c = msg.text.replace(".ตั้งออก:","") if c in [""," ","\n",None]: nadya.sendMessage(msg.to,"เกิดข้อผิดพลาด(。-`ω´-)") else: wait["bcomment"] = c nadya.sendMessage(msg.to,"ตั้งค่าข้อความตอนรับออกเสร็จสิ้น(。-`ω´-)") if msg.text in [".เชคตั้งเข้า"]: if msg._from in admin: nadya.sendMessage(msg.to,"เช็คข้อความตอนรับล่าสุด(。-`ω´-)" + "\n\n➤" + str(wait["acomment"])) if 
msg.text in [".เชคตั้งออก"]: if msg._from in admin: nadya.sendMessage(msg.to,"เช็คข้อความตอนรับออกล่าสุด(。-`ω´-)" + "\n\n➤" + str(wait["bcomment"])) if msg.text in [".เชคตั้งแอด"]: if msg._from in admin: nadya.sendMessage(msg.to,"เช็คข้อความแอดล่าสุด(。-`ω´-)" + "\n\n➤" + str(settings["addmsg"])) #=================THEFLASH====================================================# #==============================================================================# if msg.text.lower().startswith("พูด "): sep = text.split(" ") say = text.replace(sep[0] + " ","") lang = 'th' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") nadya.sendAudio(msg.to,"hasil.mp3") #==============================================================================# if text.lower() == 'ตั้งเวลา': tz = pytz.timezone("Asia/Jakarta") timeNow = datetime.now(tz=tz) day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]" if msg.to in read['readPoint']: try: del read['readPoint'][msg.to] del read['readMember'][msg.to] del read['readTime'][msg.to] except: pass read['readPoint'][msg.to] = msg.id read['readMember'][msg.to] = "" read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S') read['ROM'][msg.to] = {} with open('read.json', 'w') as fp: json.dump(read, fp, sort_keys=True, indent=4) nadya.sendMessage(msg.to,"เปิดหาคนซุ่ม(。-`ω´-)") else: try: del read['readPoint'][msg.to] del read['readMember'][msg.to] del read['readTime'][msg.to] except: pass 
read['readPoint'][msg.to] = msg.id read['readMember'][msg.to] = "" read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S') read['ROM'][msg.to] = {} with open('read.json', 'w') as fp: json.dump(read, fp, sort_keys=True, indent=4) nadya.sendMessage(msg.to, "Set reading point:\n" + readTime) if text.lower() == 'อ่าน': tz = pytz.timezone("Asia/Jakarta") timeNow = datetime.now(tz=tz) day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] hr = timeNow.strftime("%A") bln = timeNow.strftime("%m") for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]" if receiver in read['readPoint']: if read["ROM"][receiver].items() == []: nadya.sendMessage(receiver,"[ Reader ]:\nNone") else: chiya = [] for rom in read["ROM"][receiver].items(): chiya.append(rom[1]) cmem = nadya.getContacts(chiya) zx = "" zxc = "" zx2 = [] xpesan = '[ Reader ]:\n' for x in range(len(cmem)): xname = str(cmem[x].displayName) pesan = '' pesan2 = pesan+"@c\n" xlen = str(len(zxc)+len(xpesan)) xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1) zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid} zx2.append(zx) zxc += pesan2 text = xpesan+ zxc + "\n[ Lurking time ]: \n" + readTime try: nadya.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0) except Exception as error: print (error) pass else: nadya.sendMessage(receiver,"Lurking has not been set.") if msg.contentType == 1: if msg._from in superadmin: if settings["changePictureProfile"] == True: path = nadya.downloadObjectMsg(msg_id) 
settings["changePictureProfile"] = False nadya.updateProfilePicture(path) nadya.sendMessage(to, "เปลี่ยนโปรไฟล์สำเร็จแล้ว(。-`ω´-)") if msg.contentType == 2: if msg._from in superadmin: if settings["changeGroupPicture"] == True: path = nadya.downloadObjectMsg(msg_id) settings["changeGroupPicture"] == False: nadya.updateGroupPicture(path) nadya.sendMessage(to, "Update group picture Succes") #==============================================================================# if op.type == 55: print ("[ 55 ] NOTIFIED READ MESSAGE") try: if op.param1 in read['readPoint']: if op.param2 in read['readMember'][op.param1]: pass else: read['readMember'][op.param1] += op.param2 read['ROM'][op.param1][op.param2] = op.param2 backupData() else: pass except: pass except Exception as error: logError(error)
def render(self, context):
    """Resolve the wrapped seconds expression against *context* and return it
    formatted as a human-readable timespan string."""
    resolved_seconds = self.seconds.resolve(context)
    return humanfriendly.format_timespan(resolved_seconds)
sample_pairs.add(pair) outf.write('\t'.join([df.loc[source]['arxiv_id'], df.loc[source]['mag_id'], df.loc[target]['arxiv_id'], df.loc[target]['mag_id']])) outf.write('\n') outf.close() if __name__ == "__main__": total_start = timer() logger = logging.getLogger(__name__) logger.info(" ".join(sys.argv)) logger.info( '{:%Y-%m-%d %H:%M:%S}'.format(datetime.now()) ) import argparse parser = argparse.ArgumentParser(description="get sample pairs by broad category", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("input", help="filename for input (TSV)") parser.add_argument("output", help="filename for output (TSV)") parser.add_argument("--sep", default='\t', help="delimiter for the input file (default: tab)") parser.add_argument("--category-colname", default='category_broad', help="column name for the category") parser.add_argument("--min-papers", type=int, default=500, help="categories with fewer than this many papers will be filtered out") parser.add_argument("--seed", type=int, default=99, help="random seed") parser.add_argument("--sample-size", type=int, default=500, help="number of sample pairs for each pair of categories") parser.add_argument("--debug", action='store_true', help="output debugging info") global args args = parser.parse_args() if args.debug: logger.setLevel(logging.DEBUG) logger.debug('debug mode is on') else: logger.setLevel(logging.INFO) main(args) total_end = timer() logger.info('all finished. total time: {}'.format(format_timespan(total_end-total_start)))
print("Ah! The old verbosaroo")
print("~~~~~~~~~~~~~~~~~~~~~~")
print("Using arguments:")
print(args)
print("Now getting work done.")
print("~~~~~~~~~~~~~~~~~~~~~~")

# argparse delivered --addtext as a string, not a boolean, so we compare
# against the literal "True".
if args.addtext == "True":
    out_filename = args.output_filename
    if not out_filename.endswith('.txt'):
        out_filename += '.txt'
    # Open both files via context managers so they are closed even if a
    # write fails partway through (the old code only closed the input file
    # on the success path). Iterating the file object directly also avoids
    # loading the whole input into memory with readlines().
    with open(args.filename) as f, open(out_filename, 'w') as g:
        for line in f:
            g.write(line)
        g.write('\n')  # This could be an argument, whether or not to add a newline.
        g.write(args.message)
else:
    print("Nothing shall be done.")

end_of_time = timer()
print("We are done! Wall time elapsed:",
      humanfriendly.format_timespan(end_of_time - beginning_of_time))
def smart_update(self, *args, **kw):
    """
    Update the system's package lists (switching mirrors if necessary).

    :param args: Command line arguments to ``apt-get update`` (zero or more strings).
    :param max_attempts: The maximum number of attempts at successfully updating
                         the system's package lists (an integer, defaults to 10).
    :param switch_mirrors: :data:`True` if we're allowed to switch mirrors on
                           'hash sum mismatch' errors, :data:`False` otherwise.
    :raises: If updating of the package lists fails 10 consecutive times
             (`max_attempts`) an exception is raised.

    While :func:`dumb_update()` simply runs ``apt-get update`` the
    :func:`smart_update()` function works quite differently:

    - First the system's package lists are updated using
      :func:`dumb_update()`. If this is successful we're done.
    - If the update fails we check the command's output for the phrase
      'hash sum mismatch'. If we find this phrase we assume that the
      current mirror is faulty and switch to another one.
    - Failing ``apt-get update`` runs are retried up to `max_attempts`.
    """
    # Initial delay between retries; doubled each failure up to 120 seconds,
    # then grown more slowly (see bottom of the loop).
    backoff_time = 10
    max_attempts = kw.get('max_attempts', 10)
    switch_mirrors = kw.get('switch_mirrors', True)
    for i in range(1, max_attempts + 1):
        with CaptureOutput() as session:
            try:
                self.dumb_update(*args)
                return
            except Exception:
                # Only analyze the failure if we still have attempts left.
                if i < max_attempts:
                    output = session.get_text()
                    # Check for EOL releases. This somewhat peculiar way of
                    # checking is meant to ignore 404 responses from
                    # `secondary package mirrors' like PPAs. If the output
                    # of `apt-get update' implies that the release is EOL
                    # we need to verify our assumption.
                    if any(self.current_mirror in line and u'404' in line.split() for line in output.splitlines()):
                        logger.warning("%s may be EOL, checking ..", self.release)
                        if self.release_is_archived:
                            if switch_mirrors:
                                logger.warning(
                                    "Switching to old releases mirror because %s is EOL and has been archived ..",
                                    self.release,
                                )
                                self.change_mirror(self.old_releases_url, update=False)
                                continue
                            else:
                                raise Exception(
                                    compact(
                                        """
                                        Failed to update package lists because it looks like
                                        the current release (%s) is end of life but I'm not
                                        allowed to switch mirrors! (there's no point in
                                        retrying so I'm not going to)
                                        """,
                                        self.distribution_codename))
                    # Check for `hash sum mismatch' errors.
                    if switch_mirrors and u'hash sum mismatch' in output.lower():
                        logger.warning(
                            "Detected 'hash sum mismatch' failure, switching to other mirror .."
                        )
                        self.ignore_mirror(self.current_mirror)
                        self.change_mirror(update=False)
                    else:
                        logger.warning(
                            "Retrying after `apt-get update' failed (%i/%i) ..", i,
                            max_attempts)
                        # Deal with unidentified (but hopefully transient) failures by retrying but backing off
                        # to give the environment (network connection, mirror state, etc.) time to stabilize.
                        logger.info(
                            "Sleeping for %s before retrying update ..",
                            format_timespan(backoff_time))
                        time.sleep(backoff_time)
                        if backoff_time <= 120:
                            backoff_time *= 2
                        else:
                            backoff_time += backoff_time / 3
    # All attempts exhausted without a successful update.
    raise Exception(
        "Failed to update package lists %i consecutive times?!" % max_attempts)
async def ping(self, ctx: commands.Context, region_name: str, *, message_for_ping):
    """Ping the unofficials role for *region_name* with *message_for_ping*.

    The ping is only sent when (a) the command was issued from the region's
    configured command channel, (b) the current local time falls inside the
    region's allowed ping window, and (c) the previous ping in the target
    channel is older than the region's cooldown. Otherwise the collected
    errors are reported back to the invoker.

    NOTE(review): times in config are keyed ``*_utc`` but are compared against
    the region's local timezone — confirm which convention the config uses.
    """
    for region in self.config.vet_ping_unofficials:
        # Make sure the command was run in an allowed channel by a permitted person
        if region["command_channel_id"] == ctx.channel.id and region[
                "region_name"] == region_name:
            ping_target_channel: discord.TextChannel
            ping_target_role: discord.Role
            ping_target_channel = ctx.guild.get_channel(
                region["ping_channel_id"])
            ping_target_role = ctx.guild.get_role(region["ping_role_id"])
            # Accumulates human-readable reasons why the ping cannot be sent.
            errors = []
            # Read region and times from config
            check_time = datetime.now(
                pytz.timezone(region["timezone_name"])).time()
            begin_time = time(region["start_ping_allow_utc"][0],
                              region["start_ping_allow_utc"][1])
            end_time = time(region["end_ping_allow_utc"][0],
                            region["end_ping_allow_utc"][1])
            # If we're not between time add it as an error with time until it's allowed
            if not is_time_between(check_time, begin_time, end_time):
                if begin_time > check_time:
                    # Window opens later today.
                    time_until_ping = datetime.combine(
                        date.min, begin_time) - datetime.combine(
                            date.min, check_time)
                    time_string = humanfriendly.format_timespan(
                        time_until_ping)
                    errors.append(
                        f"Pings are not allowed right now, you're allowed to ping in {time_string}"
                    )
                else:
                    # Window already passed today; next opening is tomorrow.
                    time_until_ping = datetime.combine(
                        date.min, begin_time) + timedelta(days=1) - datetime.combine(
                            date.min, check_time)
                    time_string = humanfriendly.format_timespan(
                        time_until_ping)
                    errors.append(
                        f"Pings are not allowed right now, you're allowed to ping in {time_string}"
                    )
            # Check if pings for the current region are on cooldown
            # NOTE(review): this walks the channel history newest-first with no
            # visible break — confirm an early exit isn't needed for busy channels.
            async for message_for_check in ping_target_channel.history(
                    oldest_first=False):
                # The message is relevant if it's from Echo and a previous unofficials ping
                if message_for_check.author == self.bot.user and self.embeds_contain_unofficials_ping(
                        message_for_check.embeds):
                    # The message is on cooldown if it's younger than the cooldown timer
                    if datetime.utcnow(
                    ) - message_for_check.created_at < timedelta(
                            seconds=region["ping_cooldown_seconds"]):
                        cooldown_time = timedelta(
                            seconds=region['ping_cooldown_seconds']) - (
                                datetime.utcnow() -
                                message_for_check.created_at)
                        cooldown_time_string = humanfriendly.format_timespan(
                            cooldown_time)
                        errors.append(
                            f"Pings for that region are on cooldown, try in {cooldown_time_string}"
                        )
            if len(errors) == 0:
                # All checks passed: acknowledge and send the actual ping.
                await ctx.message.add_reaction("👍")
                await send_ping(ping_target_channel, ping_target_role,
                                ctx.author, message_for_ping)
                self.log.warning(
                    f"{ctx.author.display_name} pinged unofficials in {region_name} with message {message_for_ping}"
                )
            else:
                await ctx.message.add_reaction("❌")
                await ctx.send("Got errors:\n - " + "\n - ".join(errors))
def load_and_run_detector(model_file, image_file_names, output_dir,
                          render_confidence_threshold=TFDetector.DEFAULT_RENDERING_CONFIDENCE_THRESHOLD,
                          crop_images=False):
    """Load and run detector on target images, and visualize the results.

    Args:
        model_file: str, path to the detector model to load
        image_file_names: list of str, images to run the detector on
        output_dir: str, flat folder that receives all rendered/cropped outputs
        render_confidence_threshold: float, minimum confidence for a box to be drawn
        crop_images: bool, if True save one cropped image per detection instead
            of rendering bounding boxes on the source image
    """
    if len(image_file_names) == 0:
        print('Warning: no files available')
        return

    # Load the model once up front; typically the slowest single step.
    start_time = time.time()
    tf_detector = TFDetector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

    detection_results = []
    time_load = []
    time_infer = []

    # Dictionary mapping output file names to a collision-avoidance count.
    #
    # Since we'll be writing a bunch of files to the same folder, we rename
    # as necessary to avoid collisions.
    output_filename_collision_counts = {}

    def input_file_to_detection_file(fn, crop_index=-1):
        """Creates unique file names for output files.

        This function does 3 things:
        1) If the --crop flag is used, then each input image may produce several
            output crops. For example, if foo.jpg has 3 detections, then this
            function should get called 3 times, with crop_index taking on
            0, 1, then 2. Each time, this function appends crop_index to the
            filename, resulting in
                foo_crop00_detections.jpg
                foo_crop01_detections.jpg
                foo_crop02_detections.jpg
        2) If the --recursive flag is used, then the same file (base)name may
            appear multiple times. However, we output into a single flat folder.
            To avoid filename collisions, we prepend an integer prefix to
            duplicate filenames:
                foo_crop00_detections.jpg
                0000_foo_crop00_detections.jpg
                0001_foo_crop00_detections.jpg
        3) Prepends the output directory:
                out_dir/foo_crop00_detections.jpg

        Args:
            fn: str, filename
            crop_index: int, crop number

        Returns:
            output file path
        """
        fn = os.path.basename(fn).lower()
        name, ext = os.path.splitext(fn)
        if crop_index >= 0:
            name += '_crop{:0>2d}'.format(crop_index)
        fn = '{}{}{}'.format(name, ImagePathUtils.DETECTION_FILENAME_INSERT, '.jpg')
        if fn in output_filename_collision_counts:
            n_collisions = output_filename_collision_counts[fn]
            # Bump the counter under the *original* name before renaming, so
            # each subsequent duplicate gets an increasing prefix. (The old
            # code incremented after renaming, which looked up the new,
            # never-inserted key and raised KeyError on the first collision.)
            output_filename_collision_counts[fn] = n_collisions + 1
            fn = '{:0>4d}'.format(n_collisions) + '_' + fn
        else:
            output_filename_collision_counts[fn] = 0
        fn = os.path.join(output_dir, fn)
        return fn

    for im_file in tqdm(image_file_names):

        try:
            start_time = time.time()
            image = viz_utils.load_image(im_file)
            elapsed = time.time() - start_time
            time_load.append(elapsed)
        except Exception as e:
            print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
            result = {
                'file': im_file,
                'failure': TFDetector.FAILURE_IMAGE_OPEN
            }
            detection_results.append(result)
            continue

        try:
            start_time = time.time()
            result = tf_detector.generate_detections_one_image(image, im_file)
            detection_results.append(result)
            elapsed = time.time() - start_time
            time_infer.append(elapsed)
        except Exception as e:
            print('An error occurred while running the detector on image {}. Exception: {}'.format(im_file, e))
            continue

        try:
            if crop_images:
                images_cropped = viz_utils.crop_image(result['detections'], image)
                for i_crop, cropped_image in enumerate(images_cropped):
                    output_full_path = input_file_to_detection_file(im_file, i_crop)
                    cropped_image.save(output_full_path)
            else:
                # only draw bounding boxes and save file if max confidence is > 0.80
                if result["max_detection_conf"] > 0.80:
                    # image is modified in place
                    viz_utils.render_detection_bounding_boxes(result['detections'], image,
                                                              label_map=TFDetector.DEFAULT_DETECTOR_LABEL_MAP,
                                                              confidence_threshold=render_confidence_threshold)
                    output_full_path = input_file_to_detection_file(im_file)
                    image.save(output_full_path)
        except Exception as e:
            print('Visualizing results on the image {} failed. Exception: {}'.format(im_file, e))
            continue

    # ...for each image

    # Guard against the case where no image loaded (or no inference succeeded):
    # statistics.mean() raises StatisticsError on an empty sequence.
    ave_time_load = statistics.mean(time_load) if time_load else 0.0
    ave_time_infer = statistics.mean(time_infer) if time_infer else 0.0

    if len(time_load) > 1 and len(time_infer) > 1:
        std_dev_time_load = humanfriendly.format_timespan(statistics.stdev(time_load))
        std_dev_time_infer = humanfriendly.format_timespan(statistics.stdev(time_infer))
    else:
        std_dev_time_load = 'not available'
        std_dev_time_infer = 'not available'

    print('On average, for each image,')
    print('- loading took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_load),
                                                    std_dev_time_load))
    print('- inference took {}, std dev is {}'.format(humanfriendly.format_timespan(ave_time_infer),
                                                      std_dev_time_infer))
def stop(self):
    """Record the stop time and derive the elapsed duration, both as raw
    seconds and as a human-readable string."""
    now = time.time()
    self.stop_time = now
    elapsed = now - self.start_time
    self.elapsed_time = elapsed
    self.elapsed_time_formatted = format_timespan(elapsed)
def repr_sections(g):
    """Print one summary line per section of *g*.

    NOTE(review): assumes ``g.sections`` yields ``(t, a, v)`` triples where
    ``t`` is a start timestamp, ``a`` is a coefficient and ``v`` is a
    summable sequence — confirm against the producer of ``sections``.
    """
    for t, a, v in g.sections:
        # The call form of print() works under both Python 2 and Python 3;
        # the original Python 2 print *statement* is a SyntaxError on Python 3,
        # which the rest of this file targets.
        print('{0} + {1}t at {2}'.format(
            a, sum(v), format_timespan(time.time() - t)))
def build():
    """Build the UE4 Docker images described by the command-line arguments.

    Parses a BuildConfiguration from ``sys.argv``, validates the host
    environment (Docker installed, daemon container mode matches the requested
    platform, Windows kernel/DLL/image-size checks), then drives an
    ImageBuilder over a temporary build context to produce the
    ue4-build-prerequisites, ue4-source, ue4-engine, ue4-minimal and ue4-full
    images. Exits the process with status 1 on any validation or build error.
    """
    # Create our logger to generate coloured output on stderr
    logger = Logger(prefix='[{} build] '.format(sys.argv[0]))

    # Register our supported command-line arguments
    parser = argparse.ArgumentParser(prog='{} build'.format(sys.argv[0]))
    BuildConfiguration.addArguments(parser)

    # If no command-line arguments were supplied, display the help message and exit
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(0)

    # Parse the supplied command-line arguments
    try:
        config = BuildConfiguration(parser, sys.argv[1:])
    except RuntimeError as e:
        logger.error('Error: {}'.format(e))
        sys.exit(1)

    # Verify that Docker is installed
    if DockerUtils.installed() == False:
        logger.error(
            'Error: could not detect Docker version. Please ensure Docker is installed.'
        )
        sys.exit(1)

    # Verify that we aren't trying to build Windows containers under Windows 10 when in Linux container mode (or vice versa)
    dockerPlatform = DockerUtils.info()['OSType'].lower()
    if config.containerPlatform == 'windows' and dockerPlatform == 'linux':
        logger.error(
            'Error: cannot build Windows containers when Docker Desktop is in Linux container',
            False)
        logger.error(
            'mode. Use the --linux flag if you want to build Linux containers instead.',
            False)
        sys.exit(1)
    elif config.containerPlatform == 'linux' and dockerPlatform == 'windows':
        logger.error(
            'Error: cannot build Linux containers when Docker Desktop is in Windows container',
            False)
        logger.error(
            'mode. Remove the --linux flag if you want to build Windows containers instead.',
            False)
        sys.exit(1)

    # Create an auto-deleting temporary directory to hold our build context
    with tempfile.TemporaryDirectory() as tempDir:

        # Copy our Dockerfiles to the temporary directory
        contextOrig = join(os.path.dirname(os.path.abspath(__file__)), 'dockerfiles')
        contextRoot = join(tempDir, 'dockerfiles')
        shutil.copytree(contextOrig, contextRoot)

        # Create the builder instance to build the Docker images
        builder = ImageBuilder(contextRoot, config.containerPlatform, logger,
                               config.rebuild, config.dryRun)

        # Resolve our main set of tags for the generated images
        mainTags = [
            '{}{}-{}'.format(config.release, config.suffix, config.prereqsTag),
            config.release + config.suffix
        ]

        # Print the command-line invocation that triggered this build, masking any supplied passwords
        args = [
            '*******' if config.args.password is not None
            and arg == config.args.password else arg for arg in sys.argv
        ]
        logger.info('COMMAND-LINE INVOCATION:', False)
        logger.info(str(args), False)

        # Print the details of the Unreal Engine version being built
        logger.info('UNREAL ENGINE VERSION SETTINGS:')
        logger.info(
            'Custom build: {}'.format('Yes' if config.custom == True else 'No'),
            False)
        if config.custom == True:
            logger.info('Custom name: ' + config.release, False)
        else:
            logger.info('Release: ' + config.release, False)
        logger.info('Repository: ' + config.repository, False)
        logger.info('Branch/tag: ' + config.branch + '\n', False)

        # Determine if we are using a custom version for ue4cli or conan-ue4cli
        if config.ue4cliVersion is not None or config.conanUe4cliVersion is not None:
            logger.info('CUSTOM PACKAGE VERSIONS:', False)
            logger.info(
                'ue4cli: {}'.format(
                    config.ue4cliVersion if config.
                    ue4cliVersion is not None else 'default'), False)
            logger.info(
                'conan-ue4cli: {}\n'.format(
                    config.conanUe4cliVersion if config.
                    conanUe4cliVersion is not None else 'default'), False)

        # Determine if we are building Windows or Linux containers
        if config.containerPlatform == 'windows':

            # Provide the user with feedback so they are aware of the Windows-specific values being used
            logger.info('WINDOWS CONTAINER SETTINGS', False)
            logger.info(
                'Isolation mode: {}'.format(config.isolation), False)
            logger.info(
                'Base OS image tag: {} (host OS is {})'.format(
                    config.basetag, WindowsUtils.systemStringShort()), False)
            logger.info(
                'Memory limit: {}'.format(
                    'No limit' if config.memLimit is None else '{:.2f}GB'.
                    format(config.memLimit)), False)
            logger.info(
                'Detected max image size: {:.0f}GB'.format(
                    DockerUtils.maxsize()), False)
            logger.info(
                'Directory to copy DLLs from: {}\n'.format(config.dlldir),
                False)

            # Verify that the specified base image tag is not a release that has reached End Of Life (EOL)
            if WindowsUtils.isEndOfLifeWindowsVersion(config.basetag) == True:
                logger.error(
                    'Error: detected EOL base OS image tag: {}'.format(
                        config.basetag), False)
                logger.error(
                    'This version of Windows has reached End Of Life (EOL), which means',
                    False)
                logger.error(
                    'Microsoft no longer supports or maintains container base images for it.',
                    False)
                logger.error(
                    'You will need to use a base image tag for a supported version of Windows.',
                    False)
                sys.exit(1)

            # Verify that the host OS is not a release that is blacklisted due to critical bugs
            if config.ignoreBlacklist == False and WindowsUtils.isBlacklistedWindowsVersion(
            ) == True:
                logger.error(
                    'Error: detected blacklisted host OS version: {}'.format(
                        WindowsUtils.systemStringShort()), False)
                logger.error('', False)
                logger.error(
                    'This version of Windows contains one or more critical bugs that',
                    False)
                logger.error(
                    'render it incapable of successfully building UE4 container images.',
                    False)
                logger.error(
                    'You will need to use an older or newer version of Windows.',
                    False)
                logger.error('', False)
                logger.error('For more information, see:', False)
                logger.error(
                    'https://unrealcontainers.com/docs/concepts/windows-containers',
                    False)
                sys.exit(1)

            # Verify that the user is not attempting to build images with a newer kernel version than the host OS
            if WindowsUtils.isNewerBaseTag(config.hostBasetag, config.basetag):
                logger.error(
                    'Error: cannot build container images with a newer kernel version than that of the host OS!'
                )
                sys.exit(1)

            # Check if the user is building a different kernel version to the host OS but is still copying DLLs from System32
            differentKernels = WindowsUtils.isInsiderPreview(
            ) or config.basetag != config.hostBasetag
            if config.pullPrerequisites == False and differentKernels == True and config.dlldir == config.defaultDllDir:
                logger.error(
                    'Error: building images with a different kernel version than the host,',
                    False)
                logger.error(
                    'but a custom DLL directory has not specified via the `-dlldir=DIR` arg.',
                    False)
                logger.error(
                    'The DLL files will be the incorrect version and the container OS will',
                    False)
                logger.error(
                    'refuse to load them, preventing the built Engine from running correctly.',
                    False)
                sys.exit(1)

            # Attempt to copy the required DLL files from the host system if we are building the prerequisites image
            if config.pullPrerequisites == False:
                for dll in WindowsUtils.requiredHostDlls(config.basetag):
                    shutil.copy2(
                        join(config.dlldir, dll),
                        join(builder.context('ue4-build-prerequisites'), dll))

            # Ensure the Docker daemon is configured correctly
            requiredLimit = WindowsUtils.requiredSizeLimit()
            if DockerUtils.maxsize() < requiredLimit:
                logger.error('SETUP REQUIRED:')
                logger.error(
                    'The max image size for Windows containers must be set to at least {}GB.'
                    .format(requiredLimit))
                logger.error(
                    'See the Microsoft documentation for configuration instructions:'
                )
                logger.error(
                    'https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container#step-4-expand-maximum-container-disk-size'
                )
                logger.error(
                    'Under Windows Server, the command `{} setup` can be used to automatically configure the system.'
                    .format(sys.argv[0]))
                sys.exit(1)

        elif config.containerPlatform == 'linux':

            # Determine if we are building CUDA-enabled container images
            capabilities = 'CUDA {} + OpenGL'.format(
                config.cuda) if config.cuda is not None else 'OpenGL'
            logger.info('LINUX CONTAINER SETTINGS', False)
            logger.info(
                'Building GPU-enabled images compatible with NVIDIA Docker ({} support).\n'
                .format(capabilities), False)

        # Report which Engine components are being excluded (if any)
        logger.info('GENERAL SETTINGS', False)
        if len(config.excludedComponents) > 0:
            logger.info('Excluding the following Engine components:', False)
            for component in config.describeExcludedComponents():
                logger.info('- {}'.format(component), False)
        else:
            logger.info('Not excluding any Engine components.', False)

        # Determine if we need to prompt for credentials
        if config.dryRun == True:

            # Don't bother prompting the user for any credentials during a dry run
            logger.info(
                'Performing a dry run, `docker build` commands will be printed and not executed.',
                False)
            username = ''
            password = ''

        elif builder.willBuild('ue4-source', mainTags) == False:

            # Don't bother prompting the user for any credentials if we're not building the ue4-source image
            logger.info(
                'Not building the ue4-source image, no Git credentials required.',
                False)
            username = ''
            password = ''

        else:

            # Retrieve the Git username and password from the user when building the ue4-source image
            print(
                '\nRetrieving the Git credentials that will be used to clone the UE4 repo'
            )
            username = _getUsername(config.args)
            password = _getPassword(config.args)
            print()

        # If resource monitoring has been enabled, start the resource monitoring background thread
        resourceMonitor = ResourceMonitor(logger, config.args.interval)
        if config.args.monitor == True:
            resourceMonitor.start()

        # Start the HTTP credential endpoint as a child process and wait for it to start
        endpoint = CredentialEndpoint(username, password)
        endpoint.start()

        try:

            # Keep track of our starting time
            startTime = time.time()

            # Compute the build options for the UE4 build prerequisites image
            # (This is the only image that does not use any user-supplied tag suffix, since the tag always reflects any customisations)
            prereqsArgs = ['--build-arg', 'BASEIMAGE=' + config.baseImage]
            if config.containerPlatform == 'windows':
                prereqsArgs = prereqsArgs + [
                    '--build-arg',
                    'HOST_VERSION=' + WindowsUtils.getWindowsBuild()
                ]

            # Build or pull the UE4 build prerequisites image
            if config.pullPrerequisites == True:
                builder.pull('adamrehn/ue4-build-prerequisites:{}'.format(
                    config.prereqsTag))
            else:
                builder.build('adamrehn/ue4-build-prerequisites',
                              [config.prereqsTag],
                              config.platformArgs + prereqsArgs)

            # Build the UE4 source image
            prereqConsumerArgs = [
                '--build-arg', 'PREREQS_TAG={}'.format(config.prereqsTag)
            ]
            ue4SourceArgs = prereqConsumerArgs + [
                '--build-arg', 'GIT_REPO={}'.format(config.repository),
                '--build-arg', 'GIT_BRANCH={}'.format(
                    config.branch), '--build-arg', 'VERBOSE_OUTPUT={}'.format(
                        '1' if config.verbose == True else '0')
            ]
            builder.build(
                'ue4-source', mainTags,
                config.platformArgs + ue4SourceArgs + endpoint.args())

            # Build the UE4 Engine source build image, unless requested otherwise by the user
            ue4BuildArgs = prereqConsumerArgs + [
                '--build-arg', 'TAG={}'.format(mainTags[1]), '--build-arg',
                'NAMESPACE={}'.format(GlobalConfiguration.getTagNamespace())
            ]
            if config.noEngine == False:
                builder.build('ue4-engine', mainTags,
                              config.platformArgs + ue4BuildArgs)
            else:
                logger.info(
                    'User specified `--no-engine`, skipping ue4-engine image build.'
                )

            # Build the minimal UE4 CI image, unless requested otherwise by the user
            buildUe4Minimal = config.noMinimal == False
            if buildUe4Minimal == True:
                builder.build(
                    'ue4-minimal', mainTags, config.platformArgs +
                    config.exclusionFlags + ue4BuildArgs)
            else:
                logger.info(
                    'User specified `--no-minimal`, skipping ue4-minimal image build.'
                )

            # Build the full UE4 CI image, unless requested otherwise by the user
            buildUe4Full = buildUe4Minimal == True and config.noFull == False
            if buildUe4Full == True:

                # If custom version strings were specified for ue4cli and/or conan-ue4cli, use them
                infrastructureFlags = []
                if config.ue4cliVersion is not None:
                    infrastructureFlags.extend([
                        '--build-arg',
                        'UE4CLI_VERSION={}'.format(config.ue4cliVersion)
                    ])
                if config.conanUe4cliVersion is not None:
                    infrastructureFlags.extend([
                        '--build-arg', 'CONAN_UE4CLI_VERSION={}'.format(
                            config.conanUe4cliVersion)
                    ])

                # Build the image
                builder.build(
                    'ue4-full', mainTags,
                    config.platformArgs + ue4BuildArgs + infrastructureFlags)
            else:
                logger.info(
                    'Not building ue4-minimal or user specified `--no-full`, skipping ue4-full image build.'
                )

            # Report the total execution time
            endTime = time.time()
            logger.action('Total execution time: {}'.format(
                humanfriendly.format_timespan(endTime - startTime)))

            # Stop the resource monitoring background thread if it is running
            resourceMonitor.stop()

            # Stop the HTTP server
            endpoint.stop()

        except (Exception, KeyboardInterrupt) as e:

            # One of the images failed to build
            logger.error('Error: {}'.format(e))
            resourceMonitor.stop()
            endpoint.stop()
            sys.exit(1)
def print_formatted_timespan(value):
    """Write a human-readable rendering of *value* (seconds) to stdout.

    :param value: A number of seconds; anything accepted by ``float()``.
    """
    # Coerce first so string inputs (e.g. from a CLI) are accepted too.
    seconds = float(value)
    print(format_timespan(seconds))
def lineBot(op):
    """Handle one LINE operation event, dispatching on ``op.type``.

    Observed handlers: 0 end-of-operation, 5 contact-added (auto block/add),
    13 group invite (auto join), 15 member left (send-off message), 17 member
    joined (welcome message), 24 leave room, 25 message sent, 26 message
    received (the main chat-command dispatcher), 55 read receipt (lurker
    tracking).  Relies on module-level globals: ``nadya`` (LINE client),
    ``settings``, ``wait``, ``read``, ``admin``, ``botStart``.  Any uncaught
    error is routed to ``logError()``.
    """
    try:
        if op.type == 0:
            print("[ 0 ] END OF OPERATION")
            return
        if op.type == 5:
            print("[ 5 ] NOTIFIED AUTOBLOCK")
            if settings['autoBlock'] == True:
                nadya.blockContact(op.param1)
        # NOTE(review): this block also tests op.type == 5; the auto-add
        # handler probably intends a different operation type — confirm.
        if op.type == 5:
            print("[ 5 ] NOTIFIED AUTOADD")
            if settings["autoAdd"] == True:
                nadya.sendMessage(
                    op.param1,
                    "สวัสดีครับ {} ขอบคุณที่แอดเข้ามานะครับ(。-`ω´-)".format(
                        str(nadya.getContact(op.param1).displayName)))
        if op.type == 13:
            print("[ 13 ] NOTIFIED INVITE GROUP")
            group = nadya.getGroup(op.param1)
            if settings["autoJoin"] == True:
                nadya.acceptGroupInvitation(op.param1)
        # Member left the group: post the configured send-off comment.
        if op.type == 15:
            if wait["bcommentOn"] and "bcomment" in wait:
                h = nadya.getContact(op.param2)
                nadya.sendMessage(
                    op.param1,
                    nadya.getContact(op.param2).displayName + "\n\n" +
                    str(wait["bcomment"]))
                nadya.sendImageWithUrl(
                    op.param1,
                    "http://dl.profile.line-cdn.net/" + h.pictureStatus)
        # Member joined the group: post the configured welcome comment.
        if op.type == 17:
            if wait["acommentOn"] and "acomment" in wait:
                cnt = nadya.getContact(op.param2)
                h = nadya.getContact(op.param2)
                nadya.sendMessage(
                    op.param1,
                    cnt.displayName + "\n\n" + str(wait["acomment"]))
                nadya.sendImageWithUrl(
                    op.param1,
                    "http://dl.profile.line-cdn.net/" + h.pictureStatus)
        if op.type == 24:
            print("[ 24 ] NOTIFIED LEAVE ROOM")
            if settings["autoLeave"] == True:
                nadya.leaveRoom(op.param1)
        if op.type == 25:
            print("[ 25 ] SEND MESSAGE")
            msg = op.message
            text = msg.text
            msg_id = msg.id
            receiver = msg.to
            sender = msg._from
            # For 1:1 chats, reply to the peer rather than to ourselves.
            if msg.toType == 0:
                if sender != nadya.profile.mid:
                    to = sender
                else:
                    to = receiver
            else:
                to = receiver
            if msg.contentType == 0:
                if text is None:
                    return
        #==============================================================================#
        #==============================================================================#
        if op.type == 26:
            print("[ 26 ] RECEIVE MESSAGE")
            msg = op.message
            text = msg.text
            msg_id = msg.id
            receiver = msg.to
            sender = msg._from
            if msg.toType == 0:
                if sender != nadya.profile.mid:
                    to = sender
                else:
                    to = receiver
            else:
                to = receiver
            if settings["autoRead"] == True:
                nadya.sendChatChecked(to, msg_id)
            # Lurker tracking: remember who has spoken past the read point.
            if to in read["readPoint"]:
                if sender not in read["ROM"][to]:
                    read["ROM"][to][sender] = True
            # Mimic mode: echo targeted users' messages back to the chat.
            if sender in settings["mimic"]["target"] and settings["mimic"][
                    "status"] == True and settings["mimic"]["target"][
                        sender] == True:
                text = msg.text
                if text is not None:
                    nadya.sendMessage(msg.to, text)
            #==============================================================================#
            if msg.text in ["คำสั่ง", "Help", "help"]:
                helpMessage = helpmessage()
                nadya.sendMessage(to, str(helpMessage))
            if text.lower() == 'คำสั่ง2':
                if msg._from in admin:
                    helpTextToSpeech = helptexttospeech()
                    nadya.sendMessage(to, str(helpTextToSpeech))
            #==============================================================================#
            # Admin-only: change the bot's display name.
            if ".ชื่อ: " in msg.text.lower():
                if msg._from in admin:
                    spl = re.split(".ชื่อ: ", msg.text, flags=re.IGNORECASE)
                    if spl[0] == "":
                        prof = nadya.getProfile()
                        prof.displayName = spl[1]
                        nadya.updateProfile(prof)
                        nadya.sendMessage(to, "เปลี่ยนชื่อสำเร็จแล้ว(。-`ω´-)")
            # Admin-only: change the bot's status message.
            if ".ตัส: " in msg.text.lower():
                if msg._from in admin:
                    spl = re.split(".ตัส: ", msg.text, flags=re.IGNORECASE)
                    if spl[0] == "":
                        prof = nadya.getProfile()
                        prof.statusMessage = spl[1]
                        nadya.updateProfile(prof)
                        nadya.sendMessage(to, "เปลี่ยนตัสสำเร็จแล้ว(。-`ω´-)")
            # Admin-only: arm profile-picture change; next image sets it.
            if text.lower() == ".เปลี่ยนรูป":
                if msg._from in admin:
                    settings["changePictureProfile"] = True
                    nadya.sendMessage(to, "ส่งรูปมา(。-`ω´-)")
            #==============================================================================#
            # Admin-only: kick every mentioned member from the group.
            if msg.text.lower().startswith(".เตะ "):
                if msg._from in admin:
                    targets = []
                    # NOTE(review): eval() on message metadata is dangerous on
                    # untrusted input — ast.literal_eval is used elsewhere in
                    # this function for the same payload; consider aligning.
                    key = eval(msg.contentMetadata["MENTION"])
                    key["MENTIONEES"][0]["M"]  # no-op probe; raises if empty
                    for x in key["MENTIONEES"]:
                        targets.append(x["M"])
                    for target in targets:
                        try:
                            nadya.kickoutFromGroup(msg.to, [target])
                        except:
                            nadya.sendText(msg.to, "Error")
            # Report current on/off state of the auto features.
            if text.lower() == 'เชคค่า':
                try:
                    ret_ = "╔════════════"
                    if settings["autoJoin"] == True:
                        ret_ += "\n║ ระบบเข้ากลุ่มออโต้ ✔"
                    else:
                        ret_ += "\n║ ระบบเข้ากลุ่มออโต้ ✘"
                    if settings["autoLeave"] == True:
                        ret_ += "\n║ ระบบออกกลุ่มออโต้ ✔"
                    else:
                        ret_ += "\n║ ระบบออกกลุ่มออโต้ ✘"
                    if wait["acommentOn"] == True:
                        ret_ += "\n║ ระบบตอนรับคนเข้ากลุ่ม ✔"
                    else:
                        ret_ += "\n║ ระบบตอนรับคนเข้ากลุ่ม ✘"
                    if wait["bcommentOn"] == True:
                        ret_ += "\n║ ระบบตอนรับคนออกกลุ่ม ✔"
                    else:
                        ret_ += "\n║ ระบบตอนรับคนออกกลุ่ม ✘"
                    ret_ += "\n╚════════════"
                    nadya.sendMessage(to, str(ret_))
                except Exception as e:
                    nadya.sendMessage(msg.to, str(e))
            # Admin-only: cancel all pending group invitations.
            if ".ยกเลิก" == msg.text.lower():
                if msg._from in admin:
                    if msg.toType == 2:
                        group = nadya.getGroup(msg.to)
                        gMembMids = [
                            contact.mid for contact in group.invitee
                        ]
                        for _mid in gMembMids:
                            nadya.cancelGroupInvitation(msg.to, [_mid])
                        nadya.sendMessage(
                            to, "ยกเลิกค้างเชิญเสร็จสิ้น(。-`ω´-)")
            # Google search: scrape result titles/links and send them back.
            if "google " in msg.text.lower():
                spl = re.split("google ", msg.text, flags=re.IGNORECASE)
                if spl[0] == "":
                    if spl[1] != "":
                        try:
                            if msg.toType != 0:
                                nadya.sendMessage(
                                    msg.to, "กำลังรับข้อมูล กรุณารอสักครู่..")
                            else:
                                # NOTE(review): 'msg.from_' differs from the
                                # 'msg._from' attribute used everywhere else —
                                # confirm this attribute exists.
                                nadya.sendMessage(
                                    msg.from_, "กำลังรับข้อมูล กรุณารอสักครู่..")
                            resp = BeautifulSoup(
                                requests.get(
                                    "https://www.google.co.th/search",
                                    params={
                                        "q": spl[1],
                                        "gl": "th"
                                    }).content, "html.parser")
                            text = "ผลการค้นหาจาก Google:\n\n"
                            for el in resp.findAll("h3", attrs={"class": "r"}):
                                try:
                                    tmp = el.a["class"]
                                    continue
                                except:
                                    pass
                                try:
                                    if el.a["href"].startswith(
                                            "/search?q="):
                                        continue
                                except:
                                    continue
                                text += el.a.text + "\n"
                                text += str(el.a["href"][7:]).split(
                                    "&sa=U")[0] + "\n\n"
                            text = text[:-2]
                            if msg.toType != 0:
                                nadya.sendMessage(msg.to, str(text))
                            else:
                                nadya.sendMessage(msg.from_, str(text))
                        except Exception as e:
                            print(e)
            # Admin-only: reject every pending group invitation ("unspam").
            if text.lower() == '.ลบรัน':
                if msg._from in admin:
                    gid = nadya.getGroupIdsInvited()
                    start = time.time()
                    for i in gid:
                        nadya.rejectGroupInvitation(i)
                    elapsed_time = time.time() - start
                    nadya.sendMessage(to, "「 Starting 」")
                    nadya.sendMessage(
                        to, "• TakeTime: %ssecond" % (elapsed_time))
            # Admin-only: spam group-call invites N times.
            if ".โทร" in msg.text.lower():
                if msg._from in admin:
                    if msg.toType == 2:
                        sep = text.split(" ")
                        strnum = text.replace(sep[0] + " ", "")
                        num = int(strnum)
                        nadya.sendMessage(to, "「 Callspam 」\n• Success ")
                        for var in range(0, num):
                            group = nadya.getGroup(to)
                            members = [mem.mid for mem in group.members]
                            nadya.acquireGroupCallRoute(to)
                            nadya.inviteIntoGroupCall(to, contactIds=members)
            # Admin-only: leave the current group.
            if text.lower() == '.ออก':
                if msg._from in admin:
                    if msg.toType == 2:
                        ginfo = nadya.getGroup(to)
                        try:
                            nadya.sendMessage(to, "「 Leave」\n• Success ")
                            nadya.leaveGroup(to)
                        except:
                            pass
            # Send the hard-coded team banner and contacts.
            if "ทีมงาน" == msg.text.lower():
                msg.contentType = 13
                nadya.sendMessage(
                    to, "✍️ ᴛ⃢ᴇ⃢ᴀ⃢ᴍ⃢ 🔝ͲᎻᎬᖴ͙͛Ꮮ͙͛ᗩ͙͛ᔑ͙͛Ꮋ͙ ̾⚡")
                nadya.sendContact(to, "ue9781edcd4eecd9abfd6e50fc3ea95b1")
                nadya.sendContact(to, "uad73a8119f2accea9e8f39e39291ac9a")
            # Round-trip latency check.
            if msg.text in [
                    "Speed", "speed", "Sp", "sp", ".Sp", ".sp", ".Speed",
                    ".speed", "\Sp", "\sp", "\speed", "\Speed", "สปีด"
            ]:
                start = time.time()
                nadya.sendMessage(to, "「 Speed Test」")
                elapsed_time = time.time() - start
                nadya.sendMessage(
                    msg.to, "[ %s Seconds ]\n[ " % (elapsed_time) +
                    str(int(round((time.time() - start) * 1000))) + " ms ]")
            # Bot uptime since module-level botStart timestamp.
            if msg.text in ["ออน", ".ออน", "\ออน", ".uptime", ".Uptime"]:
                timeNow = time.time()
                runtime = timeNow - botStart
                runtime = format_timespan(runtime)
                nadya.sendMessage(to, "「 Online 」\n•{}".format(str(runtime)))
            # Send the requester's own profile card.
            if msg.text in ["คท", ".คท", "Me", "me", ".Me", ".me"]:
                nadya.sendMentionFooter(
                    to, '「 MY SELF 」\n•', sender,
                    "https://line.me/ti/p/~hietocih",
                    "http://dl.profile.line-cdn.net/" +
                    nadya.getContact(sender).pictureStatus,
                    nadya.getContact(sender).displayName)
                nadya.sendMessage(
                    to,
                    nadya.getContact(sender).displayName,
                    contentMetadata={
                        'previewUrl':
                        'http://dl.profile.line-cdn.net/' +
                        nadya.getContact(sender).pictureStatus,
                        'i-installUrl':
                        'https://line.me/ti/p/~hietocih',
                        'type': 'mt',
                        'subText': "BOT THE FLASH",
                        # NOTE(review): duplicate 'a-installUrl' key below —
                        # the second value silently wins.
                        'a-installUrl': 'https://line.me/ti/p/~hietocih',
                        'a-installUrl': ' https://line.me/ti/p/~hietocih',
                        'a-packageName': 'com.spotify.music',
                        'countryCode': 'ID',
                        'a-linkUri': 'https://line.me/ti/p/~hietocih',
                        'i-linkUri': 'https://line.me/ti/p/~hietocih',
                        'id': 'mt000000000a6b79f9',
                        'text': 'THEFLASH',
                        'linkUri': 'https://line.me/ti/p/~hietocih'
                    },
                    contentType=19)
            if msg.text in ["ไอดี", "Mid", "mid", "MID", "มิด"]:
                nadya.sendMessage(msg.to, "「 MY ID 」\n•" + sender)
            # NOTE(review): getGroup() is called with a user mid in the next
            # three handlers — presumably the client resolves contacts too;
            # verify.
            if msg.text in ["ชื่อ", "เนม"]:
                me = nadya.getGroup(sender)
                nadya.sendMessage(msg.to, "「 MY NAME 」\n•" + me.displayName)
            if msg.text in ["ตัส", "สถานะ"]:
                me = nadya.getGroup(sender)
                nadya.sendMessage(msg.to,
                                  "「 MY STATUS 」\n•" + me.statusMessage)
            if msg.text in ["รูป", "ดิส", "โปรไฟล์"]:
                me = nadya.getGroup(sender)
                nadya.sendImageWithURL(
                    msg.to,
                    "http://dl.profile.line-cdn.net/" + me.pictureStatus)
            # Tag-all: mention every group member in batches of 20.
            if msg.text in ["Tag", "tagall", "แทค", "แทก", "Tagall", "tag"]:
                group = nadya.getGroup(msg.to)
                nama = [contact.mid for contact in group.members]
                k = len(nama) // 20
                for a in range(k + 1):
                    txt = u''
                    s = 0
                    b = []
                    for i in group.members[a * 20:(a + 1) * 20]:
                        # Offsets track the 7-char '@Alin \n' placeholder.
                        b.append({
                            "S": str(s),
                            "E": str(s + 6),
                            "M": i.mid
                        })
                        s += 7
                        txt += u'@Alin \n'
                    nadya.sendMessage(to,
                                      text=txt,
                                      contentMetadata={
                                          u'MENTION':
                                          json.dumps({'MENTIONEES': b})
                                      },
                                      contentType=0)
                nadya.sendMessage(
                    to, "จำนวนสมาชิก {} คน(。-`ω´-)".format(str(len(nama))))
            #==============================================================================#
            # Dump full info (contact, id, name, status, pictures) for each
            # mentioned user.
            if msg.text.lower().startswith("ข้อมูล "):
                if 'MENTION' in msg.contentMetadata.keys() != None:
                    names = re.findall(r'@(\w+)', text)
                    mention = ast.literal_eval(
                        msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    lists = []
                    for mention in mentionees:
                        if mention["M"] not in lists:
                            lists.append(mention["M"])
                    for ls in lists:
                        contact = nadya.getContact(ls)
                        mi_d = contact.mid
                        nadya.sendContact(msg.to, mi_d)
                    ret_ = "「 MY ID 」 "
                    for ls in lists:
                        ret_ += "\n•" + ls
                    nadya.sendMessage(msg.to, str(ret_))
                    # NOTE(review): 'contact' and 'ls' here are the loop
                    # leftovers — only the last mentioned user is reported.
                    nadya.sendMessage(
                        msg.to, "「 MY NAME 」\n•" + contact.displayName)
                    nadya.sendMessage(
                        msg.to, "「 MY STATUS 」\n•" + contact.statusMessage)
                    path = "http://dl.profile.line-cdn.net/" + nadya.getContact(
                        ls).pictureStatus
                    nadya.sendImageWithURL(msg.to, str(path))
                    path = "http://dl.profile.line-cdn.net/" + nadya.getProfileCoverURL(
                        ls)
                    nadya.sendImageWithURL(msg.to, str(path))
            #==============================================================================#
            # Send contact card for each mentioned user.
            if msg.text.lower().startswith("คท "):
                if 'MENTION' in msg.contentMetadata.keys() != None:
                    names = re.findall(r'@(\w+)', text)
                    mention = ast.literal_eval(
                        msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    lists = []
                    for mention in mentionees:
                        if mention["M"] not in lists:
                            lists.append(mention["M"])
                    for ls in lists:
                        contact = nadya.getContact(ls)
                        mi_d = contact.mid
                        nadya.sendContact(msg.to, mi_d)
            # Send raw mid for each mentioned user.
            if msg.text.lower().startswith("มิด "):
                if 'MENTION' in msg.contentMetadata.keys() != None:
                    names = re.findall(r'@(\w+)', text)
                    mention = ast.literal_eval(
                        msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    lists = []
                    for mention in mentionees:
                        if mention["M"] not in lists:
                            lists.append(mention["M"])
                    ret_ = ""
                    for ls in lists:
                        ret_ += ls
                    nadya.sendMessage(msg.to, str(ret_))
            # Send display name for each mentioned user.
            if msg.text.lower().startswith("ชื่อ "):
                if 'MENTION' in msg.contentMetadata.keys() != None:
                    names = re.findall(r'@(\w+)', text)
                    mention = ast.literal_eval(
                        msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    lists = []
                    for mention in mentionees:
                        if mention["M"] not in lists:
                            lists.append(mention["M"])
                    for ls in lists:
                        contact = nadya.getContact(ls)
                        nadya.sendMessage(msg.to, contact.displayName)
            # Send status message for each mentioned user.
            if msg.text.lower().startswith("ตัส "):
                if 'MENTION' in msg.contentMetadata.keys() != None:
                    names = re.findall(r'@(\w+)', text)
                    mention = ast.literal_eval(
                        msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    lists = []
                    for mention in mentionees:
                        if mention["M"] not in lists:
                            lists.append(mention["M"])
                    for ls in lists:
                        contact = nadya.getContact(ls)
                        nadya.sendMessage(msg.to, contact.statusMessage)
            # Send profile picture for each mentioned user.
            if msg.text.lower().startswith("ดิส "):
                if 'MENTION' in msg.contentMetadata.keys() != None:
                    names = re.findall(r'@(\w+)', text)
                    mention = ast.literal_eval(
                        msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    lists = []
                    for mention in mentionees:
                        if mention["M"] not in lists:
                            lists.append(mention["M"])
                    for ls in lists:
                        path = "http://dl.profile.line-cdn.net/" + nadya.getContact(
                            ls).pictureStatus
                        nadya.sendImageWithURL(msg.to, str(path))
            # Send profile video ("/vp" variant) for each mentioned user.
            if msg.text.lower().startswith("ดิสวีดีโอ "):
                if 'MENTION' in msg.contentMetadata.keys() != None:
                    names = re.findall(r'@(\w+)', text)
                    mention = ast.literal_eval(
                        msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    lists = []
                    for mention in mentionees:
                        if mention["M"] not in lists:
                            lists.append(mention["M"])
                    for ls in lists:
                        path = "http://dl.profile.line-cdn.net/" + nadya.getContact(
                            ls).pictureStatus + "/vp"
                        nadya.sendImageWithURL(msg.to, str(path))
            # Send cover photo for each mentioned user.
            if msg.text.lower().startswith("ดิสปก "):
                # NOTE(review): 'line' is not defined in this function —
                # presumably a module-level client alias; verify it exists.
                if line != None:
                    if 'MENTION' in msg.contentMetadata.keys() != None:
                        names = re.findall(r'@(\w+)', text)
                        mention = ast.literal_eval(
                            msg.contentMetadata['MENTION'])
                        mentionees = mention['MENTIONEES']
                        lists = []
                        for mention in mentionees:
                            if mention["M"] not in lists:
                                lists.append(mention["M"])
                        for ls in lists:
                            path = "http://dl.profile.line-cdn.net/" + nadya.getProfileCoverURL(
                                ls)
                            nadya.sendImageWithURL(msg.to, str(path))
            # Tag members whose display name is the empty string.
            if text.lower() == '!แทค':
                gs = nadya.getGroup(to)
                targets = []
                for g in gs.members:
                    if g.displayName in "":
                        targets.append(g.mid)
                if targets == []:
                    nadya.sendMessage(to, "ไม่มีคนใส่ชื่อร่องหน(。-`ω´-)")
                else:
                    mc = ""
                    for target in targets:
                        mc += sendMessageWithMention(to, target) + "\n"
                    nadya.sendMessage(to, mc)
            # List mids of members whose display name is empty.
            if text.lower() == '!มิด':
                gs = nadya.getGroup(to)
                lists = []
                for g in gs.members:
                    if g.displayName in "":
                        lists.append(g.mid)
                if lists == []:
                    nadya.sendMessage(to, "ไม่มีคนใส่ชื่อร่องหน(。-`ω´-)")
                else:
                    mc = ""
                    for mi_d in lists:
                        mc += "->" + mi_d + "\n"
                    nadya.sendMessage(to, mc)
            # Send contact cards of members whose display name is empty.
            if text.lower() == '!คท':
                gs = nadya.getGroup(to)
                lists = []
                for g in gs.members:
                    if g.displayName in "":
                        lists.append(g.mid)
                if lists == []:
                    nadya.sendMessage(to, "ไม่มีคนใส่ชื่อร่องหน(。-`ω´-)")
                else:
                    for ls in lists:
                        contact = nadya.getContact(ls)
                        mi_d = contact.mid
                        nadya.sendContact(to, mi_d)
            # Share the group invite link (if ticket joining is enabled).
            if text.lower() == 'ขอลิ้งกลุ่ม':
                if msg.toType == 2:
                    group = nadya.getGroup(to)
                    if group.preventedJoinByTicket == False:
                        ticket = nadya.reissueGroupTicket(to)
                        nadya.sendMessage(
                            to, "https://line.me/R/ti/g/{}".format(
                                str(ticket)))
                    else:
                        nadya.sendMessage(
                            to,
                            "กรุณาเปิดลิ้งกลุ่มก่อนลงคำสั่งนี้ด้วยครับ(。-`ω´-)"
                            .format(str(settings["keyCommand"])))
            if text.lower() == 'ลิ้งกลุ่ม on':
                if msg.toType == 2:
                    group = nadya.getGroup(to)
                    if group.preventedJoinByTicket == False:
                        nadya.sendMessage(to, "ลิ้งกลุ่มเปิดอยู่แล้ว(。-`ω´-)")
                    else:
                        group.preventedJoinByTicket = False
                        nadya.updateGroup(group)
                        nadya.sendMessage(to, "เปิดลิ้งกลุ่มเรียบร้อย(。-`ω´-)")
            if text.lower() == 'ลิ้งกลุ่ม off':
                if msg.toType == 2:
                    group = nadya.getGroup(to)
                    if group.preventedJoinByTicket == True:
                        nadya.sendMessage(to, "ลิ้งกลุ่มปิดอยู่แล้ว(。-`ω´-)")
                    else:
                        group.preventedJoinByTicket = True
                        nadya.updateGroup(group)
                        nadya.sendMessage(to, "ลิ้งกลุ่มปิดเรียบร้อย(。-`ω´-)")
            #==============================================================================#
            # Group info queries.
            if text.lower() == 'เชคแอด':
                group = nadya.getGroup(to)
                GS = group.creator.mid
                nadya.sendContact(to, GS)
            if text.lower() == 'ไอดีกลุ่ม':
                gid = nadya.getGroup(to)
                nadya.sendMessage(to, "\n" + gid.id)
            if text.lower() == 'รูปกลุ่ม':
                group = nadya.getGroup(to)
                path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
                nadya.sendImageWithURL(to, path)
            if text.lower() == 'ชื่อกลุ่ม':
                gid = nadya.getGroup(to)
                nadya.sendMessage(to, "\n" + gid.name)
            if text.lower() == 'รายชื่อสมาชิก':
                if msg.toType == 2:
                    group = nadya.getGroup(to)
                    ret_ = "╔══[ รายชื่อสมชิกกลุ่ม ]"
                    no = 0 + 1
                    for mem in group.members:
                        ret_ += "\n╠ {}. {}".format(
                            str(no), str(mem.displayName))
                        no += 1
                    ret_ += "\n╚══[ จำนวนสมาชิก {} คน(。-`ω´-) ]".format(
                        str(len(group.members)))
                    nadya.sendMessage(to, str(ret_))
            if text.lower() == '.รายชื่อกลุ่ม':
                if msg._from in admin:
                    groups = nadya.groups
                    ret_ = "╔══[ รายชื่อกลุ่ม ]"
                    no = 0 + 1
                    for gid in groups:
                        group = nadya.getGroup(gid)
                        ret_ += "\n╠ {}. {} | {}".format(
                            str(no), str(group.name),
                            str(len(group.members)))
                        no += 1
                    ret_ += "\n╚══[ จำนวนกลุ่ม {} กลุ่ม(。-`ω´-)]".format(
                        str(len(groups)))
                    nadya.sendMessage(to, str(ret_))
            #=================THEFLASH====================================================#
            # Admin-only feature toggles.
            if text.lower() == '.ออโต้บล็อค on':
                if msg._from in admin:
                    settings["autoBlock"] = True
                    nadya.sendMessage(to, "เปิดระบบออโต้บล็อค(。-`ω´-)")
            if text.lower() == '.ออโต้บล็อค off':
                if msg._from in admin:
                    settings["autoBlock"] = False
                    nadya.sendMessage(to, "ปิดระบบออโต้บล็อค(。-`ω´-)")
            if text.lower() == '.แอดออโต้ on':
                if msg._from in admin:
                    settings["autoAdd"] = True
                    nadya.sendMessage(to, "เปิดระบบแอดออโต้(。-`ω´-)")
            if text.lower() == '.แอดออโต้ off':
                if msg._from in admin:
                    settings["autoAdd"] = False
                    nadya.sendMessage(to, "ปิดระบบแอดออโต้(。-`ω´-)")
            if text.lower() == '.เข้ากลุ่ม on':
                if msg._from in admin:
                    settings["autoJoin"] = True
                    nadya.sendMessage(to, "เปิดระบบเข้ากลุ่มออโต้(。-`ω´-)")
            if text.lower() == '.เข้ากลุ่ม off':
                if msg._from in admin:
                    settings["autoJoin"] = False
                    nadya.sendMessage(to, "ปิดระบบเข้ากลุ่มออโต้(。-`ω´-)")
            if text.lower() == '.ออกกลุ่ม on':
                if msg._from in admin:
                    settings["autoLeave"] = True
                    nadya.sendMessage(to, "เปิดระบบออกกลุ่มออโต้(。-`ω´-)")
            if text.lower() == '.ออกกลุ่ม off':
                if msg._from in admin:
                    settings["autoLeave"] = False
                    nadya.sendMessage(to, "ปิดระบบออกกลุ่มออโต้(。-`ω´-)")
            if msg.text.lower() == '.ตอนรับเข้า on':
                if msg._from in admin:
                    wait['acommentOn'] = True
                    nadya.sendMessage(
                        msg.to, "เปิดระบบตอนรับสมาชิกเข้ากลุ่ม(。-`ω´-)")
            if msg.text.lower() == '.ตอนรับเข้า off':
                if msg._from in admin:
                    wait['acommentOn'] = False
                    nadya.sendMessage(
                        msg.to, "ปิดระบบตอนรับสมาชิกเข้ากลุ่ม(。-`ω´-)")
            if msg.text.lower() == '.ตอนรับออก on':
                if msg._from in admin:
                    wait["bcommentOn"] = True
                    nadya.sendMessage(
                        msg.to, "เปิดระบบตอนรับสมาชิกออกกลุ่ม(。-`ω´-)")
            if msg.text.lower() == '.ตอนรับออก off':
                if msg._from in admin:
                    wait['bcommentOn'] = False
                    nadya.sendMessage(
                        msg.to, "ปิดระบบตอนรับสมาชิกออกกลุ่ม(。-`ω´-)")
            # Admin-only: set the welcome / send-off message texts.
            if ".ตั้งเข้า:" in msg.text.lower():
                if msg._from in admin:
                    c = msg.text.replace(".ตั้งเข้า:", "")
                    if c in ["", " ", "\n", None]:
                        nadya.sendMessage(msg.to, "เกิดข้อผิดพลาด(。-`ω´-)")
                    else:
                        wait["acomment"] = c
                        nadya.sendMessage(
                            msg.to, "ตั้งค่าข้อความตอนรับเสร็จสิ้น(。-`ω´-)")
            if ".ตั้งออก:" in msg.text.lower():
                if msg._from in admin:
                    c = msg.text.replace(".ตั้งออก:", "")
                    if c in ["", " ", "\n", None]:
                        nadya.sendMessage(msg.to, "เกิดข้อผิดพลาด(。-`ω´-)")
                    else:
                        wait["bcomment"] = c
                        nadya.sendMessage(
                            msg.to, "ตั้งค่าข้อความตอนรับออกเสร็จสิ้น(。-`ω´-)")
            if msg.text in [".เชคเข้า"]:
                if msg._from in admin:
                    nadya.sendMessage(
                        msg.to, "เช็คข้อความตอนรับล่าสุด(。-`ω´-)" + "\n\n➤" +
                        str(wait["acomment"]))
            if msg.text in [".เชคออก"]:
                if msg._from in admin:
                    nadya.sendMessage(
                        msg.to, "เช็คข้อความตอนรับออกล่าสุด(。-`ω´-)" + "\n\n➤" +
                        str(wait["bcomment"]))
            #=================THEFLASH====================================================#
            #==============================================================================#
            # Text-to-speech (Thai) via gTTS; sends the rendered mp3.
            if msg.text.lower().startswith("พูด "):
                sep = text.split(" ")
                say = text.replace(sep[0] + " ", "")
                lang = 'th'
                tts = gTTS(text=say, lang=lang)
                tts.save("hasil.mp3")
                nadya.sendAudio(msg.to, "hasil.mp3")
            #==============================================================================#
            # Start lurker detection: set a read point for this chat.
            if text.lower() == 'เปิดอ่าน':
                tz = pytz.timezone("Asia/Jakarta")
                timeNow = datetime.now(tz=tz)
                day = [
                    "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday",
                    "Friday", "Saturday"
                ]
                hari = [
                    "Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat",
                    "Sabtu"
                ]
                bulan = [
                    "Januari", "Februari", "Maret", "April", "Mei", "Juni",
                    "Juli", "Agustus", "September", "Oktober", "November",
                    "Desember"
                ]
                hr = timeNow.strftime("%A")
                bln = timeNow.strftime("%m")
                # Translate English day/month to Indonesian names.
                for i in range(len(day)):
                    if hr == day[i]:
                        hasil = hari[i]
                for k in range(0, len(bulan)):
                    if bln == str(k):
                        bln = bulan[k - 1]
                readTime = hasil + ", " + timeNow.strftime(
                    '%d') + " - " + bln + " - " + timeNow.strftime(
                        '%Y') + "\nJam : [ " + timeNow.strftime(
                            '%H:%M:%S') + " ]"
                if msg.to in read['readPoint']:
                    try:
                        del read['readPoint'][msg.to]
                        del read['readMember'][msg.to]
                        del read['readTime'][msg.to]
                    except:
                        pass
                    read['readPoint'][msg.to] = msg.id
                    read['readMember'][msg.to] = ""
                    read['readTime'][msg.to] = datetime.now().strftime(
                        '%H:%M:%S')
                    read['ROM'][msg.to] = {}
                    with open('read.json', 'w') as fp:
                        json.dump(read, fp, sort_keys=True, indent=4)
                    nadya.sendMessage(msg.to, "เปิดหาคนซุ่ม(。-`ω´-)")
                else:
                    try:
                        del read['readPoint'][msg.to]
                        del read['readMember'][msg.to]
                        del read['readTime'][msg.to]
                    except:
                        pass
                    read['readPoint'][msg.to] = msg.id
                    read['readMember'][msg.to] = ""
                    read['readTime'][msg.to] = datetime.now().strftime(
                        '%H:%M:%S')
                    read['ROM'][msg.to] = {}
                    with open('read.json', 'w') as fp:
                        json.dump(read, fp, sort_keys=True, indent=4)
                    nadya.sendMessage(
                        msg.to, "Set reading point:\n" + readTime)
            # Report lurkers seen since the read point was set.
            if text.lower() == 'อ่าน':
                tz = pytz.timezone("Asia/Jakarta")
                timeNow = datetime.now(tz=tz)
                day = [
                    "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday",
                    "Friday", "Saturday"
                ]
                hari = [
                    "Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat",
                    "Sabtu"
                ]
                bulan = [
                    "Januari", "Februari", "Maret", "April", "Mei", "Juni",
                    "Juli", "Agustus", "September", "Oktober", "November",
                    "Desember"
                ]
                hr = timeNow.strftime("%A")
                bln = timeNow.strftime("%m")
                for i in range(len(day)):
                    if hr == day[i]:
                        hasil = hari[i]
                for k in range(0, len(bulan)):
                    if bln == str(k):
                        bln = bulan[k - 1]
                readTime = hasil + ", " + timeNow.strftime(
                    '%d') + " - " + bln + " - " + timeNow.strftime(
                        '%Y') + "\nJam : [ " + timeNow.strftime(
                            '%H:%M:%S') + " ]"
                if receiver in read['readPoint']:
                    if read["ROM"][receiver].items() == []:
                        nadya.sendMessage(receiver, "[ Reader ]:\nNone")
                    else:
                        chiya = []
                        for rom in read["ROM"][receiver].items():
                            chiya.append(rom[1])
                        cmem = nadya.getContacts(chiya)
                        zx = ""
                        zxc = ""
                        zx2 = []
                        xpesan = '[ Reader ]:\n'
                        # Build mention offsets over the '@c\n' placeholders.
                        for x in range(len(cmem)):
                            xname = str(cmem[x].displayName)
                            pesan = ''
                            pesan2 = pesan + "@c\n"
                            xlen = str(len(zxc) + len(xpesan))
                            xlen2 = str(
                                len(zxc) + len(pesan2) + len(xpesan) - 1)
                            zx = {'S': xlen, 'E': xlen2, 'M': cmem[x].mid}
                            zx2.append(zx)
                            zxc += pesan2
                        text = xpesan + zxc + "\n[ Lurking time ]: \n" + readTime
                        try:
                            nadya.sendMessage(
                                receiver,
                                text,
                                contentMetadata={
                                    'MENTION':
                                    str('{"MENTIONEES":' +
                                        json.dumps(zx2).replace(' ', '') +
                                        '}')
                                },
                                contentType=0)
                        except Exception as error:
                            print(error)
                            pass
                else:
                    nadya.sendMessage(
                        receiver,
                        "ก่อนจะพิมคำสั่งนี้กรุณา (เปิดอ่าน) ก่อนลงคำสั่งนี้(。-`ω´-)"
                    )
            # Image message: consume the armed profile-picture change.
            if msg.contentType == 1:
                if settings["changePictureProfile"] == True:
                    path = nadya.downloadObjectMsg(msg_id)
                    settings["changePictureProfile"] = False
                    nadya.updateProfilePicture(path)
                    nadya.sendMessage(to, "เปลี่ยนโปรไฟล์สำเร็จแล้ว(。-`ω´-)")
        #==============================================================================#
        # Read receipt: record the reader for the lurker report.
        if op.type == 55:
            print("[ 55 ] NOTIFIED READ MESSAGE")
            try:
                if op.param1 in read['readPoint']:
                    if op.param2 in read['readMember'][op.param1]:
                        pass
                    else:
                        read['readMember'][op.param1] += op.param2
                        read['ROM'][op.param1][op.param2] = op.param2
                        backupData()
                else:
                    pass
            except:
                pass
    except Exception as error:
        logError(error)
def _get_papers_and_save_from_id_list(id_list, outdir='./', num_seed_papers=50,
                                      random_state=999):
    """Split reference papers into seed/target sets, expand via two rounds of
    citation crawling, and pickle every intermediate result into *outdir*.

    :param id_list: paper IDs passed to ``get_papers()``.
    :param outdir: directory that receives the pickle files.
    :param num_seed_papers: size of the seed (train) split.
    :param random_state: RNG seed for ``train_test_split``, for reproducibility.
    """
    reference_papers = get_papers(id_list)
    seed_papers, target_papers = train_test_split(reference_papers,
                                                  train_size=num_seed_papers,
                                                  random_state=random_state)
    _save_papers(seed_papers, os.path.join(outdir, "seed_papers.pickle"))
    _save_papers(target_papers, os.path.join(outdir, "target_papers.pickle"))

    # Round 1: collect papers citing / cited by the seed set.
    logger.debug(
        "getting citing and cited papers for a sample of {} papers...".format(
            len(seed_papers)))
    start = timer()
    test_papers = []
    test_paper_ids = set()
    c = Counter()
    test_papers, test_paper_ids, c = collect_citing_and_cited(
        seed_papers, test_papers, test_paper_ids, c)
    logger.debug(
        "done collecting papers. len(test_papers)=={}. took {}".format(
            len(test_papers), format_timespan(timer() - start)))
    _save_counter(c, os.path.join(outdir, "counter_checkpoint.pickle"))
    start = timer()
    _save_papers(test_papers,
                 os.path.join(outdir, "test_papers_checkpoint.pickle"))
    logger.debug("done saving. took {}".format(format_timespan(timer() -
                                                               start)))

    # Round 2: expand once more from everything collected so far.
    start = timer()
    logger.debug("getting citing and cited papers for {} papers...".format(
        len(test_papers)))
    # Iterate over a snapshot because collect_citing_and_cited also receives
    # (and may grow) test_papers itself.
    cur_papers = list(test_papers)
    test_papers, test_paper_ids, c = collect_citing_and_cited(
        cur_papers, test_papers, test_paper_ids, c)
    logger.debug(
        "done collecting papers. len(test_papers)=={}. took {}".format(
            len(test_papers), format_timespan(timer() - start)))
    _save_counter(c, os.path.join(outdir, "counter.pickle"))
    start = timer()
    _save_papers(test_papers, os.path.join(outdir, "test_papers.pickle"))
    logger.debug("done saving. took {}".format(format_timespan(timer() -
                                                               start)))


def _save_papers(papers, outfname):
    """Pickle *papers* as a dataframe at *outfname*, logging the destination."""
    logger.debug("saving {} papers to {}".format(len(papers), outfname))
    pickle_dataframe_from_papers(papers, outfname)


def _save_counter(c, outfname):
    """Pickle the citation Counter *c* at *outfname*, logging the destination."""
    logger.debug("saving counter to {}".format(outfname))
    with open(outfname, "wb") as outf:
        pickle.dump(c, outf)
def run(
    cls,
    model: AbsESPnetModel,
    optimizers: Sequence[torch.optim.Optimizer],
    schedulers: Sequence[Optional[AbsScheduler]],
    train_iter_factory: AbsIterFactory,
    valid_iter_factory: AbsIterFactory,
    plot_attention_iter_factory: Optional[AbsIterFactory],
    trainer_options,
    distributed_option: DistributedOption,
) -> None:
    """Perform training. This method performs the main process of training.

    Runs the full epoch loop: resume from checkpoint if present, wrap the
    model for (sharded) DDP / DataParallel as requested, then for each epoch
    train + validate, step LR schedulers, checkpoint, maintain best-model
    symlinks, prune non-n-best snapshots, and honor early stopping.  Finally
    (rank 0 only) builds the n-best averaged model.
    """
    assert check_argument_types()
    # NOTE(kamo): Don't check the type more strictly as far trainer_options
    assert is_dataclass(trainer_options), type(trainer_options)
    # keep_nbest_models may be an int or a list of ints; keep the max.
    if isinstance(trainer_options.keep_nbest_models, int):
        keep_nbest_models = trainer_options.keep_nbest_models
    else:
        if len(trainer_options.keep_nbest_models) == 0:
            logging.warning("No keep_nbest_models is given. Change to [1]")
            trainer_options.keep_nbest_models = [1]
        keep_nbest_models = max(trainer_options.keep_nbest_models)
    output_dir = Path(trainer_options.output_dir)
    reporter = Reporter()
    if trainer_options.use_amp:
        if LooseVersion(torch.__version__) < LooseVersion("1.6.0"):
            raise RuntimeError(
                "Require torch>=1.6.0 for Automatic Mixed Precision")
        # NOTE(review): as written this requires fairscale whenever AMP is on,
        # not only for sharded_ddp — confirm that is intended.
        if fairscale is None:
            raise RuntimeError("Requiring fairscale. Do 'pip install fairscale'")
        if trainer_options.sharded_ddp:
            scaler = fairscale.optim.grad_scaler.ShardedGradScaler()
        else:
            scaler = GradScaler()
    else:
        scaler = None
    if trainer_options.resume and (output_dir / "checkpoint.pth").exists():
        cls.resume(
            checkpoint=output_dir / "checkpoint.pth",
            model=model,
            optimizers=optimizers,
            schedulers=schedulers,
            reporter=reporter,
            scaler=scaler,
            ngpu=trainer_options.ngpu,
        )
    start_epoch = reporter.get_epoch() + 1
    if start_epoch == trainer_options.max_epoch + 1:
        logging.warning(
            f"The training has already reached at max_epoch: {start_epoch}")
    # Wrap the model according to the parallelism configuration.
    if distributed_option.distributed:
        if trainer_options.sharded_ddp:
            dp_model = fairscale.nn.data_parallel.ShardedDataParallel(
                module=model,
                sharded_optimizer=optimizers,
            )
        else:
            dp_model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=(
                    # Perform multi-Process with multi-GPUs
                    [torch.cuda.current_device()]
                    if distributed_option.ngpu == 1
                    # Perform single-Process with multi-GPUs
                    else None),
                output_device=(torch.cuda.current_device()
                               if distributed_option.ngpu == 1 else None),
                find_unused_parameters=trainer_options.unused_parameters,
            )
    elif distributed_option.ngpu > 1:
        dp_model = torch.nn.parallel.DataParallel(
            model,
            device_ids=list(range(distributed_option.ngpu)),
            find_unused_parameters=trainer_options.unused_parameters,
        )
    else:
        # NOTE(kamo): DataParallel also should work with ngpu=1,
        # but for debuggability it's better to keep this block.
        dp_model = model
    # Tensorboard writer only on rank 0 (or when not distributed).
    if trainer_options.use_tensorboard and (
            not distributed_option.distributed
            or distributed_option.dist_rank == 0):
        summary_writer = SummaryWriter(str(output_dir / "tensorboard"))
    else:
        summary_writer = None
    start_time = time.perf_counter()
    for iepoch in range(start_epoch, trainer_options.max_epoch + 1):
        if iepoch != start_epoch:
            # ETA = average time per finished epoch * remaining epochs.
            logging.info(
                "{}/{}epoch started. Estimated time to finish: {}".format(
                    iepoch, trainer_options.max_epoch,
                    humanfriendly.format_timespan(
                        (time.perf_counter() - start_time) /
                        (iepoch - start_epoch) *
                        (trainer_options.max_epoch - iepoch + 1)),
                ))
        else:
            logging.info(
                f"{iepoch}/{trainer_options.max_epoch}epoch started")
        set_all_random_seed(trainer_options.seed + iepoch)
        reporter.set_epoch(iepoch)
        # 1. Train and validation for one-epoch
        with reporter.observe("train") as sub_reporter:
            all_steps_are_invalid = cls.train_one_epoch(
                model=dp_model,
                optimizers=optimizers,
                schedulers=schedulers,
                iterator=train_iter_factory.build_iter(iepoch),
                reporter=sub_reporter,
                scaler=scaler,
                summary_writer=summary_writer,
                options=trainer_options,
            )
        with reporter.observe("valid") as sub_reporter:
            cls.validate_one_epoch(
                model=dp_model,
                iterator=valid_iter_factory.build_iter(iepoch),
                reporter=sub_reporter,
                options=trainer_options,
            )
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            # att_plot doesn't support distributed
            if plot_attention_iter_factory is not None:
                with reporter.observe("att_plot") as sub_reporter:
                    cls.plot_attention(
                        model=model,
                        output_dir=output_dir / "att_ws",
                        summary_writer=summary_writer,
                        iterator=plot_attention_iter_factory.build_iter(
                            iepoch),
                        reporter=sub_reporter,
                        options=trainer_options,
                    )
        # 2. LR Scheduler step
        for scheduler in schedulers:
            if isinstance(scheduler, AbsValEpochStepScheduler):
                scheduler.step(
                    reporter.get_value(
                        *trainer_options.val_scheduler_criterion))
            elif isinstance(scheduler, AbsEpochStepScheduler):
                scheduler.step()
        if trainer_options.sharded_ddp:
            # Gather sharded optimizer state so it can be checkpointed.
            for optimizer in optimizers:
                if isinstance(optimizer, fairscale.optim.oss.OSS):
                    optimizer.consolidate_state_dict()
        if not distributed_option.distributed or distributed_option.dist_rank == 0:
            # 3. Report the results
            logging.info(reporter.log_message())
            reporter.matplotlib_plot(output_dir / "images")
            if summary_writer is not None:
                reporter.tensorboard_add_scalar(summary_writer)
            if trainer_options.use_wandb:
                reporter.wandb_log()
            # 4. Save/Update the checkpoint
            torch.save(
                {
                    "model": model.state_dict(),
                    "reporter": reporter.state_dict(),
                    "optimizers": [o.state_dict() for o in optimizers],
                    "schedulers": [
                        s.state_dict() if s is not None else None
                        for s in schedulers
                    ],
                    "scaler":
                    scaler.state_dict() if scaler is not None else None,
                },
                output_dir / "checkpoint.pth",
            )
            # 5. Save the model and update the link to the best model
            torch.save(model.state_dict(), output_dir / f"{iepoch}epoch.pth")
            # Creates a sym link latest.pth -> {iepoch}epoch.pth
            p = output_dir / "latest.pth"
            if p.is_symlink() or p.exists():
                p.unlink()
            p.symlink_to(f"{iepoch}epoch.pth")
            _improved = []
            for _phase, k, _mode in trainer_options.best_model_criterion:
                # e.g. _phase, k, _mode = "train", "loss", "min"
                if reporter.has(_phase, k):
                    best_epoch = reporter.get_best_epoch(_phase, k, _mode)
                    # Creates sym links if it's the best result
                    if best_epoch == iepoch:
                        p = output_dir / f"{_phase}.{k}.best.pth"
                        if p.is_symlink() or p.exists():
                            p.unlink()
                        p.symlink_to(f"{iepoch}epoch.pth")
                        _improved.append(f"{_phase}.{k}")
            if len(_improved) == 0:
                logging.info("There are no improvements in this epoch")
            else:
                logging.info("The best model has been updated: " +
                             ", ".join(_improved))
            # 6. Remove the model files excluding n-best epoch and latest epoch
            _removed = []
            # Get the union set of the n-best among multiple criterion
            nbests = set().union(*[
                set(reporter.sort_epochs(ph, k, m)[:keep_nbest_models])
                for ph, k, m in trainer_options.best_model_criterion
                if reporter.has(ph, k)
            ])
            for e in range(1, iepoch):
                p = output_dir / f"{e}epoch.pth"
                if p.exists() and e not in nbests:
                    p.unlink()
                    _removed.append(str(p))
            if len(_removed) != 0:
                logging.info("The model files were removed: " +
                             ", ".join(_removed))
        # 7. If any updating haven't happened, stops the training
        if all_steps_are_invalid:
            logging.warning(
                f"The gradients at all steps are invalid in this epoch. "
                f"Something seems wrong. This training was stopped at {iepoch}epoch"
            )
            break
        # 8. Check early stopping
        if trainer_options.patience is not None:
            if reporter.check_early_stopping(
                    trainer_options.patience,
                    *trainer_options.early_stopping_criterion):
                break
    else:
        # for-else: only reached when the loop was not broken out of.
        logging.info(
            f"The training was finished at {trainer_options.max_epoch} epochs ")
    if not distributed_option.distributed or distributed_option.dist_rank == 0:
        # Generated n-best averaged model
        average_nbest_models(
            reporter=reporter,
            output_dir=output_dir,
            best_model_criterion=trainer_options.best_model_criterion,
            nbest=keep_nbest_models,
        )
def __init__(self, name=None, description=None, secret_key_file=None, public_key_file=None, key_id=None):
    """
    Initialize a GPG key object in one of several ways:

    1. If `key_id` is specified then the GPG key must have been created
       previously. If `secret_key_file` and `public_key_file` are not
       specified they default to ``~/.gnupg/secring.gpg`` and
       ``~/.gnupg/pubring.gpg``. In this case `key_id` is the only
       required argument.

       The following example assumes that the provided GPG key ID is
       defined in the default keyring of the current user:

       >>> from deb_pkg_tools.gpg import GPGKey
       >>> key = GPGKey(key_id='58B6B02B')
       >>> key.gpg_command
       'gpg --no-default-keyring --secret-keyring /home/peter/.gnupg/secring.gpg --keyring /home/peter/.gnupg/pubring.gpg --recipient 58B6B02B'

    2. If `secret_key_file` and `public_key_file` are specified but the
       files don't exist yet, a GPG key will be generated for you. In
       this case `name` and `description` are required arguments and
       `key_id` must be ``None`` (the default). An example:

       >>> name = 'deb-pkg-tools'
       >>> description = 'Automatic signing key for deb-pkg-tools'
       >>> secret_key_file = '/home/peter/.deb-pkg-tools/automatic-signing-key.sec'
       >>> public_key_file = '/home/peter/.deb-pkg-tools/automatic-signing-key.pub'
       >>> key = GPGKey(name, description, secret_key_file, public_key_file)
       >>> key.gpg_command
       'gpg --no-default-keyring --secret-keyring /home/peter/.deb-pkg-tools/automatic-signing-key.sec --keyring /home/peter/.deb-pkg-tools/automatic-signing-key.pub'

    :param name: The name of the GPG key pair (a string). Used only when the
                 key pair is generated because it doesn't exist yet.
    :param description: The description of the GPG key pair (a string). Used
                        only when the key pair is generated because it
                        doesn't exist yet.
    :param secret_key_file: The absolute pathname of the secret key file (a
                            string). Defaults to ``~/.gnupg/secring.gpg``.
    :param public_key_file: The absolute pathname of the public key file (a
                            string). Defaults to ``~/.gnupg/pubring.gpg``.
    :param key_id: The key ID of an existing key pair to use (a string). If
                   this argument is provided then the key pair's secret and
                   public key files must already exist.
    :raises: :exc:`Exception` when the combination of arguments is invalid
             (e.g. only one key file given, or key files exist partially).
    """
    # If the secret or public key file is provided, the other key file must
    # be provided as well.
    if secret_key_file and not public_key_file:
        raise Exception("You provided a GPG secret key file without a public key file; please provide both!")
    elif public_key_file and not secret_key_file:
        raise Exception("You provided a GPG public key file without a secret key file; please provide both!")
    # If neither of the key files is provided we'll default to the
    # locations that GnuPG uses by default.
    if not secret_key_file and not public_key_file:
        gnupg_directory = os.path.join(find_home_directory(), '.gnupg')
        secret_key_file = os.path.join(gnupg_directory, 'secring.gpg')
        public_key_file = os.path.join(gnupg_directory, 'pubring.gpg')
    # If a key ID was specified then the two key files must already exist;
    # we won't generate them because that makes no sense :-)
    if key_id and not os.path.isfile(secret_key_file):
        text = "The provided GPG secret key file (%s) doesn't exist but a key ID was specified!"
        raise Exception(text % secret_key_file)
    if key_id and not os.path.isfile(public_key_file):
        text = "The provided GPG public key file (%s) doesn't exist but a key ID was specified!"
        raise Exception(text % public_key_file)
    # If we're going to generate a GPG key for the caller we don't want to
    # overwrite a secret or public key file without its counterpart. We'll
    # also need a name and description for the generated key.
    existing_files = list(filter(os.path.isfile, [secret_key_file, public_key_file]))
    if len(existing_files) not in (0, 2):
        text = "Refusing to overwrite existing key file! (%s)"
        raise Exception(text % existing_files[0])
    elif len(existing_files) == 0 and not (name and description):
        logger.error("GPG key pair doesn't exist! (%s and %s)",
                     format_path(secret_key_file), format_path(public_key_file))
        raise Exception("To generate a GPG key you must provide a name and description!")
    # Store the arguments.
    self.name = name
    self.description = description
    self.secret_key_file = secret_key_file
    self.public_key_file = public_key_file
    self.key_id = key_id
    # Generate the GPG key pair if required.
    if not existing_files:
        # Make sure the directories of the secret/public key files exist.
        for filename in [secret_key_file, public_key_file]:
            directory = os.path.dirname(filename)
            if not os.path.isdir(directory):
                os.makedirs(directory)
        # Generate a file with batch instructions
        # suitable for `gpg --batch --gen-key'.
        fd, gpg_script = tempfile.mkstemp()
        # BUG FIX: mkstemp() returns an *open* OS-level file descriptor which
        # the original code never closed (the path was reopened by name
        # below), leaking one descriptor per generated key pair.
        os.close(fd)
        try:
            with open(gpg_script, 'w') as handle:
                handle.write(textwrap.dedent('''
                    Key-Type: DSA
                    Key-Length: 1024
                    Subkey-Type: ELG-E
                    Subkey-Length: 1024
                    Name-Real: {name}
                    Name-Comment: {description}
                    Name-Email: none
                    Expire-Date: 0
                    %pubring {public_key_file}
                    %secring {secret_key_file}
                    %commit
                ''').format(name=self.name,
                            description=self.description,
                            secret_key_file=self.secret_key_file,
                            public_key_file=self.public_key_file))
            # Generate the GPG key pair.
            logger.info("Generating GPG key pair %s (%s) ..", self.name, self.description)
            logger.debug("Private key: %s", format_path(self.secret_key_file))
            logger.debug("Public key: %s", format_path(self.public_key_file))
            logger.info("Please note: Generating a GPG key pair can take a long time. "
                        "If you are logged into a virtual machine or a remote server "
                        "over SSH, now is a good time to familiarize yourself with "
                        "the concept of entropy and how to make more of it :-)")
            start_time = time.time()
            initialize_gnupg()
            with EntropyGenerator():
                execute('gpg', '--batch', '--gen-key', gpg_script, logger=logger)
            logger.info("Finished generating GPG key pair in %s.",
                        format_timespan(time.time() - start_time))
        finally:
            # BUG FIX: the temporary batch script was previously only removed
            # on success; remove it even when key generation raises.
            os.unlink(gpg_script)
async def rpc(self, ctx, *, member: Member = None, MSG: discord.Message = None, ACT: int = 0):
    """PFXrpc [<member>]"""
    # Show the rich-presence activity at index ACT for `member` (defaults to
    # the command author). MSG is the previously-sent embed message when the
    # command re-invokes itself via reaction paging; ACT is the activity index.
    if not member:
        member = ctx.author
    if ACT == -1:
        # Paged past the first activity; stop silently.
        return
    try:
        activity = member.activities[ACT]
    except IndexError:
        if ACT != 0:
            # Paged past the last activity; stop silently.
            return
        activity = None
    embed = None
    if activity != None:
        if activity.name == 'Spotify':
            # Spotify listening status: song/artists/album + timing info.
            adict = activity.to_dict()
            embed = discord.Embed(color=activity.color, timestamp=datetime.datetime.utcnow())
            embed.set_author(
                name=f'{member}\'s Spotify Info',
                icon_url='https://cdn.discordapp.com/emojis/471412444716072960.png')
            embed.add_field(name='Song', value=activity.title, inline=False)
            embed.add_field(name='Artists', value=', '.join(activity.artists), inline=False)
            embed.add_field(name='Album', value=activity.album, inline=False)
            duration = humanfriendly.format_timespan(activity.duration)
            now = datetime.datetime.utcnow()
            elapsed = humanfriendly.format_timespan(now - activity.start)
            left = humanfriendly.format_timespan(now - activity.end) if False else humanfriendly.format_timespan(activity.end - now)
            if 'day' in left:
                # A "day"-sized remainder means the track already ended
                # (clock skew produced a huge timespan); clamp the display.
                left = '0:00:00'
            embed.add_field(
                name='Times',
                value=f'Duration: {duration}\nElapsed: {elapsed}\nLeft: {left}',
                inline=False)
            embed.add_field(
                name='Listen to this track',
                value=f'[{activity.title}](https://open.spotify.com/track/{activity.track_id})',
                inline=False)
            embed.set_thumbnail(url=activity.album_cover_url)
        elif type(activity) == discord.Streaming:
            # Streaming status (Twitch integration).
            embed = discord.Embed(color=discord.Color.purple(), timestamp=datetime.datetime.utcnow())
            embed.set_author(
                name=f'{member}\'s Stream Info',
                icon_url='https://cdn.discordapp.com/emojis/603188557242433539.png')
            if member.bot:
                embed.add_field(name='Title', value=activity.name, inline=False)
            else:
                # NOTE(review): indentation reconstructed — assuming the
                # 'Twitch Name' field belongs to the non-bot branch; confirm.
                embed.add_field(name='Title', value=activity.name, inline=False)
                embed.add_field(name='Twitch Name', value=activity.twitch_name, inline=False)
            if activity.details != None:
                embed.add_field(name='Game', value=activity.details, inline=False)
            embed.add_field(
                name='URL',
                value=f'[{activity.twitch_name}]({activity.url})',
                inline=False)
        elif type(activity) == discord.Activity:
            # Generic game / rich-presence activity.
            embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow())
            if activity.small_image_url != None:
                embed.set_author(name=f'{member}\'s Game Info', icon_url=activity.small_image_url)
            else:
                embed.set_author(name=f'{member}\'s Game Info')
            embed.add_field(name='Game', value=activity.name, inline=False)
            now = datetime.datetime.utcnow()
            elapsed = None
            if activity.start:
                elapsed = humanfriendly.format_timespan(now - activity.start)
            # One 'Details' field built from whichever of details/state/elapsed
            # are available (all combinations enumerated explicitly).
            if activity.details != None and activity.state != None and elapsed != None:
                embed.add_field(
                    name='Details',
                    value=f'{activity.details}\n{activity.state}\n{elapsed} elapsed',
                    inline=False)
            elif activity.state != None and elapsed != None:
                embed.add_field(
                    name='Details',
                    value=f'{activity.state}\n{elapsed} elapsed',
                    inline=False)
            elif activity.details != None and elapsed != None:
                embed.add_field(
                    name='Details',
                    value=f'{activity.details}\n{elapsed} elapsed',
                    inline=False)
            elif activity.details != None and activity.state != None and elapsed == None:
                embed.add_field(
                    name='Details',
                    value=f'{activity.details}\n{activity.state}',
                    inline=False)
            elif activity.state != None and elapsed == None:
                embed.add_field(name='Details', value=f'{activity.state}', inline=False)
            elif activity.details != None and elapsed == None:
                embed.add_field(name='Details', value=f'{activity.details}', inline=False)
            if activity.large_image_url != None:
                embed.set_thumbnail(url=activity.large_image_url)
        else:
            pass
        if embed:
            if MSG:
                # Re-invocation via reaction paging: edit the existing message.
                await MSG.edit(embed=embed)

                def react_check(reaction, user):
                    # Only react to the command author.
                    return user.id == ctx.author.id
                try:
                    reaction, user = await self.bot.wait_for(
                        'reaction_add', check=react_check, timeout=120)
                except asyncio.TimeoutError:
                    return
                if reaction.emoji == '⏹':
                    await MSG.delete()
                elif reaction.emoji == '◀':
                    # Page to the previous activity by re-invoking the command.
                    await MSG.remove_reaction('◀', ctx.author)
                    await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT - 1)
                elif reaction.emoji == '▶':
                    # Page to the next activity by re-invoking the command.
                    await MSG.remove_reaction('▶', ctx.author)
                    await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT + 1)
            else:
                # First invocation: send the embed and seed paging reactions.
                MSG = await ctx.send(embed=embed)
                await MSG.add_reaction('⏹')
                await MSG.add_reaction('◀')
                await MSG.add_reaction('▶')

                def react_check(reaction, user):
                    # Only react to the command author.
                    return user.id == ctx.author.id
                try:
                    reaction, user = await self.bot.wait_for(
                        'reaction_add', check=react_check, timeout=120)
                except asyncio.TimeoutError:
                    return
                if reaction.emoji == '⏹':
                    await MSG.delete()
                elif reaction.emoji == '◀':
                    await MSG.remove_reaction('◀', ctx.author)
                    await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT - 1)
                elif reaction.emoji == '▶':
                    await MSG.remove_reaction('▶', ctx.author)
                    await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT + 1)
        else:
            await ctx.send(
                f'{discord.utils.escape_mentions(discord.utils.escape_markdown(str(member)))} doesn\'t seem to be playing something with rich presence integration...'
            )
    else:
        await ctx.send(
            f'{discord.utils.escape_mentions(discord.utils.escape_markdown(str(member)))} doesn\'t seem to be playing something with rich presence integration...'
        )
def find(path, acoustid_apikey): '''Search a youtube link with artist and title''' f = File(path) file_id = f.fingerprint(acoustid_apikey) print('Searching for artist {} and title {} and duration {}'.format(f.artist, f.title, format_timespan(f.duration))) urls = youtube.search(f.artist, f.title, f.duration) for url in urls: yt_path = None try: yt = YouTube(url) for stream in yt.streams.all(): with tqdm(total=stream.filesize, desc="Testing {}".format(url), disable=config.quiet, leave=False) as pbar: def show_progress_bar(stream, chunk, file_handle, bytes_remaining): pbar.update(len(chunk)) yt.register_on_progress_callback(show_progress_bar) yt_path = stream.download() break yt_ids = acoustid.match(acoustid_apikey, yt_path) yt_id = None for _, recording_id, _, _ in yt_ids: yt_id = recording_id break if file_id == yt_id: print('Found: fingerprint {} | url {}'.format(file_id, url)) break elif yt_id is not None: print('Not exactly found: fingerprint file: {} | yt: {} | url {}'.format(file_id, yt_id, url)) break else: print('Based only on duration, maybe: {}'.format(url)) # break except VideoUnavailable: pass finally: if yt_path: os.remove(yt_path)
async def help(self, ctx: discord.ext.commands.context.Context, *command):
    """
    Use the help command to get a list of all the available command.
    If you want to know more about a command use the help command with the other command as an parameter.
    """
    # (Docstring left verbatim: discord.py surfaces it as the command's own
    # user-facing help text.)
    # converts command tuple to string
    command = " ".join(command)
    # check if the command argument is empty
    # -> opening the global help page
    if command == "":
        embed = discord.Embed(colour=int(self.bot.config["embed-colours"]["default"], 16))
        embed.title = "Available commands!"
        # list of all cogs
        cogs = [c for c in self.bot.cogs.keys()]
        # removes all the cogs with no commands
        remove_cogs = ["events", "tasks"]
        for remove_cog in remove_cogs:
            # Cog class names are derived from their file names under
            # cogs/<category>/, so removing by file name drops the cog.
            for file in os.listdir(f"{self.bot.cwd}/cogs/{remove_cog}"):
                if file.endswith(".py"):
                    cogs.remove(str(file[:-3]))
        for c in cogs:
            cog = self.bot.get_cog(c)
            # skipping the owner commands cog if the user is not an owner
            if cog.qualified_name == "OwnerCommands" and not await self.bot.is_owner(ctx.author):
                continue
            # skipping the help command cog
            if cog.qualified_name == "Help":
                continue
            # disables the music command cog
            if cog.qualified_name == "MusicCommands":
                continue
            # skipping the admin commands for non admins in a guild
            if isinstance(ctx.author, discord.Member):
                if cog.qualified_name == "AdminCommands" and not ctx.author.guild_permissions.administrator:
                    continue
            command_list = ""
            # NOTE: this loop reuses (and clobbers) the outer `command`
            # variable, which is safe only because the empty-argument branch
            # returns before `command` is read again.
            for command in cog.walk_commands():
                # check if command is a subcommand to only display top level commands
                if command.parent is not None:
                    continue
                # skip hidden commands commands
                if command.hidden:
                    continue
                # skips disabled commands
                if not command.enabled:
                    continue
                # command description is empty if there is none
                command_desc = f" - *{command.description}*" if command.description else ""
                command_list += f"`{ctx.prefix}{command.name}`{command_desc}\n"
            if command_list != "":
                # NOTE(review): indentation reconstructed — the field is only
                # added for non-empty cogs (Discord rejects empty field
                # values); confirm against the original layout.
                command_list += "\uFEFF"
                embed.add_field(name=c, value=command_list, inline=False)
        embed.add_field(name="\uFEFF", value=f"Use `{ctx.prefix}help <command>` to get more information about a command.")
        # sending the help message and ending the function
        await ctx.send(embed=embed)
        return
    # help command for a specific command
    else:
        # getting the command object from the bot (including subcommands)
        command = self.bot.get_command(command)
        if command is None:
            raise discord.ext.commands.BadArgument("Invalid command passed for the help command")
        # stopping hidden commands from being displayed
        if command.hidden:
            raise customErrors.errors.NoPermissionsToViewThisCommand()
        # stopping disabled commands from being displayed
        if not command.enabled:
            raise customErrors.errors.NoPermissionsToViewThisCommand()
        # check if the commands belongs to a cog
        if command.cog is not None:
            # raising an error if the command is an owner command an the user is not an owner
            if command.cog.qualified_name == "OwnerCommands" and not await self.bot.is_owner(ctx.author):
                raise customErrors.errors.NoPermissionsToViewThisCommand()
            # raising an error if a member of a guild tries to use the help command on admin cog commands
            if isinstance(ctx.author, discord.Member):
                if command.cog.qualified_name == "AdminCommands" and not ctx.author.guild_permissions.administrator:
                    raise customErrors.errors.NoPermissionsToViewThisCommand()
        embed = discord.Embed(colour=int(self.bot.config["embed-colours"]["default"], 16))
        # getting the name of the parent commands
        full_parent_name = command.full_parent_name + " " if command.full_parent_name != "" else ""
        full_name = full_parent_name + command.name
        embed.title = f"Help for the **{full_name}** command"
        # if no help text is defined for the command the description text will be taken instead
        description = f"*{command.help}*" if command.help else command.description
        embed.description = description if description else "No description available."
        # adding command usage example to the description
        embed.description += f"\n```{ctx.prefix}{full_name}"
        if command.clean_params:
            parameters = list(command.clean_params)
            for parameter in parameters:
                embed.description += f" <{str(parameter)}>"
        embed.description += "```"
        # adding aliases field if aliases exist for this command
        if len(command.aliases) > 0:
            aliases = ""
            for alias in command.aliases:
                aliases += f"`{ctx.prefix}{full_parent_name}{alias}`\n"
            embed.add_field(name="Aliases", value=aliases, inline=False)
        # adding subcommand field
        if isinstance(command, discord.ext.commands.Group):
            sub_commands = []
            for sc in command.walk_commands():
                if sc.hidden:
                    continue
                # only direct children of this group
                if sc.parent is not command:
                    continue
                sub_commands.append(f"`{sc.name}`")
            if len(sub_commands) > 0:
                embed.add_field(name="Subcommands", value="\n".join(sub_commands))
        # adding cooldown field if a cooldown is active for the command
        # NOTE: reaches into discord.py private internals (_buckets/_cooldown);
        # fragile across library versions.
        if command._buckets._cooldown:
            rate_string = f"{command._buckets._cooldown.rate} times" if command._buckets._cooldown.rate > 1 else f"{command._buckets._cooldown.rate} time"
            per_string = format_timespan(command._buckets._cooldown.per, max_units=3)
            types = {discord.ext.commands.BucketType.user: "******",
                     discord.ext.commands.BucketType.guild: "server",
                     discord.ext.commands.BucketType.channel: "channel",
                     discord.ext.commands.BucketType.member: "member",
                     discord.ext.commands.BucketType.role: "role",
                     discord.ext.commands.BucketType.category: "category",
                     discord.ext.commands.BucketType.default: "default",
                     }
            msg_content = f"This command can be used **{rate_string}** every **{per_string}** per **{types.get(command._buckets._cooldown.type, '<unknown>')}**."
            embed.add_field(name="**Cooldown**", value=msg_content)
        # sending the help message
        await ctx.send(embed=embed)
def _share_group_url(gid):
    # Helper: re-open the group's join-by-ticket URL and broadcast it to the
    # backup bot accounts (via their /jgurlx command) so they can (re)join.
    group = cl.getGroup(gid)
    try:
        group.preventedJoinByTicket = False
        cl.updateGroup(group)
        str1 = cl.reissueGroupTicket(gid)
    except Exception as e:
        print(e)
    # If reissuing failed above, str1 is unbound and the resulting NameError
    # propagates to lineBot's outer try/except — same net effect as before.
    for backup in (mid1, mid2, mid3, mid4, mid6):
        cl.sendMessage(backup, "/jgurlx gid: " + gid + " gid " + "url: http://line.me/R/ti/g/" + str1 + " url")


def _guard_against_kicker(gid, kicker):
    # Helper: a backup bot was kicked — kick the kicker back (unless admin),
    # blacklist them and broadcast a fresh join URL so the bots can return.
    # Factored out of five identical copy-pasted blocks (mid1..mid4, mid6).
    if kicker in admin:
        return
    cl.kickoutFromGroup(gid, [kicker])
    settings["blacklist"][kicker] = True
    _share_group_url(gid)


def lineBot(op):
    # Main LINE operation dispatcher: reacts to friend adds, group events and
    # chat commands. All errors are funnelled into logError().
    try:
        if op.type == 0:
            return
        if op.type == 5:
            # Notified of a new friend.
            # NOTE(review): `param2` (bare, not op.param2) looks undefined here
            # — it raises NameError which the outer handler logs, so autoAdd
            # below may never run. Confirm the intended attribute before fixing.
            contact = cl.getContact(param2)
            print("[ 5 ] 通知添加好友 名字: " + contact.displayName)
            if settings["autoAdd"] == True:
                cl.sendMessage(op.param1, "你好 {} 謝謝你加本機為好友 :D\n 本機為CoCo製作\n line.me/ti/p/1MRX_Gjbmv".format(str(cl.getContact(op.param1).displayName)))
        if op.type == 11:
            # Someone opened the group's invite URL.
            group = cl.getGroup(op.param1)
            contact = cl.getContact(op.param2)
            print("[11]有人打開群組網址 群組名稱: " + str(group.name) + "\n" + op.param1 + "\n名字: " + contact.displayName)
            if op.param1 in settings["qrprotect"]:
                if op.param2 in admin:
                    pass
                else:
                    gs = cl.getGroup(op.param1)
                    cl.kickoutFromGroup(op.param1, [op.param2])
                    # NOTE(review): attribute is `preventJoinByTicket` here but
                    # `preventedJoinByTicket` everywhere else — possible typo;
                    # left as-is, confirm against the LINE client API.
                    gs.preventJoinByTicket = True
                    cl.updateGroup(gs)
        if op.type == 13:
            # Notified of a group invitation.
            contact1 = cl.getContact(op.param2)
            contact2 = cl.getContact(op.param3)
            group = cl.getGroup(op.param1)
            print("[ 13 ] 通知邀請群組: " + str(group.name) + "\n邀請者: " + contact1.displayName + "\n被邀請者" + contact2.displayName)
            if op.param1 in settings["inviteprotect"]:
                if op.param2 in admin:
                    pass
                else:
                    cl.cancelGroupInvitation(op.param3)
            if clMID in op.param3:
                if settings["autoJoin"] == True:
                    print("進入群組: " + str(group.name))
                    cl.acceptGroupInvitation(op.param1)
        if op.type == 19:
            # Someone was kicked from a group.
            contact1 = cl.getContact(op.param2)
            group = cl.getGroup(op.param1)
            contact2 = cl.getContact(op.param3)
            print("[19]有人把人踢出群組 群組名稱: " + str(group.name) + "\n" + op.param1 + "\n踢人者: " + contact1.displayName + "\nMid: " + contact1.mid + "\n被踢者" + contact2.displayName + "\nMid:" + contact2.mid)
            # If one of our backup bots was the victim, retaliate + recover.
            for bot_mid in (mid1, mid2, mid3, mid4, mid6):
                if bot_mid in op.param3:
                    _guard_against_kicker(op.param1, op.param2)
        if op.type == 24:
            # Invited into a (1:1 multi-user) room.
            if settings["autoLeave"] == True:
                cl.leaveRoom(op.param1)
        if op.type == 26 or op.type == 25:
            # Incoming (26) or sent (25) message: command dispatcher.
            msg = op.message
            text = msg.text
            msg_id = msg.id
            receiver = msg.to
            sender = msg._from
            # For 1:1 chats reply to the sender unless we sent it ourselves.
            if msg.toType == 0:
                if sender != cl.profile.mid:
                    to = sender
                else:
                    to = receiver
            else:
                to = receiver
            if msg.contentType == 0:
                if text is None:
                    return
                # Only admins may issue commands.
                if sender in admin:
                    if "Fbc:" in msg.text:
                        # Broadcast to every contact.
                        bctxt = text.replace("Fbc:", "")
                        t = cl.getAllContactIds()
                        for manusia in t:
                            cl.sendMessage(manusia, (bctxt))
                    elif "Gbc:" in msg.text:
                        # Broadcast to every joined group.
                        bctxt = text.replace("Gbc:", "")
                        n = cl.getGroupIdsJoined()
                        for manusia in n:
                            cl.sendMessage(manusia, (bctxt))
                    elif 'invitebot' in text.lower():
                        # Invite the backup bots into this group via ticket URL.
                        if msg.toType == 2:
                            _share_group_url(msg.to)
                    elif text.startswith("/jgurlx"):
                        # Join a group advertised by another bot's URL message.
                        str1 = find_between_r(msg.text, "gid: ", " gid")
                        str2 = find_between_r(msg.text, "url: http://line.me/R/ti/g/", " url")
                        print("1")
                        cl.acceptGroupInvitationByTicket(str1, str2)
                        JoinedGroups.append(str1)
                        group = cl.getGroup(str1)
                        try:
                            time.sleep(0.1)
                            # Invalidate the ticket and close the URL again.
                            cl.reissueGroupTicket(str1)
                            group.preventedJoinByTicket = True
                            cl.updateGroup(group)
                        except Exception as e:
                            print(e)
                    elif "Ri " in msg.text:
                        # Kick-and-reinvite members whose display name matches.
                        Ri0 = text.replace("Ri ", "")
                        Ri1 = Ri0.rstrip()
                        Ri2 = Ri1.replace("@", "")
                        Ri3 = Ri2.rstrip()
                        _name = Ri3
                        gs = cl.getGroup(msg.to)
                        targets = []
                        for s in gs.members:
                            if _name in s.displayName:
                                targets.append(s.mid)
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                if target in admin:
                                    pass
                                else:
                                    try:
                                        cl.kickoutFromGroup(to, [target])
                                        cl.findAndAddContactsByMid(target)
                                        cl.inviteIntoGroup(to, [target])
                                    except:
                                        pass
                    elif "Tk " in msg.text:
                        # Kick all @-mentioned members.
                        # SECURITY: eval() on message metadata executes arbitrary
                        # expressions from chat payloads — should be json.loads;
                        # flagged, not changed.
                        key = eval(msg.contentMetadata["MENTION"])
                        key["MENTIONEES"][0]["M"]
                        targets = []
                        for x in key["MENTIONEES"]:
                            targets.append(x["M"])
                        for target in targets:
                            if target in admin:
                                pass
                            else:
                                try:
                                    cl.kickoutFromGroup(to, [target])
                                except:
                                    pass
                    elif "Mk " in msg.text:
                        # Kick by display name (admins NOT exempt here).
                        mk0 = msg.text.replace("Mk ", "")
                        mk1 = mk0.rstrip()
                        mk2 = mk1.replace("@", "")
                        mk3 = mk2.rstrip()
                        _name = mk3
                        gs = cl.getGroup(msg.to)
                        targets = []
                        for s in gs.members:
                            if _name in s.displayName:
                                targets.append(s.mid)
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                try:
                                    cl.kickoutFromGroup(msg.to, [target])
                                except:
                                    pass
                    elif "Nk " in msg.text:
                        # Kick by display name, skipping admins.
                        _name = text.replace("Nk ", "")
                        gs = cl.getGroup(to)
                        targets = []
                        for g in gs.members:
                            if _name in g.displayName:
                                targets.append(g.mid)
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                if target in admin:
                                    pass
                                else:
                                    try:
                                        cl.kickoutFromGroup(to, [target])
                                    except:
                                        pass
                    elif "Vk " in msg.text:
                        # Kick, re-add, invite then cancel the invite ("virtual kick").
                        vkick0 = msg.text.replace("Vk ", "")
                        vkick1 = vkick0.rstrip()
                        vkick2 = vkick1.replace("@", "")
                        vkick3 = vkick2.rstrip()
                        _name = vkick3
                        gs = cl.getGroup(msg.to)
                        targets = []
                        for s in gs.members:
                            if _name in s.displayName:
                                targets.append(s.mid)
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                try:
                                    cl.kickoutFromGroup(msg.to, [target])
                                    cl.findAndAddContactsByMid(target)
                                    cl.inviteIntoGroup(msg.to, [target])
                                    cl.cancelGroupInvitation(msg.to, [target])
                                except:
                                    pass
                    elif "Zk" in msg.text:
                        # Kick members with an empty display name.
                        # (`g.displayName in ""` is only True for "".)
                        gs = cl.getGroup(to)
                        targets = []
                        for g in gs.members:
                            if g.displayName in "":
                                targets.append(g.mid)
                        if targets == []:
                            pass
                        else:
                            for target in targets:
                                if target in admin:
                                    pass
                                else:
                                    try:
                                        cl.kickoutFromGroup(to, [target])
                                    except:
                                        pass
                    elif msg.text in ["c", "C", "cancel", "Cancel"]:
                        # Cancel all pending invitations in this group.
                        if msg.toType == 2:
                            X = cl.getGroup(msg.to)
                            if X.invitee is not None:
                                gInviMids = (contact.mid for contact in X.invitee)
                                ginfo = cl.getGroup(msg.to)
                                sinvitee = str(len(ginfo.invitee))
                                start = time.time()
                                for cancelmod in gInviMids:
                                    cl.cancelGroupInvitation(msg.to, [cancelmod])
                                elapsed_time = time.time() - start
                                cl.sendMessage(to, "已取消完成\n取消時間: %s秒" % (elapsed_time))
                                cl.sendMessage(to, "取消人數:" + sinvitee)
                            else:
                                cl.sendMessage(to, "沒有任何人在邀請中!!")
                    elif text.lower() == 'gcancel':
                        # Reject every pending group invitation for this account.
                        gid = cl.getGroupIdsInvited()
                        start = time.time()
                        for i in gid:
                            cl.rejectGroupInvitation(i)
                        elapsed_time = time.time() - start
                        cl.sendMessage(to, "全部群組邀請已取消")
                        cl.sendMessage(to, "取消時間: %s秒" % (elapsed_time))
                    elif "Gn " in msg.text:
                        # Rename the current group.
                        if msg.toType == 2:
                            X = cl.getGroup(msg.to)
                            X.name = msg.text.replace("Gn ", "")
                            cl.updateGroup(X)
                        else:
                            cl.sendMessage(msg.to, "無法使用在群組外")
                    elif "Inv " in msg.text:
                        # Invite a mid into the current group.
                        midd = msg.text.replace("Inv ", "")
                        cl.findAndAddContactsByMid(midd)
                        cl.inviteIntoGroup(msg.to, [midd])
                    elif text.lower() == 'kill ban':
                        # Kick every blacklisted member present in this group.
                        if msg.toType == 2:
                            group = cl.getGroup(to)
                            gMembMids = [contact.mid for contact in group.members]
                            matched_list = []
                            for tag in settings["blacklist"]:
                                # (lambda arg renamed from `str` to avoid
                                # shadowing the builtin; behavior unchanged)
                                matched_list += filter(lambda m: m == tag, gMembMids)
                            if matched_list == []:
                                print("1")
                                cl.sendMessage(to, "沒有黑名單")
                                return
                            for jj in matched_list:
                                cl.kickoutFromGroup(to, [jj])
                            cl.sendMessage(to, "黑名單以踢除")
                    elif "/invitemeto:" in msg.text:
                        # Invite the sender into another group by its ID.
                        gid = msg.text.replace("/invitemeto:", "")
                        if gid == "":
                            cl.sendMessage(to, "請輸入群組ID")
                        else:
                            try:
                                # NOTE(review): uses msg.from_ while the rest of
                                # the handler uses msg._from — confirm which
                                # attribute the client library exposes.
                                cl.findAndAddContactsByMid(msg.from_)
                                cl.inviteIntoGroup(gid, [msg.from_])
                            except:
                                cl.sendMessage(to, "我不在那個群組裡")
                    elif msg.text in ["分機5離開全部群組"]:
                        # Leave every joined group.
                        gid = cl.getGroupIdsJoined()
                        for i in gid:
                            cl.leaveGroup(i)
                        cl.sendText(msg.to, "已離開全部群組")
                    elif msg.text in ["Friendlist"]:
                        # List all contacts.
                        anl = cl.getAllContactIds()
                        ap = ""
                        for q in anl:
                            ap += "• " + cl.getContact(q).displayName + "\n"
                        cl.sendMessage(msg.to, "「 朋友列表 」\n" + ap + "人數 : " + str(len(anl)))
                    elif text.lower() == 'speed':
                        # Rough benchmark + round-trip latency probe.
                        time0 = timeit.timeit('"-".join(str(n) for n in range(100))', number=10000)
                        str1 = str(time0)
                        start = time.time()
                        cl.sendMessage(to, '處理速度\n' + str1 + '秒')
                        elapsed_time = time.time() - start
                        cl.sendMessage(to, '指令反應\n' + format(str(elapsed_time)) + '秒')
                    elif text.lower() == 'rebot':
                        cl.sendMessage(to, "重新啟動")
                        restartBot()
                    elif text.lower() == 'runtime':
                        # Uptime since botStart.
                        timeNow = time.time()
                        runtime = timeNow - botStart
                        runtime = format_timespan(runtime)
                        cl.sendMessage(to, "機器運行時間 {}".format(str(runtime)))
                    elif text.lower() == 'about':
                        # Account / bot summary card.
                        try:
                            arr = []
                            owner = "u28d781fa3ba9783fd5144390352b0c24"
                            creator = cl.getContact(owner)
                            contact = cl.getContact(clMID)
                            grouplist = cl.getGroupIdsJoined()
                            contactlist = cl.getAllContactIds()
                            blockedlist = cl.getBlockedContactIds()
                            ret_ = "╔══[ 關於自己 ]"
                            ret_ += "\n╠ 名稱 : {}".format(contact.displayName)
                            ret_ += "\n╠ 群組 : {}".format(str(len(grouplist)))
                            ret_ += "\n╠ 好友 : {}".format(str(len(contactlist)))
                            ret_ += "\n╠ 黑單 : {}".format(str(len(blockedlist)))
                            ret_ += "\n╠══[ 關於機器 ]"
                            ret_ += "\n╠ 版本 : 淫蕩6主機測試版"
                            ret_ += "\n╠ 作者 : {}".format(creator.displayName)
                            ret_ += "\n╚══[ 未經許可禁止重製 ]"
                            cl.sendMessage(to, str(ret_))
                        except Exception as e:
                            cl.sendMessage(msg.to, str(e))
                    elif text.lower() == 'ourl':
                        # Open the group's join URL.
                        if msg.toType == 2:
                            G = cl.getGroup(to)
                            if G.preventedJoinByTicket == False:
                                pass
                            else:
                                G.preventedJoinByTicket = False
                                cl.updateGroup(G)
                    elif text.lower() == 'curl':
                        # Close the group's join URL.
                        if msg.toType == 2:
                            G = cl.getGroup(to)
                            if G.preventedJoinByTicket == True:
                                pass
                            else:
                                G.preventedJoinByTicket = True
                                cl.updateGroup(G)
                    elif text.lower() == 'tagall':
                        # @-mention every member, 100 mentions per message
                        # (mention offsets are 7 chars wide: '@Alin \n').
                        group = cl.getGroup(msg.to)
                        nama = [contact.mid for contact in group.members]
                        k = len(nama) // 100
                        for a in range(k + 1):
                            txt = u''
                            s = 0
                            b = []
                            for i in group.members[a * 100: (a + 1) * 100]:
                                b.append({"S": str(s), "E": str(s + 6), "M": i.mid})
                                s += 7
                                txt += u'@Alin \n'
                            cl.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES': b})}, contentType=0)
                        cl.sendMessage(to, "總共 {} 個成員".format(str(len(nama))))
                    elif text.lower() == 'time':
                        # Localized date/time card (Asia/Makassar).
                        tz = pytz.timezone("Asia/Makassar")
                        timeNow = datetime.now(tz=tz)
                        day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
                        hari = ["星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"]
                        bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
                        hr = timeNow.strftime("%A")
                        bln = timeNow.strftime("%m")
                        for i in range(len(day)):
                            if hr == day[i]:
                                hasil = hari[i]
                        # NOTE(review): %m is zero-padded ("01".."12") while
                        # str(k) is not, so months 1-9 never match here —
                        # pre-existing behavior left intact; confirm intent.
                        for k in range(0, len(bulan)):
                            if bln == str(k):
                                bln = bulan[k - 1]
                        readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\n時間 : [ " + timeNow.strftime('%H:%M:%S') + " ]"
                        cl.sendMessage(msg.to, readTime)
        if op.type == 26:
            # Read-receipt / read-point bookkeeping for received messages.
            msg = op.message
            text = msg.text
            msg_id = msg.id
            receiver = msg.to
            sender = msg._from
            if msg.toType == 0:
                if sender != cl.profile.mid:
                    to = sender
                else:
                    to = receiver
            else:
                to = receiver
            if settings["autoRead"] == True:
                cl.sendChatChecked(to, msg_id)
            if to in read["readPoint"]:
                if sender not in read["ROM"][to]:
                    read["ROM"][to][sender] = True
    except Exception as error:
        logError(error)
ann['count'] = row['question__count_median'] ann['standing'] = row['question__standing'] ann['resting'] = row['question__resting'] ann['moving'] = row['question__moving'] ann['interacting'] = row['question__interacting'] ann['young_present'] = row['question__young_present'] seq_id_to_annotations.setdefault(sequence_id, []).append(ann) annotations.append(ann) # ...for each row in the annotation table elapsed = time.time() - start_time print('Done converting tables to dictionaries in {}'.format( humanfriendly.format_timespan(elapsed))) print('Saving dictionaries to {}'.format(initial_dictionary_cache_file)) cct_cache = [ im_id_to_image, images, seq_id_to_images, seq_id_to_annotations, annotations, categories, species_to_category ] with open(initial_dictionary_cache_file, 'wb') as f: pickle.dump(cct_cache, f, protocol=pickle.HIGHEST_PROTOCOL) #%% Load previously-saved dictionaries when re-starting mid-script if False: #%%
if col_output_distances: # row_distance_matrix = str2affinity_func[row_distance_metric](data) try: col_distance_matrix = str2affinity_func[col_distance_metric](np.transpose(data)) except KeyError: print(col_distance_metric, "is not a valid column distance") log(f"Invalid distance{col_distance_metric}", get_linenumber(), DEBUG) exit(1) log("About to write the col_distance matrix", get_linenumber(), DEBUG) dist_file = open(output_base_name+'_pairwise_column_distances.csv', 'w') dist_file.write('v/>,') dist_file.write(','.join(col_labels)) dist_file.write('\n') i = 0 for row in squareform(col_D): dist_file.write(f'{col_labels[i]},') dist_file.write(','.join([str(s) for s in row])) dist_file.write('\n') i += 1 log("About to make cdt file", get_linenumber(), DEBUG) # Make the cdt file make_cdt(data=new_full_gct, name=output_base_name+'.cdt', atr_companion=atr_companion, gtr_companion=gtr_companion, order_of_columns=order_of_columns, order_of_rows=order_of_rows) end_of_time = timer() spanned = end_of_time - beginning_of_time print("We are done! Wall time elapsed:", humanfriendly.format_timespan(spanned)) print("###", spanned)
async def mysticsquare(self, ctx):
    """Run an interactive Mystic Square (15-puzzle) game in the channel.

    The board is rendered as emoji (via ``self.emojis``) in a single message
    that is edited in place; the invoking user moves tiles by reacting with
    the four arrow emojis and can quit with the cross-mark emoji.
    """

    def generate_map():
        # Build a random 4x4 board. `map` holds tiles 1..15 shuffled; 16 is
        # the blank and is appended last (bottom-right corner).
        map = [i+1 for i in range(15)]
        shuffle(map)
        # Count inversions in the shuffled permutation; with the blank on the
        # last row, an odd inversion count is unsolvable, so swapping any two
        # tiles (here the first two) flips parity and makes it solvable.
        inv_count = 0
        for i in range(15):
            for j in range(i + 1, 15):
                if map[i] > map[j]:
                    inv_count += 1
        map.append(16)
        if inv_count % 2 == 1:
            map[0], map[1] = map[1], map[0]
        # Reshape the flat 16-element list into a 4x4 nested list.
        gamemap = []
        for i in range(4):
            gamemap.append([])
            for n in range(4):
                gamemap[i].append(map[4*i+n])
        return gamemap

    def print_board(gamemap):
        # Render the board: one emoji per tile, one text line per row.
        return "\n".join("".join(self.emojis[p] for p in k) for k in gamemap)

    def get_position(search, gamemap):
        # Return the flat index (0..15, row-major) of `search` on the board.
        # NOTE(review): implicitly returns None when `search` is absent —
        # callers only look up 16 (the blank), which is always present.
        position = 0
        for rows in gamemap:
            for row in rows:
                if row == search:
                    return position
                position += 1

    def match_count(gamemap):
        # Count tiles already in their solved position (1..16 row-major);
        # 15 correct numbered tiles means the puzzle is solved.
        count, pos = 0, 1
        for rows in gamemap:
            for row in rows:
                if row == pos:
                    count += 1
                pos += 1
        return count

    # Intro/help embed describing the rules and controls.
    embed = discord.Embed(title="Mystic Square",
                          description="A game of **Mystic Square** has been started!\n\n**• The objective is to "
                                      "arrange the tiles in increasing order from 1-15 row-wise.\n• You can react "
                                      "with the corresponding emojis to move a tile in the direction shown by that "
                                      "emoji.\n• The tiles can only move to the place which is empty "
                                      "(shown by the white square).\n• You can quit anytime by reacting with "
                                      "the cross mark emoji :x:.**",
                          color=discord.Color.blue())
    embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
    await ctx.channel.send(embed=embed)
    await asyncio.sleep(3)
    gamemap = generate_map()
    moves = 0
    # The board message; the four arrows + cross are the game's controls.
    message = await ctx.channel.send(print_board(gamemap))
    await message.add_reaction("◀")
    await message.add_reaction("▶")
    await message.add_reaction("🔼")
    await message.add_reaction("🔽")
    await message.add_reaction("❌")
    start_time = time()

    async def edit_msg(reaction):
        # Redraw the board and pop the user's reaction so the control can be
        # reused; removal can fail without Manage Messages, hence best-effort.
        await message.edit(content=print_board(gamemap))
        try:
            await message.remove_reaction(reaction.emoji, ctx.author)
        except Exception:
            pass

    def check(reaction, user):
        # wait_for predicate that doubles as the game loop: it mutates
        # `gamemap`/`moves` on every legal move and only returns True when
        # the game ends (solved, or the player reacts with ❌).
        nonlocal gamemap, moves
        if user == ctx.author and reaction.message.id == message.id and str(reaction.emoji) in "◀▶🔼🔽":
            emoji = str(reaction.emoji)
            empty_pos = get_position(16, gamemap)
            # Each arrow slides the neighbouring tile INTO the blank; the
            # `not in` lists exclude board edges where no neighbour exists.
            if emoji == "◀" and empty_pos not in [3, 7, 11, 15]:
                tile_pos = empty_pos + 1
                gamemap[int(tile_pos/4)][tile_pos % 4], gamemap[int(empty_pos/4)][empty_pos % 4] = \
                    gamemap[int(empty_pos/4)][empty_pos % 4], gamemap[int(tile_pos/4)][tile_pos % 4]
            elif emoji == "▶" and empty_pos not in [0, 4, 8, 12]:
                tile_pos = empty_pos - 1
                gamemap[int(tile_pos/4)][tile_pos % 4], gamemap[int(empty_pos/4)][empty_pos % 4] = \
                    gamemap[int(empty_pos/4)][empty_pos % 4], gamemap[int(tile_pos/4)][tile_pos % 4]
            elif emoji == "🔼" and empty_pos not in [12, 13, 14, 15]:
                tile_pos = empty_pos + 4
                gamemap[int(tile_pos / 4)][tile_pos % 4], gamemap[int(empty_pos/4)][empty_pos % 4] = \
                    gamemap[int(empty_pos/4)][empty_pos % 4], gamemap[int(tile_pos / 4)][tile_pos % 4]
            elif emoji == "🔽" and empty_pos not in [0, 1, 2, 3]:
                tile_pos = empty_pos - 4
                gamemap[int(tile_pos / 4)][tile_pos % 4], gamemap[int(empty_pos/4)][empty_pos % 4] = \
                    gamemap[int(empty_pos/4)][empty_pos % 4], gamemap[int(tile_pos / 4)][tile_pos % 4]
            else:
                # Arrow pressed against an edge: illegal move, keep waiting.
                return False
            moves += 1
            # Schedule the redraw; the predicate itself must stay synchronous.
            self.client.loop.create_task(edit_msg(reaction))
            if match_count(gamemap) >= 15:
                return True
        elif user == ctx.author and reaction.message.id == message.id and str(reaction.emoji) == "❌":
            # Player quit.
            return True
        return False

    resp = None
    try:
        # 20-minute limit for the whole game.
        resp = await self.client.wait_for('reaction_add', timeout=1200, check=check)
    except asyncio.TimeoutError:
        await ctx.channel.send(ctx.author.mention+" Hey, you're out of time now! Game over!")
    if resp:
        # Game ended by win or quit (not timeout): report stats.
        end_time = time()
        time_taken = format_timespan(int(end_time - start_time))
        matches = match_count(gamemap)
        stat_text = "**Moves made:** %s\n**Time taken:** %s\n**Percentage completion:** %s" % (str(moves), time_taken,
                                                                                              str(round(matches*100/16, 2)))
        desc_text = "You decided to quit the game."
        if matches >= 15:
            desc_text = "**Congratulations** %s, you have finished the puzzle successfully and **won** the game!" % \
                ctx.author.mention
        embed = discord.Embed(title="Game Over", description=desc_text,
                              color=discord.Color.green() if matches >= 15 else discord.Color.red())
        embed.add_field(name="Stats", value=stat_text, inline=False)
        embed.set_footer(text="Game started by "+str(ctx.author), icon_url=ctx.author.avatar_url)
        await ctx.channel.send(embed=embed)
def seconds_to_str(sec):
    """Convert a number of seconds into a human-readable timespan string."""
    return humanfriendly.format_timespan(sec)
def format_delta(value):
    """Format a duration as human-readable text (e.g. "5 minutes").

    :param value: a duration accepted by ``humanfriendly.format_timespan``
        (a number of seconds), or ``None``.
    :returns: the formatted timespan, or the empty string when *value*
        is ``None``.

    NOTE: the previous docstring claimed this formatted a *datetime*
    ("d Mon YYYY HH:MM P"); the code has always formatted a timespan.
    """
    if value is None:
        return ""
    return humanfriendly.format_timespan(value)