def command_entered(self):
    """Change region boundaries with Blender-like syntax.

    Examples:
    "l-0.5" ==> move start position 0.5 s before
    "r1"    ==> move stop position 1 seconds after
    """
    text = self.command_edit.text()
    try:
        side, delta = utils.parse_command(text)
    except (IndexError, ValueError) as err:
        # unparsable command: report and keep the current region
        print(err)
        return

    start, end = self.region
    if side == 'l':
        # shift the region start by delta seconds (in frames)
        start = int(start + delta * self.params.framerate)
        print('New start: {}'.format(timedelta(seconds=(start / self.params.framerate))))
    elif side == 'r':
        # shift the region end by delta seconds (in frames)
        end = int(end + delta * self.params.framerate)
        print('New end: {}'.format(timedelta(seconds=(end / self.params.framerate))))

    self.set_region((start, end))
    self.command_edit.setText('')
    # feature: restart immediately after command is entered
    self.play()
def test_parse_command():
    """Make sure commands get broken down properly."""
    parsed = utils.parse_command('query:"test" maxnum:10')
    assert parsed['query'] == "test"
    assert parsed['maxnum'] == 10
def init_start():
    """Read the global config file and build today's task list.

    Expects exactly two CLI arguments: school_name and pre_reserve_time.
    Reads the per-school `clssrm_id_and_today_task.conf`, orders the tasks
    (randomly or by their numeric prefix) and parses each one.

    Raises:
        Exception: if the number of CLI arguments does not match.
    """
    splt_ch = '|'  # separator between the numeric order prefix and the task body
    param = {'school_name': '', 'pre_reserve_time': ''}
    # get command-line params
    if len(sys.argv) - 1 != len(param.keys()):
        raise Exception('#param num do not equals to len(param.keys())')
    param['school_name'] = sys.argv[1]
    param['pre_reserve_time'] = sys.argv[2]
    # read today_task list
    file_name = 'clssrm_id_and_today_task.conf'
    file_path = './classroom' + '/' + param['school_name'] + '/' + file_name
    section = 'today_task'
    # BUG FIX: dict.keys() returns a view in Python 3; materialize it as a
    # list so it can be shuffled / sorted in place below.
    task_ls = list(utils.read_conf(file_path, section=section).keys())
    # random or in order
    random_model = False
    if random_model:
        random.shuffle(task_ls)
    else:
        # BUG FIX: lists have .sort(), not .sorted() — the original raised
        # AttributeError here. Sort by the numeric prefix before splt_ch.
        task_ls.sort(key=lambda t: int(t.split(splt_ch, 1)[0]), reverse=False)
    debug_p(task_ls)
    for task in task_ls:
        task_param_d = utils.parse_command(task, type='simplify')
def import_or_export_database(self):
    """Interactive sub-menu to import, export or pretty-print the database as JSON.

    Requires self.db to be initialized; loops until the user chooses quit.
    """
    if self.db is None:
        utils.pause_with_message(
            "Initialize the database first! (option 4)")
        return
    while True:
        utils.clear()
        print("Commands:")
        print(
            "i|import: Import the contents of the database as a JSON string"
        )
        print(
            "e|export: Export the contents of the database as a JSON string"
        )
        print(
            "p|print: Pretty-print the contents of the database for easy inspection"
        )
        print("q|quit: Exit to main menu")
        command = input("Enter your command: ")
        command_name, command_argument = utils.parse_command(command)
        # map the single-letter shortcuts onto the full command names
        command_name_resolution_success, command_name = utils.resolve_aliases(
            command_name, [["import", "i"], ["export", "e"],
                           ["print", "p"], ["quit", "q"]])
        if not command_name_resolution_success:
            utils.pause_with_message("Invalid command!")
            continue
        if command_name == "import":
            # inner loop: retry until a valid JSON string or empty input (cancel)
            while True:
                imported_db_string = input(
                    "Paste the JSON string with the database contents (hit Enter to cancel): "
                )
                if imported_db_string == "":
                    break
                try:
                    imported_db = json.loads(imported_db_string)
                except json.decoder.JSONDecodeError:
                    print(
                        "JSON decode error! Is that a valid JSON string? (Make sure there aren't newlines)"
                    )
                    continue
                self.db._db = imported_db  # write directly to the internal dictionary
                utils.pause_with_message(
                    "Database contents successfully imported!")
                break
        elif command_name == "export":
            utils.pause_with_message(
                "On the following screen, the database contents will be visible; use the Select All function in your terminal emulator to copy them, then press Enter to continue."
            )
            utils.clear()
            # input() keeps the JSON dump on screen until the user presses Enter
            input(json.dumps(self.db._db))
            utils.pause_with_message(
                "Database contents successfully exported!")
        elif command_name == "print":
            utils.pause_with_message(json.dumps(self.db._db, indent=4))
        elif command_name == "quit":
            break
def do_find(self, command):
    """Find papers from the paperdir

    iota> find query:"test" sortfield:year reverse:True maxnum:50
    """
    # parse_command turns the key:value pairs into keyword arguments
    c = parse_command(command)
    # FIX: the original wrapped this in `except TypeError as e: raise e`,
    # which re-raises the very same exception and does nothing else —
    # let it propagate naturally instead.
    sexps = find(self.database, **c)
    self.print_sexp(sexps)
def do_open(self, command):
    """Open a paper from the paperdir given a docid

    iota> open docid:1
    """
    kwargs = parse_command(command)
    try:
        results = show(self.database, **kwargs)
    except TypeError:
        # bad/missing arguments: silently ignore, matching the CLI's style
        return
    self.print_sexp(results)
def do_view(self, command):
    """View a paper from the paperdir given a docid

    iota> view docid:1
    """
    kwargs = parse_command(command)
    try:
        results = view(self.database, **kwargs)
    except TypeError:
        # bad/missing arguments: silently ignore, matching the CLI's style
        return
    self.print_sexp(results)
def process_cmd_comments():
    """Process one queued command comment.

    Pops a (comment, root_comment) pair from QUEUE_COMMENTS, answers help
    requests, validates that a status parameter is present, then forwards a
    task summary to Steem and/or Discord. Always marks the queue item done
    before returning.
    """
    try:
        queue_item = QUEUE_COMMENTS.get_nowait()
    except queue.Empty:
        # nothing queued — return without blocking
        return
    comment: Comment = queue_item[0]
    cmd_str = comment["body"]
    logger.debug(cmd_str)
    parsed_cmd = parse_command(cmd_str)
    if parsed_cmd is None:
        logger.info("No command found in %s", comment["url"])
        QUEUE_COMMENTS.task_done()
        return
    # help request: reply at most once, and never to our own comments
    if parsed_cmd["help"] is not None and comment["author"] != ACCOUNT:
        if not replied_to_comment(comment, ACCOUNT):
            if reply_message(comment, MESSAGES["HELP"], ACCOUNT):
                logger.info("Help message replied to %s", comment["url"])
            else:
                logger.info("Couldn't reply to %s", comment["url"])
        else:
            logger.info("Already replied with help command to %s",
                        comment["url"])
        QUEUE_COMMENTS.task_done()
        return
    # non-help command without a status parameter is incomplete; warn once
    if parsed_cmd["help"] is None and parsed_cmd.get("status") is None:
        if len([x for x in parsed_cmd if parsed_cmd[x] is not None
                ]) > 1 and not replied_to_comment(comment, ACCOUNT):
            if reply_message(comment, MESSAGES["STATUS_MISSING"], ACCOUNT):
                logger.info("Missing status parameter message sent to %s",
                            comment["url"])
            else:
                logger.info("Couldn't reply to %s", comment["url"])
        QUEUE_COMMENTS.task_done()
        return
    root_comment = queue_item[1]
    category = get_category(root_comment, TASKS_PROPERTIES)
    if category is None:
        logger.info("No valid category found. %s", root_comment["url"])
        QUEUE_COMMENTS.task_done()
        return
    if ACCOUNT:
        reply = replied_to_comment(root_comment, ACCOUNT)
        send_summary_to_steem(parsed_cmd, reply, root_comment)
    if DISCORD_WEBHOOK_TASKS:
        content = (
            f'[{parsed_cmd["status"].upper()}] <{build_comment_link(root_comment)}>'
        )
        embeds = [build_discord_tr_embed(root_comment, parsed_cmd)]
        send_message_to_discord(DISCORD_WEBHOOK_TASKS, content, embeds)
    QUEUE_COMMENTS.task_done()
def process(self, messages):
    """Process raw IRC lines received from Twitch chat.

    Handles the login-failure NOTICE, the post-login 376 code (join the
    channel), PING/PONG keepalive, JOIN/PART notifications, and chat
    messages, dispatching '!'-prefixed messages to registered commands.
    """
    for message in messages:
        try:
            parts = message.split(' ')
            # server NOTICE with a failed-login text: close transport and abort
            if parts[1]=='NOTICE' and ('Login authentication failed' in message):
                print('TwitchBot.process: Failed to authenticate')
                self.transport.close()
                raise TwitchBotException('Failed to authenticate')
            # 376 = end of MOTD: request capabilities, then join the channel
            if parts[1]=='376':
                print('TwitchBot.process: received message with code 376. Proceed to join {} channel'.format(self.channel))
                cmd = self.twitch_irc_cap_cmd()
                self.write(cmd)
                cmd = self.irc.join_cmd(self.channel)
                self.write(cmd)
            elif parts[0]=='PING':
                # keepalive: answer the server's PING with a PONG
                cmd = self.irc.pong_cmd(message)
                self.write(cmd)
            elif parts[1]=='JOIN':
                self.joined(extract_username(parts[0]))
            elif parts[1]=='PART':
                self.parted(extract_username(parts[0]))
            elif 'PRIVMSG' in message:
                username = extract_username(parts[1])
                message = extract_message(parts)
                self.events.message_received(TwitchMessage(username,message))
                # a leading '!' marks a bot command
                if message[0]=='!':
                    cmd, args = parse_command(message)
                    print('TwitchBot.process: find command {}'.format(cmd))
                    print('TwitchBot.process: args = {}'.format(args))
                    commands = list(filter(lambda x: x.cmd==cmd, self.commands))
                    if len(commands)>0:
                        # dispatch to the first matching registered command
                        command = commands[0]
                        print('TwitchBot.process: execute command {}'.format(cmd))
                        try:
                            output = command.execute(self, username, args)
                            print('TwitchBot.process: command output = {}'.format(output))
                            self.write(self.irc.sendmsg_cmd(self.channel, output))
                        except Exception as e:
                            print('TwitchBot.process: Error while executing command {}'.format(cmd))
                            print('TwitchBot.process: Exception: {}'.format(e))
                    else:
                        print('TwitchBot.process: command {} does not exist'.format(cmd))
        except IndexError as e:
            # short/malformed lines land here (e.g. parts[1] missing)
            print('TwitchBot.process: IndexError: {}'.format(e))
        except ValueError as e:
            print('TwitchBot.process: ValueError: {}'.format(e))
def main():
    """Main loop: drain QUEUE_COMMENTS and forward each parsed command to Discord.

    NOTE(review): the queue.Empty branch `continue`s immediately, so this
    loop busy-waits at full CPU while the queue is empty — consider a
    blocking get() or a short sleep; confirm against the producer side.
    """
    while True:
        try:
            queue_item = QUEUE_COMMENTS.get_nowait()
        except queue.Empty:
            continue
        comment: Comment = queue_item[0]
        cmd_str = comment["body"]
        LOGGER.debug(cmd_str)
        parsed_cmd = parse_command(cmd_str)
        if parsed_cmd is None:
            LOGGER.info("No command found")
            QUEUE_COMMENTS.task_done()
            continue
        elif parsed_cmd["help"] is not None and comment["author"] != ACCOUNT:
            # help request: reply at most once per comment
            replied = False
            for reply in comment.get_replies():
                if reply["author"] == ACCOUNT:
                    LOGGER.info("Already replied with help command. %s",
                                comment["url"])
                    replied = True
                    break
            if not replied:
                send_help_message(comment, ACCOUNT)
            QUEUE_COMMENTS.task_done()
            continue
        if parsed_cmd.get("status") is None:
            # a command with parameters but no status is incomplete
            if len([x for x in parsed_cmd if parsed_cmd[x] is not None]) > 1:
                send_missing_status_message(comment, ACCOUNT)
            QUEUE_COMMENTS.task_done()
            continue
        root_comment = queue_item[1]
        category = get_category(root_comment, TASKS_PROPERTIES)
        if category is None:
            LOGGER.info("No valid category found. %s", root_comment["url"])
            QUEUE_COMMENTS.task_done()
            continue
        category = TASKS_PROPERTIES[category]["category"]
        webhook = DiscordWebhook(
            url=DISCORD_WEBHOOK_TASKS,
            content=f'[{category.upper()}][{parsed_cmd["status"].upper()}] <{build_comment_link(root_comment)}>',
        )
        webhook.add_embed(build_discord_tr_embed(root_comment, parsed_cmd))
        webhook.execute()
        QUEUE_COMMENTS.task_done()
def parse(cls, s):
    """Parse a network envelope (magic, command, length, checksum, payload)
    from the byte stream *s* and return a new instance.

    Raises:
        ValueError: if the magic bytes do not match NETWORK_MAGIC.
        RuntimeError: if the checksum or payload length is wrong.
    """
    magic = consume_stream(s, 4)
    if magic != NETWORK_MAGIC:
        raise ValueError('magic is not right')
    command = parse_command(consume_stream(s, 12))
    payload_length = little_endian_to_int(consume_stream(s, 4))
    checksum = consume_stream(s, 4)
    payload = consume_stream(s, payload_length)
    # first 4 bytes of the double-SHA256 must match the header checksum
    calculated_checksum = double_sha256(payload)[:4]
    if calculated_checksum != checksum:
        raise RuntimeError('checksum does not match')
    if payload_length != len(payload):
        # BUG FIX: the original message lacked the f-prefix, so the
        # {placeholders} were emitted literally instead of interpolated.
        raise RuntimeError(
            f"Tried to read {payload_length} bytes, only received {len(payload)} bytes"
        )
    return cls(command, payload)
def on_request(update: Update, context: CallbackContext):
    """Handle an incoming message: parse it as a reminder command and store it."""
    message = update.message
    text = message.text
    log.debug(f'Command: {text!r}')

    finish_time = parse_command(text)
    if not finish_time:
        # could not extract a target time from the message
        message.reply_text('Не получилось разобрать команду!')
        return

    Reminder.create(
        message_id=message.message_id,
        command=text,
        finish_time=finish_time,
        user=User.get_from(update.effective_user),
        chat=Chat.get_from(update.effective_chat),
    )
    message.reply_text(f'Напоминание установлено на {get_pretty_datetime(finish_time)}')
# NOTE(review): this loop looks like the tail of a function whose `def`
# precedes this chunk (work_id is not defined at module level); the body is
# indented one level on that assumption — confirm against the full file.
    while True:
        time.sleep(3)  # poll the conversion job status every 3 seconds
        res_status, status = _get_status_convert_file(work_id)
        if status == 'completed':
            uri_to_downloas_file = res_status.json()['output'][0]['uri']
            save_from_url(uri_to_downloas_file)
            break
        elif status == 'incomplete':
            print('missing information to run a job')
            break
        elif status == 'failed':
            print(res_status.json()['status']['info'])
            break


if __name__ == "__main__":
    # no CLI arguments: print the usage text
    if len(sys.argv) == 1:
        print(DOCSTRING)
    else:
        data_settings = parse_command()
        # resolve the input file: explicit -path, lookup by -name, else argv[1]
        if data_settings.get('-path'):
            working_file_path = data_settings['-path']
        elif data_settings.get('-name'):
            working_file_path = get_path(data_settings['-name'])
        else:
            working_file_path = sys.argv[1]
        # conversion target and category, with defaults
        working_target = data_settings.get('-t', 'mobi')
        working_category = data_settings.get('-cat', 'ebook')
        print(working_file_path, working_target, working_category)
        main(working_file_path, working_target, working_category)
if title is not None: plt.title(title, size=24) #fig.suptitle(title, size=24) # カラーバーを付ける t2c.colorbar(fig) # ファイルへの書き出し plt.savefig(output_filename, dpi=300, bbox_inches='tight') plt.close() if __name__ == '__main__': # オプションの読み込み args = parse_command(sys.argv, opt_time=False, opt_wind=True, opt_trange=True) # 作図する領域 area = args.sta # 出力ディレクトリ名 output_dir = args.output_dir # 矢羽を描くかどうか opt_barbs = args.addwind # 気温を描く範囲 try: tr = args.temprange.split(',') tmin = float(tr[0]) tmax = float(tr[1]) try: tstep = float(tr[2]) except IndexError:
import csv

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim

# let cuDNN benchmark and cache the fastest convolution algorithms
cudnn.benchmark = True

from models import ResNet
from metrics import AverageMeter, Result
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo, StaticSampling, ProjectiveSampling, NearestSampling
import criteria
import utils

# parse command-line arguments once at import time
args = utils.parse_command()
print(args)

# metric columns written to the train/test CSV logs
fieldnames = [
    'mse', 'rmse', 'absrel', 'lg10', 'mae', 'delta1', 'delta2', 'delta3',
    'data_time', 'gpu_time'
]
# global best result, initialized to the worst possible values
best_result = Result()
best_result.set_to_worst()


def create_data_loaders(args):
    # Data loading code
    print("=> creating data loaders ...")
    # NOTE(review): `os` is used here but not imported in this chunk —
    # presumably imported earlier in the file; confirm.
    traindir = os.path.join('data', args.data, 'train')
    valdir = os.path.join('data', args.data, 'val')
# NOTE(review): this draw(...) call appears to be the tail of a function
# defined before this chunk; indented one level on that assumption.
    draw(lons,
         lats,
         prep,
         u,
         v,
         output_filename,
         title=tinfo,
         area=area,
         opt_pref=True,
         opt_markerlabel=opt_markerlabel,
         opt_mapcolor=True)


if __name__ == '__main__':
    # parse command-line options
    args = parse_command(sys.argv, opt_lab=True)
    # start / end times
    time_sta = pd.to_datetime(args.time_sta)
    time_end = pd.to_datetime(args.time_end)
    # area to plot
    area = args.sta
    # output directory name
    output_dir = args.output_dir
    # whether to label precipitation amounts with numbers
    opt_markerlabel = args.mlabel
    # create the output directory
    os_mkdir(output_dir)
    # time interval of the data
    time_step = timedelta(hours=1)
    #time_step = timedelta(hours=3)
# NOTE(review): the keyword arguments below close a draw(...) call opened
# before this chunk; indentation assumes the call's continuation level.
         opt_mapcolor=True,
         opt_pref=True,
         opt_markerlabel=opt_markerlabel,
         opt_barbs=opt_barbs,
         barb_increments=barb_increments,
         tmin=tmin,
         tmax=tmax,
         tstep=tstep,
         title=tinfo,
         area=area)


if __name__ == '__main__':
    # parse command-line options
    args = parse_command(sys.argv, opt_wind=True, opt_trange=True, opt_lab=True)
    # start / end times
    time_sta = pd.to_datetime(args.time_sta)
    time_end = pd.to_datetime(args.time_end)
    # area to plot
    area = args.sta
    # output directory name
    output_dir = args.output_dir
    # whether to draw wind barbs
    opt_barbs = args.addwind
    # whether to label temperatures with numbers
    opt_markerlabel = args.mlabel
    # temperature range to draw
    try:
        tr = args.temprange.split(',')
# NOTE(review): the lines below are the tail of a data-reading function
# defined before this chunk; the `raise` closes an if-branch opened earlier.
        raise Exception('sta is needed')
    if tinfof is not None:
        # input file name
        input_filename = tinfof + ".csv"
        # fetch the data
        lons, lats, temp, u, v, prep = read_data(input_filename, sta=sta)
        print(lons.shape, lats.shape, temp.shape, u.shape, v.shape, prep.shape)
        # NOTE(review): the collapsed source is ambiguous about whether this
        # return was active or commented out — reconstructed as active since
        # the else-branch returns NaN placeholders; confirm with the repo.
        #
        return lons, lats, temp, u, v, prep
    else:
        return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan


if __name__ == '__main__':
    # parse command-line options
    args = parse_command(sys.argv, opt_cum=True, opt_temp=True)
    # start / end times
    time_sta = pd.to_datetime(args.time_sta)
    time_end = pd.to_datetime(args.time_end)
    # AMeDAS station name
    sta = args.sta
    # output directory name
    output_dir = args.output_dir
    # whether to accumulate precipitation
    opt_cumrain = args.cumrain
    # whether to add a temperature line plot
    opt_addtemp = args.addtemp
    # create the output directory
    os_mkdir(output_dir)
import time
import csv

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim

# let cuDNN benchmark and cache the fastest convolution algorithms
cudnn.benchmark = True

from models import ResNet
from metrics import AverageMeter, Result
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo
import criteria
import utils

# pull command-line arguments into the program
args = utils.parse_command()
print(args)

# metric columns written to the train/test CSV logs
fieldnames = [
    'mse', 'rmse', 'absrel', 'lg10', 'mae', 'delta1', 'delta2', 'delta3',
    'data_time', 'gpu_time'
]
# global best result, initialized to the worst possible values
best_result = Result()
best_result.set_to_worst()


# main entry point
def main():
    global args, best_result, output_directory, train_csv, test_csv
    # create results folder, if not already exists