def onecmd(self, line):
    if not line:
        return
    if line == 'EOF':
        line = 'quit'
    try:
        s_line = shlex.split(line)
    except:
        self._help()
        return
    opts = self.cli.optparser.parse_main_args(s_line)
    if opts.command in self.MAPPING:
        getattr(self, '_' + self.MAPPING[opts.command])(s_line[1::])
    else:
        cmd_cls = self.cli.cli_commands.get(opts.command)
        if cmd_cls is not None:
            cmd = cmd_cls(self)
            try:
                opts = self.cli.optparser.parse_command_args(cmd, s_line)
                cmd.cli = self.cli
                cmd.cli.demands = copy.deepcopy(self.cli.demands)
                cmd.configure()
                cmd.run()
            except:
                pass
        else:
            self._help()
def run_m2_grid_search(filepath, nrm_articles=False):
    log.info("running CoNLL grid searching on {0}.pred and {0}.cnfs".format(filepath))
    if result_is_ready('{}.params'.format(filepath)):
        return cmd.run('cat {}.params'.format(filepath))
    assert_file_exists(filepath + '.in')
    assert_file_exists(filepath + '.cnfs')
    assert_file_exists(filepath + '.pred')
    assert_file_exists(filepath + '.m2')
    if nrm_articles:
        options = ' --restore-articles'
    else:
        options = ''
    output = cmd.run("{root}/tune_m2.py {opts} -c {cs} -f {frm} "
                     "-g {fp}.eval.m2.grid -w {fp}.m2gs --m2 {fp}.m2 "
                     "{fp}.in {fp}.cnfs {fp}.pred"
                     .format(root=config.ROOT_DIR, cs=CONFUSION_SET, frm=FORMAT,
                             opts=options, fp=filepath))
    thr, dif = output.split("\t")[:2]
    log.info("M^2 grid search found tuning options: t={} d={}".format(thr, dif))
    opts = " -t {} -d {}".format(thr, dif)
    cmd.run("echo '{}' > {}.params".format(opts, filepath))
    return opts
def run_command(self):
    for cmd in filter(lambda x: not x.ready(), self._commands):
        try:
            cmd.run()
        except:
            print 'Error running command {}'.format(cmd)
            cmd.fail()
def onecmd(self, line):
    if not line or line == '\n':
        return
    if line == 'EOF':
        line = 'quit'
    try:
        s_line = shlex.split(line)
    except:
        self._help()
        return
    opts = self.cli.optparser.parse_main_args(s_line)
    # Disable shell recursion.
    if opts.command == 'shell':
        return
    if opts.command in self.MAPPING:
        getattr(self, '_' + self.MAPPING[opts.command])(s_line[1::])
    else:
        cmd_cls = self.cli.cli_commands.get(opts.command)
        if cmd_cls is not None:
            cmd = cmd_cls(self.cli)
            try:
                opts = self.cli.optparser.parse_command_args(cmd, s_line)
                cmd.cli.demands = copy.deepcopy(self.cli.demands)
                cmd.configure()
                cmd.run()
            except dnf.exceptions.Error as e:
                logger.error(_("Error:") + " " + e.value)
            except:
                return
        else:
            self._help()
def run_command(self):
    for cmd in filter(lambda x: not x.ready(), self._commands):
        try:
            cmd.run()
        except:
            print "Error running command {}".format(cmd)
            cmd.fail()
def do_ssh(self, wait=const.WAIT_START):
    if self.state < const.VMS_RUNNING:
        self.do_up()
    self._check_state()
    self.ensure_ssh()
    host = self.get_ssh_host()
    cmd_seq = "ssh '%s'" % host
    cmd.run(cmd_seq, stdout=True, stderr=True)
def evaluate_m2(filepath):
    log.info("evaluating on M^2 file: {}.m2".format(filepath))
    assert_file_exists(filepath + '.out')
    assert_file_exists(filepath + '.m2')
    cmd.run("{root}/eval_m2.py {fp}.out {fp}.m2 >> {fp}.eval"
            .format(root=config.ROOT_DIR, fp=filepath))
def make_m2_parallel(filepath):
    log.debug("making parallel files from M2 file: {}.m2".format(filepath))
    cmd.run("cat {fp}.m2 | perl {root}/make_parallel.perl > {fp}.txt"
            .format(root=config.SCRIPTS_DIR, fp=filepath))
    cmd.source_side_of_file(filepath + '.txt', filepath + '.in')
    assert_file_exists(filepath + '.txt')
def evaluate_predictions(options, filepath):
    log.info("evaluating predictions {0}.pred on {0}.cnfs".format(filepath))
    assert_file_exists(filepath + '.cnfs')
    assert_file_exists(filepath + '.pred')
    cmd.run("{root}/eval_preds.py -c {cs} -f {frm} {opts} {fp}.cnfs {fp}.pred >> {fp}.eval"
            .format(root=config.ROOT_DIR, cs=CONFUSION_SET, frm=FORMAT,
                    opts=options, fp=filepath))
def default(self, line):
    """
    Called if a command is not recognised.
    Try importing command or display a random response from Chimpbot.
    """
    mmodule = importlib.import_module('ape.commands.' + line.split(' ', 1)[0])
    cclass = getattr(mmodule, 'Command')
    cmd = cclass()
    cmd.run(line)
def download_latest_image():
    # Remote
    latest_url = get_latest_image_url()
    # Local
    filename = os.path.basename(latest_url)
    cached_image = os.path.join(util.get_image_cache_dir(), filename)
    # Download
    cmd.run("wget -c -O %s %s" % (cached_image, latest_url))
def extract_features(filepath, options=''):
    log.info("extracting features for file {}.cnfs.empty".format(filepath))
    if result_is_ready('{}.cnfs'.format(filepath)):
        return
    assert_file_exists(filepath + '.cnfs.empty')
    cmd.run("{root}/extract_feats.py {opts} {fp}.in {fp}.cnfs.empty > {fp}.cnfs"
            .format(root=config.ROOT_DIR, opts=options, fp=filepath))
def run(self, algorithm, model_file, data_file, pred_file, options=''):
    log.info("running model: {}".format(model_file))
    if options is None or not options.strip():
        options = self.__default_options(algorithm)
    if not os.path.exists(model_file):
        log.error("model file does not exist: {}".format(model_file))
    if not os.path.exists(data_file):
        log.error("data file does not exist: {}".format(data_file))
    if 'snow' == algorithm:
        cmd.run("{bin} -test -I {data} -F {model} -v max -R {pred} {opts}"
                .format(bin=config.CLASSIFIERS.SNOW_BIN, data=data_file,
                        model=model_file, pred=pred_file, opts=options))
    elif 'vw' == algorithm:
        reduced_options = re.sub(r'--(oaa|ect|wap)\s+\d+ ', '', options)
        cmd.run("{bin} -t -d {data} -i {model} -c -r {pred} {opts}"
                .format(bin=config.CLASSIFIERS.VW_BIN, data=data_file,
                        model=model_file, pred=pred_file, opts=reduced_options))
    elif 'vwldf' == algorithm:
        reduced_options = re.sub(r'--csoaa_ldf\s+mc?', '', options)
        cmd.run("{bin} -t -d {data} -i {model} -c -r {pred} {opts}"
                .format(bin=config.CLASSIFIERS.VW_BIN, data=data_file,
                        model=model_file, pred=pred_file, opts=reduced_options))
    elif 'liblinear' == algorithm:
        cmd.run("{dir}/predict -b 1 {data} {model} {pred}"
                .format(dir=config.CLASSIFIERS.LIBLINEAR_DIR, data=data_file,
                        model=model_file, pred=pred_file))
    elif 'maxent' == algorithm:
        cmd.run("{bin} -testFile {data} -loadClassifier {model} {opts} > {pred}"
                .format(bin=config.CLASSIFIERS.MAXENT_BIN, data=data_file,
                        model=model_file, pred=pred_file, opts=options))
    elif 'majority' == algorithm:
        MajorityClassClassifier(self.confusion_set) \
            .predict(model_file, data_file, pred_file)
    elif 'perfect' == algorithm:
        PerfectClassifier(self.confusion_set) \
            .predict(model_file, data_file, pred_file)
    else:
        log.error("unsupported algorithm: {}".format(algorithm))
def vectorize_features(options, filepath):
    log.info("vectorizing features from file {}.cnfs".format(filepath))
    if result_is_ready('{}.data'.format(filepath)):
        return
    assert_file_exists(filepath + '.cnfs')
    cmd.run("{root}/vectorize_feats.py -c {cs} -f {frm} {opts} {fp}.cnfs {fp}.data"
            .format(root=config.ROOT_DIR, cs=CONFUSION_SET, frm=FORMAT,
                    opts=options, fp=filepath))
def train_classifier(model, options, filepath):
    log.info("training {} model from file {}.data".format(ALGORITHM, filepath))
    if result_is_ready(model):
        return
    assert_file_exists(filepath + '.data')
    cmd.run("{root}/run_classifier.py -t -a {alg} -c {cs} -o ' {opts}' {model} {fp}.data"
            .format(root=config.ROOT_DIR, cs=CONFUSION_SET, alg=ALGORITHM,
                    opts=options, model=model, fp=filepath))
def split_m2_data(m2_file, filepath, num_of_parts):
    if result_is_ready('{}.00.txt'.format(filepath)):
        return
    cmd.run("python {root}/split_m2.py -n {n} -p {fp}. -s .m2 {m2}"
            .format(root=config.SCRIPTS_DIR, n=num_of_parts, fp=filepath, m2=m2_file))
    log.info("preparing text data from M2 files")
    for part in format_parts(num_of_parts):
        cmd.run("cat {fp}.{p}.m2 | perl {root}/make_parallel.perl > {fp}.{p}.txt"
                .format(root=config.SCRIPTS_DIR, fp=filepath, p=part))
def run_m2_grid_search(conf_set, format, text_file, cnfs_file, pred_file, m2_file,
                       grid_file=None, work_dir=None, steps=(10, 1), deep=True,
                       restore_articles=False):
    if not work_dir:
        work_dir = str(os.getpid())
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    cmd.run("cat {} | grep '^S ' | cut -c3- > {}/m2.txt".format(m2_file, work_dir))
    err_file = cmd.cut(work_dir + '/m2.txt', work_dir + '/m2.err')
    preds = parse_pred_file(pred_file, format, conf_set)
    minmax_params = find_minmax_params(preds)
    generator = grid_search_generator(minmax_params, steps, grid_file, deep)
    while True:
        thrdif = generator.next()
        if not thrdif or len(thrdif) == 3:
            break
        thr, dif = thrdif
        out_file = os.path.join(work_dir, "output.{0:.4f}-{1:.4f}.txt".format(thr, dif))
        inject_predictions(conf_set, format, text_file, cnfs_file, pred_file,
                           thr, dif, out_file, restore_articles)
        cmd.wdiff(err_file, out_file)
        prec, rec, fscore = evaluate_m2(out_file, m2_file)
        generator.send((prec, rec, fscore))
    while deep:
        thrdif = generator.next()
        if not thrdif or len(thrdif) == 3:
            break
        thr, dif = thrdif
        out_file = os.path.join(work_dir, "output.{0:.4f}-{1:.4f}.txt".format(thr, dif))
        inject_predictions(conf_set, format, text_file, cnfs_file, pred_file,
                           thr, dif, out_file, restore_articles)
        cmd.wdiff(err_file, out_file)
        prec, rec, fscore = evaluate_m2(out_file, m2_file)
        generator.send((prec, rec, fscore))
    return generator.next()
def train_nulls(filepath):
    log.info("training <null> positions from file: {}.txt".format(filepath))
    if result_is_ready('{}.ngrams.tok'.format(filepath)) \
            and result_is_ready('{}.ngrams.pos'.format(filepath)) \
            and result_is_ready('{}.ngrams.awc'.format(filepath)):
        return
    assert_file_exists(filepath + '.txt')
    cmd.run("{root}/train_nulls.py -c {cs} -l tok,pos,awc -n {fp}.ngrams {fp}.txt"
            .format(root=config.ROOT_DIR, cs=CONFUSION_SET, fp=filepath))
def split_txt_data(txt_file, filepath, num_of_parts):
    if result_is_ready('{}.00.txt'.format(filepath)):
        return
    num_of_sents = cmd.wc(txt_file)
    log.info("total number of lines: {}".format(num_of_sents))
    part_size = int(math.ceil(num_of_sents / float(num_of_parts)))
    log.info("number of lines per part: {}".format(part_size))
    cmd.run("split --lines {size} -d --additional-suffix .txt {txt} {fp}."
            .format(txt=txt_file, size=part_size, fp=filepath))
def prepare_more_data(cset, work_dir, args):
    log.info("changing error rate in training data")
    cmd.run("python {root}/../scripts/change_txt.py --shuffle"
            " -c {cset} -e {rate} {input} | head -n {limit}"
            " > {dir}/more_data.txt 2> {dir}/more_data.stderr"
            .format(root=config.ROOT_DIR, input=args.more_data, dir=work_dir,
                    cset=cset, rate=args.error_rate, limit=MORE_DATA_LIMIT))
    if not args.debug:
        os.remove(work_dir + '/more_data.stderr')
def run_geccla(release_dir, eval_files, m2=False):
    if result_is_ready('{}/output.eval'.format(release_dir)):
        return
    m2_opt = '--m2' if m2 else ''
    command = "python {root}/../bin/run_geccla.py" \
              " --work-dir {rel} --model {rel}/release.model" \
              " --run {eval} --eval {m2}" \
              " > {rel}/output.eval 2>&1" \
              .format(root=config.ROOT_DIR, rel=release_dir, eval=eval_files, m2=m2_opt)
    cmd.run(command)
def train_geccla(release_dir, train_file, algorithm, confset, options):
    if result_is_ready('{}/output.train'.format(release_dir)):
        return
    os.makedirs(release_dir)
    command = "python {root}/../bin/run_geccla.py {opts}" \
              " --work-dir {rel}" \
              " --confusion-set {cs} --algorithm {alg} --model {rel}/release.model" \
              " --train {train} " \
              " > {rel}/output.train 2>&1" \
              .format(root=config.ROOT_DIR, alg=algorithm, cs=confset,
                      train=train_file, rel=release_dir, opts=options)
    cmd.run(command)
def main():
    args = parse_user_arguments()

    # Create working directory
    if not os.path.exists(args.work_dir):
        os.makedirs(args.work_dir)
    log.info("working directory: {}".format(args.work_dir))

    # Change error rate in M2 file
    if args.m2 and args.ann_rate:
        log.info("CHANGING ERROR RATE IN M2 DATA FILE")
        annrate_data = os.path.join(args.work_dir,
                                    'data.annrate-{}.m2'.format(args.ann_rate))
        cmd.run("{root}/change_annorate_m2.py -e {er} {data} > {erdata}"
                .format(root=config.SCRIPTS_DIR, er=args.ann_rate,
                        data=args.data, erdata=annrate_data))
        args.data = annrate_data

    # Prepare data parts for cross validation
    log.info("SPLITTING DATA FILES INTO {} PARTS".format(args.parts))
    cross_filebase = os.path.join(args.work_dir, 'cross')
    split_data = split_m2_data if args.m2 else split_txt_data
    split_data(args.data, cross_filebase, args.parts)

    # Train cross validation parts and find threshold parameters
    jobs = []
    for part in format_parts(args.parts):
        jobs.append(delayed(run_cross_validation)(part, cross_filebase, args))

    # Train on all data
    train_file = os.path.join(args.work_dir, 'train.txt')
    release_dir = os.path.join(args.work_dir, 'release')
    jobs.append(delayed(run_release)(train_file, release_dir, args))

    Parallel(n_jobs=args.jobs, verbose=PARALLEL_VERBOSE)(jobs)

    # Find average (tuned) threshold parameter
    log.info("AVERAGING PARAMS")
    param_sets = collect_evaluation_params(cross_filebase, args.parts)
    evl_opts = average_param_sets(param_sets)

    log.info("Updating evaluation options")
    cmd.run("sed -ir 's/evl=.*/evl={}/' {}/release.model.settings"
            .format(evl_opts, release_dir))

    # Evaluate with tuned threshold parameter
    eval_files = ' '.join(args.eval)
    log.info("EVALUATING ON FILES: {}".format(eval_files))
    run_geccla(release_dir, eval_files, args.m2)
def eval_cross(crosspath, eval_files, m2):
    if result_is_ready('{}/output.eval'.format(crosspath)):
        return
    ext = 'm2' if m2 else 'txt'
    m2_opt = '--m2' if m2 else ''
    command = "python {root}/../bin/run_geccla.py" \
              " --work-dir {cv} --model {cv}/cross.model" \
              " --run {eval} --eval {m2}" \
              " > {cv}/output.eval 2>&1" \
              .format(root=config.ROOT_DIR, cv=crosspath, ext=ext,
                      eval=eval_files, m2=m2_opt)
    cmd.run(command)
def build(args):
    context = {
        'dbsnp_build': args.dbsnp_build,
        'genome_build': args.genome_build,
        'prefix': args.prefix,
        'target': args.target,
    }
    context['db_name'] = '{prefix}_{dbsnp_build}_{genome_build}'.format(**context)
    context['db_user'] = '******'
    log.info(colored(pformat(context), 'blue'))

    with cd(DBSNP_HOME):
        force('createuser {db_user}'.format(**context))
        force('createdb --owner={db_user} {db_name}'.format(**context))

    target = [DBSNP_HOME] if 'core' in args.target else []
    target += [os.path.join(DBSNP_HOME, 'contrib', x) for x in set(args.target) if x != 'core']

    for src in target:
        with cd(src):
            run('pwd')
            if glob.glob('02_drop_create_table.*'):
                context.update(src=src)
                run('./01_fetch_data.sh -d {dbsnp_build} -r {genome_build} {src}/data'.format(**context))
                run('./02_drop_create_table.sh {db_name} {db_user} {src}'.format(**context))
                run('./03_import_data.sh {db_name} {db_user} {src} {src}/data'.format(**context))

    log.info('Done')
    log.info('To connect via psql, run:')
    log.info('')
    log.info(colored('$ psql {db_name} -U {db_user}'.format(**context), 'blue', attrs=['bold']))
    log.info('')
def on_push(data, listener):
    logger.debug("Received data: \n{}".format(data))
    ts = time.time() - 5
    status, pushes = listener.pb.get_pushes(modified_after=ts)
    if status is True:
        logger.debug("Received pushes: {}".format(json.dumps(pushes)))
        outputs = [push for push in pushes if push.has_key("title")]
        command = [push for push in pushes if not push.has_key("title")]
        if len(outputs) > 0 and len(command) > 0:
            # Avoid a push loop: this wait time should exceed the look-back
            # window used above.
            logger.debug("Have output type push, will continue")
            time.sleep(3)
            return
        if not command:
            logger.debug("No command, will return")
            return
        command = command[0]["body"]
        try:
            output, error, status = cmd.run(command=command, timeout=5)
            listener.device.push_note("Output", output)
            logger.info("Completed run(%s)" % command)
        except cmd.Timeout as e:
            raspberry.push_note("Output", "command(%s) timeout(5s)." % command)
            print("Command timeout.")
            logger.info("command(%s) timeout" % command)
    else:
        logger.info("Did not get pushes.")
def prepare_eval(cset, work_dir, eval_inits):
    log.info("selecting errors by confusion set: {}".format(cset))
    eval_files = []
    for base, files in eval_inits.iteritems():
        eval_file = work_dir + '/' + base + '.m2'
        cmd.run("python {root}/../scripts/format_m2.py"
                " -c {cset} -t cset --greedy --no-spaces {files}"
                " > {eval}"
                .format(root=config.ROOT_DIR, cset=cset,
                        files=' '.join(files), eval=eval_file))
        eval_files.append(eval_file)
    return eval_files
def train(self, algorithm, model_file, data_file, options=''):
    log.info("training model: {}".format(model_file))
    if not options or not options.strip():
        options = self.__default_options(algorithm)
    if not os.path.exists(data_file):
        log.error("data file does not exist: {}".format(data_file))
    if 'snow' == algorithm:
        cmd.run("{bin} -train -I {data} -F {model} {opts}"
                .format(bin=config.CLASSIFIERS.SNOW_BIN, data=data_file,
                        model=model_file, opts=options))
    elif 'vw' == algorithm:
        cmd.run("{bin} -d {data} -f {model} -c {opts}"
                .format(bin=config.CLASSIFIERS.VW_BIN, data=data_file,
                        model=model_file, opts=options))
    elif 'vwldf' == algorithm:
        cmd.run("{bin} -d {data} -f {model} -c {opts}"
                .format(bin=config.CLASSIFIERS.VW_BIN, data=data_file,
                        model=model_file, opts=options))
    elif 'liblinear' == algorithm:
        cmd.run("{bin}/train {opts} {data} {model}"
                .format(bin=config.CLASSIFIERS.LIBLINEAR_DIR, data=data_file,
                        model=model_file, opts=options))
    elif 'maxent' == algorithm:
        cmd.run("{bin} -trainFile {data} -serializeTo {model} -prop {data}.prop {opts}"
                .format(bin=config.CLASSIFIERS.MAXENT_BIN, data=data_file,
                        model=model_file, opts=options))
    elif 'majority' == algorithm:
        MajorityClassClassifier(self.confusion_set).train(model_file, data_file)
    elif 'perfect' == algorithm:
        PerfectClassifier(self.confusion_set).train(model_file, data_file)
    else:
        log.error("unsupported algorithm: {}".format(algorithm))
def create_train_files(filepath, part, num_of_parts, more_data=None, shuffle=None):
    if result_is_ready('{}.{}.train.txt'.format(filepath, part)):
        return
    shuf = '| shuf' if shuffle else ''
    if more_data:
        cmd.run("cat {} {} >> {}.{}.train.txt".format(more_data, shuf, filepath, part))
    train_files = ' '.join(["{}.{}.txt".format(filepath, p)
                            for p in format_parts(num_of_parts) if p != part])
    cmd.run("cat {} {} >> {}.{}.train.txt".format(train_files, shuf, filepath, part))
def doChatbox(self, event):
    if not common.cb.hide and event.key == 27:
        common.cb.hide = True
        common.cb.txt = ""
    elif not common.cb.hide:
        ret = common.cb.event(event)
        if ret:
            if common.debug:
                print "ISSUED COMMAND:", ret
            common.cb.items.insert(0, ret)
            common.cb.hide = True
            # Clear out notifications
            if ret == "/clean" or ret == "/clear" or ret == "/cls":
                cmd.posts = []
                return
            # Run command
            cmd.run(ret)
    elif event.key == 32 or event.key == 116 or event.key == 47:
        # either press SPACE or t or /
        common.cb.hide = False
def run_grid_search(filepath):
    log.info("running grid searching on {0}.pred and {0}.cnfs".format(filepath))
    if result_is_ready('{}.params'.format(filepath)):
        return cmd.run('cat {}.params'.format(filepath))
    assert_file_exists(filepath + '.cnfs')
    assert_file_exists(filepath + '.pred')
    output = cmd.run("{root}/tune_preds.py -c {cs} -f {frm} -g {fp}.eval.grid {fp}.cnfs {fp}.pred"
                     .format(root=config.ROOT_DIR, cs=CONFUSION_SET, frm=FORMAT, fp=filepath))
    thr, dif = output.split("\t")[:2]
    log.info("grid search found tuning options: t={} d={}".format(thr, dif))
    opts = " -t {} -d {}".format(thr, dif)
    cmd.run("echo '{}' > {}.params".format(opts, filepath))
    return opts
def ping(host, count=1, timeout=1, interval=0.2):
    result = cmd.run("ping " + host + " -c " + str(count) + " -i " + str(interval) +
                     " -W " + str(timeout), float(count * timeout) + 5)
    # 1. check for service not known - domain doesn't exist
    if re.search('ping: (.*): Name or service not known', result):
        return 'NOK', 'DNS_NOT_EXISTS', None
    # 2. check if there is a reply
    if re.search('100% packet loss', result):
        return 'NOK', 'NOT_RESPONSE', None
    # 3. find average response time
    rtt_avg = float(re.search(
        r'rtt min/avg/max/mdev = \d{1,10}.\d{1,10}/(.*?)/\d{1,10}.\d{1,10}/\d{1,10}.\d{1,10} ms',
        result).group(1))
    return 'OK', None, rtt_avg
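# A minimal usage sketch for the ping() helper above, not part of the original
# snippet. The hostname and argument values are hypothetical, and cmd.run is
# assumed to behave exactly as in the function body.
status, reason, rtt_avg = ping('example.com', count=3, timeout=2)
if status == 'OK':
    print('average rtt: {} ms'.format(rtt_avg))
else:
    print('ping failed: {}'.format(reason))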
def _ffmpeg_get_audio(infilename, outfilename, timeout=None):
    """
    :param infilename: input video file name or url
    :param outfilename: output audio file name, like 'test.wav'
    :param timeout: timeout in seconds, type: float
    :return:
    """
    retcode, stdout = cmd.run(
        ['ffmpeg', '-hide_banner', '-nostats', '-v', 'quiet', '-i', infilename,
         '-f', 'wav', '-t', '10', '-vn', '-y', outfilename],
        timeout=timeout)
    return retcode, stdout.decode() if retcode == 0 else ''
def restore(args):
    context = {
        'dbsnp_build': args.dbsnp_build,
        'genome_build': args.genome_build,
        'prefix': args.prefix,
        'tag': args.tag,
    }
    context['db_src_name'] = 'dbsnp_{dbsnp_build}_{genome_build}'.format(**context)
    context['db_name'] = '{prefix}_{dbsnp_build}_{genome_build}'.format(**context)
    context['db_user'] = '******'
    log.info(colored(pformat(context), 'blue'))

    with cd(DBSNP_HOME):
        force('createuser {db_user}'.format(**context))
        run('./script/pg_restore.sh {db_src_name} {db_name} {db_user} {tag}'.format(**context))

    log.info('Done')
    log.info('To connect via psql, run:')
    log.info('')
    log.info(colored('$ psql {db_name} -U {db_user}'.format(**context), 'blue', attrs=['bold']))
    log.info('')
def ffmpeg_get_keyframe(infilename, outfilename, timeout=None):
    """
    :param infilename: input video file name or url
    :param outfilename: output images file name with a C printf-style int format, like 'pics/thumbnails-%03d.jpeg'
    :param timeout: timeout in seconds, type: float
    :return:
    """
    retcode, stdout = cmd.run(
        ['ffmpeg', '-hide_banner', '-nostats', '-v', 'quiet', '-skip_frame', 'nokey',
         '-vsync', '0', '-t', '100', '-i', infilename, '-f', 'image2', outfilename],
        timeout=timeout)
    return retcode, stdout.decode() if retcode == 0 else ''
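# A minimal usage sketch for ffmpeg_get_keyframe() above, not part of the
# original snippet. The file names are hypothetical, and it assumes ffmpeg is
# on PATH and that cmd.run accepts an argument list plus a timeout as shown.
retcode, output = ffmpeg_get_keyframe('sample.mp4', 'pics/thumbnails-%03d.jpeg',
                                      timeout=60.0)
if retcode != 0:
    print('keyframe extraction failed')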
def onecmd(self, line):
    if not line or line == '\n':
        return
    if line == 'EOF':
        line = 'quit'
    try:
        s_line = shlex.split(line)
    except:
        self._help()
        return
    # reset option parser before each command, keep usage information
    self.cli.optparser.__init__(reset_usage=False)
    opts = self.cli.optparser.parse_main_args(s_line)
    # Disable shell recursion.
    if opts.command == 'shell':
        return
    if opts.command in self.MAPPING:
        getattr(self, '_' + self.MAPPING[opts.command])(s_line[1::])
    else:
        cmd_cls = self.cli.cli_commands.get(opts.command)
        if cmd_cls is not None:
            cmd = cmd_cls(self.cli)
            try:
                opts = self.cli.optparser.parse_command_args(cmd, s_line)
            except SystemExit:
                # argparse.ArgumentParser prints usage information and executes
                # sys.exit() on problems with parsing command line arguments
                return
            try:
                cmd.cli.demands = copy.deepcopy(self.cli.demands)
                cmd.configure()
                cmd.run()
            except dnf.exceptions.Error as e:
                logger.error(_("Error:") + " " + ucd(e))
                return
        else:
            self._help()
def init_demo(args):
    context = {
        'db_user': args.demo_db_user,
        'db_name': args.demo_db_name,
    }
    log.info(colored(pformat(context), 'blue'))

    with cd(DBSNP_HOME):
        force('createuser {db_user}'.format(**context))
        force('createdb --owner={db_user} {db_name}'.format(**context))

    for src in [DBSNP_HOME] + glob.glob(DBSNP_HOME + '/contrib/*'):
        with cd(src):
            run('pwd')
            if glob.glob('02_drop_create_table.*'):
                context.update(src=src)
                run('./02_drop_create_table.sh {db_name} {db_user} {src}'.format(**context))
                run('./03_import_data.sh {db_name} {db_user} {src} {src}/test/data'.format(**context))

    log.info('Done')
    log.info('To connect via psql, run:')
    log.info('')
    log.info(colored('$ psql {} -U {}'.format(args.demo_db_name, args.demo_db_user), 'blue', attrs=['bold']))
    log.info('')
from cmd import run
from gui import Gui
import tkinter as tk
import logging
from datetime import datetime

logging.basicConfig(filename='myapp.log', level=logging.INFO)
logging.info(str(datetime.now()) + ' Started')

print("please enter command: ")
command = input()

if '--get' in command or '--view' in command:
    logging.info(str(datetime.now()) + ' Console mode running...')
    run(command)
else:
    logging.info(str(datetime.now()) + ' GUI mode running...')
    root = tk.Tk()
    app = Gui(master=root)
    app.mainloop()

logging.info(str(datetime.now()) + ' Finished')
    return ret


def _formathex(hexstr):
    if (len(hexstr) % 2 != 0):
        return hexstr
    ret = ''
    for i in range(0, len(hexstr), 2):
        ret = ret + hexstr[i:i + 2] + ' '
    return ret.rstrip()


@cmd.subcmd
def show(d=cmd.ArgSpec(help="Path to debug info file, by default, path from setup is used.")):
    """ Interpret binary log (data of dump instruction) """
    if d == None:
        d = os.path.join(_getwsroot(), _getparam('debuginfo-path'))
    _loaddebuginfo(d)
    inf = sys.stdin
    # read byte by byte:
    while True:
        tag = int(_nexthexs(inf, 4), 16)
        datalen = int(_nexthexs(inf, 2), 16)
        print "%04x: %s {%s}" % (tag, _getstring(tag),
                                 _formathex(_nexthexs(inf, datalen * 2)) if datalen != 0 else 'no data')
        # each 4 hexa bytes read: interpret the string, read the length byte and dump it.


if __name__ == '__main__':
    cmd.run()
def do_status(self, args):
    """Display current cluster status"""
    cmd = ClusterStatus(self._config)
    cmd.run()
def do_restore(self, args):
    cmd = RestoreCommand(self._config)
    cmd.run(args)
def _reconfigure_node(self):
    sys.stdout.write("Updating configuration files... ")
    cmd = InitNode(self._config)
    cmd.run()
    sys.stdout.write("done\n")
def do_init(self, args):
    """Initialize current node as part of cluster"""
    cmd = InitNode(self._config)
    cmd.run()
def do_promote(self, args):
    """Promote current node to master role"""
    cmd = PromoteToMaster(self._config)
    cmd.run()
def _setup_app(self):
    cmd = paste.script.appinstall.SetupCommand('setup-app')
    cmd.run([self.filename])
def do_archive(self, args):
    cmd = ArchiveCommand(self._config)
    cmd.run(args)
#!/usr/bin/python
import cmd
import re
import timestamp

stats = cmd.run('speedtest-cli')

# Example:
# Download: 38.87 Mbits/s
# Upload: 5.29 Mbits/s
download = re.compile(r'Download: ([0-9]*\.[0-9]*.*)\n').findall(stats)
if len(download) > 0:
    download = download[0]
else:
    download = "ERROR"

upload = re.compile(r'Upload: ([0-9]*\.[0-9]*.*)\n').findall(stats)
if len(upload) > 0:
    upload = upload[0]
else:
    upload = "ERROR"

time = timestamp.now()
string = time + ', ' + download + ', ' + upload

f = open("speedLog.txt", 'a')
f.write(string + "\n")
f.close()
def do_gen_key(self, args):
    """Generate RSA key pair"""
    cmd = GenerateKey(self._config)
    cmd.run()
def do_recovery(self, args):
    """Recover current node"""
    cmd = Recovery(self._config)
    cmd.run()