sim = dataset[i, :][:] idx = np.argsort(sim)[-100:] return {'id': id_, 'images': ' '.join(map(str, index_ids[idx]))} def make_submit(name): index_ids, index_vectors = get_data('index', name) test_ids, test_vectors = get_data('test', name) test_vector_empty = jl.Parallel(n_jobs=-1, backend='threading')( jl.delayed(check_empty)(x) for x in test_vectors) file = File('data/distances.h5', 'r') dataset = file['result'] result = jl.Parallel(n_jobs=-1, backend='threading')( jl.delayed(process)(i, id_, empty, index_ids, dataset) for i, (id_, empty) in tqdm(enumerate(zip(test_ids, test_vector_empty)), desc='test', total=len(test_ids))) file.close() result = pd.DataFrame(result) result.to_csv('result/retrieval.csv', index=False) if __name__ == '__main__': Fire(calculate_distances) Fire(make_submit)
def main():
    """Entry point: expose the `process` command through a Fire CLI."""
    commands = {'process': process}
    return Fire(commands)
datefmt='%Y-%m-%d %H:%M:%S', color=True) root_logger = logging.getLogger() console_handler = logging.StreamHandler() file_handler = logging.FileHandler('server.log') console_handler.setFormatter(fmt) file_handler.setFormatter(fmt) root_logger.addHandler(console_handler) root_logger.addHandler(file_handler) app_log.setLevel(logging.INFO) gen_log.setLevel(logging.INFO) access_log.setLevel(logging.INFO) app_log.info("Model is loading...") app_log.info("Model Has Been Loaded!") app = Application([(r"/.*", LTPHandler, dict(ltp=self))]) server = HTTPServer(app) server.bind(port) server.start(n_process) ioloop.IOLoop.instance().start() if __name__ == '__main__': Fire(Server)
fps = video.get(cv2.CAP_PROP_FPS) print "Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format( fps) print "Capturing {0} frames".format(num_frames) # Start time start = time.time() # Grab a few frames for i in xrange(0, num_frames): ret, frame = video.read() # End time end = time.time() # Time elapsed seconds = end - start print "Time taken : {0} seconds".format(seconds) # Calculate frames per second fps = num_frames / seconds print "Estimated frames per second : {0}".format(fps) # Release video video.release() if __name__ == '__main__': Fire(check_camera_fps)
""" 输入: s = "barfoothefoobarman", words = ["foo","bar"] 输出:[0,9] 解释: 从索引 0 和 9 开始的子串分别是 "barfoor" 和 "foobar" 。 输出的顺序不重要, [9,0] 也是有效答案。 示例 2: 输入: s = "wordgoodgoodgoodbestword", words = ["word","good","best","word"] 输出:[] :return: """ assert process("barfoothefoobarman", ["foo","bar"]) == [0, 9] assert process("barfoothefoobar", ["foo", "bar"]) == [0, 9] assert process("wordgoodgoodgoodbestword", ["word","good","best","word"]) == [] assert process("abc", ["word", "good", "best", "word"]) == [] assert process("words", ["word", "good", "best", "word"]) == [] assert process("wordgoodgoodgoodbestword", ["word", "good", "best", "good"]) == [8] if __name__ == '__main__': """ python -m mixleet.201908.l30_concate_subwords test """ from fire import Fire Fire()
from pathlib import Path
from shutil import copytree
from os import popen

from fire import Fire


def build_day(day, year='2021'):
    """Scaffold a new Advent-of-Code day directory.

    Copies ./template into <year>/day_<DD>, where DD is *day* zero-padded
    to two digits.
    """
    target = Path(str(year)) / f"day_{int(day):02d}"
    copytree('template', target)
    # Automatically opens directory in VSCode
    # popen(f'code {str(target)}')


if __name__ == "__main__":
    Fire(build_day)
def resolve_paths(config):
    """Return every '*_path' entry of *config* converted to a pathlib.Path."""
    return {key: Path(value)
            for key, value in config.items()
            if key.endswith('_path')}


def fix_seed(args):
    """Seed python/numpy/torch RNGs; defaults args['random_seed'] to 0.

    Mutates and returns *args*.
    """
    seed = args.setdefault('random_seed', 0)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    return args


def get_device(args):
    """Pick args.device when present, otherwise cuda if available else cpu."""
    if hasattr(args, 'device'):
        chosen = args.device
    else:
        chosen = "cuda" if torch.cuda.is_available() else "cpu"
    return {'device': chosen}


if __name__ == "__main__":
    Fire(Cli)
from sqlitedict import SqliteDict
from fire import Fire


def save_snapshot(snapshot, key, output):
    """Write the payload stored under *key* in the *snapshot* db to *output*.

    The SqliteDict is used as a context manager so the underlying sqlite
    connection is closed deterministically (previously it was leaked).
    """
    with SqliteDict(snapshot) as dd:
        payload = dd[key]
    with open(output, 'wb') as f:
        f.write(payload)


def list_snapshot(snapshot):
    """Print every key stored in the *snapshot* database."""
    with SqliteDict(snapshot) as dd:
        keys = list(dd.keys())
    print(keys)


if __name__ == "__main__":
    Fire(dict(
        save=save_snapshot,
        list=list_snapshot,
    ))
if self.dry or self.copy: shutil.copy( f, os.path.join(rootdir, 'train', c, os.path.basename(f))) else: os.replace( f, os.path.join(rootdir, 'train', c, os.path.basename(f))) def test_out(self): assert set(os.listdir(self.out)) == set(['test', 'train' ]), os.listdir(self.out) test_classes = sorted(os.listdir(os.path.join(self.out, 'test'))) train_classes = sorted(os.listdir(os.path.join(self.out, 'train'))) for testlabel, trainlabel in zip(test_classes, train_classes): assert testlabel == trainlabel, (testlabel, trainlabel) assert len(os.listdir(os.path.join(self.out, 'test', testlabel))) > 5, '' assert len(os.listdir(os.path.join(self.out, 'train', trainlabel))) > 30 def test_classnames(): pass def test_duplicates(): pass if __name__ == '__main__': Fire(Transform)
#!/usr/bin/env python3 # -*- coding:UTF-8 -*- # File Name: app.py # Author: Shechucheng # Created Time: 2020-07-07 00:17:59 from fire import Fire from androidbot.wechat import Device if __name__ == "__main__": Fire(Device())
import os
import sys
import setproctitle
from fire import Fire

# Make the project root importable before pulling in siamfc.
sys.path.append(os.getcwd())

from siamfc import train_alexnet as train
from IPython import embed

if __name__ == '__main__':
    # Tag the process with the working-directory name so the training
    # run is easy to identify in `ps`/`top`.
    program_name = 'zrq train ' + os.getcwd().split('/')[-1]
    setproctitle.setproctitle(program_name)
    Fire(train)
def login():
    """Log in to the local 51PM API and print the returned token.

    Posts placeholder credentials to the login endpoint and extracts
    the `token` field from the JSON response.
    """
    url = 'http://127.0.0.1:8888/api/loginFrom51PM'
    # Renamed from `json` to avoid shadowing the stdlib json module.
    payload = {"username": "******", "password": "******"}
    login_res = requests.post(url, json=payload)
    # Extract the token value from the response body.
    token = login_res.json()["token"]
    print("token:", token)


if __name__ == '__main__':
    Fire(login)
k = k.replace("gamma", "weight").replace("beta", "bias") nps[k] = np.ascontiguousarray(v.cpu().numpy()) np.savez(out, **nps) config = self.ltp.config with open('config.json', 'w', encoding='utf-8') as f: json.dump(config, f, indent=2) def test_seged(self): import torch sentences = [ 'My name is tom.', 'He called Tom to get coats.', '他叫Tom去拿外衣。', '他叫汤姆去拿外衣。', "我去长江大桥玩。" ] seg, hidden = self.ltp.seg(sentences) seged, hidden_seged = self.ltp.seg(seg, is_preseged=True) hidden: dict hidden_seged: dict for key, value in hidden.items(): if isinstance(value, torch.Tensor): test = torch.sum(value.float() - hidden_seged[key].float()).numpy() print(key, test) print(seg == seged) if __name__ == '__main__': Fire(Run)
def __init__(self):
    """Initialize the game (audio, window, world objects) and run the main loop.

    NOTE(review): the main game loop runs inside __init__ and never returns
    except via sys.exit — construction and execution are fused; confirm this
    is intentional before refactoring.
    """
    # Settings
    pygame.mixer.init()
    pygame.mixer.music.load('latenight.ogg')
    pygame.mixer.music.play(0)
    self.WIDTH = 640
    self.HEIGHT = 360
    # Config
    self.tps_max = 100  # target ticks per second for the fixed-step update
    # Initialization
    pygame.init()
    font = pygame.font.SysFont("Arial", 18)
    self.resolution = (self.screen_width, self.screen_height) = (self.WIDTH, self.HEIGHT)
    self.screen = pygame.display.set_mode(self.resolution, pygame.RESIZABLE)
    self.tps_clock = pygame.time.Clock()
    self.tps_delta = 0.0
    self.scroll = Vector2(0, 0)
    self.map = Map(self)
    self.player = Player( self )  # on initialization, hands the player the whole game object: Player(self)
    self.enemy = Enemy(self)
    self.weapon = Weapon(self)
    self.fire = Fire(self)
    self.physics = Physics(self)
    self.platforms = Platforms(self)
    self.collision = Collision(self)
    self.sprite = Sprite(self)
    self.menu = Menu(self)
    self.file_loader = FileLoader(self)
    self.sprite.load_images()

    def create_fonts(font_sizes_list):
        "Creates different fonts with one list"
        fonts = []
        for size in font_sizes_list:
            fonts.append(pygame.font.SysFont("Arial", size))
        return fonts

    def render(fnt, what, color, where):
        "Renders the fonts as passed from display_fps"
        text_to_show = fnt.render(what, 0, pygame.Color(color))
        self.screen.blit(text_to_show, where)

    def display_fps():
        "Data that will be rendered and blitted in _display"
        render(fonts[0],
               what=str(int(self.tps_clock.get_fps())),
               color="white",
               where=(0, 0))

    fonts = create_fonts([32, 16, 14, 8])

    while True:
        # Events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit(0)
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                # click and something happens once (quit on Escape)
                sys.exit(0)

        # Ticking
        self.tps_delta += self.tps_clock.tick() / 1000.0  # convert ms to seconds
        while self.tps_delta > 1 / self.tps_max:
            self.tick()
            self.tps_delta -= 1 / self.tps_max

        # Rendering/Drawing
        self.screen.fill((0, 0, 0))
        self.draw()
        display_fps()
        pygame.display.flip()
def brute_host(host):
    """Brute single host"""
    print(process_host('', host))


def brute(hosts_file: str, w: int = None, i: str = '', sp: bool = True, d: bool = False):
    """Brute multiple hosts"""
    from functools import partial

    if d:
        # Debug flag: mirror debug-level log records to stderr.
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logging.StreamHandler())

    hosts = ListFile(hosts_file)
    worker = partial(process_host, i)
    # Keep only truthy results and print them as they come in.
    for url in threaded(worker, hosts, callback=bool, workers=w):
        print(url)


if __name__ == "__main__":
    Fire(brute)
import tempfile


def get_katex_from_url(version, tar):
    """Download katex.js for *version* from jsDelivr and write it to *tar*.

    Returns the CDN url the bundle was fetched from.
    """
    url = "https://cdn.jsdelivr.net/npm/katex@{}/dist/katex.js".format(version)
    ret = requests.get(url, allow_redirects=True)
    assert ret.status_code == 200, ret.status_code
    # Reuse the body of the response we already have instead of fetching
    # the same URL a second time (previous code downloaded twice).
    tar.write(ret.content)
    # Flush so a reader that opens the file by name sees the full payload.
    tar.flush()
    return url


def update_katex_py(src=None, tar="katex.py"):
    """Translate katex.js to Python with js2py.

    *src* is either a path to a .js file or a katex version string; in the
    latter case the bundle is downloaded to a temporary file first.
    """
    src = "katex.js" if src is None else src
    if PurePath(src).suffix == ".js":
        print("%s -> %s" % (src, tar))
        # Bug fix: translate the file actually named by *src*; previously
        # the literal "katex.js" was passed regardless of the argument.
        js2py.translate_file(src, tar)
    else:
        with tempfile.NamedTemporaryFile() as tmp_tar:
            print("katex version: %s" % src)
            url = get_katex_from_url(src, tmp_tar)
            src = tmp_tar.name
            print("%s -> %s" % (url, tar))
            js2py.translate_file(src, tar)


if __name__ == '__main__':
    Fire(update_katex_py)
def search(*, nsamples: int, strategy: str) -> dict:
    """
    Randomly samples `nsamples` models and trains them on the training data.
    """
    # Guard clause: reject unknown strategies up front.  The trainer names
    # are resolved lazily per branch, exactly as before.
    if strategy not in ("embeddings", "fine-tune", "transfer"):
        raise ValueError("unsupported training strategy")

    if strategy == "embeddings":
        trainer = train_from_cache
        samplers = train_from_cache_hyperparam_samplers
    elif strategy == "fine-tune":
        trainer = fine_tune_train
        samplers = fine_tune_train_hyperparam_samplers
    else:  # strategy == "transfer"
        trainer = transfer_train
        samplers = transfer_train_hyperparam_samplers

    # Sample and train `nsamples` different models. The results
    # and configs are saved to wandb.
    for _ in range(nsamples):
        params = sample_params(samplers)
        print(f"training {strategy} model with params:")
        pprint(params)
        trainer(**params)


if __name__ == "__main__":
    Fire(search)
def cli():
    """Console-script entry point: dispatch CLI args to `main` via Fire."""
    # The Fire(...) result was previously bound to an unused local
    # (`fire = ...`); the dead assignment is dropped.
    Fire(main)
run_name=f"{stage}", version=self.base_cfg.version), ) trainer.fit(pipeline) del pipeline, trainer def run(self): starter = False for n, stage in enumerate(self.stages): if self.resume_stage: if stage == self.resume_stage: starter = True if starter: self.run_stage(stage, n) else: self.run_stage(stage, n) def main(hparams): if hparams.seed: set_determenistic(hparams.seed) stager = Stager(hparams) stager.run() if __name__ == "__main__": cfg = Dict(Fire(fit)) main(cfg)
f"{int(round(event.xdata))} {int(round(event.ydata))} " + filename + "\n") elif event.button == 3: print("-1 -1") myfile.write("-1 -1 " + filename + "\n") imgg = None fig, ax = plt.subplots() cid = fig.canvas.mpl_connect('button_press_event', onclick) img_file_names = img_file_names[start_id:] print("Start at: ", start_id) print("Still to go:", len(img_file_names)) for filename in img_file_names: img = io.imread(filename) print(filename) if imgg is None: imgg = ax.imshow(img) else: imgg.set_data(img) fig.show() ret = plt.waitforbuttonpress(0) if ret: break if __name__ == '__main__': Fire(annotate)
) new_entities = check_key(yaml_file, data, "entities", known_entities) new_intents = check_key(yaml_file, data, "intents", known_intents) # to do list: # check ast eval of all functions (inline lambdas and of python files) return new_entities, new_intents def validate_all_files(interpreter_directory): "Loop over yaml files and validate." if not interpreter_directory: return known_entities = set() known_intents = set() for yaml_file in glob.glob(os.path.join(interpreter_directory, "*.yaml"), recursive=True): validate_regex_in_yaml_file(yaml_file) new_entities, new_intents = validate_yaml_file(yaml_file, known_entities, known_intents) known_entities.update(new_entities) known_intents.update(new_intents) if __name__ == "__main__": Fire(validate_all_files)
def cli():
    """Console-script entry point: run `align_json` as a Fire CLI."""
    Fire(align_json)
class Main:
    @staticmethod
    def start(data_dir: str = '.',
              index_name: str = 'hot-workspace',
              doc_type='test-document',
              es_host='localhost',
              es_port=9200):
        """Run an interactive create/teardown loop for an Elasticsearch workspace.

        Each iteration builds a fresh workspace (context manager), waits for
        operator input, and tears it down on exit from the `with` block.
        Typing 'exit' (or Ctrl-C) ends the loop; plain Enter refreshes.
        """
        while True:
            # Re-entering the `with` on every iteration recreates the
            # workspace; leaving it (break or loop end) tears it down.
            with ElasticsearchWorkspace(es_host, es_port, index_name,
                                        doc_type, data_dir):
                try:
                    log.info('Up & Running.')
                    log.info(
                        'Press [enter] to teardown and refresh, or type [exit] to terminate'
                    )
                    i = input('> ')
                    if i == 'exit':
                        break
                except Exception as e:
                    # Log and keep looping — the workspace is rebuilt next pass.
                    log.warning(f'Got unhandled exception: {e}')
                except KeyboardInterrupt:
                    # Ctrl-C: KeyboardInterrupt is not an Exception subclass,
                    # so it reaches this handler and terminates cleanly.
                    break


if __name__ == '__main__':
    Fire(Main)
for edge in edges: if len(edge) < 2: continue kw = {} if len(edge) == 3: kw = edge[-1] edge = edge[0:2] dot.edge(*edge, **kw) dot.view() def draw(num=10, name='sample', cluster=False, filename='sample.gv', style=None): p = Parser() # nodes = p.parse_lines(mock_lines(num)) nodes = p.parse_lines(LINES) f = Flow(name, nodes, filename=filename) if cluster: f.draw_cluster() else: f.draw(style=style) if __name__ == '__main__': Fire(draw)
'rm -f /usr/bin/sh && ln -s /usr/bin/bash /usr/bin/sh', '/bin/bash', ]) stage0 += environment(variables=from_prefix('/usr/local/cuda')) stage0 += packages(apt=[ 'wget', 'git', 'software-properties-common', 'build-essential', 'locales', 'zlib1g-dev' ]) stage0 += shell(commands=['locale-gen en_AU.UTF-8']) stage0 += comment('Installing vglrun and TurboVNC') stage0 += packages(apt=[ 'ubuntu-desktop', 'vim', 'mesa-utils', 'python3-pip', 'python3-pyqt5', 'pyqt5-dev', 'python3-tk' ]) stage0 += shell(commands=[ 'wget https://swift.rc.nectar.org.au/v1/AUTH_810/CVL-Singularity-External-Files/turbovnc_2.2.5_amd64.deb && dpkg -i turbovnc_2.2.5_amd64.deb && rm turbovnc_2.2.5_amd64.deb', 'wget https://swift.rc.nectar.org.au/v1/AUTH_810/CVL-Singularity-External-Files/virtualgl_2.6.4_amd64.deb && dpkg -i virtualgl_2.6.4_amd64.deb && rm virtualgl_2.6.4_amd64.deb', 'apt update', 'apt -y upgrade' ]) stage0 += conda(eula=True, channels=['schrodinger'], packages=['pymol-bundle=2.4.1']) stage0 += environment(variables=from_prefix('/usr/local/anaconda')) return stage0 if __name__ == '__main__': Fire(build)
# tock("reshaping and computing") # print(len(features), "==>", end=" ") features = [f for f in features if f != -np.inf] # print(len(features)) if (len(features) <= 0.5 * numberOfSamples): # print(len(features), numberOfSamples) print(f"computeScore: many -inf values from {model.name}") return -np.inf # print((sum(features) / len(features))) return sum(features) def _generateSamples(self, numSamples, model): sample, path = model.sample(path=True) path = list(map(lambda state: state.name, path)) # print(type(samples), "shape of the samples:", samples.shape) self._verbose( "taking this sample and compute the prob of it on the model") logprob = model.log_probability(sample) print(logprob, model.probability(sample)) return sample, path, logprob from pomegranate.utils import is_gpu_enabled, disable_gpu disable_gpu() print("gpu:", is_gpu_enabled()) if __name__ == "__main__": from fire import Fire tick("timing the whole run") Fire(HMM_POM) tock("the whole run")
a_hours=24, method='avg', save_years=1, rename=None, center=False): ds_in = xr.open_mfdataset(f'{datadir}/*.nc', combine='by_coords') dt = ds_in.time.diff('time')[0].values / np.timedelta64(1, 'h') nt = int(a_hours / dt) if method == 'avg': ds_out = ds_in.rolling(time=nt, center=center, keep_attrs=True).mean() elif method == 'agg': ds_out = ds_in.rolling(time=nt, center=center, keep_attrs=True).sum() elif method == 'pr_cmip': ds_out = ds_in.assign_coords( {'time': ds_in['time'] + np.timedelta64(int(dt / 2), 'h')}) ds_out.pr.values = ds_out.pr / 997 * 60 * 60 * 6 if rename is not None: ds_out = ds_out.rename(rename) year_range = (ds_out.time.dt.year.min().values, ds_out.time.dt.year.max().values + 1) for y in range(*year_range, save_years): savefn = f'{savepref}{y if save_years == 1 else str(y) + "_" + str(min(y + save_years - 1, year_range[-1]))}{savesuf}' ds_save = ds_out.sel(time=slice(str(y), str(y + save_years - 1))).load() print(f'Saving {savefn}') ds_save.to_netcdf(savefn) if __name__ == '__main__': Fire(compute_avg_agg)
idx_1, idx_2, _ = line.strip().split() annotations[int(idx_1)].append(int(idx_2)) # -- get messages with open(name + ".raw.txt", 'rt') as fin: lines = fin.readlines() count = 0 for query in sorted(list(annotations.keys())): for target in range(query, max(-1, query - MAX_DIST), -1): idx_queries.append(int(query)) queries.append(lines[int(query)]) idx_targets.append(int(target)) targets.append(lines[int(target)]) labels.append(target in annotations[int(query)]) filenames.append(name + ".annotation.txt") df = pd.DataFrame({ "filename": filenames, "idx_query": idx_queries, "idx_target": idx_targets, "query": queries, "target": targets, "label": labels }) print(df.head()[["query", "target"]]) print(df.head()[["filename", "idx_query", "idx_target", "label"]]) pickle.dump(df, open(outfile, 'wb')) print(f"{len(df)} rows saved to {outfile}") if __name__ == '__main__': Fire(read_data)
names = pairs.img_list predictor = Predictor(weights_path=weights_path) os.makedirs(out_dir, exist_ok=True) mse_list = [] mse_loss_fn = nn.MSELoss(size_average=True) if not video: for name, pair in tqdm(zip(names, pairs), total=len(names)): img, mask = pair # img, mask = map(cv2.imread, (f_img, f_mask)) # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) pred = predictor(mask, None).cpu() pred = torch.clip(pred, 0.0, 1.0) # if chunk: # mse_loss = mse_loss_fn(mask, pred[0]) # mse_list.append((name, mse_loss.item())) pred = torch.cat((img, mask, pred[0]), -1) name = os.path.basename(name) transforms.ToPILImage()(pred).save(os.path.join(out_dir, name)) else: process_video(pairs, predictor, out_dir) for name, mse_loss in mse_list: print(f"{str(name):20}, mse {mse_loss:10.5f}") if __name__ == '__main__': Fire(main)
import numpy as np
from functools import reduce


class Calculator:
    """Simple CLI calculator exposing basic arithmetic and statistics."""

    def add(self, *values):
        """Return the sum of *values* (0 when called with no arguments)."""
        return sum(values)

    def mult(self, *values):
        """Return the product of *values* (1 when called with no arguments)."""
        # Seed reduce with 1 so an empty call no longer raises TypeError,
        # mirroring how add() returns 0 for no arguments.
        return reduce(lambda accm, value: accm * value, values, 1)

    def sub(self, num1, num2):
        """Return num1 - num2."""
        return num1 - num2

    def mean(self, *values):
        """Return the arithmetic mean of *values*."""
        return np.mean(values)

    def var(self, *values):
        """Return the population variance of *values*."""
        return np.var(values)

    def std_dev(self, *values):
        """Return the population standard deviation of *values*."""
        return np.std(values)


if __name__ == "__main__":
    # Imported here so the module stays importable without the CLI dependency.
    from fire import Fire
    Fire(Calculator)