def unlock(path, key_path):
    with lockfile.LockFile(path):
        with open(_lock_path(path)) as f:
            lock_obj = json.load(f)
        del lock_obj['users'][key_path]
        with open(_lock_path(path), 'w') as f:
            utils.json_dump(lock_obj, f)
def main(train_dir, dev_dir, test_dir):
    # input_dir = "data/train/*.muc"
    vocabs_dir = "embedding/vocabs.json"
    counter = Counter()
    # num_sens = 0
    read_file(train_dir, counter, update_kb=True)
    read_file(dev_dir, counter)
    read_file(test_dir, counter)
    print(counter)
    # print("Num sent train: %s" % num_sens)
    print("longest sentence: %s" % str(counter.longest_sen))
    print("longest word: %s" % counter.longest_word())
    kb_words = {k: list(v) for k, v in counter.kb_words.items()}
    json_dump(kb_words, "embedding/kb_words.json")
    word2idx = construct_word_embeddings(counter.word_vocab)
    char2idx = construct_char_embeddings(counter.char_vocab)
    pos2idx = construct_pos_embeddings(counter.pos_tags)
    ner2idx = construct_ner_embeddings(counter.ner_tags)
    vocabs = ObjectDict(
        make_dict(word2idx, char2idx, ner2idx, pos2idx),
        max_sen_len=counter.max_sen_len,
        max_word_len=counter.max_word_len)
    vocabs.save(vocabs_dir)
def main(args_dict):
    # Extract configuration
    MK = args_dict['MK']

    # Construct Robust Bayesian regression model
    sigma = 2 * np.ones(1)
    bounder = robustbayesregr.Bounder()
    splitter = robustbayesregr.Splitter()
    proposal = robustbayesregr.IsotropicGaussian(1, sigma)
    np.random.seed(0)
    x, y = robustbayesregr.generate_data(1000)
    target = robustbayesregr.CauchyRegression(x, y, sigma)

    # Obtain MK samples (and their corresponding MAP values) using the
    # A* sampling implementation
    samples = np.empty(MK)
    MAPs = []
    for i in range(MK):
        stream = astar.astar_sampling_iterator(target, proposal, bounder,
                                               splitter)
        X, G = next(stream)
        samples[i] = X
        MAPs.append(G[0] - EULER)
        print_progress('Sampled %d / %d' % (i + 1, MK))
    print('')
    lnZ = float(np.log(target.z()))

    # Dump true ln(Z) and MAP values to JSON file
    data = {'lnZ': lnZ, 'MAPs': MAPs}
    savepath = 'data/astar_rbr_MK%d.json' % MK
    json_dump(data, savepath, indent=None)
    print('Saved %d samples to %s' % (len(MAPs), savepath))
def read_word_embedding(replace=False):
    unknown_dir = "embedding/unknown.npy"
    vectors_dir = "embedding/vectors.npy"
    words_dir = "embedding/words.json"
    word_embeddings_dir = "embedding/word_embeddings.npy"
    word2idx_dir = "embedding/word2idx.json"
    print("read word embedding")
    if replace or not os.path.exists(word_embeddings_dir) \
            or not os.path.exists(word2idx_dir):
        vectors = np.load(vectors_dir)
        unknown = np.load(unknown_dir)
        extension = utils.get_file_extension(words_dir)[1:]
        assert extension in ["json", "pl"]
        if extension == "json":
            words = utils.json_load(words_dir)
        else:
            words = utils.pkl_load(words_dir)
        word2idx = {"UNKNOWN": 1, **{w: i + 2 for i, w in enumerate(words)}}
        vectors = [unknown, *list(vectors)]
        np.save(word_embeddings_dir, vectors)
        utils.json_dump(word2idx, word2idx_dir)
    else:
        word2idx = utils.json_load(word2idx_dir)
    print("vocab: %d words" % (len(word2idx) - 1))
    return word2idx
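# Illustrative only (not from the original source): a hypothetical encoder
# built on the mapping returned above, falling back to the "UNKNOWN" entry for
# out-of-vocabulary tokens (index 0 is presumably left free for padding).
def encode_tokens(tokens, word2idx):
    return [word2idx.get(t, word2idx["UNKNOWN"]) for t in tokens]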
def mark_used(self, temp_ver, key_path):
    """
    Adds or updates the user entry in the user access log for the given
    template version

    Args:
        temp_ver (TemplateVersion): template version to add the entry for
        key_path (str): Path to the prefix uuid file to set the mark for
    """
    dest = self.get_path(temp_ver)
    with lockfile.LockFile(dest):
        with open('%s.users' % dest) as f:
            users = json.load(f)
        updated_users = {}
        for path, key in users['users'].items():
            try:
                with open(path) as f:
                    if key == f.read():
                        updated_users[path] = key
            except (OSError, IOError):
                pass
        with open(key_path) as f:
            updated_users[key_path] = f.read()
        users['users'] = updated_users
        users['last_access'] = int(time.time())
        with open('%s.users' % dest, 'w') as f:
            utils.json_dump(users, f)
def multiShp2txt(cfg):
    output_dir = cfg.output_dir
    num_process = cfg.num_process
    global min_box_size
    min_box_size = cfg.min_box_size
    files = json_load(os.path.join(output_dir, 'filelist.json'))
    keys = list(files.keys())
    manager = multiprocessing.Manager()
    convert_results = manager.dict()
    processes = []
    for i in range(num_process):
        process_files = {}
        for key in keys[i::num_process]:
            process_files[key] = files[key]
        p = multiprocessing.Process(
            target=batShp2txt,
            args=(i, process_files, output_dir, convert_results))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    for k, v in convert_results.items():
        files[k].update(v)
    json_dump(files, os.path.join(output_dir, 'filelist.json'))
def backup_releases(args, repo_cwd, repository, repos_template,
                    include_assets=False):
    repository_fullname = repository['full_name']

    # give release files somewhere to live & log intent
    release_cwd = os.path.join(repo_cwd, 'releases')
    log_info('Retrieving {0} releases'.format(repository_fullname))
    mkdir_p(repo_cwd, release_cwd)

    query_args = {}
    release_template = '{0}/{1}/releases'.format(repos_template,
                                                 repository_fullname)
    releases = retrieve_data(args, release_template, query_args=query_args)

    # for each release, store it
    log_info('Saving {0} releases to disk'.format(len(releases)))
    for release in releases:
        release_name = release['tag_name']
        output_filepath = os.path.join(release_cwd,
                                       '{0}.json'.format(release_name))
        with codecs.open(output_filepath, 'w+', encoding='utf-8') as f:
            json_dump(release, f)
        if include_assets:
            assets = retrieve_data(args, release['assets_url'])
            for asset in assets:
                download_file(asset['url'],
                              os.path.join(release_cwd, asset['name']),
                              get_auth(args))
def trylock(path, excl, key_path):
    with lockfile.LockFile(path):
        # Prune invalid users
        if os.path.exists(_lock_path(path)):
            with open(_lock_path(path)) as f:
                lock_obj = json.load(f)
        else:
            lock_obj = {'excl': False, 'users': {}}
        for other_key_path in lock_obj['users'].copy():
            if not os.path.isfile(other_key_path):
                del lock_obj['users'][other_key_path]
                continue
            with open(other_key_path) as f:
                key = f.read()
            if key != lock_obj['users'][other_key_path]:
                del lock_obj['users'][other_key_path]
        if (
            (excl and len(lock_obj['users']) != 0)
            or (not excl and lock_obj['excl']
                and len(lock_obj['users']) != 0)
        ):
            success = False
        else:
            lock_obj['excl'] = excl
            with open(key_path) as f:
                lock_obj['users'][key_path] = f.read()
            success = True
        # Update lock object file
        with open(_lock_path(path), 'w') as f:
            utils.json_dump(lock_obj, f)
    return success
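# Illustrative only (not part of the original module): a hypothetical helper
# pairing trylock/unlock around a critical section. The key file is assumed
# to hold this prefix's uid, as the docstrings elsewhere in this section
# describe.
def with_dir_lock(path, key_path, do_work):
    if not trylock(path, excl=True, key_path=key_path):
        raise RuntimeError('%s is locked by another user' % path)
    try:
        return do_work()
    finally:
        unlock(path, key_path)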
def main(train_dir, dev_dir, test_dir, lifelong_dir):
    print('Loading data...')
    x_valid, y_valid = load_data_and_labels(dev_dir)
    x_test, y_test = load_data_and_labels(test_dir)
    print(len(x_valid), 'valid sequences')
    print(len(x_test), 'test sequences')

    embeddings = np.load(EMBEDDING_PATH)
    vocabs = json.load(open(VOCAB_PATH, "r", encoding="utf8"))
    kb_words = json.load(open(KB_PATH, "r", encoding='utf8'))
    for k, v in kb_words.items():
        print(k, len(v))

    # Use pre-trained word embeddings
    m = wrapper.Sequence(max_epoch=20, batch_size=40, embeddings=embeddings,
                         vocab_init=vocabs, log_dir="log")
    x_train, y_train = load_data_and_labels(train_dir)
    print(len(x_train), 'train sequences')
    m.train(x_train, kb_words, y_train, x_valid, y_valid)

    # lifelong
    for path in glob("%s/*.txt" % lifelong_dir):
        print("testing-lifelong on %s" % path)
        x = load_data_and_labels(path)[0]
        kb_words = m.tag(x, kb_words)
        m.eval(x_test, kb_words, y_test)
    json_dump(kb_words, "log/new_kb_words.json")
def backup_milestones(username, password, repo_cwd, repository,
                      repos_template):
    milestone_cwd = os.path.join(repo_cwd, 'milestones')
    # if args.skip_existing and os.path.isdir(milestone_cwd):
    #     return
    logger.info(f"Retrieving {repository['full_name']} milestones")
    mkdir_p(repo_cwd, milestone_cwd)
    template = f"{repos_template}/{repository['full_name']}/milestones"
    query_args = {'state': 'all'}
    _milestones = retrieve_data(username, password, template,
                                query_args=query_args)

    milestones = {}
    for milestone in _milestones:
        milestones[milestone['number']] = milestone

    log_info(f'Saving {len(milestones)} milestones to disk')
    for number, milestone in milestones.items():
        milestone_file = f'{milestone_cwd}/{number}.json'
        with codecs.open(milestone_file, 'w', encoding='utf-8') as f:
            json_dump(milestone, f)
def walk(cfg):
    input_dir = cfg.input_dir
    output_dir = cfg.output_dir
    seed = cfg.seed
    percent = cfg.percent
    random.seed(seed)
    files = {}
    l1s = os.listdir(input_dir)
    for l1 in l1s:
        if os.path.isdir(os.path.join(input_dir, l1)):
            result = walk_and_check(os.path.join(input_dir, l1))
            if result is None:
                continue
            if 'background' in l1:
                result['ann'] = []
                result['background'] = True
            else:
                result['background'] = False
            files[os.path.join(input_dir, l1)] = result
    # Sort the entries so that directory traversal order cannot affect the
    # resulting split
    files = dict(sorted(files.items()))
    for i, f in enumerate(files.values()):
        f['id'] = i
    ids = [data['id'] for data in files.values() if not data['background']]
    val_ids = set(random.sample(ids, int(len(ids) * (1 - percent))))
    for data in files.values():
        if data['id'] in val_ids:
            data['split'] = 'val'
        else:
            data['split'] = 'train'
    json_dump(files, os.path.join(output_dir, 'filelist.json'))
def trylock(path, excl, key_path):
    with lockfile.LockFile(path):
        # Prune invalid users
        if os.path.exists(_lock_path(path)):
            with open(_lock_path(path)) as f:
                lock_obj = json.load(f)
        else:
            lock_obj = {'excl': False, 'users': {}}
        for other_key_path in lock_obj['users'].copy():
            if not os.path.isfile(other_key_path):
                del lock_obj['users'][other_key_path]
                continue
            with open(other_key_path) as f:
                key = f.read()
            if key != lock_obj['users'][other_key_path]:
                del lock_obj['users'][other_key_path]
        if ((excl and len(lock_obj['users']) != 0)
                or (not excl and lock_obj['excl']
                    and len(lock_obj['users']) != 0)):
            success = False
        else:
            lock_obj['excl'] = excl
            with open(key_path) as f:
                lock_obj['users'][key_path] = f.read()
            success = True
        # Update lock object file
        with open(_lock_path(path), 'w') as f:
            utils.json_dump(lock_obj, f)
    return success
def _save_metadata(self):
    """
    Write this prefix metadata to disk

    Returns:
        None
    """
    with open(self.paths.metadata(), 'w') as metadata_fd:
        utils.json_dump(self._get_metadata(), metadata_fd)
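# Illustrative counterpart only (hypothetical, not from the original source):
# reading back what _save_metadata wrote, assuming plain JSON on disk.
def _load_metadata(self):
    with open(self.paths.metadata()) as metadata_fd:
        return json.load(metadata_fd)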
def download(self, temp_ver, store_metadata=True):
    """
    Retrieve the given template version

    Args:
        temp_ver (TemplateVersion): template version to retrieve
        store_metadata (bool): If set to ``False``, will not refresh the
            local metadata with the retrieved one

    Returns:
        None
    """
    dest = self._prefixed(temp_ver.name)
    temp_dest = '%s.tmp' % dest

    with lockfile.LockFile(dest):
        # Image was downloaded while we were waiting
        if os.path.exists(dest):
            return

        temp_ver.download(temp_dest)
        if store_metadata:
            with open('%s.metadata' % dest, 'w') as f:
                utils.json_dump(temp_ver.get_metadata(), f)

        sha1 = hashlib.sha1()
        # Hash the image in binary mode, in 64KiB chunks
        with open(temp_dest, 'rb') as f:
            while True:
                chunk = f.read(65536)
                if not chunk:
                    break
                sha1.update(chunk)
        if temp_ver.get_hash() != sha1.hexdigest():
            raise RuntimeError(
                'Image %s does not match the expected hash %s' % (
                    temp_ver.name,
                    sha1.hexdigest(),
                )
            )

        with open('%s.hash' % dest, 'w') as f:
            f.write(sha1.hexdigest())

        with log_utils.LogTask('Convert image', logger=LOGGER):
            utils.run_command(
                ['qemu-img', 'convert', '-O', 'raw', temp_dest, dest],
            )

        os.unlink(temp_dest)
        self._init_users(temp_ver)
def _init_users(self, temp_ver):
    with open('%s.users' % self.get_path(temp_ver), 'w') as f:
        utils.json_dump(
            {
                'users': {},
                'last_access': int(time.time()),
            },
            f,
        )
def walk(cfg):
    input_dir = cfg.input_dir
    output_dir = cfg.output_dir
    sample_one_layer = cfg.sample_one_layer
    sample_two_layer = cfg.sample_two_layer
    background_one_layer = cfg.background_one_layer
    background_two_layer = cfg.background_two_layer
    site = cfg.site
    seed = cfg.seed
    percent = cfg.percent
    random.seed(seed)
    files = {}
    assert site in sample_one_layer or site in sample_two_layer
    l1s = os.listdir(input_dir)
    for l1 in l1s:
        if not os.path.isdir(os.path.join(input_dir, l1)):
            continue
        if 'background' == l1:
            if site in background_one_layer:
                walk_background = walk_data_one_layer
            elif site in background_two_layer:
                walk_background = walk_data_two_layer
            else:
                walk_background = walk_data_root
            result_files = walk_background(os.path.join(input_dir, l1),
                                           background=True)
            for result_file in result_files.values():
                result_file['background'] = True
            files.update(result_files)
        else:
            if site in sample_one_layer:
                walk_data = walk_data_one_layer
            else:
                walk_data = walk_data_two_layer
            result_files = walk_data(os.path.join(input_dir, l1))
            for result_file in result_files.values():
                result_file['background'] = False
            files.update(result_files)
    files = dict(sorted(files.items()))
    for i, file in enumerate(files.values()):
        file['id'] = i
    ids = [file['id'] for file in files.values() if not file['background']]
    num_train = int(len(ids) * percent)
    train_id = random.sample(ids, num_train)
    val_id = [item for item in ids if item not in train_id]
    for file in files.values():
        if file['id'] in val_id:
            file['split'] = 'val'
        else:
            file['split'] = 'train'
    json_dump(files, os.path.join(output_dir, 'filelist.json'))
def save(self):
    for net in self._nets.values():
        net.save()
    for vm in self._vms.values():
        vm.save()
    spec = {
        'nets': list(self._nets.keys()),
        'vms': list(self._vms.keys()),
    }
    with open(self.virt_path('env'), 'w') as f:
        utils.json_dump(spec, f)
def multi_divide(cfg):
    output_dir = cfg.output_dir
    num_process = cfg.num_process
    files = json_load(os.path.join(output_dir, 'filelist.json'))
    keys = list(files.keys())
    processes = []
    for i in range(num_process):
        process_files = {}
        for key in keys[i::num_process]:
            process_files[key] = files[key]
        p = multiprocessing.Process(target=divide,
                                    args=(i, process_files, cfg))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    for split in ['train', 'val']:
        result = {
            'images': [],
            'annotations': [],
            'categories': [{
                'supercategory': 'none',
                'id': 1,
                'name': 'building'
            }],
        }
        img_id = 0
        ann_id = 0
        for i in range(num_process):
            with open(os.path.join(output_dir, split, '%d.pkl' % i),
                      'rb') as f:
                datas = pickle.load(f)
            for data in datas:
                data['image']['id'] = img_id
                result['images'].append(data['image'])
                for ann in data['annotations']:
                    ann['image_id'] = img_id
                    ann['id'] = ann_id
                    result['annotations'].append(ann)
                    ann_id += 1
                img_id += 1
            os.remove(os.path.join(output_dir, split, '%d.pkl' % i))
        json_dump(result, os.path.join(output_dir, split, f'{split}.json'))
def save(self):
    with LogTask("Save nets"):
        for net in self._nets.values():
            net.save()
    with LogTask("Save VMs"):
        for vm in self._vms.values():
            vm.save()
    spec = {"nets": list(self._nets.keys()), "vms": list(self._vms.keys())}
    with LogTask("Save env"):
        with open(self.virt_path("env"), "w") as f:
            utils.json_dump(spec, f)
def _take_lease(path, uuid_path):
    """
    Persist to the given leases path the prefix uuid that's in the uuid path
    passed

    Args:
        path (str): Path to the leases file
        uuid_path (str): Path to the prefix uuid

    Returns:
        None
    """
    with open(uuid_path) as f:
        uuid = f.read()
    with open(path, 'w') as f:
        utils.json_dump((uuid_path, uuid), f)
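# Illustrative counterpart only (hypothetical, not part of the original
# module): a sketch of how a lease written by _take_lease could be validated,
# mirroring the pruning logic in trylock above.
def _lease_valid(path):
    with open(path) as f:
        uuid_path, uuid = json.load(f)
    # The lease holds only if the uuid file still exists and its content
    # still matches what was recorded when the lease was taken.
    if not os.path.isfile(uuid_path):
        return False
    with open(uuid_path) as f:
        return f.read() == uuid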
def main(args_dict):
    np.random.seed(0)

    # Extract parameters
    N = args_dict['N']
    D = args_dict['D']

    # Generate the private data
    X_private = generate_data_mixture_of_Gaussians(N, D)

    # Save the generated data
    dataset = 'mixture_of_Gaussians'
    timestamp = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    path_save = '../data/%s_N%d_D%d' % (dataset, N, D)
    np.savez(path_save + '.npz', X_private=X_private)
    json_dump({'dataset': dataset, 'N': N, 'D': D}, path_save + '.json')
def can_close(self):
    # Let everyone know we are leaving...
    if hasattr(self, '_bounce_window') and \
            self._bounce_window.we_are_sharing():
        self._playing = False
        self.send_event('l', {"data": json_dump([self.nick])})
    return True
def can_close(self):
    # Let everyone know we are leaving...
    if hasattr(self, '_bounce_window') and \
            self._bounce_window.we_are_sharing():
        self._playing = False
        self.send_event('l|%s' % (json_dump([self.nick])))
    return True
def do_1_times():
    # Assumes you have plenty of coins, roughly a few hundred thousand
    # (give or take)
    # Treasure digging
    if utils.get_time(2) % 10 == 0:
        # Every ten days, dig 50 times; buy shovels if short, and if coins
        # run out, buy as many as affordable
        tail.do_dig_trea(
            level=User.level,
            plan={1: [('common1', 5), ('common2', 5), ('high1', 0), ('high2', 0)],
                  2: [('common1', 5), ('common2', 5), ('high1', 0), ('high2', 0)],
                  3: [('common1', 5), ('common2', 5), ('high1', 0), ('high2', 0)],
                  4: [('common1', 10), ('common2', 10), ('high1', 0), ('high2', 0)],
                  5: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)]},
            is_buy=True, my_coin=User.gold_coin)  # if is_buy is False, no coin amount is needed
    else:
        tail.do_dig_trea(
            level=40,
            plan={1: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)],
                  2: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)],
                  3: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)],
                  4: [('common1', 1), ('common2', 1), ('high1', 0), ('high2', 0)],
                  5: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)]},
            is_buy=True, my_coin=User.gold_coin)

    # Daily tasks
    # Chat
    tail.speak(data={'msg': '233'})

    # Farming
    if os.path.isfile('plan.json'):
        plan = utils.json_load('plan.json')
    else:
        plan = get_plan_of_plant()  # one-click mode not implemented yet
    middle.do_farm_work(middle.get_land_data(), plan=plan)
    if len(plan) == 1:
        plan = get_plan_of_plant()  # can pass in a purchase plan and coins

    # Sign-in
    head.sign(is_print=True)
    head.sign_total_situation(is_print=True)

    # Events
    # activity_3_times: number of treasure-hunt runs; my_coin: my coins,
    # needed by event 3 (treasure hunt); people_list: people to send
    # confessions to (may be a friend list; defaults to my alt account,
    # remember to change it); confession_statement_list: confession lines
    # (has a default); is_run_list: run flags for the 4 events (treasure hunt
    # is off by default; the list may be short, and missing entries are
    # treated as off).
    # Market-related events can be run multiple times in the always-on loop,
    # or once before the day ends; not implemented internally.
    head.do_activity(activity_3_times=50, my_coin=User.gold_coin,
                     people_list=None, confession_statement_list=None,
                     is_run_list=None)

    # Market
    head.do_sun_market()
    head.add_order()  # when running repeatedly, this only needs to run once

    # Handle sunshine affairs
    head.do_sun_affair()

    # Claim and open rewards
    head.receive_and_turnOn()

    # Save data
    utils.json_dump('plan.json', data=plan)
def _new_joiner(self, payload):
    ''' Someone has joined; sharer adds them to the buddy list. '''
    [nick, colors] = json_load(payload)
    self.status.set_label(nick + ' ' + _('has joined.'))
    self._append_player(nick, colors)
    if self.initiating:
        payload = json_dump([self._game.buddies, self._player_colors])
        self.send_event('b|%s' % (payload))
def _backup_data(username, password, name, template, output_file,
                 output_directory, overwrite=True):
    # skip_existing = args.skip_existing
    if overwrite:
        logger.info(f'Retrieving {username} {name}')
        mkdir_p(output_directory)
        data = retrieve_data(username, password, template)

        logger.info(f'Writing {len(data)} {name} to disk')
        with codecs.open(output_file, 'w', encoding='utf-8') as f:
            json_dump(data, f)
def download(self, temp_ver, store_metadata=True):
    dest = self._prefixed(temp_ver.name)
    temp_dest = '%s.tmp' % dest

    with lockfile.LockFile(dest):
        # Image was downloaded while we were waiting
        if os.path.exists(dest):
            return

        temp_ver.download(temp_dest)
        if store_metadata:
            with open('%s.metadata' % dest, 'w') as f:
                utils.json_dump(temp_ver.get_metadata(), f)

        sha1 = hashlib.sha1()
        # Hash the image in binary mode, in 64KiB chunks
        with open(temp_dest, 'rb') as f:
            while True:
                chunk = f.read(65536)
                if not chunk:
                    break
                sha1.update(chunk)
        if temp_ver.get_hash() != sha1.hexdigest():
            raise RuntimeError(
                'Image %s does not match the expected hash %s' % (
                    temp_ver.name,
                    sha1.hexdigest(),
                )
            )

        with open('%s.hash' % dest, 'w') as f:
            f.write(sha1.hexdigest())

        utils.run_command(
            ['qemu-img', 'convert', '-O', 'raw', temp_dest, dest],
        )

        os.unlink(temp_dest)
        self._init_users(temp_ver)
def trylock(path, excl, key_path):
    """
    Tries once to get a lock to the given dir

    Args:
        path(str): path to the directory to lock
        excl(bool): If the lock should be exclusive
        key_path(str): path to the file that contains the uid to use when
            locking

    Returns:
        bool: True if it did get a lock, False otherwise
    """
    with lockfile.LockFile(path):
        # Prune invalid users
        if os.path.exists(_lock_path(path)):
            with open(_lock_path(path)) as f:
                lock_obj = json.load(f)
        else:
            lock_obj = {'excl': False, 'users': {}}
        for other_key_path in lock_obj['users'].copy():
            if not os.path.isfile(other_key_path):
                del lock_obj['users'][other_key_path]
                continue
            with open(other_key_path) as f:
                key = f.read()
            if key != lock_obj['users'][other_key_path]:
                del lock_obj['users'][other_key_path]
        if (
            (excl and len(lock_obj['users']) != 0)
            or (not excl and lock_obj['excl']
                and len(lock_obj['users']) != 0)
        ):
            success = False
        else:
            lock_obj['excl'] = excl
            with open(key_path) as f:
                lock_obj['users'][key_path] = f.read()
            success = True
        # Update lock object file
        with open(_lock_path(path), 'w') as f:
            utils.json_dump(lock_obj, f)
    return success
def serialize(self):
    ''' Serialize the grid for passing to share and saving '''
    grid = []
    for i in range(ROW * COL):
        if self.grid[i] is not None:
            grid.append([self.grid[i].number, self.grid[i].orientation])
        else:
            grid.append([None, None])
    return json_dump(grid)
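# Illustrative counterpart only (hypothetical, not from the original source):
# a sketch of restoring a grid serialized above. `restore_tile` stands in for
# whatever lookup maps a tile number back to a tile object.
def deserialize(self, payload):
    for i, (number, orientation) in enumerate(json_load(payload)):
        if number is None:
            self.grid[i] = None
        else:
            self.grid[i] = restore_tile(number)  # hypothetical lookup
            self.grid[i].orientation = orientation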
def unlock(path, key_path):
    """
    Removes the lock of the uid in the given key file

    Args:
        path(str): Path of the directory to lock
        key_path(str): path to the file that contains the uid to remove the
            lock of

    Returns:
        None
    """
    with lockfile.LockFile(path):
        with open(_lock_path(path)) as f:
            lock_obj = json.load(f)
        del lock_obj['users'][key_path]
        with open(_lock_path(path), 'w') as f:
            utils.json_dump(lock_obj, f)
def _init_users(self, temp_ver):
    """
    Initializes the user access registry

    Args:
        temp_ver (TemplateVersion): template version to update registry for

    Returns:
        None
    """
    with open('%s.users' % self.get_path(temp_ver), 'w') as f:
        utils.json_dump(
            {
                'users': {},
                'last_access': int(time.time()),
            },
            f,
        )
def download(self, temp_ver, store_metadata=True):
    dest = self._prefixed(temp_ver.name)
    temp_dest = '%s.tmp' % dest

    with lockfile.LockFile(dest):
        # Image was downloaded while we were waiting
        if os.path.exists(dest):
            return

        temp_ver.download(temp_dest)
        if store_metadata:
            with open('%s.metadata' % dest, 'w') as f:
                utils.json_dump(temp_ver.get_metadata(), f)

        sha1 = hashlib.sha1()
        # Hash the image in binary mode, in 64KiB chunks
        with open(temp_dest, 'rb') as f:
            while True:
                chunk = f.read(65536)
                if not chunk:
                    break
                sha1.update(chunk)
        if temp_ver.get_hash() != sha1.hexdigest():
            raise RuntimeError(
                'Image %s does not match the expected hash %s' % (
                    temp_ver.name,
                    sha1.hexdigest(),
                ))

        with open('%s.hash' % dest, 'w') as f:
            f.write(sha1.hexdigest())

        utils.run_command([
            'qemu-img', 'convert', '-O', 'raw', temp_dest, dest,
        ])

        os.unlink(temp_dest)
        self._init_users(temp_ver)
def _buddy_left(self, payload):
    [nick] = json_load(payload)
    self._label.set_label(nick + ' ' + _('has left.'))
    if self.initiating:
        self._remove_player(nick)
        payload = json_dump(
            [self._bounce_window.buddies, self._player_colors])
        self.send_event('b|%s' % (payload))
        # Restart from sharer's turn
        self._bounce_window.its_my_turn()
def main():
    train_dir = "origin_data/Doi_song_train.muc"
    dev_dir = "origin_data/Doi_song_dev.muc"
    test_dir = "origin_data/Doi_song_test.muc"
    counter = Counter()
    print("read train file")
    sentences_train = read_file(train_dir, counter)
    print("read dev file")
    sentences_dev = read_file(dev_dir, counter)
    print("read test file")
    sentences_test = read_file(test_dir, counter)
    print(counter)
    print("longest sentence: %s" % str(counter.longest_sen))
    print("longest word: %s" % counter.longest_word())

    word2idx = read_word_embedding()
    char2idx = tags2idx(counter.char_vocab)
    pos2idx = tags2idx(counter.pos_tags)
    chunk2idx = tags2idx(counter.chunk_tags)
    ner2idx = tags2idx(counter.ner_tags)
    utils.json_dump(char2idx, "embedding/char2idx.json")
    utils.json_dump(pos2idx, "embedding/pos2idx.json")
    utils.json_dump(chunk2idx, "embedding/chunk2idx.json")
    utils.json_dump(ner2idx, "embedding/ner2idx.json")

    print("encoding data")
    encoder = {
        "max_sen_len": counter.max_sen_len,
        "max_word_len": counter.max_word_len,
        **utils.make_dict(word2idx, char2idx, pos2idx, chunk2idx, ner2idx)
    }
    encoder = utils.dict_to_object(encoder)
    chars_train, words_train, pos_train, chunk_train, ner_train = encode_sens(
        sentences_train, encoder)
    chars_dev, words_dev, pos_dev, chunk_dev, ner_dev = encode_sens(
        sentences_dev, encoder)
    chars_test, words_test, pos_test, chunk_test, ner_test = encode_sens(
        sentences_test, encoder)

    print("saving data")
    data = utils.make_dict(chars_train, words_train, pos_train, chunk_train,
                           ner_train, chars_dev, words_dev, pos_dev,
                           chunk_dev, ner_dev, chars_test, words_test,
                           pos_test, chunk_test, ner_test)
    os.makedirs("data", exist_ok=True)
    for k, d in data.items():
        np.save("data/%s.npy" % k, d)
def _buddy_left(self, payload):
    [nick] = json_load(payload)
    self._label.set_label(nick + ' ' + _('has left.'))
    if self.initiating:
        self._remove_player(nick)
        payload = json_dump([self._bounce_window.buddies,
                             self._player_colors])
        self.send_event('b', {"data": payload})
        # Restart from sharer's turn
        self._bounce_window.its_my_turn()
def trylock(path, excl, key_path):
    """
    Tries once to get a lock to the given dir

    Args:
        path(str): path to the directory to lock
        excl(bool): If the lock should be exclusive
        key_path(str): path to the file that contains the uid to use when
            locking

    Returns:
        bool: True if it did get a lock, False otherwise
    """
    with lockfile.LockFile(path):
        # Prune invalid users
        if os.path.exists(_lock_path(path)):
            with open(_lock_path(path)) as f:
                lock_obj = json.load(f)
        else:
            lock_obj = {'excl': False, 'users': {}}
        for other_key_path in lock_obj['users'].copy():
            if not os.path.isfile(other_key_path):
                del lock_obj['users'][other_key_path]
                continue
            with open(other_key_path) as f:
                key = f.read()
            if key != lock_obj['users'][other_key_path]:
                del lock_obj['users'][other_key_path]
        if ((excl and len(lock_obj['users']) != 0)
                or (not excl and lock_obj['excl']
                    and len(lock_obj['users']) != 0)):
            success = False
        else:
            lock_obj['excl'] = excl
            with open(key_path) as f:
                lock_obj['users'][key_path] = f.read()
            success = True
        # Update lock object file
        with open(_lock_path(path), 'w') as f:
            utils.json_dump(lock_obj, f)
    return success
def _new_joiner(self, payload):
    ''' Someone has joined; sharer adds them to the buddy list. '''
    [nick, colors] = json_load(payload)
    self._label.set_label(nick + ' ' + _('has joined.'))
    if self.initiating:
        self._append_player(nick, colors)
        payload = json_dump([self._bounce_window.buddies,
                             self._player_colors])
        self.send_event('b', {"data": payload})
        if self._bounce_window.count == 0:  # Haven't started yet...
            self._bounce_window.its_my_turn()
def serialize(self, buddy=None):
    ''' Serialize the hand for passing to share and saving '''
    if buddy is None:
        hand = []
    else:
        hand = [buddy]
    for i in range(COL):
        if self.hand[i] is not None:
            hand.append(self.hand[i].number)
        else:
            hand.append(None)
    return json_dump(hand)
def mark_used(self, temp_ver, key_path):
    dest = self.get_path(temp_ver)
    with lockfile.LockFile(dest):
        with open('%s.users' % dest) as f:
            users = json.load(f)
        updated_users = {}
        for path, key in users['users'].items():
            try:
                with open(path) as f:
                    if key == f.read():
                        updated_users[path] = key
            except (OSError, IOError):
                pass
        with open(key_path) as f:
            updated_users[key_path] = f.read()
        users['users'] = updated_users
        users['last_access'] = int(time.time())
        with open('%s.users' % dest, 'w') as f:
            utils.json_dump(users, f)
def _new_tube_cb(self, id, initiator, type, service, params, state):
    """ Create a new tube. """
    print('New tube: ID=%d initiator=%d type=%d service=%s params=%r '
          'state=%d' % (id, initiator, type, service, params, state))
    if (type == telepathy.TUBE_TYPE_DBUS and service == SERVICE):
        if state == telepathy.TUBE_STATE_LOCAL_PENDING:
            self.tubes_chan[
                telepathy.CHANNEL_TYPE_TUBES].AcceptDBusTube(id)

        self.collab = CollabWrapper(self)
        self.collab.message.connect(self.event_received_cb)
        self.collab.setup()

        # Let the sharer know joiner is waiting for a hand.
        if self.waiting_for_hand:
            self.send_event("j", json_dump([self.nick, self.colors]))
def took_my_turn(self):
    # Did I complete my turn without any errors?
    if self._there_are_errors:
        self._set_label(_('There are errors—it is still your turn.'))
        return

    # After the tile is placed, expand regions of playable grid squares.
    self.show_connected_tiles()

    # Are there any completed paths?
    self._test_for_complete_paths(self._last_grid_played)

    # If so, let everyone know what piece I moved.
    if self.we_are_sharing():
        self._activity.send_event('p|%s' % (json_dump(
            [self._last_tile_played, self._last_tile_orientation,
             self._last_grid_played])))
    self._last_tile_orientation = 0  # Reset orientation.

    # I took my turn, so I am waiting again.
    self._waiting_for_my_turn = True
    if self.last_spr_moved is not None:
        self.last_spr_moved.set_layer(TILES)
        self.last_spr_moved = None
    self._hide_highlight()
    self._set_label(_('You took your turn.'))

    if self.playing_with_robot:
        self.its_their_turn(_('robot'))
        self._waiting_for_robot = True
        gobject.timeout_add(1000, self._robot_turn)
    elif not self.we_are_sharing():
        if self.deck.empty() and \
                self.hands[self._my_hand].tiles_in_hand() == 0:
            self.game_over()
        else:
            self.its_my_turn()
    elif self._initiating():
        self.whos_turn += 1
        if self.whos_turn == len(self.buddies):
            self.whos_turn = 0
        else:
            self.its_their_turn(self.buddies[self.whos_turn])
            self._activity.send_event('t|%s' % (
                self.buddies[self.whos_turn]))
def _new_tube_cb(self, id, initiator, type, service, params, state):
    ''' Create a new tube. '''
    _logger.debug(
        'New tube: ID=%d initiator=%d type=%d service=%s params=%r state=%d',
        id, initiator, type, service, params, state)
    if (type == telepathy.TUBE_TYPE_DBUS and service == SERVICE):
        if state == telepathy.TUBE_STATE_LOCAL_PENDING:
            self.tubes_chan[
                telepathy.CHANNEL_TYPE_TUBES].AcceptDBusTube(id)

        self.collab = CollabWrapper(self)
        self.collab.message.connect(self.event_received_cb)
        self.collab.setup()

        # Let the sharer know a new joiner has arrived.
        if self.waiting_for_fraction:
            self.send_event('j', {"data": json_dump([self.nick,
                                                     self._colors])})
def _new_tube_cb(self, id, initiator, type, service, params, state):
    """ Create a new tube. """
    print('New tube: ID=%d initiator=%d type=%d service=%s params=%r '
          'state=%d' % (id, initiator, type, service, params, state))
    if (type == telepathy.TUBE_TYPE_DBUS and service == SERVICE):
        if state == telepathy.TUBE_STATE_LOCAL_PENDING:
            self.tubes_chan[
                telepathy.CHANNEL_TYPE_TUBES].AcceptDBusTube(id)

        tube_conn = TubeConnection(
            self.conn,
            self.tubes_chan[telepathy.CHANNEL_TYPE_TUBES],
            id,
            group_iface=self.text_chan[telepathy.CHANNEL_INTERFACE_GROUP])
        self.chattube = ChatTube(tube_conn, self.initiating,
                                 self.event_received_cb)

        # Let the sharer know joiner is waiting for a hand.
        if self.waiting_for_hand:
            self.send_event('j|%s' % (json_dump([self.nick, self.colors])))
def api_error(error):
    response.content_type = "application/json"
    response.status = 500
    return json_dump({'error': error})
def save(self, path=None):
    if path is None:
        path = self._env.virt_path('vm-%s' % self.name())
    with open(path, 'w') as f:
        utils.json_dump(self._spec, f)
def make_json_response(obj):
    response.content_type = "application/json"
    return json_dump(obj)
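# Illustrative only: how the two JSON helpers above might back a web route.
# The framework is Bottle-style (judging by the module-level `response`
# object); the `route` decorator, the '/status' endpoint, and `load_status()`
# are all assumptions, not part of the original source.
@route('/status')
def status():
    try:
        return make_json_response(load_status())  # hypothetical data source
    except Exception as exc:
        return api_error(str(exc))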
def serialize(self):
    ''' Serialize the deck for passing to share and saving '''
    order = []
    for i in range(ROW * COL):
        order.append(self.tiles[i].number)
    return json_dump(order)
def send_new_game(self):
    ''' Send a new grid to all players '''
    self.send_event('n|%s' % (json_dump(self._game.save_game())))
def send_dot_click(self, dot):
    ''' Send a dot click to all the players '''
    self.send_event('p|%s' % (json_dump(dot)))
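# Illustrative receiver only (hypothetical, not from the original source):
# the senders above all emit a one-letter action, a '|' separator, and a JSON
# payload, so a matching handler could dispatch like this. Both handler names
# called below are stand-ins.
def event_received_cb(self, event_message):
    action, payload = event_message.split('|', 1)
    if action == 'p':  # a dot click
        self._handle_dot_click(json_load(payload))  # hypothetical handler
    elif action == 'n':  # a new game grid
        self._game.restore_game(json_load(payload))  # hypothetical restore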