def multiShp2txt(cfg):
    """Convert shapefiles to txt in parallel and merge worker results.

    Spawns ``cfg.num_process`` workers, each handling a round-robin slice
    of the file list, then folds their results back into filelist.json.
    """
    output_dir = cfg.output_dir
    num_workers = cfg.num_process

    # batShp2txt workers read this module-level threshold.
    global min_box_size
    min_box_size = cfg.min_box_size

    filelist_path = os.path.join(output_dir, 'filelist.json')
    files = json_load(filelist_path)
    keys = list(files.keys())

    manager = multiprocessing.Manager()
    convert_results = manager.dict()

    workers = []
    for worker_id in range(num_workers):
        # Round-robin assignment of files to this worker.
        chunk = {key: files[key] for key in keys[worker_id::num_workers]}
        proc = multiprocessing.Process(
            target=batShp2txt,
            args=(worker_id, chunk, output_dir, convert_results))
        proc.start()
        workers.append(proc)
    for proc in workers:
        proc.join()

    # Merge per-file conversion results and persist the updated list.
    for key, value in convert_results.items():
        files[key].update(value)
    json_dump(files, filelist_path)
def add(self, args):
    """Append a new "groupN" entry to the vip config JSON file.

    The next group number is derived from the digits of the last existing
    group name.

    NOTE(review): relies on Python 2 semantics — `filter(str.isdigit, ...)`
    must return a str for `int(count)` to work; under Python 3 `filter`
    returns an iterator and this raises TypeError. Confirm the target
    interpreter before porting.
    """
    #check args
    update = 1
    count = 0
    data = {}
    group_list = []
    if os.path.exists(self.json_file):
        data = json_load(self.json_file)
    if len(data) != 0:
        group_list = [(k, data[k]) for k in sorted(data.keys())]
        # Take the last group (sorted order) and extract its numeric suffix.
        gname, v = group_list[len(group_list) - 1]
        count = filter(str.isdigit, json.dumps(gname))
    if args:
        # New group name: one past the highest existing group number
        # (or "group1" when the config was empty, since count starts at 0).
        gname = "group" + str(int(count) + 1)
        data[gname] = {}
        group = data[gname]
        #generate vipconf json
        group['nodes'] = args.host
        group['type'] = args.type
        group['vip'] = args.vip
    else:
        update = 0
    #data = sorted(data.items(), key=lambda d: d[0])
    #modify vipconf about vip or nodes
    if update:
        json_store(data, self.config.vip_conf)
        dmsg('add vipconf sucessfully')
    else:
        dwarn('vipconf was updated.')
def _sending_hand(self, payload):
    ''' Sharer sends a hand. '''
    nick = json_load(payload)[0]
    if nick == self.nick:
        # Restore this player's hand from the serialized payload.
        index = self._game.buddies.index(nick)
        self._game.hands[index].restore(
            payload, self._game.deck, buddy=True)
def run_cv(qs_path, save_paths, filter_=training_q, k=5, f=.8, seed=0):
    """Run k-fold cross-validation over questions loaded from qs_path.

    For each fold: fit a model on the train split, predict on the test
    split, report score quantiles, and save the scores to the fold's path.
    """
    print("Loading questions...")
    questions = [q for q in json_load(qs_path) if filter_(q)]
    print("Done!\n")

    print("Generating splits...")
    train_idx, test_idx = generate_cv_splits(k, f, len(questions), seed)
    print("Done!\n")

    print("Cross-validating...")
    for fold, (train_set, test_set) in enumerate(zip(train_idx, test_idx)):
        print(f"Fold {fold+1} of {k}...")
        # Instantiate the per-fold output paths from their templates.
        curr_paths = {
            name: template.format(f=f, k=fold, s=seed)
            for name, template in save_paths.items()
        }

        print(" Fitting model...")
        train_qs = [questions[idx] for idx in train_set]
        test_qs = [questions[idx] for idx in test_set]
        model = fit(train_qs, save_paths=curr_paths)

        print(" Predicting...")
        predictions = [predict(model, q) for q in test_qs]

        print(" Scoring...")
        scores = [score(q, pred) for q, pred in zip(test_qs, predictions)]
        q1, q2, q3 = pd.Series(scores).quantile([.25, .5, .75])
        print(f" Scores [median, IQR]: {q2} [{q1}, {q3}]")
        np.save(curr_paths['scores'], scores)
def _play_a_piece(self, payload):
    ''' When a piece is played, everyone should move it into position. '''
    # Payload carries which tile was played, how it was rotated, and where.
    tile_number, orientation, grid_position = json_load(payload)
    for i in range(ROW * COL):
        # find the tile with this number
        if self._game.deck.tiles[i].number == tile_number:
            tile_to_move = i
            break
    # NOTE(review): if no deck tile matches tile_number, tile_to_move is
    # never bound and the call below raises NameError — presumably payloads
    # always reference a valid tile; confirm against the senders.
    self._game.grid.add_tile_to_grid(tile_to_move, orientation,
                                     grid_position, self._game.deck)
    self._game.show_connected_tiles()
    if self.initiating:
        # First, remove the piece from whatever hand it was played.
        for i in range(COL):
            if self._game.hands[self._game.whos_turn].hand[i] is not None \
               and \
               self._game.hands[self._game.whos_turn].hand[i].number == \
               tile_number:
                self._game.hands[self._game.whos_turn].hand[i] = None
                break
        # Then let the next player know it is their turn.
        self._game.whos_turn += 1
        # Wrap around to the first buddy after the last one.
        if self._game.whos_turn == len(self._game.buddies):
            self._game.whos_turn = 0
        self.status.set_label(self.nick + ': ' + _('take a turn.'))
        self._take_a_turn(self._game.buddies[self._game.whos_turn])
        self.send_event('t|%s' % (
            self._game.buddies[self._game.whos_turn]))
def get_source_data_info():
    """
    Returns a list of data sources, together with information about the
    latest update (timestamp, list of table names and columns)
    """
    db = DatabaseConnection(path_config='db_config_data.yaml')
    sources = json_load('../data/sources.json')

    result = []
    for source in sources:
        # Sources whose latest schema cannot be obtained are skipped
        # with a warning rather than aborting the whole listing.
        try:
            schema = db.get_latest_schema('source_' + source['name'])
        except Exception as exception:
            print('[WARNING] %s' % (exception))
            continue
        result.append({
            'description': source['description'],
            'name': source['name'],
            'schema': schema,
            'tables': _get_tables_and_columns_in_schema(db, schema),
            'update': _datetimestr_from_schema(schema),
        })

    db.close()
    return result
def vip_confdel_group(args):
    """Delete an entire vip group from the config file and remove its
    VIPs from every host that was in the group.

    Exits with status 2 when the named group does not exist.

    NOTE: Python 2 only (`dict.has_key`, `except Exp, e`, print statement).
    """
    data = {}
    config = Config()
    json_file = config.vip_conf
    if os.path.exists(json_file):
        data = json_load(json_file)
    if len(data) == 0:
        dmsg('vipconf was null')
        return
    if not data.has_key(args.group):
        derror("vipconfdel: %s not exist." % (args.group))
        sys.exit(2)
    else:
        del_vips = data[args.group]["vips"]
        mask = data[args.group]["mask"]
        hosts = data[args.group]["nodes"].split(",")
        # Persist the config without the group before touching the hosts.
        del data[args.group]
        json_store(data, json_file)
        # Best-effort removal from each host; failures are only printed.
        for host in hosts:
            try:
                _del_vip_from_host(config, host, del_vips, mask)
            except Exp, e:
                print e
def repo_build(f, t):
    """Build or refresh the patch repository rooted at f, mirroring to t.

    Prompts for any missing repo.js metadata, then rebuilds every patch
    directory (one containing a patch.js) found under f.
    """
    try:
        repo_js = utils.json_load(os.path.join(f, 'repo.js'))
    except FileNotFoundError:
        print('No repo.js found in the source directory. '
              'Creating a new repository.')
        repo_js = {}
    for key, prompt in (
            ('id', 'Enter a repository ID: '),
            ('title', 'Enter a nice repository title: '),
            ('contact', 'Enter a contact e-mail address: ')):
        enter_missing(repo_js, key, prompt)
    utils.json_store('repo.js', repo_js, dirs=[f, t])
    # Keep prompting until a non-blank server URL is supplied.
    while 'servers' not in repo_js or not repo_js['servers'][0].strip():
        repo_js['servers'] = [
            input('Enter the public URL of your repository '
                  '(the path that contains repo.js): ')
        ]
    ignored = set(IGNORED_BY_DEFAULT).union(thcrap_ignore_get(f))
    repo_js['patches'] = {
        os.path.basename(root): patch_build(
            os.path.basename(root), repo_js['servers'], f, t, ignored)
        for root, _unused, files in os.walk(f)
        if 'patch.js' in files
    }
    print('Done.')
    utils.json_store('repo.js', repo_js, dirs=[f, t])
def load_libdai_results(topology, n, A, B, K, f, c, potentials_type, M, MAP_solver):
    """
    Load libdai output in JSON format for given model and inference
    parameters
    """
    # The filename encodes all model and inference parameters.
    base = get_filename(topology, n, A, B, K, f, c, potentials_type, M,
                        MAP_solver)
    return json_load(base + '.json')
def repo_build(f, t):
    """Build/update repo.js under f and store the result in f and t."""
    repo_fn = os.path.join(f, 'repo.js')
    try:
        repo_js = utils.json_load(repo_fn)
    except FileNotFoundError:
        print(
            'No repo.js found in the source directory. '
            'Creating a new repository.'
        )
        repo_js = {}
    enter_missing(repo_js, 'id', 'Enter a repository ID: ')
    enter_missing(repo_js, 'title', 'Enter a nice repository title: ')
    enter_missing(repo_js, 'contact', 'Enter a contact e-mail address: ')
    utils.json_store('repo.js', repo_js, dirs=[f, t])
    # Re-prompt until the first server entry is non-blank.
    while 'servers' not in repo_js or not repo_js['servers'][0].strip():
        url = input(
            'Enter the public URL of your repository '
            '(the path that contains repo.js): '
        )
        repo_js['servers'] = [url]
    patches = {}
    for root, unused_dirs, files in os.walk(f):
        del (unused_dirs)
        if 'patch.js' in files:
            pid = os.path.basename(root)
            patches[pid] = patch_build(pid, repo_js['servers'], f, t)
    repo_js['patches'] = patches
    print('Done.')
    utils.json_store('repo.js', repo_js, dirs=[f, t])
def load_feature_groups():
    '''Returns a map where keys are feature groups and values are features.'''
    here = os.path.dirname(__file__)
    result = {}
    main_path = os.path.realpath(
        os.path.join(here, '../../lib/featureGroups.json'))
    result[main_path] = utils.json_load(main_path)
    fb_path = os.path.realpath(
        os.path.join(here, '../../lib/fb-featureGroups.json'))
    # The fb variant is optional; a missing file is not an error.
    try:
        result[fb_path] = utils.json_load(fb_path)
    except IOError:
        pass
    return result
def _load(self, data):
    ''' Load game data from the journal. '''
    piece, pixbuf_data = json_load(data)
    # The pixbuf travels base64-encoded; rebuild it at board scale.
    scale = self._gnuchess.scale
    pixbuf = base64_to_pixbuf(activity, pixbuf_data,
                              width=scale, height=scale)
    return piece, pixbuf
def restore(self, deck_as_text):
    ''' Restore the deck upon resume. '''
    order = json_load(deck_as_text)
    # Rebuild the tile list in the serialized order.
    self.tiles = [self.tiles[index] for index in order]
def _new_joiner(self, payload):
    ''' Someone has joined; sharer adds them to the buddy list. '''
    nick, colors = json_load(payload)
    self.status.set_label(nick + ' ' + _('has joined.'))
    self._append_player(nick, colors)
    if self.initiating:
        # Broadcast the refreshed roster to everyone.
        roster = json_dump([self._game.buddies, self._player_colors])
        self.send_event('b|%s' % (roster))
def load_feature_groups():
    """Returns a map where keys are feature groups and values are features."""
    here = os.path.dirname(__file__)

    def _lib_path(name):
        # Feature-group definitions live under ../../lib.
        return os.path.realpath(os.path.join(here, "../../lib/" + name))

    groups = {}
    required = _lib_path("featureGroups.json")
    groups[required] = utils.json_load(required)
    optional = _lib_path("fb-featureGroups.json")
    try:
        groups[optional] = utils.json_load(optional)
    except IOError:
        # The fb file may legitimately be absent.
        pass
    return groups
def _buddy_left(self, payload):
    ''' A buddy left; the sharer rebroadcasts the roster and restarts. '''
    nick, = json_load(payload)
    self._label.set_label(nick + ' ' + _('has left.'))
    if self.initiating:
        self._remove_player(nick)
        roster = json_dump([self._bounce_window.buddies,
                            self._player_colors])
        self.send_event('b', {"data": roster})
        # Restart from sharer's turn
        self._bounce_window.its_my_turn()
def _buddy_left(self, payload):
    ''' A buddy left; sharer updates the roster and restarts play. '''
    (nick,) = json_load(payload)
    self._label.set_label(nick + ' ' + _('has left.'))
    if not self.initiating:
        return
    self._remove_player(nick)
    roster = json_dump([self._bounce_window.buddies, self._player_colors])
    self.send_event('b|%s' % (roster))
    # Restart from sharer's turn
    self._bounce_window.its_my_turn()
def _new_joiner(self, payload):
    ''' Someone has joined; sharer adds them to the buddy list. '''
    nick, colors = json_load(payload)
    self._label.set_label(nick + ' ' + _('has joined.'))
    if self.initiating:
        self._append_player(nick, colors)
        roster = json_dump([self._bounce_window.buddies,
                            self._player_colors])
        self.send_event('b', {"data": roster})
        if self._bounce_window.count == 0:
            # Haven't started yet...
            self._bounce_window.its_my_turn()
def list(self):
    """Print each vip group followed by its key/value settings."""
    if not os.path.exists(self.json_file):
        return
    data = json_load(self.json_file)
    if len(data) == 0:
        return
    for name in sorted(data.keys()):
        print("%s:" % name)
        for key, val in data[name].items():
            print("\t%s:%s" % (key, val))
def _buddy_list(self, payload):
    '''Sharer sent the updated buddy list, so regenerate internal lists'''
    if not self.initiating:
        [buddies, colors] = json_load(payload)
        self._bounce_window.buddies = buddies[:]
        self._player_colors = colors[:]
        self._player_pixbufs = []
        # Use a distinct loop variable: the original reused `colors`,
        # shadowing the list just unpacked from the payload.
        for pair in self._player_colors:
            self._player_pixbufs.append(svg_str_to_pixbuf(
                generate_xo_svg(scale=0.8,
                                colors=[str(pair[0]), str(pair[1])])))
def _new_joiner(self, payload):
    ''' Someone has joined; sharer adds them to the buddy list. '''
    nick, colors = json_load(payload)
    self._label.set_label(nick + ' ' + _('has joined.'))
    if not self.initiating:
        return
    self._append_player(nick, colors)
    self.send_event('b|%s' % (json_dump(
        [self._bounce_window.buddies, self._player_colors])))
    if self._bounce_window.count == 0:
        # Game hasn't started yet, so the sharer opens play.
        self._bounce_window.its_my_turn()
def get_annotated_dataset(name, n=None):
    '''Load tagged data from the preprocessed folder.

    Args:
        name: str = name of the file e.g. example.pkl
        n: optional cap on the number of records returned.

    Returns:
        The first n tagged records (all records when n is None).
    '''
    path = config.preprocessedfolder + '/' + name
    tagged = utils.json_load(path)
    return tagged[:n]
def load_results(path_load):
    """Load a paginated result dump and sanity-check its completeness."""
    payload = json_load(path_load)
    count = payload['count']
    # A non-null 'next' would mean the dump is truncated.
    assert payload['next'] is None
    results = payload['results']
    assert len(results) == count
    print('[OK] Loaded %d results from %s' % (count, path_load))
    return results
def main(args_dict):
    """Plot MSE of ln(Z) estimators vs. trick parameter alpha for several
    deltas (figure 3b).

    args_dict keys used: 'MK', 'x_type', 'deltas', 'confidence'.
    """
    # Extract configuration from command line arguments
    MK = np.array(args_dict['MK'])
    M = 100
    K = MK / M
    print('M = %d; K = %d' % (M, K))
    x_type = args_dict['x_type']
    deltas = args_dict['deltas']
    do_confidence = args_dict['confidence']

    # Load data from JSON files generated by (non-public) Matlab code
    jsons = [
        json_load('data/bandits_normal_delta%s_MK%d.json' % (delta, MK))
        for delta in deltas
    ]
    lnZs = np.array([json['lnZ'] for json in jsons])
    MAPs = np.array([json['MAPs_ttest'] for json in jsons])

    # Estimate estimator MSEs for the various tricks (as specified by alphas)
    alphas = np.linspace(-0.2, 1.5, 100)
    MSEs, MSEs_stdev = MAPs_to_estimator_MSE_vs_alpha(1, MAPs, lnZs, alphas, K)

    # Set up plot.  Raw strings for the TeX labels: '\l' in '$\ln Z$' is an
    # invalid escape sequence (DeprecationWarning, SyntaxWarning in newer
    # Pythons); the rendered text is unchanged.
    matplotlib_configure_as_notebook()
    fig, ax = plt.subplots(1, 1, facecolor='w', figsize=(4.25, 3.25))
    ax.set_xlabel(r'trick parameter $\alpha$')
    ax.set_ylabel(r'MSE of estimator of $\ln Z$')

    # Plot the MSEs
    labels = [r'$\delta = %g$' % (delta) for delta in deltas]
    colors = [
        plt.cm.plasma((np.log10(delta) - (-3)) / (0 - (-3)))
        for delta in deltas
    ]
    plot_MSEs_to_axis(ax, alphas, MSEs, MSEs_stdev, do_confidence, labels,
                      colors)

    # Finalize plot: dashed verticals mark the two named special cases.
    for vertical in [0.0, 1.0]:
        ax.axvline(vertical, color='black', linestyle='dashed', alpha=.7)
    ax.annotate('Gumbel trick', xy=(0.0, 0.0052), rotation=90,
                horizontalalignment='right', verticalalignment='bottom')
    ax.annotate('Exponential trick', xy=(1.0, 0.0052), rotation=90,
                horizontalalignment='right', verticalalignment='bottom')
    lgd = ax.legend(loc='upper center')
    ax.set_ylim((5 * 1e-3, 5 * 1e-2))
    save_plot(fig, 'figures/fig3b', bbox_extra_artists=(lgd, ))
def get_forms(self, keyword):
    """Return the surface forms mapped to `keyword`, plus its stem.

    Raises:
        KeyError: if `keyword` is not present in the cached mapping
        (same behavior as the original lookup).
    """
    stemmer = SnowballStemmer("english")
    transform_mapping = json_load(
        os.path.join(self.model_path, 'cache'), 'mapping.json')
    # Only the requested keyword's entry is needed; the original built the
    # full bag-of-words dict over the whole vocabulary just to return one
    # entry (O(vocabulary) work per call).
    forms = list(transform_mapping[keyword])
    forms.append(stemmer.stem(keyword))
    return forms
def vip_add(args):
    """Add a new vip group ("groupN") to the vip config file.

    Skips the write when an identical group (same nodes, vips, type and
    mask) already exists.

    NOTE(review): Python 2 semantics assumed — `filter(str.isdigit, ...)`
    must return a str for `int(count)` to work; Python 3 would raise.
    """
    #check args
    if not is_valid_args(args):
        derror('please check args')
        sys.exit(1)
    update = 1
    count = 0
    data = {}
    group_list = []
    config = Config()
    json_file = config.vip_conf
    if os.path.exists(json_file):
        data = json_load(json_file)
    # De-duplicate the requested host and vip lists.
    args.host = list_distinct(args.host)
    args.vip = list_distinct(args.vip)
    if len(data) != 0:
        group_list = [(k, data[k]) for k in sorted(data.keys())]
        # An identical existing group means there is nothing to add.
        for k,v in group_list:
            if v['nodes'] == args.host and v['vips'] == args.vip \
               and v['type'] == args.type and v['mask'] == args.mask:
                update = 0
                break
        # Derive the next group number from the last group's name digits.
        gname, v = group_list[len(group_list) - 1]
        count = filter(str.isdigit, json.dumps(gname))
    if args:
        gname = "group" + str(int(count)+1)
        data[gname] = {}
        group = data[gname]
        #generate vipconf json
        group['nodes'] = args.host
        group['type'] = args.type
        group['vips'] = args.vip
        group['mask'] = args.mask
    else:
        update = 0
    #data = sorted(data.items(), key=lambda d: d[0])
    #modify vipconf about vip or nodes
    if update:
        json_store(data, json_file)
        dmsg('add vipconf sucessfully')
    else:
        dwarn('vipconf was updated.')
def restore(self, grid_as_text, deck):
    ''' Restore tiles to grid upon resume or share. '''
    self.hide()
    grid = json_load(grid_as_text)
    for pos in range(ROW * COL):
        number = grid[pos][0]
        if number is None:
            self.grid[pos] = None
            continue
        # Locate the deck tile carrying this number and place it with
        # the serialized orientation.
        for idx in range(ROW * COL):
            if deck.tiles[idx].number == number:
                self.add_tile_to_grid(idx, grid[pos][1], pos, deck)
                break
    self.show()
def main(args_dict):
    """Plot required sample count M against desired MSE for the Gumbel
    and Exponential trick estimators of ln(Z) (figure 3a).

    NOTE: Python 2 only (`xrange`; `MK / Kmin` relies on integer division).
    """
    # Extract configuration from command line arguments
    MK = args_dict['MK']
    Kmin = args_dict['Kmin']

    # Load data
    data = json_load('data/astar_rbr_MK%d.json' % (MK))
    lnZ = data['lnZ']
    MAPs = np.array(data['MAPs'])
    print('Loaded %d MAP samples from A* sampling' % (len(MAPs)))

    # Estimate MSE of lnZ estimators from Gumbel and Exponential tricks
    MSEs_Gumb = []
    MSEs_Expo = []
    Ms = xrange(1, MK / Kmin)
    for M in Ms:
        # Computation with M samples, repeated K >= Kmin times with a new set every time
        K = MK / M
        myMAPs = np.reshape(MAPs[:(K * M)], (K, M))
        # Compute unbiased estimators of ln(Z)
        lnZ_Gumb = np.mean(myMAPs, axis=1)
        # Exponential-trick estimator with its bias correction term.
        lnZ_Expo = EULER - np.log(np.mean(np.exp(-myMAPs), axis=1)) - (np.log(M) - digamma(M))
        # Save MSE estimates
        MSEs_Gumb.append(np.mean((lnZ_Gumb - lnZ)**2))
        MSEs_Expo.append(np.mean((lnZ_Expo - lnZ)**2))

    # Set up plot
    matplotlib_configure_as_notebook()
    fig, ax = plt.subplots(1, 1, facecolor='w', figsize=(4.25, 3.25))
    ax.set_xscale('log')
    ax.set_xlabel('desired MSE (lower to the right)')
    ax.set_ylabel('required number of samples $M$')
    ax.grid(b=True, which='both', linestyle='dotted', lw=0.5, color='black',
            alpha=0.3)

    # Plot MSEs
    ax.plot(MSEs_Gumb, Ms, color=tableau20(0), label='Gumbel')
    ax.plot(MSEs_Expo, Ms, color=tableau20(2), label='Exponential')

    # Finalize plot
    ax.set_xlim((1e-2, 2))
    ax.invert_xaxis()
    lgd = ax.legend(loc='upper left')
    save_plot(fig, 'figures/fig3a', (lgd, ))
def create_config_for_package(path):
    """Create a config for a parsed package.json.

    Returns None if it is not a Nuclide package.

    No code in this library should parse a package.json file directly. Instead,
    it should operate on a package config that is created by this method.
    Because we may read extra properties in package.json, such as "customDeps",
    it is critical that all scripts operate on a normalized package config
    rather than a raw package.json.
    """
    pkg = utils.json_load(path)
    nuclide_config = pkg.get("nuclide", {})

    return {
        # Standard package.json fields
        "name": pkg["name"],
        "repository": pkg.get("repository"),
        "version": pkg.get("version"),
        "description": pkg.get("description"),
        "license": pkg.get("license"),
        "main": pkg.get("main"),
        "author": pkg.get("author", ""),
        "dependencies": pkg.get("dependencies", {}),
        "optionalDependencies": pkg.get("optionalDependencies", {}),
        "devDependencies": pkg.get("devDependencies", {}),
        "providedServices": pkg.get("providedServices", {}),
        "consumedServices": pkg.get("consumedServices", {}),
        # Both spellings are acceptable:
        "bundleDependencies": pkg.get("bundleDependencies", {}),
        "bundledDependencies": pkg.get("bundledDependencies", {}),
        "scripts": pkg.get("scripts", {}),
        "private": pkg.get("private", False),
        "engines": pkg.get("engines"),
        # Custom Nuclide fields
        "packageRootAbsolutePath": os.path.dirname(path),
        "packageType": nuclide_config.get("packageType"),
        "isNodePackage": nuclide_config.get("packageType") == "Node",
        "testRunner": nuclide_config.get("testRunner"),
        "testsCannotBeRunInParallel": nuclide_config.get(
            "testsCannotBeRunInParallel", False
        ),
        "excludeTestsFromContinuousIntegration": nuclide_config.get(
            "excludeTestsFromContinuousIntegration", False
        ),
        "atomTestRunner": pkg.get("atomTestRunner"),
        "_atomModuleCache": pkg.get("_atomModuleCache"),
        "_rawPkg": pkg,
    }
def multi_divide(cfg):
    """Divide the file list across worker processes, then merge the
    per-worker pickles into COCO-style train/val annotation files."""
    output_dir = cfg.output_dir
    num_workers = cfg.num_process
    files = json_load(os.path.join(output_dir, 'filelist.json'))
    keys = list(files.keys())

    workers = []
    for wid in range(num_workers):
        # Round-robin slice of the file list for this worker.
        chunk = {key: files[key] for key in keys[wid::num_workers]}
        proc = multiprocessing.Process(target=divide, args=(wid, chunk, cfg))
        proc.start()
        workers.append(proc)
    for proc in workers:
        proc.join()

    for split in ['train', 'val']:
        result = {
            'images': [],
            'annotations': [],
            'categories': [{
                'supercategory': 'none',
                'id': 1,
                'name': 'building',
            }],
        }
        img_id = 0
        ann_id = 0
        for wid in range(num_workers):
            pkl_path = os.path.join(output_dir, split, '%d.pkl' % wid)
            with open(pkl_path, 'rb') as fh:
                datas = pickle.load(fh)
            # Re-number image and annotation ids globally across workers.
            for data in datas:
                data['image']['id'] = img_id
                result['images'].append(data['image'])
                for ann in data['annotations']:
                    ann['image_id'] = img_id
                    ann['id'] = ann_id
                    result['annotations'].append(ann)
                    ann_id += 1
                img_id += 1
            os.remove(pkl_path)
        json_dump(result, os.path.join(output_dir, split, f'{split}.json'))
def create_config_for_package(path):
    '''Create a config for a parsed package.json.

    Returns None if it is not a Nuclide package.

    No code in this library should parse a package.json file directly. Instead,
    it should operate on a package config that is created by this method.
    Because we may read extra properties in package.json, such as "customDeps",
    it is critical that all scripts operate on a normalized package config
    rather than a raw package.json.
    '''
    pkg = utils.json_load(path)
    nuclide_config = pkg.get('nuclide', {})

    config = {}
    # Standard package.json fields
    config['name'] = pkg['name']
    # Fields copied verbatim, defaulting to None when absent.
    for field in ('repository', 'version', 'description', 'license', 'main'):
        config[field] = pkg.get(field)
    config['author'] = pkg.get('author', '')
    # Mapping-valued fields default to an empty dict.
    for field in ('dependencies', 'optionalDependencies', 'devDependencies',
                  'providedServices', 'consumedServices',
                  # Both spellings are acceptable:
                  'bundleDependencies', 'bundledDependencies', 'scripts'):
        config[field] = pkg.get(field, {})
    config['private'] = pkg.get('private', False)
    config['engines'] = pkg.get('engines')

    # Custom Nuclide fields
    config['packageRootAbsolutePath'] = os.path.dirname(path)
    package_type = nuclide_config.get('packageType')
    config['packageType'] = package_type
    config['isNodePackage'] = package_type == 'Node'
    config['testRunner'] = nuclide_config.get('testRunner')
    config['testsCannotBeRunInParallel'] = nuclide_config.get(
        'testsCannotBeRunInParallel', False)
    config['excludeTestsFromContinuousIntegration'] = nuclide_config.get(
        'excludeTestsFromContinuousIntegration', False)
    config['atomTestRunner'] = pkg.get('atomTestRunner')
    config['_atomModuleCache'] = pkg.get('_atomModuleCache')
    config['_rawPkg'] = pkg
    return config
def do_1_times():
    """Run the once-per-day tasks (assumes a fairly large gold balance)."""
    # Treasure digging: every 10th day do a big 50-dig run, buying shovels
    # as needed (buys as many as the coins allow when short); otherwise a
    # light daily dig.
    if utils.get_time(2) % 10 == 0:
        tail.do_dig_trea(level=User.level,
                         plan={1: [('common1', 5), ('common2', 5), ('high1', 0), ('high2', 0)],
                               2: [('common1', 5), ('common2', 5), ('high1', 0), ('high2', 0)],
                               3: [('common1', 5), ('common2', 5), ('high1', 0), ('high2', 0)],
                               4: [('common1', 10), ('common2', 10), ('high1', 0), ('high2', 0)],
                               5: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)]},
                         is_buy=True, my_coin=User.gold_coin)
    else:
        tail.do_dig_trea(level=40,
                         plan={1: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)],
                               2: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)],
                               3: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)],
                               4: [('common1', 1), ('common2', 1), ('high1', 0), ('high2', 0)],
                               5: [('common1', 0), ('common2', 0), ('high1', 0), ('high2', 0)]},
                         is_buy=True, my_coin=User.gold_coin)
    # Daily chat task.
    tail.speak(data={'msg': '233'})
    # Planting: reuse the saved plan when present, otherwise build one.
    if os.path.isfile('plan.json'):
        # BUG FIX: the original loaded 'plan,json' (comma typo), so the
        # plan saved at the end of this function was never actually reused.
        plan = utils.json_load('plan.json')
    else:
        plan = get_plan_of_plant()
    middle.do_farm_work(middle.get_land_data(), plan=plan)
    if len(plan) == 1:
        # A purchase plan and coins can be passed in here.
        plan = get_plan_of_plant()
    # Sign-in.
    head.sign(is_print=True)
    head.sign_total_situation(is_print=True)
    # Activities: activity_3_times = treasure-hunt count, my_coin funds it,
    # people_list/confession_statement_list drive the confession activity,
    # is_run_list toggles the four activities (treasure hunt off by default).
    head.do_activity(activity_3_times=50, my_coin=User.gold_coin,
                     people_list=None, confession_statement_list=None,
                     is_run_list=None)
    # Market.
    head.do_sun_market()
    head.add_order()  # when running repeatedly, this only needs to run once
    # Sunshine affairs.
    head.do_sun_affair()
    # Collect rewards and unlocks.
    head.receive_and_turnOn()
    # Persist the plant plan for the next run.
    utils.json_dump('plan.json', data=plan)
def vip_confdel(args):
    """Remove hosts and/or vips from a vip group; the group is deleted
    entirely when neither list would have survivors.

    NOTE: Python 2 only (`dict.has_key`, print statement).
    """
    #check args
    if not is_valid_ip(args):
        derror('please check args')
        sys.exit(1)
    data = {}
    new_host = []
    new_vip = []
    #load vipconf
    config = Config()
    json_file = config.vip_conf
    if os.path.exists(json_file):
        data = json_load(json_file)
    if len(data) == 0:
        print 'vipconf was null'
        return
    if not data.has_key(args.group):
        derror("vipconfdel: %s not exist." % (args.group))
        sys.exit(2)
    #reload new host and vip
    # The stored values are comma-joined strings; round-trip through
    # json.dumps/strip to split them into items.
    old_host = json.dumps(data[args.group]['nodes']).strip('"').split(',')
    old_vip = json.dumps(data[args.group]['vips']).strip('"').split(',')
    if args.host:
        # Keep only hosts that were NOT requested for deletion.
        new_host = [json.dumps(h.strip('"')) for h in old_host
                    if h.strip('"') not in args.host.split(',') and h != '']
    if args.vip:
        # Keep only vips that were NOT requested for deletion.
        new_vip = [json.dumps(v.strip('"')) for v in old_vip
                   if v.strip('"') not in args.vip.split(',') and v != '']
    #print old_host
    #print new_host
    if len(new_host) != 0:
        data[args.group]['nodes'] = _list2str(new_host)
    if len(new_vip) != 0:
        data[args.group]['vips'] = _list2str(new_vip)
    if len(new_host) == 0 and len(new_vip) == 0:
        # Nothing left in the group: drop it entirely.
        del data[args.group]
    json_store(data, json_file)
def vip_confadd(args):
    """Add hosts and/or vips to an existing vip group, skipping entries
    that are already present.

    NOTE: Python 2 only (print statement).
    """
    #check args
    if not is_valid_ip(args):
        derror('please check args')
        sys.exit(1)
    data = {}
    new_host = []
    new_vip = []
    #load vipconf
    config = Config()
    json_file = config.vip_conf
    if os.path.exists(json_file):
        data = json_load(json_file)
    if len(data) == 0:
        print 'vipconf was null'
        return
    #reload new host and vip
    # The stored values are comma-joined strings; round-trip through
    # json.dumps/strip to split them into items.
    old_host = json.dumps(data[args.group]['nodes']).strip('"').split(',')
    old_vip = json.dumps(data[args.group]['vips']).strip('"').split(',')
    if args.host:
        # Only hosts not already in the group are added.
        new_host = [json.dumps(h.strip('"')) for h in args.host.split(',')
                    if h.strip('"') not in old_host]
    if args.vip:
        # Only vips not already in the group are added.
        new_vip = [json.dumps(v.strip('"')) for v in args.vip.split(',')
                   if v.strip('"') not in old_vip]
    #print old_host
    #print new_host
    if len(new_host) != 0:
        new_host.extend(old_host)
        data[args.group]['nodes'] = _list2str(new_host)
    if len(new_vip) != 0:
        new_vip.extend(old_vip)
        data[args.group]['vips'] = _list2str(new_vip)
    if len(new_host) != 0 or len(new_vip) != 0:
        json_store(data, json_file)
        dmsg('update vipconf sucessfully')
    else:
        dwarn('vipconf is already updated.')
def restore(self, hand_as_text, deck, buddy=False):
    ''' Restore tiles to hand upon resume or share. '''
    hand = json_load(hand_as_text)
    # A buddy's serialized hand has an extra leading entry to skip.
    offset = 1 if buddy else 0
    for slot in range(COL):
        number = hand[slot + offset]
        if number is None:
            self.hand[slot] = None
            continue
        # Find the deck tile carrying this number and seat it in the hand.
        for k in range(ROW * COL):
            if deck.tiles[k].number == number:
                self.hand[slot] = deck.tiles[k]
                self.hand[slot].spr.move(self.hand_to_xy(slot))
                self.hand[slot].spr.set_layer(TILES)
                break
def load_dependencies():
    """Load the dependency manifest from its canonical location."""
    filename = get_dependencies_filename()
    return utils.json_load(filename)
def _receive_dot_click(self, payload):
    ''' When a dot is clicked, everyone should change its color. '''
    clicked_dot = json_load(payload)
    self._game.remote_button_press(clicked_dot)
def _receive_new_game(self, payload):
    ''' Sharer can start a new game. '''
    dot_list, move_list = json_load(payload)
    self._game.restore_game(dot_list, move_list)
def patch_build(patch_id, servers, f, t):
    """Updates the patch in the [f]/[patch_id] directory.

    Ensures that patch.js contains all necessary keys and values, then
    updates the checksums in files.js and, if [t] differs from [f],
    copies all patch files from [f] to [t].

    Returns the contents of the patch ID key in repo.js.
    """
    f_path, t_path = [os.path.join(i, patch_id) for i in [f, t]]
    # Prepare patch.js.
    f_patch_fn = os.path.join(f_path, 'patch.js')
    patch_js = utils.json_load(f_patch_fn)
    enter_missing(
        patch_js, 'title', 'Enter a nice title for "{}": '.format(patch_id)
    )
    patch_js['id'] = patch_id
    patch_js['servers'] = []
    # Delete obsolete keys.
    if 'files' in patch_js:
        del (patch_js['files'])
    # Rebuild the per-patch server URLs from the repository servers.
    for i in servers:
        url = os.path.join(i, patch_id) + '/'
        patch_js['servers'].append(str_slash_normalize(url))
    utils.json_store(f_patch_fn, patch_js)
    # Reset all old entries to a JSON null. This will delete any files on the
    # client side that no longer exist in the patch.
    try:
        files_js = utils.json_load(os.path.join(f_path, 'files.js'))
        for i in files_js:
            files_js[i] = None
    except FileNotFoundError:
        files_js = {}
    patch_size = 0
    print(patch_id, end='')
    for root, dirs, files in os.walk(f_path):
        for fn in utils.patch_files_filter(files):
            # One progress dot per patch file.
            print('.', end='')
            f_fn = os.path.join(root, fn)
            # Path of the file relative to the patch directory.
            patch_fn = f_fn[len(f_path) + 1:]
            t_fn = os.path.join(t_path, patch_fn)
            with open(f_fn, 'rb') as f_file:
                f_file_data = f_file.read()
            # Ensure Unix line endings for JSON input
            if fn.endswith(('.js', '.jdiff')) and b'\r\n' in f_file_data:
                f_file_data = f_file_data.replace(b'\r\n', b'\n')
                # Write the normalized bytes back so the stored checksum
                # matches the file on disk.
                with open(f_fn, 'wb') as f_file:
                    f_file.write(f_file_data)
            f_sum = zlib.crc32(f_file_data) & 0xffffffff
            files_js[str_slash_normalize(patch_fn)] = f_sum
            patch_size += len(f_file_data)
            del (f_file_data)
            os.makedirs(os.path.dirname(t_fn), exist_ok=True)
            if f != t:
                shutil.copy2(f_fn, t_fn)
    utils.json_store('files.js', files_js, dirs=[f_path, t_path])
    print(
        '{num} files, {size}'.format(
            num=len(files_js),
            size=sizeof_fmt(patch_size)
        )
    )
    return patch_js['title']
def _receive_new_game(self, payload):
    """ Sharer can start a new game. """
    state = json_load(payload)
    dot_list, correct, level, game = state
    self._game.restore_game(dot_list, correct, level, game)
def _receive_a_fraction(self, payload):
    ''' Receive a fraction from another player. '''
    self._bounce_window.play_a_fraction(json_load(payload))
def _receive_new_grid(self, payload):
    ''' Receive a grid from the sharer. '''
    dot_list, boolean = json_load(payload)
    self._game.restore_grid(dot_list, boolean)
def _buddy_list(self, payload):
    ''' Sharer sent the updated buddy list. '''
    buddies, colors = json_load(payload)
    # Colors arrive positionally aligned with the buddy nicks.
    for index, nick in enumerate(buddies):
        self._append_player(nick, colors[index])