def end():
    """Close the currently running job: append its time interval to the
    per-day record file and clear the start record.

    Returns:
        The updated mapping for the target day, shaped as
        ``{subject: {value: ["HH:MM-HH:MM", ...]}}``.
    """
    path = util.read_path_record()
    job_data = util.read_start()
    day = str(get_target_day().day)
    subject = job_data["subject"]
    value = job_data["value"]
    # Interval string, e.g. "09:15-10:42"; the end time is "now".
    interval = f"{job_data['start_time']}-{datetime.now().strftime('%H:%M')}"

    record_path = util.RECORD_DIR + path
    data = util.read_json(record_path) if os.path.exists(record_path) else {}

    # Build the day -> subject -> value -> [intervals] structure, creating
    # any missing levels on the way down (replaces the original's four-way
    # nested if/else over the same keys).
    date_val = data.setdefault(day, {})
    date_val.setdefault(subject, {}).setdefault(value, []).append(interval)

    util.write_json(record_path, data)
    clear_start_record()
    return date_val
def write_detail_files(film_details, dest):
    """Write the details of every film to its own ``<id>.json`` file
    inside the *dest* folder.
    """
    for film in film_details.values():
        out_path = join(dest, film['id'] + '.json')
        util.write_json(out_path, film)
def set_cli_defaults(code_root, cli_config, install_config):
    """Write install-time configuration options to the cli.jsonc file used
    to set run-time default values.
    """
    src = os.path.join(code_root, cli_config['config_in'])
    dst = os.path.join(code_root, cli_config['config_out'])
    print("Writing default settings to {}".format(dst))
    try:
        template = util.read_json(src)
    except Exception as exc:
        fatal_exception_handler(exc, "ERROR: Couldn't read {}.".format(src))
    for key in cli_config['default_keys']:
        try:
            # Copy the install-time value into the template as the new default.
            template[key] = install_config[key]
        except Exception as exc:
            fatal_exception_handler(exc, "ERROR: {} not set".format(key))
    # Remove any stale output file before writing the fresh one.
    if os.path.exists(dst):
        print("{} exists; overwriting".format(dst))
        os.remove(dst)
    try:
        util.write_json(template, dst, sort_keys=False)
    except Exception as exc:
        fatal_exception_handler(exc, "ERROR: Couldn't write {}.".format(dst))
def edit(path, val, day):
    """Overwrite the entry for *day* in the record file at *path* with *val*.

    When *path* or *day* is None, fall back to the stored path record and
    the target day respectively.
    """
    print(val)
    if path is None:
        path = util.read_path_record()
    if day is None:
        day = str(get_target_day().day)
    record_file = util.RECORD_DIR + path
    contents = util.read_json(record_file)
    contents[day] = val
    util.write_json(record_file, contents)
def _backup_config_file(self, config):
    """Record settings in file variab_dir/config_save.json for rerunning.

    Returns the path actually written (possibly version-bumped).
    """
    out_file = os.path.join(self.MODEL_WK_DIR, 'config_save.json')
    if self.file_overwrite:
        # Overwrite in place; only announce it when a file already exists.
        if os.path.exists(out_file):
            print('Overwriting {}.'.format(out_file))
    else:
        # Never clobber: pick a fresh, version-suffixed filename.
        out_file, _ = util_mdtf.bump_version(out_file)
    util.write_json(config.config.toDict(), out_file)
    return out_file
def backup_config_file(self, case):
    """Record settings in file config_save.json for rerunning."""
    config = core.ConfigManager()
    out_file = os.path.join(self.WK_DIR, self._backup_config_file_name)
    if self.file_overwrite:
        # Overwrite in place; log only when an old file is being replaced.
        if os.path.exists(out_file):
            _log.info("%s: Overwriting %s.", case.name, out_file)
    else:
        # Never clobber: write to a version-suffixed filename instead.
        out_file, _ = util.bump_version(out_file)
    util.write_json(config.backup_config, out_file)
def train(self, is_save_model=True):
    """Fit the classifier on the training data and, optionally, persist
    the trained model together with its feature configuration.
    """
    # Load the raw training data.
    train_data = self.load_train_data()
    # Feature selection/preprocessing; per the original note this also
    # yields the feature names involved (stored in feature_used_name).
    X, y, self.feature_used_name = self.feature_preprocess(train_data)
    self._clf.fit(X, y)
    if is_save_model:
        saved_model_path = self._clf.save_model(self._model_path)
        # Record where the model went and which features it uses.
        write_json(self._model_config, {
            'model_name': saved_model_path,
            'features': self.feature_used_name,
        })
def backup_config_files(self):
    """Record settings in file config_save.json for rerunning."""
    config = core.ConfigManager()
    for entry in config._configs.values():
        # Configs without a backup filename are intentionally not saved.
        if entry.backup_filename is None:
            continue
        out_file = os.path.join(self.WK_DIR, entry.backup_filename)
        if self.file_overwrite:
            if os.path.exists(out_file):
                self.obj.log.info("%s: Overwriting '%s'.",
                                  self.obj.full_name, out_file)
        else:
            # Never clobber: write to a version-suffixed filename instead.
            out_file, _ = util.bump_version(out_file)
        util.write_json(entry.contents, out_file, log=self.obj.log)
def write(self, episode_id, step, s, a, r, ss, done):
    """Serialize one transition (state, action, reward, next state, done)
    to ``<output_dir>/<episode_id>/<step>.json``.
    """
    # convert() is applied to s, a and ss in that order, exactly as before
    # (dict literals evaluate values in insertion order).
    record = {
        "episode_id": episode_id,
        "step": step,
        "s": convert(s),
        "a": convert(a),
        "r": r,
        "ss": convert(ss),
        "done": done,
        "timestamp": str(datetime.datetime.now()),
    }
    out_path = "{}/{}/{}.json".format(self.output_dir, episode_id, step)
    write_json(out_path, record)
def main():
    """Convert flat "subject/value"-keyed test data into a nested
    day -> subject -> value -> [entries] structure and write it out.
    """
    data = util.read_json("record/test_data.json")
    converted = OrderedDict()
    for day, subjects in data.items():
        day_entries = OrderedDict()
        for raw_key, raw_val in subjects.items():
            # "subject/value" splits on the first slash; a key with no
            # slash gets an empty value component, as before.
            subject, _, value = raw_key.partition("/")
            entries = raw_val.split(",")
            if subject in day_entries:
                day_entries[subject][value] = entries
            else:
                day_entries[subject] = OrderedDict([(value, entries)])
        converted[day] = day_entries
    print(converted)
    util.write_json("record/sample_data.json", converted)
if DOING_SETUP: config = shared.get_configuration('', check_input=True) out_path = config['paths']['OUTPUT_DIR'] case_list = shared.get_test_data_configuration() # write temp configuration, one for each POD temp_config = config.copy() temp_config['pod_list'] = [] temp_config['settings']['make_variab_tar'] = False temp_config['settings']['test_mode'] = True pod_configs = shared.configure_pods(case_list, config_to_insert=temp_config) for pod in case_list['pods']: write_json(pod_configs[pod], os.path.join(out_path, pod + '_temp.json')) # Python 3 has subTest; in 2.7 to avoid introducing other dependencies we use # the advanced construction presented in https://stackoverflow.com/a/20870875 # to programmatically generate tests class TestSequenceMeta(type): def __new__(mcs, name, bases, test_dict): def generate_test(pod_name): def test(self): temp_config_file = os.path.join(out_path, pod_name + '_temp.json') self.assertEqual( 0, subprocess.check_call(
def test(path):
    """Smoke-test helper: inject a fixed entry for day "31" into the
    record file at *path* and write it back out.
    """
    record_file = util.RECORD_DIR + path
    contents = util.read_json(record_file)
    contents["31"] = {"test": "10:00-12:00"}
    util.write_json(record_file, contents)
film['name'] = film['name'].replace('"', '') url = 'http://www.omdbapi.com/?t=' + film['name'] + '&y=&plot=full&r=json' print "Scraping " + url r = requests.get(url) response = json.loads(r.text) if 'Title' in response: # Film data found! film['metadata'] = response newfilms.append(film) print response['Title'] except: pass return newfilms if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Scrape OMDB for movie data') parser.add_argument('--src', help='Source films', required=True) parser.add_argument('--dest', help='Destination for film dictionary', required=False) args = parser.parse_args() data = scrape(args.src) util.write_json(args.dest, data)
args = parser.parse_args()

male_image_info = util.read_json('data/results/images/male/results.json')
female_image_info = util.read_json('data/results/images/female/results.json')
male_trope_info = util.read_json('data/results/only_tropes-male.json')
female_trope_info = util.read_json('data/results/only_tropes-female.json')

res = []
if args.by_ll:
    # Rank the top 100 tropes by log-likelihood of their adjectives.
    male_ll = util.read_json('data/analysis/ll-male.json')
    female_ll = util.read_json('data/analysis/ll-female.json')
    male_trope_adj = util.read_json('data/results/tropes_adjectives-male.json')
    female_trope_adj = util.read_json('data/results/tropes_adjectives-female.json')
    res = top_N_by_ll(100, male_ll, female_ll, male_trope_adj, female_trope_adj)
elif args.by_film_occurence:
    # Rank the top 100 tropes by how many films they occur in.
    male_film_data = util.read_json('data/results/films/trope_films-male.json')
    female_film_data = util.read_json('data/results/films/trope_films-female.json')
    res = top_N_by_film_occurrence(100, male_film_data, female_film_data)
else:
    # Fix: the only_tropes-* files are already loaded above; the original
    # redundantly re-read both files here before using them.
    res = all_tropes(male_trope_info + female_trope_info)
util.write_json(args.dest, res)
def write_output(films, filename):
    """Persist the list of films to *filename* in JSON format."""
    util.write_json(filename, films)
def write_tropes(tropes, dest):
    """Write each trope to its own ``<id>.json`` file under *dest*."""
    for trope in tropes.values():
        out_path = join(dest, trope['id'] + '.json')
        util.write_json(out_path, trope)