def merge_graphs():
    """Merge the clinical-trials graph into the PubMed graph, summing
    journal mention counts for drugs present in both."""
    pubmed_graph = read_json(PROCESSED_PUBMED_FILEPATH)
    clinical_trials_graph = read_json(PROCESSED_CLINICAL_TRIALS_FILE_PATH)
    for drug, trial_entry in clinical_trials_graph.items():
        if drug not in pubmed_graph:
            # drug only appears in the clinical trials data: copy it wholesale
            pubmed_graph[drug] = trial_entry
            continue
        if 'clinical_trial' in trial_entry:
            pubmed_graph[drug]['clinical_trial'] = trial_entry['clinical_trial']
        if 'journal' in trial_entry:
            if 'journal' not in pubmed_graph[drug]:
                pubmed_graph[drug]['journal'] = trial_entry['journal']
            else:
                # sum counts for journals seen in both graphs
                for journal, count in trial_entry['journal'].items():
                    pubmed_graph[drug]['journal'][journal] = \
                        pubmed_graph[drug]['journal'].get(journal, 0) + count
    return pubmed_graph
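# A minimal illustration of the merge rule above, on hypothetical in-memory
# data (the real inputs come from the two processed JSON files): journal
# counts shared by both graphs are summed, everything else is copied over.
#
#   pubmed = {"aspirin": {"journal": {"JAMA": 2}}}
#   trials = {"aspirin": {"journal": {"JAMA": 1, "BMJ": 3},
#                         "clinical_trial": {"NCT00000001": 1}}}
#
#   merged = {"aspirin": {"journal": {"JAMA": 3, "BMJ": 3},
#                         "clinical_trial": {"NCT00000001": 1}}}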
def get_joints_labels_and_images(self) -> Tuple[dict, dict]:
    """Returns the dictionary containing the bounding box of the image
    and the dictionary containing image information.

    Returns:
        Tuple[dict, dict]: joints, image_dict
            image_dict
                - `name` - Image name in the form of
                    `youtube/VIDEO_ID/video/frames/FRAME_ID.png`.
                - `width` - Width of the image.
                - `height` - Height of the image.
                - `id` - Image ID.
            joints
                - `joints` - 21 joints, containing bounding box limits as vertices.
                - `is_left` - Binary value indicating a right/left hand.
                - `image_id` - ID of the corresponding entry in `images`.
                - `id` - Annotation ID (an image can contain multiple hands).
    """
    data_json_path = os.path.join(self.root_dir, f"youtube_{self.split}.json")
    joints_path = os.path.join(self.root_dir, f"youtube_{self.split}_joints.json")
    images_json_path = os.path.join(self.root_dir, f"youtube_{self.split}_images.json")
    # use the cached joints/images files when they already exist
    if os.path.exists(joints_path) and os.path.exists(images_json_path):
        return read_json(joints_path), read_json(images_json_path)
    else:
        data_json = read_json(data_json_path)
        images_dict = data_json["images"]
        save_json(images_dict, images_json_path)
        annotations_dict = data_json["annotations"]
        joints = self.get_joints_from_annotations(annotations_dict)
        save_json(joints, joints_path)
        return joints, images_dict
def from_args(cls, args: ArgumentParser, options: collections.namedtuple):
    """Initialize this class from some CLI arguments. Used in train, test."""
    for opt in options:
        args.add_argument(*opt.flags, default=opt.default, type=opt.type,
                          help=opt.help)
    if not isinstance(args, tuple):
        args = args.parse_args()

    if args.device is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    if args.resume is not None:
        resume = Path(args.resume)
        cfg_fname = resume.parent / 'config.json'
    else:
        msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
        assert args.config is not None, msg_no_cfg
        resume = None
        cfg_fname = Path(args.config)

    config = read_json(cfg_fname)
    if args.config and resume:
        # update new config for fine-tuning
        config.update(read_json(args.config))

    # parse custom cli options into dictionary
    modification = {opt.target: getattr(args, _get_opt_name(opt.flags))
                    for opt in options}
    return cls(config, resume, modification)
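# A sketch of a typical call site for from_args, following the pytorch-template
# convention this function matches. The flags, the CustomArgs field names, and
# the ConfigParser class name are assumptions inferred from the attributes the
# code reads (flags, type, default, target, help):
#
#   args = argparse.ArgumentParser(description='PyTorch Template')
#   args.add_argument('-c', '--config', default=None, type=str)
#   args.add_argument('-r', '--resume', default=None, type=str)
#   args.add_argument('-d', '--device', default=None, type=str)
#   CustomArgs = collections.namedtuple('CustomArgs',
#                                       'flags type default target help')
#   options = [CustomArgs(['--lr'], float, 1e-3, 'optimizer;args;lr',
#                         'learning rate')]
#   config = ConfigParser.from_args(args, options)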
def get_meta_info(self) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, dict]:
    data = read_json(
        os.path.join(
            self.root_dir,
            self.annotation_sampling_folder,
            self.annotor,
            f"InterHand2.6M_{self.split}_data.json",
        )
    )
    camera_info = pd.DataFrame(
        read_json(
            os.path.join(
                self.root_dir,
                self.annotation_sampling_folder,
                self.annotor,
                f"InterHand2.6M_{self.split}_camera.json",
            )
        )
    ).T
    joints_dict = read_json(
        os.path.join(
            self.root_dir,
            self.annotation_sampling_folder,
            self.annotor,
            f"InterHand2.6M_{self.split}_joint_3d.json",
        )
    )
    annotations_info = pd.DataFrame(data["annotations"])
    # selecting only single hand images
    annotations_info = annotations_info[
        annotations_info["hand_type"] != "interacting"
    ]
    annotations_info = annotations_info.set_index(np.arange(len(annotations_info)))
    image_info = pd.DataFrame(data["images"]).set_index("id")
    return image_info, annotations_info, camera_info, joints_dict
def proportion_daily_country(data_type, country):
    try:
        data = util.read_json(f"csv_{data_type}.json")
        ret = {"proportion-daily": {}}
        for region in list(data.keys()):
            if util.pattern_match(country, region, data[region]["iso2"],
                                  data[region]["iso3"]):
                if data[region]["iso3"] in util.populations:
                    pop = float(util.populations[data[region]["iso3"]])
                else:
                    util.populations = util.csv_to_dict(util.CSV_POPULATIONS)
                    pop = float(util.populations[data[region]["iso3"]])
                prev = 0
                for d, h in data[region]["history"].items():
                    ret["proportion-daily"][d] = \
                        f"{round((h - prev) / pop * 100, 10):.10f}"
                    prev = h
                ret["iso2"] = data[region]["iso2"]
                ret["iso3"] = data[region]["iso3"]
                ret["name"] = region
                return jsonify(ret)
        raise CountryNotFound("This region cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
def get_scale(self) -> list:
    """Extracts the scale from FreiHAND data."""
    if self.split in ["train", "val"]:
        labels_path = os.path.join(self.root_dir, "training_scale.json")
    else:
        labels_path = os.path.join(self.root_dir, "evaluation_scale.json")
    return read_json(labels_path)
def test_cal_loss3d(self):
    print("Running test on 3d loss calculation")
    train_param = edict(read_json(TRAINING_CONFIG_PATH))
    data = DataLoader(
        Data_Set(
            train_param,
            None,
            split="val",
            experiment_type="supervised",
            source="freihand",
        ),
        batch_size=12,
    )
    # shifting the 3D joints by 5 units should yield a 3D loss of 5
    sample = next(iter(data))
    loss3d = cal_3d_loss(
        sample["joints"],
        sample["joints3D"] + 5,
        sample["scale"],
        sample["K"],
        sample["joints_valid"],
    )
    print(loss3d)
    self.assertTrue((abs(loss3d - 5) < 1e-6).tolist())
def proportion_daily(data_type):
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for region in list(data.keys()):
            ret = {"proportion-daily": {}}
            if data[region]["iso3"] == "":
                # TODO: some regions do not have iso2/iso3 codes
                data[region] = {
                    "proportion-daily":
                        "This region doesn't work with this function atm"
                }
                continue
            if data[region]["iso3"] in util.populations:
                pop = float(util.populations[data[region]["iso3"]])
            else:
                util.populations = util.csv_to_dict(util.CSV_POPULATIONS)
                pop = float(util.populations[data[region]["iso3"]])
            prev = 0
            for d, h in data[region]["history"].items():
                ret["proportion-daily"][d] = \
                    f"{round((h - prev) / pop * 100, 10):.10f}"
                prev = int(h)
            ret["iso2"] = data[region]["iso2"]
            ret["iso3"] = data[region]["iso3"]
            data[region] = ret
        return jsonify(data)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
def __init__(self):
    super().__init__()
    self.swaprb = True
    self.class_names = read_json('./models/ssd_mobilenet/labels.json')
    self.model = cv2.dnn.readNetFromTensorflow(
        'models/ssd_mobilenet/frozen_inference_graph.pb',
        'models/ssd_mobilenet/ssd_mobilenet_v2_coco_2018_03_29.pbtxt')
    self.colors = np.random.uniform(0, 255, size=(100, 3))
def history_region_all(data_type, country):
    try:
        if country.lower() in ("us", "united states", "usa"):
            data = util.read_json(f"csv_{data_type}_us_region.json")
        else:
            data = util.read_json(f"csv_{data_type}_region.json")
        for inner_country in list(data.keys()):
            if util.pattern_match(country, inner_country,
                                  data[inner_country]["iso2"],
                                  data[inner_country]["iso3"]):
                return jsonify(data[inner_country]["regions"])
        raise CountryNotFound("This country cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
def __init__(self, list_txt_path, npz_config_path, fetched_slot_names,
             if_random_shuffle=True):
    self.list_npz_file = self.read_list_txt(list_txt_path)
    self.npz_config = read_json(npz_config_path)
    self.fetched_slot_names = set(fetched_slot_names)
    self.if_random_shuffle = if_random_shuffle
def main(args):
    print_args(args, 'args')
    conf = Config(args.exp)

    ### build model
    npz_config = read_json(conf.npz_config_path)
    scope = fluid.Scope()
    with fluid.scope_guard(scope):
        with fluid.unique_name.guard():
            if args.model == 'UniRNN':
                model = RLUniRNN(conf, npz_config,
                                 candidate_encode=args.candidate_encode)
            elif args.model == 'PointerNet':
                model = RLPointerNet(conf, npz_config,
                                     candidate_encode=args.candidate_encode)
            algorithm = RLAlgorithm(model,
                                    optimizer=conf.optimizer,
                                    lr=conf.lr,
                                    gpu_id=(0 if args.use_cuda == 1 else -1),
                                    gamma=args.gamma)
    td_ct = RLComputationTask(algorithm,
                              model_dir=conf.model_dir,
                              mode=args.train_mode,
                              scope=scope)

    # get eval model
    eval_args = copy.deepcopy(args)
    eval_args.exp = args.eval_exp
    eval_args.model = args.eval_model
    eval_args.task = 'eval'
    eval_td_ct = eval_entry_func(eval_args)

    ### other tasks
    if args.task == 'eps_greedy_sampling':
        eps_greedy_sampling(td_ct, eval_td_ct, args, conf, None, td_ct.ckp_step)
        exit()
    elif args.task == 'evaluate':
        evaluate(td_ct, eval_td_ct, args, conf, td_ct.ckp_step)
        exit()

    ### start training
    memory_size = 1000
    replay_memory = collections.deque(maxlen=memory_size)
    summary_writer = tf.summary.FileWriter(conf.summary_dir)
    for epoch_id in range(td_ct.ckp_step + 1, conf.max_train_steps):
        if args.log_reward == 1:
            log_train(td_ct, args, conf, summary_writer, replay_memory, epoch_id)
        else:
            train(td_ct, eval_td_ct, args, conf, summary_writer, replay_memory,
                  epoch_id)
        td_ct.save_model(conf.model_dir, epoch_id)
        eps_greedy_sampling(td_ct, eval_td_ct, args, conf, summary_writer,
                            epoch_id)
def get_camera_param(self) -> list:
    """Extracts the camera parameters from the camera_param_json at
    camera_param_path.

    Returns:
        list: List of camera parameters for all images (32,650).
    """
    if self.split in ["train", "val"]:
        camera_param_path = os.path.join(self.root_dir, "training_K.json")
    else:
        camera_param_path = os.path.join(self.root_dir, "evaluation_K.json")
    return read_json(camera_param_path)
def get_labels(self) -> list:
    """Extracts the labels (joint coordinates) from the label_json at
    labels_path.

    Returns:
        list: List of all the coordinates (32,650), or `None` for the
            evaluation split, which has no labels.
    """
    if self.split in ["train", "val"]:
        labels_path = os.path.join(self.root_dir, "training_xyz.json")
        return read_json(labels_path)
    else:
        return None
def test_vasp_001_shell_job(self):
    """
    Extracts a job from a vasp calculation and asserts the results.
    """
    config = Job("External Job",
                 os.path.join(FIXTURES_DIR, "vasp/test-001")).to_json()
    self._clean_job_config(config)
    self.assertDeepAlmostEqual(
        config, read_json(os.path.join(FIXTURES_DIR, "vasp", "shell-job.json")))
def get_ct_cf(exp, use_cuda, train_mode, output_type, output_dim):
    conf = Config_Env(exp)
    npz_config = read_json(conf.npz_config_path)
    scope = fluid.Scope()
    model = NeuralCF(conf, npz_config, scope=scope,
                     output_type=output_type, output_dim=output_dim)
    algorithm = CFAlgorithm(model,
                            optimizer=conf.optimizer,
                            lr=conf.lr,
                            gpu_id=(0 if use_cuda else -1))
    ct = GenComputationTask(algorithm, model_dir=conf.model_dir, mode=train_mode)
    return ct
def get_flops(self):
    input_size = [512, 512]  # for PASCAL-Context
    filename = Path('flops_MobileNetV2_{}_{}.json'.format(
        input_size[0], input_size[1]))
    if filename.is_file():
        flops_dict = utils.read_json(filename)
    else:
        print('no LUT found, calculating FLOPS...')
        flops_dict = self.calculate_flops_lut(filename, input_size)
    return flops_dict['per_block_flops']
def get_labels(self) -> Dict[str, dict]:
    label_file_names = [
        file_name
        for file_name in next(os.walk(self.label_dir_path))[2]
        if file_name.endswith(".json")
    ]
    labels = {
        file_name.replace(".json", ""):
            read_json(os.path.join(self.label_dir_path, file_name))
        for file_name in label_file_names
    }
    return labels
def all_country(country):
    try:
        data = util.read_json("data.json")
        for region in data:
            if util.pattern_match(country, region["country"], region["iso2"],
                                  region["iso3"]):
                return jsonify(region)
        raise CountryNotFound("This region cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
def __init__(self):
    super().__init__()
    self.class_names = read_json('./models/yolo/labels.json')
    self.model = cv2.dnn.readNetFromDarknet(
        # 'models/yolo/yolov3.cfg',
        # 'models/yolo/yolov3.weights')
        'models/yolo/yolov3-tiny.cfg',
        'models/yolo/yolov3-tiny.weights')
    self.colors = np.random.uniform(0, 255, size=(len(self.class_names), 3))
    self.threshold = 0.3
    self.scale = 0.00392  # 1/255
    self.nms_threshold = 0.4  # Non Maximum Suppression threshold
    self.swaprb = True
def __read_messages(self, inbox_path):
    messages = []
    oldest_timestamp_ms = 100000000000000  # sentinel far in the future
    for msg_file in os.scandir(inbox_path):
        if not msg_file.is_file() or not msg_file.name.startswith("message_"):
            continue
        msg_data = utils.read_json(msg_file.path)
        for participant in msg_data["participants"]:
            self.__name_inference_counter[participant["name"]] += 1
        for msg_json in msg_data["messages"]:
            messages.append(Message(**msg_json))
            oldest_timestamp_ms = min(oldest_timestamp_ms,
                                      msg_json["timestamp_ms"])
    return messages, oldest_timestamp_ms
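# The message_*.json shape implied by the parsing above (a sketch; only
# "participants", "name", "messages" and "timestamp_ms" appear in the code,
# and any further per-message fields are passed through via **msg_json):
#
#   {
#     "participants": [{"name": "Alice"}, {"name": "Bob"}],
#     "messages": [
#       {"timestamp_ms": 1610000000000, ...}
#     ]
#   }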
def do_grouping(self, content_id, custom_comparing=None):
    self.changed = True
    # create groups with full content and comparison information
    if custom_comparing is None:
        custom_comparing = self.custom_comparing
    groups_and_stats = utils.get_grouped_responses(
        self.results['contents'][content_id], custom_comparing)
    # turn content into a dict of groups, stats and created files
    # (responses are now stored in the groups)
    self.results['contents'][content_id] = groups_and_stats
    self.results['contents'][content_id]['files'] = []
    # add a combined and enriched representative response for every group
    for i, group in enumerate(self.results['contents'][content_id]['groups']):
        representative = copy.deepcopy(group['responses'][0])
        representative['body_length'] = f"{len(representative['body'])} bytes"
        representative['headers_length'] = \
            f"{len(utils.format_json(representative['headers']))} bytes"
        # get min-max send and response times
        keys_of_interest = ['send_time', 'response_time']
        min_max = self.get_min_max_dates(group['responses'], keys_of_interest)
        if len(min_max.keys()) > len(keys_of_interest):
            # there is at least one difference, update the representative's keys
            for key in keys_of_interest:
                if key + "_min" in min_max:
                    representative[key + "_min"] = min_max[key + "_min"]
                    representative[key + "_max"] = min_max[key + "_max"]
                    del representative[key]
        # parse body if the content type is known (HTML or JSON)
        content_type = self.get_content_type(representative['headers'],
                                             representative['body'])
        if content_type == 'html':
            link, path = self.create_temp_html(
                f"{self.name}-req-{content_id}-group-{i}",
                representative['body'], "html")
            self.results['contents'][content_id]['files'].append(path)
            representative['body'] = link
        elif content_type == 'json':
            # defaults to no parsing if the content is not JSON after all
            try:
                representative['body'] = utils.read_json(representative['body'])
            except Exception:
                pass
        self.results['contents'][content_id]['groups'][i]['representative'] = \
            representative
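# Shape of self.results['contents'][content_id] after do_grouping, as implied
# by the code above (a sketch; any extra stats keys come from
# utils.get_grouped_responses and are not enumerated here):
#
#   {
#     "groups": [
#       {"responses": [...], "representative": {...}},
#       ...
#     ],
#     "files": ["<paths of temp HTML files created for HTML bodies>"],
#     ...
#   }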
def get_ct_ddpg(exp, use_cuda, train_mode, gamma):
    conf = Config_Env(exp)
    npz_config = read_json(conf.npz_config_path)
    scope = fluid.Scope()
    with fluid.scope_guard(scope):
        with fluid.unique_name.guard():
            model = DDPGRNN(conf, npz_config)
            algorithm = DDPGAlgorithm(model,
                                      optimizer=conf.optimizer,
                                      lr=conf.lr,
                                      gpu_id=(0 if use_cuda == 1 else -1),
                                      gamma=gamma)
    ct = RLComputationTask(algorithm, model_dir=conf.model_dir,
                           mode=train_mode, scope=scope)
    return ct
def main(args):
    print_args(args, 'args')
    conf = Config(args.exp)

    ### build model
    npz_config = read_json(conf.npz_config_path)
    scope = fluid.Scope()
    with fluid.scope_guard(scope):
        with fluid.unique_name.guard():
            if args.model == 'DNN':
                model = DNN(conf, npz_config)
            elif args.model == 'UniRNN':
                model = UniRNN(conf, npz_config)
            algorithm = GenAlgorithm(model,
                                     optimizer=conf.optimizer,
                                     lr=conf.lr,
                                     gpu_id=(0 if args.use_cuda == 1 else -1))
    td_ct = GenComputationTask(algorithm,
                               model_dir=conf.model_dir,
                               mode=args.train_mode,
                               scope=scope)

    # get eval model
    eval_args = copy.deepcopy(args)
    eval_args.exp = args.eval_exp
    eval_args.model = args.eval_model
    eval_args.task = 'eval'
    eval_td_ct = eval_entry_func(eval_args)

    ### other tasks
    if args.task == 'test':
        test(td_ct, args, conf, None, td_ct.ckp_step)
        exit()
    elif args.task == 'eps_greedy_sampling':
        eps_greedy_sampling(td_ct, eval_td_ct, args, conf, None, td_ct.ckp_step)
        exit()
    elif args.task == 'evaluate':
        evaluate(td_ct, eval_td_ct, args, conf, td_ct.ckp_step)
        exit()

    ### start training
    summary_writer = tf.summary.FileWriter(conf.summary_dir)
    for epoch_id in range(td_ct.ckp_step + 1, conf.max_train_steps):
        train(td_ct, args, conf, summary_writer, epoch_id)
        td_ct.save_model(conf.model_dir, epoch_id)
        test(td_ct, args, conf, summary_writer, epoch_id)
        eps_greedy_sampling(td_ct, eval_td_ct, args, conf, summary_writer,
                            epoch_id)
def history_region_world(data_type):
    try:
        data = util.read_json(f"csv_{data_type}.json")
        ret = {"history": {}}
        for d in data.keys():
            for h in data[d]["history"].keys():
                if h not in ret["history"]:
                    ret["history"][h] = int(data[d]["history"][h])
                else:
                    ret["history"][h] += int(data[d]["history"][h])
        return jsonify(ret)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
def __init__(self, list_txt_path, npz_config_path, fetched_slot_names,
             credit_dataset=None, if_random_shuffle=True, one_pass=True):
    self.list_npz_file = self.read_list_txt(list_txt_path)
    self.npz_config = read_json(npz_config_path)
    self.fetched_slot_names = set(fetched_slot_names)
    self.credit_dataset = credit_dataset
    self.if_random_shuffle = if_random_shuffle
    self.one_pass = one_pass
    self._check_credits_exists()
def get_ct_credit(exp, use_cuda, train_mode, credit_scale):
    conf = Config_Env(exp, label_type='credit')
    npz_config = read_json(conf.npz_config_path)
    scope = fluid.Scope()
    model = UniRNN(conf, npz_config, scope=scope,
                   cell_type='gru', output_type='credit')
    algorithm = GenAlgorithm(model,
                             optimizer=conf.optimizer,
                             lr=conf.lr,
                             gpu_id=(0 if use_cuda else -1),
                             credit_scale=credit_scale)
    ct = GenComputationTask(algorithm, model_dir=conf.model_dir, mode=train_mode)
    return ct
def history_country(data_type, country):
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for region in list(data.keys()):
            if util.pattern_match(country, region, data[region]["iso2"],
                                  data[region]["iso3"]):
                ret = data[region]
                ret["name"] = region
                return jsonify(ret)
        raise CountryNotFound("This region cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
def proportion_region_world(data_type):
    try:
        data = util.read_json(f"csv_{data_type}.json")
        ret = {"proportion": {}}
        for d in data.keys():
            for h in data[d]["history"].keys():
                if h not in ret["proportion"]:
                    ret["proportion"][h] = int(data[d]["history"][h])
                else:
                    ret["proportion"][h] += int(data[d]["history"][h])
        # convert the summed world totals to percentages of the world
        # population, formatted to a fixed five decimal places
        for h in ret["proportion"]:
            ret["proportion"][h] = \
                f"{round(int(ret['proportion'][h]) / int(util.WORLD_POPULATION) * 100, 5):.5f}"
        return jsonify(ret)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
def daily(data_type):
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for region in list(data.keys()):
            ret = {"daily": {}}
            prev = 0
            for d, h in data[region]["history"].items():
                ret["daily"][d] = h - prev
                prev = int(h)
            ret["iso2"] = data[region]["iso2"]
            ret["iso3"] = data[region]["iso3"]
            data[region] = ret
        return jsonify(data)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")