Example no. 1
    def load_or_gen_list_dict(rec_f, rec):
        cls_idx_dict_f = os.path.splitext(rec_f)[0] + '.json'
        idx_cls_lst_f = os.path.splitext(rec_f)[0] + '-lst.json'
        if os.path.exists(cls_idx_dict_f) and os.path.exists(idx_cls_lst_f):
            idx_cls_lst = load_json(idx_cls_lst_f)
            cls_idx_dict = load_json(cls_idx_dict_f)

            idx_cls_lst = [int(item) for item in idx_cls_lst]

            keys = list(cls_idx_dict.keys())
            for k in keys:
                cls_idx_dict[int(k)] = cls_idx_dict.pop(k)
            return idx_cls_lst, cls_idx_dict
        else:
            idx_cls_lst = []
            for idx in rec.idx.keys():
                record = rec.read_idx(idx)
                h, _ = recordio.unpack(record)
                idx_cls_lst.append([idx, int(h.label)])

            cls_idx_dict = {}
            for idx, y in idx_cls_lst:
                if y in cls_idx_dict:
                    cls_idx_dict[y].append(idx)
                else:
                    cls_idx_dict[y] = [idx]

            idx_cls_lst = [int(l) for _, l in idx_cls_lst]

            save_json(idx_cls_lst, idx_cls_lst_f)
            save_json(cls_idx_dict, cls_idx_dict_f)

            return idx_cls_lst, cls_idx_dict
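
Note: JSON object keys are always serialized as strings, so the integer class ids used as keys in cls_idx_dict come back as strings after a save/load round trip; the loading branch above converts them back with int(). A minimal illustration with the standard json module (the load_json/save_json helpers are assumed to be thin wrappers around it):

    import json

    cls_idx_dict = {0: [3, 7], 1: [4]}
    restored = json.loads(json.dumps(cls_idx_dict))
    # keys come back as strings after the round trip
    assert list(restored.keys()) == ['0', '1']
    # convert them back, as load_or_gen_list_dict does above
    restored = {int(k): v for k, v in restored.items()}
    assert restored == cls_idx_dict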
Example no. 2
    def test_compress_and_decode_full_report(self):
        """
        Ensures that a typical, complete report is compressed and decoded properly.
        """
        report = load_json(fetch_file(resource_uri("reports/DAOBug.json")))
        hexstring = self.compress_report(report)
        decoded_report = self.decode_report(hexstring)

        expected_report = load_json(
            fetch_file(resource_uri("reports/DAOBugDecompressed.json")))
        self.__compare_json(decoded_report, expected_report)
Example no. 3
 def compare_json(self,
                  audit_file,
                  report_file_path,
                  json_loaded=False,
                  ignore_id=False):
     if not json_loaded:
         actual_json = load_json(audit_file)
     else:
         actual_json = audit_file
     expected_json = load_json(fetch_file(resource_uri(report_file_path)))
     if ignore_id:
         expected_json['request_id'] = actual_json['request_id']
     diff = DeepDiff(
         actual_json,
         expected_json,
         exclude_paths={
             "root['contract_uri']",
             "root['version']",
             # There is no keystore used for testing. Accounts
             # are dynamic and therefore cannot be compared
             "root['auditor']",
             "root['requestor']",
             # Timestamps vary between runs
             "root['timestamp']",
             "root['start_time']",
             "root['end_time']",
             # Paths differ depending on whether the tests run inside Docker
             "root['analyzers_reports'][0]['analyzer']['command']",
             "root['analyzers_reports'][0]['coverages'][0]['file']",
             "root['analyzers_reports'][0]['potential_vulnerabilities'][0]['file']",
             "root['analyzers_reports'][0]['start_time']",
             "root['analyzers_reports'][0]['end_time']",
             "root['analyzers_reports'][1]['analyzer']['command']",
             "root['analyzers_reports'][1]['coverages'][0]['file']",
             "root['analyzers_reports'][1]['potential_vulnerabilities'][0]['file']",
             "root['analyzers_reports'][1]['start_time']",
             "root['analyzers_reports'][1]['end_time']",
             "root['analyzers_reports'][2]['analyzer']['command']",
             "root['analyzers_reports'][2]['coverages'][0]['file']",
             "root['analyzers_reports'][2]['potential_vulnerabilities'][0]['file']",
             "root['analyzers_reports'][2]['start_time']",
             "root['analyzers_reports'][2]['end_time']",
             # Scripts are either executed or skipped, so the traces differ.
             "root['analyzers_reports'][0]['trace']",
             "root['analyzers_reports'][1]['trace']",
             "root['analyzers_reports'][2]['trace']"
         })
     pprint(diff)
     self.assertEqual(diff, {})
     self.assertEqual(ntpath.basename(actual_json['contract_uri']),
                      ntpath.basename(expected_json['contract_uri']))
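
For context, a minimal DeepDiff call showing how exclude_paths suppresses fields that are expected to differ (the dictionaries here are invented; only the deepdiff usage mirrors the comparison above):

    from deepdiff import DeepDiff

    actual = {"version": "1.0", "result": "pass"}
    expected = {"version": "2.0", "result": "pass"}
    # the version mismatch is excluded, so the diff is empty
    diff = DeepDiff(actual, expected, exclude_paths={"root['version']"})
    assert diff == {}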
Example no. 4
def main(args):
    if args.delete:
        ans = raw_input("Are you sure (y/[n])? ")
        if ans == 'y':
            print es.delete_index(args.index)
        else:
            print "Aborted."
        return

    page_mapping = io.load_json(config.ELASTICSEARCH_PAGE_MAPPING_FILE)
    sentence_mapping = io.load_json(config.ELASTICSEARCH_SENTENCE_MAPPING_FILE)
    print es.create_index(args.index)
    print es.put_page_mapping(page_mapping)
    print es.put_sentence_mapping(sentence_mapping)
Example no. 5
    def load_or_gen_dict(rec_f, rec):
        cls_idx_dict_f = os.path.splitext(rec_f)[0] + '.json'
        if os.path.exists(cls_idx_dict_f):
            cls_idx_dict = load_json(cls_idx_dict_f)
            keys = list(cls_idx_dict.keys())
            for k in keys:
                cls_idx_dict[int(k)] = cls_idx_dict.pop(k)
            return cls_idx_dict
        else:
            idx_cls_lst = []
            for idx in rec.idx.keys():
                record = rec.read_idx(idx)
                h, _ = recordio.unpack(record)
                idx_cls_lst.append([idx, h.label])

            cls_idx_dict = {}
            for idx, y in idx_cls_lst:
                y = int(y)
                if y in cls_idx_dict:
                    cls_idx_dict[y].append(idx)
                else:
                    cls_idx_dict[y] = [idx]

            save_json(cls_idx_dict, cls_idx_dict_f)

            return cls_idx_dict
Example no. 6
 def from_json(cls, json_file):
     try:
         w2i = cls(io.load_json(json_file))
     except json.decoder.JSONDecodeError:
         words = io.load_txt(json_file)
         w2i = cls({w: i for i, w in enumerate(words)})
     return w2i
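
A hypothetical usage sketch of the fallback above (file names are invented; it only assumes that io.load_json raises json.decoder.JSONDecodeError on non-JSON input, that io.load_txt returns a list of words, and that the classmethod lives on the Vocab class used in a later example):

    # vocab.json holds a word-to-index mapping such as {"the": 0, "cat": 1}
    w2i = Vocab.from_json("vocab.json")

    # vocab.txt holds one word per line; indices are assigned by position
    w2i = Vocab.from_json("vocab.txt")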
Example no. 7
    def create_visda_datasets(self, train_tforms, eval_tforms):
        """
        Create visda datasets
        :param train_tforms: training transformers
        :param eval_tforms: evaluation transformers
        :return:
            trs_set: training set pairing source and target samples
            tes_set: testing source set
            tet_set: testing target set
        """
        # Read config
        cfg = load_json(self.args.cfg)

        trs = cfg['SRC']['TRAIN']
        trt = cfg['TGT']['TRAIN']
        tes = cfg['SRC']['TRAIN']
        tet = cfg['TGT']['TEST']

        self.label_dict = cfg['Label']

        trs_set = DomainRecDataset(trs,
                                   trt,
                                   tforms=train_tforms,
                                   tformt=train_tforms,
                                   ratio=self.args.ratio)
        tes_set = DomainRecDataset(tes, tforms=eval_tforms)
        tet_set = DomainRecDataset(tet, tforms=eval_tforms)

        return trs_set, tes_set, tet_set
Example no. 8
 def load_training_state(self, load_dir):
     state = load_json(os.path.join(load_dir, "training_state.json"))
     self.steps_since_lr_change = state["steps_since_lr_change"]
     self.best_self_model_step = state["best_self_model_step"]
     self.best_rule_model_step = state["best_rule_model_step"]
     self.lr_tracker = state["learning_rate"]
     self.current_step = state["current_step"]
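
The layout of training_state.json is not shown in the listing; inferred from the keys read above, its contents would look roughly like this (values are invented):

    # hypothetical contents of training_state.json
    training_state = {
        "steps_since_lr_change": 0,
        "best_self_model_step": 12000,
        "best_rule_model_step": 8000,
        "learning_rate": 0.0001,
        "current_step": 15000,
    }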
Example no. 9
    def create_office_datasets(self, train_tforms, eval_tforms):
        """
        Create office datasets
        :param train_tforms: training transformers
        :param eval_tforms: evaluation transformers
        :return:
            trs_set: training set pairing source and target samples
            tes_set: testing source set
            tet_set: testing target set
        """
        cfg = load_json(self.args.cfg)
        cfg = split_office_train_test(cfg, 1, self.args.seed)

        trs = cfg[self.args.src.upper()]['SRC-TR']
        trt = cfg[self.args.tgt.upper()]['TGT-TR']
        tes = cfg[self.args.src.upper()]['TGT-TE']
        tet = cfg[self.args.tgt.upper()]['TGT-TE']

        trs_set = DomainFolderDataset(trs,
                                      trt,
                                      tforms=train_tforms,
                                      tformt=train_tforms,
                                      ratio=self.args.ratio)
        tes_set = DomainFolderDataset(tes, tforms=eval_tforms)
        tet_set = DomainFolderDataset(tet, tforms=eval_tforms)

        return trs_set, tes_set, tet_set
Example no. 10
def main():
    params = load_json(INPUT_SETTINGS)
    params[1] = params["1"]
    params[2] = params["2"]

    start_time_1 = time.time()

    gameplay = get_gameplay(params)
    play_game(gameplay, params)

    start_time_2 = time.time()

    wins = []
    for _ in tqdm(range(N_GAMES)):
        winner = play_game(gameplay, params)
        wins.append(winner)

    end_time = time.time()
    time_elapsed_1 = end_time - start_time_1
    time_elapsed_2 = end_time - start_time_2
    compilation_time = time_elapsed_1 - time_elapsed_2
    time_per_game = time_elapsed_2 / float(N_GAMES)

    print(f"Time taken for {N_GAMES} games with compilation time: {time_elapsed_1}")
    print(f"Time taken for {N_GAMES} games without compilation time: {time_elapsed_2}")
    print(f"Compilation time: {compilation_time}")
    print(f"Seconds per game: {time_per_game}")

    wins = np.array(wins)
    p1_win_rate = np.sum(wins == 1) / float(N_GAMES)
    p2_win_rate = np.sum(wins == 2) / float(N_GAMES)

    print(f"Player 1 win rate: {p1_win_rate}, player 2 win rate: {p2_win_rate}")
Example no. 11
 def get_answers_from_json(cls, json_file, extra_param):
     question_json = io.load_json(json_file)
     answers = []
     for question in question_json['questions']:
         if len(question['answer'].split()) > 1:
             raise Exception('Answer is not a single word')
         answers.append(question['answer'])
     return answers
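
The expected shape of the question file is implied but not shown; a minimal input that the loop above would accept (field names come from the code, values are invented):

    question_json = {
        "questions": [
            {"answer": "red"},
            {"answer": "cube"},
        ]
    }
    # a multi-word answer such as "dark red" would raise the exception above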
Example no. 12
 def _init_sentences(self, global_hook=None):
     # sentences ends up storing {'1': object, '2': object, '3': object}; all the data ultimately lives in this dict
     sentences = {}
     sentences_cfg = load_json(self.domain_cfg_path)
     for cfg in sentences_cfg.items():
         s_id = cfg[0]
         sentences[s_id] = Sentence(self.domain_name, cfg, global_hook)
     return sentences
Example no. 13
 def test_encode_decode_idempotence(self):
     """
     Ensures that decode(encode(report)) == decode(encode(decode(encode(report))))
     """
     report = load_json(fetch_file(resource_uri("reports/DAOBug.json")))
     decoded_report = self.decode_report(self.compress_report(report))
     twice_decoded_report = self.decode_report(
         self.compress_report(decoded_report))
     self.__compare_json(decoded_report, twice_decoded_report)
Example no. 14
    def load_office_cfg(self):
        cfg = load_json(self.args.cfg)
        cfg = split_office_train_test(cfg, 1, self.args.seed)

        trs = cfg[self.args.src.upper()]['SRC-TR']
        trt = cfg[self.args.tgt.upper()]['TGT-TR']
        tes = cfg[self.args.src.upper()]['TGT-TE']
        tet = cfg[self.args.tgt.upper()]['TGT-TE']

        return trs, trt, tes, tet
Example no. 15
    def load_visda_cfg(self):
        cfg = load_json(self.args.cfg)

        trs = cfg['SRC']['TRAIN']
        trt = cfg['TGT']['TRAIN']
        tes = cfg['SRC']['TRAIN']
        tet = cfg['TGT']['TEST']
        self.label_dict = cfg['Label']

        return trs, trt, tes, tet
Example no. 16
    def load_digits_cfg(self):
        cfg = load_json(self.args.cfg)
        cfg = split_digits_train_test(cfg, self.args.src.upper(),
                                      self.args.tgt.upper(), 1, self.args.seed)

        trs = cfg[self.args.src.upper()]['TR']
        trt = cfg[self.args.tgt.upper()]['TR']
        tes = cfg[self.args.src.upper()]['TE']
        tet = cfg[self.args.tgt.upper()]['TE']

        return trs, trt, tes, tet
Example no. 17
    def create_contract(self, web3_client, contract_abi_uri, contract_address):
        """
        Creates the audit contract from ABI.
        """
        abi_file = io_utils.fetch_file(contract_abi_uri)
        abi_json = io_utils.load_json(abi_file)

        return web3_client.eth.contract(
            address=contract_address,
            abi=abi_json,
        )
Example no. 18
    def execute(self):
        self.game_params = load_json(INPUT_SETTINGS)
        self.game_params[1] = self.game_params["1"]
        self.game_params[2] = self.game_params["2"]

        self.controller = Controller(self.game_params)
        self.controller.run()
        while self.show_screen:
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    self.show_screen = False
        self.cleanup()
Example no. 19
    def __assert_all_analyzers(self, request_id):
        """
        Asserts that all configured analyzers were executed and are included in the report.
        """
        row = self.__fetch_audit_from_db(request_id)

        audit_uri = row['audit_uri']
        audit_file = fetch_file(audit_uri)
        actual_json = load_json(audit_file)
        executed_analyzers = [x['analyzer']['name'] for x in actual_json['analyzers_reports']]
        for analyzer in self.__config.analyzers_config:
            name, conf = list(analyzer.items())[0]
            self.assertTrue(name in executed_analyzers)
Example no. 20
def generate_csv_per_scene_twostep(src_scene):
    print(src_scene)
    fov_h, fov_v = set_vis_fov(radius)

    cached_csv_lines_list = []
    for i in range(len(label_list)):
        cached_csv_lines = [["depth_path", "vismap_twostep", "label"]]
        cached_csv_lines_list.append(cached_csv_lines)

    src_scene_label_path = os.path.join(src_dataset_folder, src_scene, "motionlabeltwostep.json")

    if os.path.isfile(src_scene_label_path):
        skip_csv = True
        csv_file_path_lists = []
        for i in range(len(label_list)):
            csv_file_path = os.path.join(dst_dataset_folder, "{}_{}.csv".format(src_scene, label_list[i]))
            print(csv_file_path)
            csv_file_path_lists.append(csv_file_path)
            skip_csv = skip_csv and os.path.exists(csv_file_path)
        if skip_csv and (not enforce_regeneration):
            print("csv files of scene {} exists. Skip".format(src_scene))
        else:
            print("produce csv files of scene {}".format(src_scene))
            src_scene_depth_path = os.path.join(src_dataset_folder, src_scene, "depth")
            src_scene_vismap_path = os.path.join(src_dataset_folder, src_scene, "{}_{}".format(config["visibility_folder"], str(int(radius*100))))

            # load the json data
            label_raw_data = io.load_json(src_scene_label_path)
            radius_key = str(radius)
            # iterate over each pose
            for pose_id_key in label_raw_data[radius_key]:
                depth_path = os.path.join(src_scene_depth_path, "depth{}.png".format(pose_id_key))
                # iterate over each status
                for status_key in label_raw_data[radius_key][pose_id_key]:
                    status = float(status_key)
                    selected_label_list = label_raw_data[radius_key][pose_id_key][status_key]["selected_label"]
                    # iterate over each label for each combinations of neighbouring poses achieving the status
                    for i, label in enumerate(selected_label_list):
                        cached_csv_line = []
                        cached_csv_line.append(depth_path)
                        # build the file-name stem once and join the directory a single time
                        vis_map_prefix = "pose_{}_recons_{:d}_H{:d}_V{:d}".format(
                            pose_id_key, int(status * 100),
                            int(round(fov_h / np.pi * 180)),
                            int(round(fov_v / np.pi * 180)))
                        vis_map_path = os.path.join(src_scene_vismap_path, "{}_comb_{:d}.png".format(vis_map_prefix, i))
                        cached_csv_line.append(vis_map_path)
                        cached_csv_line.append(label)
                        cached_csv_lines_list[int(label)].append(cached_csv_line)
            for i in range(len(label_list)):
                c_io.save_csv(csv_file_path_lists[i], cached_csv_lines_list[i])
    else:
        print("motion label is not available yet")
Example no. 21
    def __init__(self, image_root_path, question_json_path, question_vocab_path, answer_vocab_path,
                 transform,  target_transform=None, data_nums='all', question_per_image=None):
        super(ClevrDataset, self).__init__()
        self.image_root_path = image_root_path
        if isinstance(question_json_path, str):
            self.question_json = io.load_json(question_json_path)
        else:
            self.question_json = question_json_path
        self.dataset_info = self.question_json['info']
        self.question_vocab = Vocab.from_json(question_vocab_path)
        self.answer_vocab = Vocab.from_json(answer_vocab_path)
        self.transform = transform
        self.target_transform = target_transform

        self.annotations = ClevrDataset.build_annotation(self.question_json, data_nums, question_per_image)
Example no. 22
    def load_or_gen_list(rec_f, rec):
        idx_cls_lst_f = os.path.splitext(rec_f)[0] + '-lst.json'
        if os.path.exists(idx_cls_lst_f):
            idx_cls_lst = load_json(idx_cls_lst_f)
            idx_cls_lst = [int(l) for l in idx_cls_lst]
            return idx_cls_lst
        else:
            idx_cls_lst = []
            for idx in rec.idx.keys():
                record = rec.read_idx(idx)
                h, _ = recordio.unpack(record)
                idx_cls_lst.append(int(h.label))

            save_json(idx_cls_lst, idx_cls_lst_f)

            return idx_cls_lst
Example no. 23
    def test_load_json_for_existent_file(self):
        json_obj = {
            'key1': 123,
            'key2': 456,
        }

        fd, tmp = mkstemp(text=True)

        with os.fdopen(fd, "w") as json_file:
            json.dump(json_obj, json_file)

        found = load_json(tmp)
        expected = json_obj

        self.assertEqual(found, expected)
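
The load_json helper exercised by this test (and the save_json counterpart used in earlier examples) is not included in the listing; a minimal sketch of what such wrappers around the standard json module might look like (an assumed implementation, not the projects' actual code):

    import json

    def load_json(path):
        # read and parse a JSON file from disk
        with open(path) as json_file:
            return json.load(json_file)

    def save_json(obj, path):
        # serialize obj to a JSON file on disk
        with open(path, 'w') as json_file:
            json.dump(obj, json_file)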
Example no. 24
def generate_csv_per_scene(src_scene):
    print(src_scene)
    cached_csv_lines_list = []
    for i in range(len(label_list)):
        cached_csv_lines = [["depth_path", "label"]]
        cached_csv_lines_list.append(cached_csv_lines)

    src_scene_label_path = os.path.join(src_dataset_folder, src_scene,
                                        "motionlabeltwostep.json")
    if os.path.isfile(src_scene_label_path):
        skip_csv = True
        csv_file_path_lists = []
        for i in range(len(label_list)):
            csv_file_path = os.path.join(
                dst_dataset_folder, "{}_{}.csv".format(src_scene,
                                                       label_list[i]))
            csv_file_path_lists.append(csv_file_path)
            skip_csv = skip_csv and os.path.exists(csv_file_path)
        if skip_csv and (not enforce_regeneration):
            print("csv files of scene {} exists. Skip".format(src_scene))
        else:
            print("produce csv files of scene {}".format(src_scene))
            src_scene_depth_path = os.path.join(src_dataset_folder, src_scene,
                                                "depth")
            # load the json data
            label_raw_data = io.load_json(src_scene_label_path)
            neigh_range_key = str(neigh_range)
            # for each item in label
            # data_result[radius_key][pose_id_key][status_key]["selected_label"]
            for pose_id_key in label_raw_data[neigh_range_key]:
                depth_path = os.path.join(src_scene_depth_path,
                                          "depth{}.png".format(pose_id_key))
                selected_label_list = label_raw_data[neigh_range_key][
                    pose_id_key]["0"]["selected_label"]
                for i, label in enumerate(selected_label_list):
                    cached_csv_line = []
                    cached_csv_line.append(depth_path)
                    cached_csv_line.append(label)
                    cached_csv_lines_list[int(label)].append(cached_csv_line)
            for i in range(len(label_list)):
                c_io.save_csv(csv_file_path_lists[i], cached_csv_lines_list[i])

    else:
        print("motion label is not available yet")
Example no. 25
def analyse_data_list_coverage(rooms, methods, dome_data_result,
                               vox_num_option):
    coverage_array_dict = {}
    coverage_std_array_dict = {}
    mean_coverage_array_dict = {}
    std_coverage_array_dict = {}
    for room in rooms:
        file_path = os.path.join(
            proj_path, result_folder,
            "{}_{}_{}.json".format(dataset_name, room, vox_num_option))
        raw_data = io_local.load_json(file_path)
        coverage_array_dict_room = {}
        for start_index in raw_data:
            for method in raw_data[start_index]:
                temp_array = np.asarray(
                    raw_data[start_index][method]["volume_size"]) / float(
                        dome_data_result[dataset_name][room])
                temp_array[temp_array > 1] = 1.0
                if method in coverage_array_dict:
                    coverage_array_dict[method] = np.vstack(
                        (coverage_array_dict[method], temp_array))
                else:
                    coverage_array_dict[method] = temp_array
                if method in coverage_array_dict_room:
                    coverage_array_dict_room[method] = np.vstack(
                        (coverage_array_dict_room[method], temp_array))
                else:
                    coverage_array_dict_room[method] = temp_array

        # compute the std for each room; the final value is averaged over rooms
        for method in methods:

            temp_std_array = np.std(coverage_array_dict_room[method], axis=0)
            if method in coverage_std_array_dict:
                coverage_std_array_dict[method] = np.vstack(
                    (coverage_std_array_dict[method], temp_std_array))
            else:
                coverage_std_array_dict[method] = temp_std_array

    for method in methods:
        mean_coverage_array_dict[method] = np.mean(coverage_array_dict[method],
                                                   axis=0)
        std_coverage_array_dict[method] = np.mean(
            coverage_std_array_dict[method], axis=0)
    return mean_coverage_array_dict, std_coverage_array_dict
Example no. 26
    def test_create_set_from_compressed_report(self):
        # Tests whether vulnerability sets for compressed reports match those from
        # their corresponding uncompressed ones.
        for report in os.listdir(fetch_file(resource_uri("reports/"))):
            uncompressed_report = load_json(
                fetch_file(resource_uri("reports/" + report)))
            expected_set = VulnerabilitiesSet.from_uncompressed_report(
                uncompressed_report)

            request_id = uncompressed_report['request_id']

            encoder = ReportEncoder()
            compressed_report = encoder.compress_report(
                uncompressed_report, request_id)
            decompressed_report = encoder.decode_report(
                compressed_report, request_id)
            found_set = VulnerabilitiesSet.from_uncompressed_report(
                decompressed_report)

            self.assertEqual(expected_set, found_set)
Example no. 27
def fetch_config(inject_contract=False):
    # create config from file; the contract is not provided here and will be injected separately
    config_file_uri = resource_uri("test_config.yaml")
    config = ConfigFactory.create_from_file(config_file_uri,
                                            os.getenv("QSP_ENV",
                                                      default="dev"),
                                            validate_contract_settings=False)
    if inject_contract:
        contract_source_uri = "./tests/resources/QuantstampAuditMock.sol"
        contract_metadata_uri = "./tests/resources/QuantstampAudit-metadata.json"
        audit_contract_metadata = load_json(fetch_file(contract_metadata_uri))
        audit_contract_name = get(audit_contract_metadata, '/contractName')

        addr, contract = __load_audit_contract_from_src(
            config.web3_client, contract_source_uri, audit_contract_name,
            config.account)

        config._Config__audit_contract_address = addr
        config._Config__audit_contract = contract

        config_utils = ConfigUtils(config.node_version)
        config_utils.check_configuration_settings(config)

    return config
Example no. 28
def analyse_data_list_computation(rooms, methods, key, vox_num_option):
    computation_array_dict = {}
    mean_computation_array_dict = {}
    std_computation_array_dict = {}
    # data_result[start_index_key][method]["volume_size"] = volume_sizes
    for room in rooms:
        file_path = os.path.join(
            proj_path, result_folder,
            "{}_{}_{}.json".format(dataset_name, room, vox_num_option))
        raw_data = io_local.load_json(file_path)
        for start_index in raw_data:
            for method in raw_data[start_index]:
                temp_array = np.asarray(raw_data[start_index][method][key])
                if method in computation_array_dict:
                    computation_array_dict[method] = np.vstack(
                        (computation_array_dict[method], temp_array))
                else:
                    computation_array_dict[method] = temp_array
    for method in methods:
        mean_computation_array_dict[method] = np.mean(
            computation_array_dict[method], axis=0)
        std_computation_array_dict[method] = np.std(
            computation_array_dict[method], axis=0)
    return mean_computation_array_dict, std_computation_array_dict
Example no. 29
 def parse_cfg(self):
     return collections.OrderedDict(
         load_json(os.path.join(self.stat_cfg_path, 'stat.json')))
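
Worth noting: on Python 3.7+ json.load already returns keys in file order, so the OrderedDict wrapper above mainly documents intent. A small sketch using the standard json module directly (the stat.json contents are invented):

    import collections
    import json

    cfg = collections.OrderedDict(json.loads('{"b": 1, "a": 2}'))
    # keys keep the order in which they appear in the file
    assert list(cfg) == ["b", "a"]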
Example no. 30
 def __load_report(self, report_file_path):
     return load_json(fetch_file(resource_uri(report_file_path)))