# Standard-library and third-party imports used in this example; project
# helpers (Config, load_json_file, load_pickle, generate_empty_results_dict,
# main_experiment, to_pickle, extract_pattern_type) are defined elsewhere.
from dataclasses import asdict
from datetime import datetime

import numpy as np
from tqdm import tqdm


def main(input_path: str) -> None:
    config = Config.get(input_conf=load_json_file(file_path=input_path))
    data = load_pickle(file_path=config.data_path)
    np.random.seed(seed=config.seed)
    results = generate_empty_results_dict()
    results['method_names'] = config.method_names
    print(f'Input: {asdict(config)}')
    print('Run experiments!')
    for weights, data_list in data.items():
        results_per_weight = list()
        print(f'Run experiments for weights: {weights}')
        # Use a name other than `data` so the outer dict is not shadowed.
        for sample in tqdm(data_list):
            results_per_weight.append(main_experiment(data=sample, config=config))

        results['results'][weights] = results_per_weight
        date = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        suffix = '_'.join(
            ['results_agnostic_sample_based', date, str(weights)])
        to_pickle(output_dir=config.output_dir,
                  data=results_per_weight,
                  suffix=suffix)

    date = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    pattern_type = f'pattern_type_{extract_pattern_type(data_path=config.data_path)}'
    suffix = '_'.join(['results_agnostic_sample_based', date, pattern_type])
    to_pickle(output_dir=config.output_dir, data=results, suffix=suffix)
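
# A minimal sketch of the I/O helpers assumed by these examples; the real
# `load_json_file`, `load_pickle` and `to_pickle` may differ. The
# `data_<suffix>.pkl` naming is only inferred from the score path that is
# hard-coded in the final example of this section.
import json
import pickle
from pathlib import Path
from typing import Any


def load_json_file(file_path: str) -> dict:
    # Read a JSON config file into a plain dict.
    with open(file_path, 'r') as f:
        return json.load(f)


def load_pickle(file_path: str) -> Any:
    # Load an arbitrary pickled object (e.g. the experiment data).
    with open(file_path, 'rb') as f:
        return pickle.load(f)


def to_pickle(output_dir: str, data: Any, suffix: str) -> str:
    # Pickle `data` into `output_dir` under a suffix-based name and return
    # the resulting path, which some callers collect into a list.
    output_path = Path(output_dir) / f'data_{suffix}.pkl'
    output_path.parent.mkdir(parents=True, exist_ok=True)
    with open(output_path, 'wb') as f:
        pickle.dump(data, f)
    return str(output_path)
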
Example #2
def main(input_path: str) -> None:
    config = Config.get(input_conf=load_json_file(file_path=input_path))
    print(f'Input: {asdict(config)}')
    for type_of_experiment in config.experiment_types:
        print(f'Type of experiment: {type_of_experiment}.')
        for type_of_signal in config.signal_types:
            print(f'Type of signal: {type_of_signal}.')
            data = create_datasets(config=config,
                                   signal_type=type_of_signal,
                                   experiment_type=type_of_experiment)
            date = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
            pattern_type = f'pattern_type_{config.pattern_type}'
            suffix = '_'.join(
                [type_of_experiment, type_of_signal, date, pattern_type])
            to_pickle(output_dir=config.output_dir, data=data, suffix=suffix)
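
# Purely illustrative input config, shown as the dict that load_json_file()
# would return; only the key names are inferred from the config.<attr>
# accesses in these examples, the values are made up.
example_input_conf = {
    'data_path': 'data/datasets_pattern_type_0.pkl',
    'output_dir': 'results/',
    'output_dir_scores': 'results/scores/',
    'seed': 42,
    'method_names': ['method_a', 'method_b'],
    'experiment_types': ['agnostic'],
    'signal_types': ['synthetic'],
    'pattern_type': 0,
}
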
def main(config_path: str):
    config = Config.get(input_conf=load_json_file(file_path=config_path))
    paths_evaluation_results = evaluate(config=config)
    plot(config=config, score_paths=paths_evaluation_results)
    print('Save results!')
    output_paths = list()
    date = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    pattern_type = f'pattern_type_{extract_pattern_type(data_path=config.data_path)}'
    suffix = '_'.join(['evaluation', date, pattern_type])
    output_paths += [
        to_pickle(output_dir=config.output_dir_scores,
                  data=paths_evaluation_results,
                  suffix=suffix)
    ]
    return output_paths
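
# `Config.get(input_conf=...)` together with `asdict(config)` in the examples
# suggests a dataclass wrapper around the loaded JSON dict. A sketch under
# that assumption; field names mirror the attributes accessed above, and the
# defaults only keep the sketch self-contained.
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class Config:
    data_path: str = ''
    output_dir: str = ''
    output_dir_scores: str = ''
    seed: int = 0
    method_names: List[str] = field(default_factory=list)
    experiment_types: List[str] = field(default_factory=list)
    signal_types: List[str] = field(default_factory=list)
    pattern_type: int = 0

    @classmethod
    def get(cls, input_conf: Dict[str, Any]) -> 'Config':
        # Ignore JSON keys the dataclass does not know about, so extra
        # entries in the config file do not raise a TypeError.
        known = set(cls.__dataclass_fields__)
        return cls(**{k: v for k, v in input_conf.items() if k in known})
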


def get_command_line_arguments() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        dest='path',
                        required=True,
                        help='Input file path of the JSON file containing '
                             'the input parameters for the experiment.')
    return parser.parse_args()


if __name__ == '__main__':
    args = get_command_line_arguments()
    try:
        evaluate(config=Config.get(input_conf=load_json_file(
            file_path=args.path)))
    except KeyboardInterrupt as e:
        print(e)
Example #5
    def setUp(self):
        self.tol = 1e-23
        self.Fe78_res_dict = load_json_file(resolve_resource("pyisocalc", "single_pattern", "Fe78"))
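
    # A hypothetical test sketching how self.tol and self.Fe78_res_dict might
    # be used; `compute_pattern` and the 'masses' key are placeholders that do
    # not appear in the original example.
    def test_reference_pattern_matches(self):
        computed_masses = compute_pattern('Fe78')
        for got, expected in zip(computed_masses, self.Fe78_res_dict['masses']):
            self.assertAlmostEqual(got, expected, delta=self.tol)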
Example #6
    }),
    'H': SimpleMock({
        'get_segments': lambda: [SegmentStub(element_stubs['H'], 1)],
        'charge': lambda: 1
    }),
    'Fe100H89O10': SimpleMock({
        'get_segments': lambda: [SegmentStub(element_stubs['Fe'], 100), SegmentStub(element_stubs['H'], 89),
                                 SegmentStub(element_stubs['O'], 10)],
        'charge': lambda: 369
    })
}

chemcalc_ref_values = {}
for sf_str in sf_stubs:
    fn = resolve_resource("pyisocalc", "perfect_pattern", sf_str)
    res_dict = load_json_file(fn)
    chemcalc_ref_values[sf_str] = res_dict
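
# Stylistic alternative: the loading loop above written as a dict
# comprehension (same behaviour, just more compact).
chemcalc_ref_values = {
    sf_str: load_json_file(resolve_resource("pyisocalc", "perfect_pattern", sf_str))
    for sf_str in sf_stubs
}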


class ElementTest(unittest.TestCase):
    def test_init(self):
        valid_args = ('H', 'Ar', 1, 49)
        valueerror_args = ('', 'foo', -45, 2345)
        typeerror_args = (1.0, None)
        for arg in valid_args:
            Element(arg)
        for arg in valueerror_args:
            self.assertRaises(ValueError, Element, arg)
        for arg in typeerror_args:
            self.assertRaises(TypeError, Element, arg)
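
    # Sketch of the same checks written with unittest.subTest (Python 3.4+),
    # so a failing argument is reported individually; not part of the
    # original test class.
    def test_init_with_subtests(self):
        for arg in ('H', 'Ar', 1, 49):
            with self.subTest(arg=arg):
                Element(arg)  # must not raise
        for arg in ('', 'foo', -45, 2345):
            with self.subTest(arg=arg):
                with self.assertRaises(ValueError):
                    Element(arg)
        for arg in (1.0, None):
            with self.subTest(arg=arg):
                with self.assertRaises(TypeError):
                    Element(arg)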
Example #7
import base64
import os

import common
import utils

if __name__ == "__main__":
    CWD = os.path.dirname(os.path.realpath(__file__))
    config = common.load_json_file(os.path.join(CWD, "config.json"))
    """ the password / ipv4 encrypted base64.b64encode("PASS")
        import base64
        print base64.b64encode()
        print base64.b64decode("cGFzc3dvcmQ=")
    """

    with utils.SSH(base64.b64decode(config["SSH"]['IP_ADDRESS']),
                   the_user=base64.b64decode(config["SSH"]['THE_USER']),
                   the_password=base64.b64decode(config["SSH"]['THE_PASSWORD'])
                   ) as ssh:
        cloud = common.UnderCloud(config=config)
        cloud.copy_from_workspace(ssh, local_dest_dir=CWD,
                                  local_file="tempest_cleanup.sh",
                                  remote_file="/home/stack/tempest_cleanup.sh",
                                  chown="stack:stack")
        ssh.send_cmd("sudo /home/stack/tempest_cleanup.sh", ignore_exit=True)
        # copy private key to workspace
        cloud.copy_to_workspace(ssh, local_dest_dir=CWD, local_file="id_rsa.tar.gz",
                                remote_file="/home/stack/.ssh/id_rsa")
        cloud.get_undercloud_nodes(ssh)
        cloud.show_overcloud_nodes()
        cloud.get_overcloud_status(ssh)
        # cleanup logs on all machines
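
# Hypothetical helper, separate from the script above, for preparing the
# base64-encoded entries that config.json is expected to contain; the example
# values are made up.
import base64
import json


def encode_config_value(value: str) -> str:
    # str -> UTF-8 bytes -> base64 bytes -> str, ready for the JSON file.
    return base64.b64encode(value.encode('utf-8')).decode('utf-8')


if __name__ == '__main__':
    print(json.dumps({'SSH': {
        'IP_ADDRESS': encode_config_value('192.0.2.10'),
        'THE_USER': encode_config_value('stack'),
        'THE_PASSWORD': encode_config_value('password'),
    }}, indent=2))
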
                              ('precision_based_scores', 'max_precision'),
                              # ('precision_based_scores', 'avg_precision')
                              ])

    print('Create heat maps!')
    pattern_type = int(extract_pattern_type(data_path=config.data_path))
    global_heat_maps(scores=scores, config=config, rnd_experiment_idx=idx,
                     pattern_type=pattern_type, snrs_of_interest=['0.00', '0.04', '0.08'])
    sample_based_heat_maps(scores=scores, config=config, data=data_dict,
                           rnd_sample_idx=rnd_sample_idx, pattern_type=pattern_type,
                           snrs_of_interest=['0.00', '0.04', '0.08'])


def get_command_line_arguments() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', dest='path', required=True,
                        help='Input file path of the JSON file containing '
                             'the input parameters for the experiment.')
    return parser.parse_args()


if __name__ == '__main__':
    args = get_command_line_arguments()
    path = args.path
    try:
        c = Config.get(input_conf=load_json_file(file_path=path))
        input_paths = ['results/scores/data_evaluation_2021-04-30-12-32-54_pattern_type_0.pkl']
        plot(config=c, score_paths=input_paths)
    except KeyboardInterrupt as e:
        print(e)
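
# Variant of the parser above that also accepts the score pickle paths on the
# command line, so the path does not need to be hard-coded; a sketch, not part
# of the original script.
def get_command_line_arguments_with_scores() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', dest='path', required=True,
                        help='Input file path of the JSON file containing '
                             'the input parameters for the experiment.')
    parser.add_argument('--score-paths', dest='score_paths', nargs='+',
                        default=None,
                        help='Optional list of score pickle files to plot.')
    return parser.parse_args()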