__license__ = "GNU GPL3"
__version__ = "0.2.0"
__maintainer__ = "Nicholas Logan"
__email__ = "*****@*****.**"
__status__ = "Prototype"

import pprint
import re
from typing import Callable, Tuple, Union

import requests
from lxml import etree

from functional import (chain, concurrently, curry, filterer, map, split_to,
                        to, zipper)

start_session = requests.session
filter_empty_values = to(filterer(func=lambda item: item[1], kw='item'),
                         kw='iterable')
format_arrl_login_data = 'login={}&password={}&acct_sel=&thisForm=login'.format
format_qrz_login_data = to(chain(zipper(iter1=('username', 'password')), dict),
                           kw='iter2')


def login_to_qrz(session: requests.Session, call_sign: str,
                 password: str) -> requests.Session:
    return chain(
        format_qrz_login_data,
        to(post(session=session, url='https://www.qrz.com/login'), kw='data'),
        to(return_session_if_login_verified(session=session),
           kw='response'))((call_sign, password))
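# A hedged reading of the combinators used above (the functional module itself
# is not shown, so this is inferred from usage, not from its source):
# chain(f, g, h)(x) appears to build the pipeline h(g(f(x))), to(func, kw=k)
# appears to feed the incoming value to func as the keyword argument k, and
# @curry lets a function be called with only some of its arguments, returning
# a callable that waits for the rest. Read that way, login_to_qrz is roughly:
#
#     data = format_qrz_login_data((call_sign, password))   # -> {'username': ..., 'password': ...}
#     response = post(session=session, url='https://www.qrz.com/login')(data=data)
#     return return_session_if_login_verified(session=session)(response=response)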


def main(arrl_session: requests.Session,
         qrz_session: requests.Session) -> dict:
    return chain(
        collect_arrl_data,
        split_to(handle_arrl_data(qrz_session=qrz_session),
                 kws=['missing_countries',
                      'contacted_call_signs']))(session=arrl_session)


def setup(own_call_sign: str, arrl_pass: str,
          qrz_pass: str) -> Tuple[requests.Session, requests.Session]:
    return chain(
        start_session,
        to(concurrently(
            login_to_lotw_arrl(password=arrl_pass, call_sign=own_call_sign),
            login_to_qrz(password=qrz_pass, call_sign=own_call_sign)),
           kw='session'))()


def handle_arrl_data(contacted_call_signs: tuple, missing_countries: dict,
                     qrz_session: requests.Session) -> dict:
    return chain(
        map(func=handle_country(filter_func=filterer(
            iterable=contacted_call_signs, kw='string'),
                                qrz_session=qrz_session),
            kw='country'), filter_empty_values,
        dict)(iterable=missing_countries.items())


def handle_country(country: tuple, filter_func: Callable,
                   qrz_session: requests.Session) -> Tuple[str, list]:
    return country[1], chain(
        filter_func,
        to(map(func=get_call_sign_data(qrz_session=qrz_session),
               kw='call_sign'),
           kw='iterable'),
    )(func=startswith(substring=country[0]))


def login_to_lotw_arrl(session: requests.Session, call_sign: str,
                       password: str) -> requests.Session:
    return chain(
        format_arrl_login_data,
        to(post(session=session, url='https://lotw.arrl.org/lotwuser/login'),
           kw='data'),
        to(return_session_if_login_verified(session=session),
           kw='response'))(call_sign, password)
__license__ = "GNU GPL3"
__version__ = "0.2.0"
__maintainer__ = "Nicholas Logan"
__email__ = "*****@*****.**"
__status__ = "Prototype"
import requests
from lxml import etree
import re
import pprint
from functional import chain, concurrently, to, split_to, map, filterer, zipper, curry

start_session = requests.session
filter_empty_values = to(filterer(func=lambda item: item[1], kw='item'),
                         kw='iterable')
format_arrl_login_data = 'login={}&password={}&acct_sel=&thisForm=login'.format
format_qrz_login_data = to(chain(zipper(iter1=('username', 'password')), dict),
                           kw='iter2')


@curry
def post(session: requests.Session, url: str,
         data: Union[str, dict]) -> requests.Response:
    return session.post(url, data=data)


@curry
def startswith(string: str, substring: str) -> bool:
    return string.startswith(substring)
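
# Hypothetical usage sketch (not part of the original module): assuming
# concurrently() runs both login_to_* callables against the shared session and
# returns the two authenticated sessions in the order they were given, a driver
# script could look roughly like this:
#
#     arrl_session, qrz_session = setup(own_call_sign='N0CALL',
#                                       arrl_pass='...', qrz_pass='...')
#     pprint.pprint(main(arrl_session=arrl_session, qrz_session=qrz_session))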


def get_missing_credits(session: requests.Session) -> dict:
Example #8
    def __call__(self):

        pandda_start_time = time.time()

        working_phil = parse_phil_args(master_phil=pandda_phil,
                                       args=self.args,
                                       blank_arg_prepend=None,
                                       home_scope=None)

        # # TODO: remove
        # print("printing welcome")
        # sys.stdout.flush()
        # # welcome()
        #
        # # TODO: remove
        # print("getting phil")
        # sys.stdout.flush()
        # p = working_phil.extract()
        #
        # # TODO: remove
        # print("making directories")
        # sys.stdout.flush()
        # out_dir = easy_directory(os.path.abspath(p.pandda.output.out_dir))
        # _log_dir = easy_directory(os.path.join(out_dir, 'logs'))
        # # TODO: remove
        # print("made directories")
        # sys.stdout.flush()
        #
        # _log_filename = 'pandda-{}.log'.format(time.strftime("%Y-%m-%d-%H%M", time.gmtime()))
        #
        # # TODO: remove
        # print("got log fileename")
        # sys.stdout.flush()
        # _def_filename = _log_filename.replace('.log', '.def')
        # _eff_filename = _log_filename.replace('.log', '.eff')
        #
        # # TODO: remove
        # print("makeing log")
        # sys.stdout.flush()
        # log = Log(log_file=os.path.join(_log_dir, _log_filename), verbose=p.settings.verbose)
        #
        # # TODO: remove
        # print("args processor")
        # sys.stdout.flush()
        # pandda_arg_processor = PanddaArgsProcessor(log, p.pandda)
        # args = pandda_arg_processor()
        # params = args.params
        # settings = p.settings
        # # TODO: remove
        # print("got settings")
        # sys.stdout.flush()
        #
        # pickled_dataset_meta = Meta({'number_of_datasets': 0, 'dataset_labels': [], 'dataset_pickle_list': []})

        ################################################################################################################

        # Maps options to code abstractions
        pandda_config = PanDDAConfig(working_phil)

        # Get Dataset
        pandda_dataset = mdc.dataset.dataset.MultiCrystalDataset(
            dataloader=pandda_config.dataloader,
            sample_loader=pandda_config.sample_loader)

        reference = pandda_config.get_reference(pandda_dataset.datasets)
        pandda_dataset.sample_loader.reference = reference

        # transform dataset
        dataset = chain(pandda_dataset, [
            pandda_config.check_data, pandda_config.scale_diffraction,
            pandda_config.filter_structure, pandda_config.filter_wilson,
            pandda_config.align
        ])
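        # chain() here is presumed to thread pandda_dataset through each of the
        # configured steps in order (check -> scale -> filter -> filter ->
        # align), returning the transformed dataset.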

        # Get grid
        grid = pandda_config.get_grid(reference)

        # Get file tree
        tree = pandda_config.pandda_output(dataset)

        # Define event Model
        pandda_event_model = PanDDAEventModel(
            pandda_config.statistical_model,
            pandda_config.clusterer,
            pandda_config.event_finder,
            bdc_calculator=pandda_config.bdc_calculator,
            statistics=[],
            map_maker=pandda_config.map_maker,
            event_table_maker=pandda_config.event_table_maker,
            cpus=pandda_config["args"]["cpus"],
            tree=tree)

        # Get partitions
        partitions = pandda_config.partitioner(dataset)

        # Instantiate a dataloader for the datasets
        # dataloader = DefaultPanDDADataloader(min_train_datasets=60,
        #                                      max_test_datasets=60)

        # Get the datasets to iterate over
        ds = [(idx, d) for idx, d in pandda_config.dataloader(dataset)]

        # Iterate over resolution shells
        for shell_num, shell_dataset in ds:

            client = get_client()

            # ###############################################
            # Get resolution
            # ###############################################
            resolutions_test = max([
                dts.data.summary.high_res for dtag, dts in
                shell_dataset.partition_datasets("test").items()
            ])
            resolutions_train = max([
                dts.data.summary.high_res for dtag, dts in
                shell_dataset.partition_datasets("train").items()
            ])
            max_res = max(resolutions_test, resolutions_train)
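            # The whole shell is processed at the poorest high-resolution limit
            # found among its test and train datasets, so every map in the
            # shell is computed at a common resolution.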

            # ###############################################
            # Instantiate shell variable names
            # ###############################################

            # Dataset names
            dtags = set(shell_dataset.partition_datasets("test").keys()) | set(
                shell_dataset.partition_datasets("train").keys())

            dask_dtags = {
                "{}".format(dtag.replace("-", "_")): dtag
                for dtag in dtags
            }
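            # Dataset tags are aliased with '-' replaced by '_' (presumably so
            # they can double as valid dask/Python identifiers); dask_dtags
            # maps each alias back to the original tag, and the train/test
            # lists below are expressed in terms of the aliases.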
            train_dtags = [
                dtag for dtag in dask_dtags
                if (dask_dtags[dtag] in shell_dataset.partition_datasets(
                    "train").keys())
            ]
            test_dtags = [
                dtag for dtag in dask_dtags
                if (dask_dtags[dtag] in shell_dataset.partition_datasets(
                    "test").keys())
            ]

            # ###############################################
            # Truncate datasets
            # ###############################################
            # TODO: move to imports section

            truncated_reference, truncated_datasets = PanddaDiffractionDataTruncater()(
                shell_dataset.datasets, reference)

            # ###############################################
            # Load computed variables into dask
            # ###############################################
            # Re-key the truncated datasets under their dask-safe tags
            for ddtag, dtag in dask_dtags.items():
                truncated_datasets[ddtag] = truncated_datasets[dtag]

            # record max res of shell datasets
            shell_max_res = max_res

            # ###############################################
            # Generate maps
            # ###############################################
            # Generate reference map for shell
            shell_ref_map = delayed(get_reference_map)(
                pandda_config.reference_map_getter, reference, shell_max_res,
                grid)

            # Load maps
            xmaps = {}
            for dtag in dask_dtags:
                xmaps[dtag] = delayed(load_sample)(pandda_config.map_loader,
                                                   truncated_datasets[dtag],
                                                   grid, shell_ref_map,
                                                   shell_max_res)

            # ###############################################
            # Fit statistical model to training sets
            # ###############################################
            xmaps_persisted_futures = client.persist(
                [xmaps[dtag] for dtag in dask_dtags])
            xmaps_computed = {
                dtag: client.compute(xmaps_persisted_futures[i]).result()
                for i, dtag in enumerate(dask_dtags)
            }
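            # client.persist() starts the map-loading graph running on the
            # cluster in the background; client.compute(...).result() then
            # blocks on each handle and pulls the finished xmap back into this
            # process so it can be passed to fit() below.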

            shell_fit_model = fit(
                pandda_config.statistical_model,
                [xmaps_computed[dtag] for dtag in train_dtags],
                [xmaps_computed[dtag] for dtag in test_dtags])

            shell_fit_model_scattered = client.scatter(shell_fit_model)

            xmaps_scattered = client.scatter(
                [xmaps_computed[dtag] for dtag in dask_dtags])
            xmaps_scattered_dict = {
                dtag: xmaps_scattered[i]
                for i, dtag in enumerate(dask_dtags)
            }

            grid_scattered = client.scatter(grid)
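            # client.scatter() ships the fitted model, the computed xmaps and
            # the grid out to the workers once, so the delayed tasks below hold
            # lightweight futures instead of embedding these large objects in
            # every task graph.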

            # ###############################################
            # Find events
            # ###############################################
            zmaps = {}
            clusters = {}
            events = {}
            bdcs = {}
            for dtag in dask_dtags:
                # Get z maps by evaluating model on maps
                zmaps[dtag] = delayed(evaluate_model)(
                    shell_fit_model_scattered, xmaps_scattered_dict[dtag])

                # Cluster outlying points in z maps
                clusters[dtag] = delayed(cluster_outliers)(
                    pandda_config.clusterer, truncated_datasets[dtag],
                    zmaps[dtag], grid_scattered)

                # Find events by filtering the clusters
                events[dtag] = delayed(filter_clusters)(
                    pandda_config.event_finder, truncated_datasets[dtag],
                    clusters[dtag], grid_scattered)

            events_persisted_futures = client.persist(
                [events[dtag] for dtag in dask_dtags])
            events_computed = {
                dtag: client.compute(events_persisted_futures[i]).result()
                for i, dtag in enumerate(dask_dtags)
            }

            events_scattered = client.scatter(
                [events_computed[dtag] for dtag in dask_dtags])
            events_scattered_dict = {
                dtag: events_scattered[i]
                for i, dtag in enumerate(dask_dtags)
            }

            # Calculate background correction factors
            for dtag in dask_dtags:
                bdcs[dtag] = delayed(estimate_bdcs)(
                    pandda_config.bdc_calculator, truncated_datasets[dtag],
                    xmaps_scattered_dict[dtag], shell_ref_map, events[dtag],
                    grid_scattered)

            # Criticise each individual dataset (generate statistics, event map and event table)
            event_maps = {}
            for dtag in dask_dtags:
                event_maps[dtag] = delayed(make_event_map)(
                    pandda_config.map_maker, tree, pandda_config.map_loader,
                    truncated_datasets[dtag], shell_ref_map, events[dtag],
                    bdcs[dtag])

            event_maps_persisted_futures = client.persist(
                [event_maps[dtag] for dtag in dask_dtags])

            event_maps_computed = {
                dtag: client.compute(event_maps_persisted_futures[i]).result()
                for i, dtag in enumerate(dask_dtags)
            }

            shell_maps = delayed(make_shell_maps)(pandda_config.map_maker,
                                                  tree, shell_num, reference,
                                                  shell_ref_map)

            shell_maps_persisted_futures = client.persist(shell_maps)
            shell_maps_computed = shell_maps_persisted_futures.result()

            event_table = delayed(make_event_table)(
                pandda_config.event_table_maker, tree, shell_num,
                shell_dataset, events_computed)

            event_table_persisted_future = client.persist(event_table)
            event_table_computed = event_table_persisted_future.result()

            client.close()