Example #1
    def __init__(self, folder, verbose=False, very_verbose=False):
        self.folder = Path(folder)
        self.verbose = verbose

        times = {}

        with about_time() as times["df_fit_results"]:
            self._load_df_fit_results()

        with about_time() as times["df_fit_predictions"]:
            self._load_df_fit_predictions()

        with about_time() as times["ranges"]:
            self._compute_ranges()

        with about_time() as times["cmap"]:
            self._set_cmap()

        with about_time() as times["hover"]:
            self._set_hover_info()

        with about_time() as times["columns"]:
            self._set_columns_scatter()

        with about_time() as times["labels"]:
            self._set_labels()

        with about_time() as times["columns_scatter_forward_reverse"]:
            self._set_columns_scatter_forward_reverse()

        if very_verbose:
            for key, val in times.items():
                print(f"\t {key}: {val.duration_human}")
Example #2
def test_duration_callable_mode(rand_offset, mock_timer):
    start, end = 1.4 + rand_offset, 2.65 + rand_offset
    mock_timer.side_effect = start, end

    at = about_time(lambda: 1)

    assert at.duration == pytest.approx(end - start)
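Outside the mocked-timer test, the callable mode can be exercised directly. A small sketch (the `slow_square` function is illustrative), relying on the `result`, `duration` and `duration_human` attributes exercised throughout these examples:

from about_time import about_time

def slow_square(x):
    return sum(x * x for _ in range(100_000)) // 100_000

at = about_time(slow_square, 7)    # extra positional/keyword args are forwarded to the callable
print(at.result)                   # 49
print(at.duration)                 # elapsed time in seconds (float)
print(at.duration_human)           # the same duration, nicely formatted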
Example #3
def run_aoc(func: Callable, input_path: str, finite: Union[int, bool] = False, *args, **kwargs):
    """Default puzzle function runner code for AoC

    :param func: Function to run that takes arguments d (data) and bar (progress bar), expected to return final value
    :param input_path: Path to the input file to run from
    :param finite: Whether the progress bar is finite: True sizes it to the number of input lines, or pass an int to set the total explicitly.
    """
    # Load file data
    if not isfile(input_path):
        print("Input file does not exist")
        sys.exit(-1)
    with open(input_path, "r") as f:
        d = [line.strip() for line in f.readlines()]

    # Track runtime and start the function with a progress bar
    if type(finite) is int:
        b = alive_bar(finite, force_tty=True)
    elif finite is True:
        b = alive_bar(len(d), force_tty=True)
    else:
        b = alive_bar(force_tty=True, unknown='stars')
    with b as bar:
        with about_time() as at:
            r = func(d, bar, *args, **kwargs)
    # Print the run time and the return result
    print("Program successfully finished in {}, return value is{}".format(
        at.duration_human, ":\n{}".format(r) if type(r) is str and '\n' in r else " '{}'".format(r)))

    # Return result from function for tests and other functionality that may need it
    return r
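A hypothetical invocation of this runner; the puzzle function and input path below are illustrative only and not from the original repository.

def day01(d, bar, *args, **kwargs):
    """Toy puzzle: sums the numeric input lines."""
    total = 0
    for line in d:
        total += int(line)
        bar()    # advance the progress bar once per processed line
    return total

# finite=True makes run_aoc size the bar to the number of input lines
result = run_aoc(day01, "inputs/day01.txt", finite=True)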
Example #4
    def tasks(self,
              tasks: Optional[str] = None,
              mode: Union[None, int, ModeTask] = None,
              limit: Optional[int] = None,
              reverse: bool = True) -> None:
        """Fetch current data from past tasks.

        Note that the `limit` field is just a hint; it may not be accurate.
        Also, the total number of tasks fetched may be slightly different from
        the server `max_tasks` setting.

        Args:
            tasks: the pattern to filter tasks
            mode: an optional display mode to present data
            limit: the maximum number of events to fetch, fetches all if None or 0 (default)
            reverse: if True (default), shows the most recent first

        See Also:
            ClearlyClient#capture_tasks()
            ClearlyClient#display_modes()

        """
        tasks_filter = ClearlyClient._parse_pattern(tasks)
        if not tasks_filter:
            raise UserWarning('Nothing would be selected.')

        mode = self._get_display_modes(mode)
        request = FilterTasksRequest(tasks_filter=tasks_filter,
                                     limit=limit,
                                     reverse=reverse)

        at = about_time(self._stub.filter_tasks(request))
        for task in at:
            self._display_task(task, mode.tasks)
        ClearlyClient._fetched_info(at)
Example #5
    def workers(self,
                workers: Optional[str] = None,
                mode: Union[None, int, ModeWorker] = None) -> None:
        """Fetch current data from known workers.
        
        Args:
            workers: the pattern to filter workers
            mode: an optional display mode to present data

        See Also:
            ClearlyClient#capture_workers()
            ClearlyClient#display_modes()

        """
        workers_filter = ClearlyClient._parse_pattern(workers)
        if not workers_filter:
            raise UserWarning('Nothing would be selected.')

        mode = self._get_display_modes(mode)
        request = FilterWorkersRequest(workers_filter=workers_filter)

        at = about_time(self._stub.filter_workers(request))
        for worker in at:
            self._display_worker(worker, mode.workers)
        ClearlyClient._fetched_info(at)
Example #6
    def filter_tasks(self, request, context):
        """Filter tasks by matching patterns to name, routing key and state."""
        _log_request(request, context)
        tasks_pattern, tasks_negate = PATTERN_PARAMS_OP(request.tasks_filter)
        state_pattern = request.state_pattern
        limit, reverse = request.limit, request.reverse

        pregex = re.compile(tasks_pattern)  # pattern filter condition
        sregex = re.compile(state_pattern)  # state filter condition

        def pcondition(task):
            return accepts(pregex, tasks_negate, task.name, task.routing_key)

        def scondition(task):
            return accepts(sregex, tasks_negate, task.state)

        found_tasks = (task for _, task in self.listener.memory.tasks_by_time(
            limit=limit or None, reverse=reverse)
                       if pcondition(task) and scondition(task))

        def callback(t):
            logger.debug('%s iterated %d tasks in %s (%s)',
                         self.filter_tasks.__name__, t.count, t.duration_human,
                         t.throughput_human)

        for task in about_time(callback, found_tasks):
            yield ClearlyServer._event_to_pb(task)[1]
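A compact sketch of this callback form in isolation: `about_time(callback, iterable)` times the iteration and, once the iterable is exhausted, calls the callback with a handle exposing `count`, `duration_human` and `throughput_human` (assuming the same about_time version these projects target, which accepts the two-argument form).

from about_time import about_time

def report(t):
    # invoked once, after the generator below is fully consumed
    print(f"iterated {t.count} items in {t.duration_human} ({t.throughput_human})")

squares = (n * n for n in range(1000))
for value in about_time(report, squares):
    pass    # consume the items exactly as you normally would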
Example #7
    def bar_assembler_factory(length, spinner_factory=None):
        """Assembles this bar into an actual bar renderer.

        Args:
            length (int): the bar rendition length (excluding the borders)
            spinner_factory (Optional[spinner_factory]): enable this bar to act in unknown mode

        Returns:
            a bar renderer

        """
        with about_time() as t_compile:
            draw_known, running, ended, draw_unknown = inner_bar_factory(length, spinner_factory)

        def draw(percent):
            return draw_known(running, percent)

        def draw_end(percent):
            return draw_known(ended, percent)

        def bar_check(*args, **kwargs):  # pragma: no cover
            return check(draw, t_compile, *args, **kwargs)

        draw.__dict__.update(
            end=draw_end, unknown=draw_unknown,
            check=fix_signature(bar_check, check, 2),
        )

        if draw_unknown:
            def draw_unknown_end(_percent=None):
                return draw_known(ended, 1.)

            draw_unknown.end = draw_unknown_end

        return draw
Example #8
def test_duration_context_manager_mode(rand_offset, mock_timer):
    start, end = 1.4 + rand_offset, 2.65 + rand_offset
    mock_timer.side_effect = start, end

    with about_time() as at:
        pass

    assert at.duration == pytest.approx(end - start)
Example #9
def test_duration_counter_throughput_mode(rand_offset, mock_timer):
    start, end = 1.4 + rand_offset, 2.65 + rand_offset
    mock_timer.side_effect = start, end

    at = about_time(range(2))
    for _ in at:
        pass

    assert at.duration == pytest.approx(end - start)
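The same counter/throughput mode, sketched outside the test harness: wrap any iterable, iterate normally, and read the handle afterwards (they also update in real time while the loop runs, as the counter-mode test further below asserts).

from about_time import about_time

at = about_time(range(5))
for n in at:
    pass                     # count and duration are kept up to date here too

print(at.count)              # 5
print(at.duration_human)     # total wall time of the loop
print(at.throughput_human)   # items per unit of time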
Example #10
def test_timer_all_modes(mode, rand_offset, mock_timer):
    start, end = 1.4 + rand_offset, 2.65 + rand_offset
    mock_timer.side_effect = (start, end)

    t = [None]
    if mode == 0:
        with about_time() as t[0]:
            pass
    elif mode == 1:
        t[0] = about_time(lambda: 1)
    else:

        def callback(h):
            t[0] = h

        for _ in about_time(callback, range(2)):
            pass

    assert t[0].duration == pytest.approx(end - start)
Example #11
    def filter_fit_results(
        dropdown_file_selection,
        tax_id_filter_input,
        tax_id_button,
        slider_values,
        marker_size_max,
        marker_transformation,
        n_clicks_modal,
        tax_id_filter_input_descendants,
        tax_id_filter_subspecies,
        slider_ids,
        modal_is_open,
        # active_tab,
        prevent_initial_call=True,
    ):

        # if modal is open and the "close" button is clicked, close down modal
        if n_clicks_modal and modal_is_open:
            return dash.no_update, False

        # if no files selected
        if not dropdown_file_selection:
            raise PreventUpdate

        with about_time() as at1:

            fit_results.set_marker_size(marker_transformation, marker_size_max)

            d_filter = {"shortnames": dropdown_file_selection}
            slider_names = [id["index"] for id in slider_ids]
            for shortname, values in zip(slider_names, slider_values):
                d_filter[shortname] = values

            apply_tax_id_filter(
                d_filter,
                tax_id_filter_input,
            )

            apply_tax_id_descendants_filter(
                d_filter,
                tax_id_filter_input_descendants,
                tax_id_filter_subspecies,
            )

            df_fit_results_filtered = fit_results.filter(d_filter)

        # print(f"Time taken to filter : {at1.duration_human}")

        # raise modal warning if no results due to too restrictive filtering
        if len(df_fit_results_filtered) == 0:
            return dash.no_update, True

        return df_fit_results_filtered.to_dict("records"), dash.no_update
Example #12
    def tasks(self,
              pattern=None,
              negate=False,
              state=None,
              limit=None,
              reverse=True,
              params=None,
              success=False,
              error=True):
        """Filters stored tasks and displays their current statuses.

        Note that, to be able to list the tasks sorted chronologically, celery retrieves
        tasks from the LRU event heap instead of the dict storage, so the total number
        of tasks fetched may be different from the server `max_tasks` setting. For
        instance, the `limit` field refers to max events searched, not max tasks.

        Args:
            Filter args:

            pattern (Optional[str]): a pattern to filter tasks
                ex.: '^dispatch|^email' to filter names starting with that
                      or 'dispatch.*123456' to filter that exact name and number
                      or even '123456' to filter that exact number anywhere.
            negate (bool): if True, finds tasks that do not match criteria
            state (Optional[str]): a celery task state to filter
            limit (int): the maximum number of events to fetch
                if None or 0, fetches all.
            reverse (bool): if True (default), shows the most recent first

            Display args:

            params (Optional[bool]): if True shows args and kwargs in the first and
                last seen states, if False never shows, and if None follows the
                success and error arguments.
                default is None
            success (bool): if True shows successful tasks' results
                default is False
            error (bool): if True shows failed and retried tasks' tracebacks.
                default is True, as you're monitoring to find errors, right?
        """
        request = clearly_pb2.FilterTasksRequest(
            tasks_filter=clearly_pb2.PatternFilter(pattern=pattern or '.',
                                                   negate=negate),
            state_pattern=state or '.',
            limit=limit,
            reverse=reverse)

        for task in about_time(ClearlyClient._fetched_callback,
                               self._stub.filter_tasks(request)):
            ClearlyClient._display_task(task, params, success, error)
Example #13
def test_counter_throughput_mode(it, rand_offset, mock_timer):
    start, end = 1.4 + rand_offset, 2.65 + rand_offset
    mock_timer.side_effect = chain((start, ), repeat(end))
    it_see, it_copy = tee(it)

    at = about_time(it_see)
    assert at.count == 0  # count should work even before starting iterating.

    i = 0
    for i, elem in enumerate(at, 1):
        assert elem == next(it_copy)
        assert at.count == i  # count works in real time now!
        assert at.duration > 0  # ensure the end time is also updated in real time.

    assert at.throughput == pytest.approx(i / 1.25)
Example #14
def test_counter_throughput(it, expected, rand_offset, mock_timer):
    callback = mock.Mock()

    start, end = 1.4 + rand_offset, 2.65 + rand_offset
    mock_timer.side_effect = (start, end)

    if expected:
        it_see, it_copy = tee(it)
    else:
        it_see, it_copy = it, None
    for elem in about_time(callback, it_see):
        assert elem == next(it_copy)

    callback.assert_called_once()
    (h, ), _ = callback.call_args
    print(h)
    assert h.count == expected
    assert h.throughput == pytest.approx(expected / 1.25)
Example #15
        def spinner_compiler_dispatcher_factory(actual_length=None):
            """Compile this spinner factory into an actual spinner runner.
            The previous parameters were the styling parameters, which defined a style.
            These are called operational parameters, which `alive_progress` binds dynamically
            as needed. Do not call this manually.

            Args:
                actual_length (int): the actual length to compile the frames renditions

            Returns:
                a spinner runner

            """
            if skip_compiler:
                return spinner_inner_factory(actual_length, **op_params)

            with about_time() as t_compile:
                gen = spinner_inner_factory(actual_length, **op_params)
                spec = spinner_compiler(gen, natural, extra_commands.get(True, ()))
            return spinner_runner_factory(spec, t_compile, extra_commands.get(False, ()))
Example #16
    def workers(self, pattern=None, negate=False, stats=True):
        """Filters known workers and prints their current status.
        
        Args:
            Filter args:

            pattern (Optional[str]): a pattern to filter workers
                ex.: '^dispatch|^email' to filter names starting with those
                      or 'dispatch.*123456' to filter that exact name and number
                      or even '123456' to filter that exact number anywhere.
            negate (bool): if True, finds workers that do not match criteria

            Display args:

            stats (bool): if True shows worker stats
        """
        request = clearly_pb2.FilterWorkersRequest(
            workers_filter=clearly_pb2.PatternFilter(pattern=pattern or '.',
                                                     negate=negate), )

        for worker in about_time(_fetched_callback,
                                 self._stub.filter_workers(request)):
            ClearlyClient._display_worker(worker, stats)
Example #17
    def filter_workers(self, request, context):
        """Filter workers by matching a pattern to hostname."""
        _log_request(request, context)
        workers_pattern, workers_negate = PATTERN_PARAMS_OP(
            request.workers_filter)

        hregex = re.compile(workers_pattern)  # hostname filter condition

        def hcondition(worker):
            return accepts(hregex, workers_negate,
                           worker.hostname)  # pragma: no branch

        found_workers = (worker for worker in sorted(
            self.listener.memory.workers.values(), key=WORKER_HOSTNAME_OP)
                         if hcondition(worker))

        def callback(t):
            logger.debug('%s iterated %d workers in %s (%s)',
                         self.filter_workers.__name__, t.count,
                         t.duration_human, t.throughput_human)

        for worker in about_time(callback, found_workers):
            yield ClearlyServer._event_to_pb(worker)[1]
Example #18
    def filter_tasks(self, request, context):
        """Filter tasks by matching patterns to name, routing key and state.

        Yields:
            clearly_pb2.TaskMessage

        """
        RPCService._log_request(request, context)

        pattern, negate = PATTERN_FILTER_OP(request.tasks_filter)
        limit, reverse = request.limit, request.reverse
        pattern = re.compile(pattern)

        # generators are cool!
        found_tasks = (task for _, task in self.memory.tasks_by_time(
            limit=limit or None, reverse=reverse)
                       if accept_task(pattern, negate, task))

        at = about_time(found_tasks)
        for task in at:
            yield obj_to_message(task, TaskMessage)
        logger.debug('%s iterated %d tasks in %s (%s)',
                     self.filter_tasks.__name__, at.count, at.duration_human,
                     at.throughput_human)
Example #19
    def filter_workers(self, request, context):
        """Filter workers by matching a pattern to hostname.

        Yields:
            clearly_pb2.WorkerMessage

        """
        RPCService._log_request(request, context)

        pattern, negate = PATTERN_FILTER_OP(request.workers_filter)
        pattern = re.compile(pattern)

        # generators are cool!
        found_workers = (worker
                         for worker in sorted(self.memory.workers.values(),
                                              key=WORKER_HOSTNAME_OP)
                         if accept_worker(pattern, negate, worker))

        at = about_time(found_workers)
        for worker in at:
            yield obj_to_message(worker, WorkerMessage)
        logger.debug('%s iterated %d workers in %s (%s)',
                     self.filter_workers.__name__, at.count, at.duration_human,
                     at.throughput_human)
Example #20
def get_app(out_dir_default, verbose=True):

    # Third Party
    # from metadamage import dashboard

    dashboard.utils.set_custom_theme()

    if verbose:
        print(f"Getting app now from {out_dir_default}")

    with about_time() as at1:
        fit_results = dashboard.fit_results.FitResults(
            folder=out_dir_default,
            verbose=verbose,
            very_verbose=False,
        )
    # print(f"{at1.duration_human}")

    bootstrap = dashboard.bootstrap.Bootstrap(fit_results, graph_kwargs)

    #%%BOOTSTRAP

    # external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]

    app = dash.Dash(
        __name__,
        external_stylesheets=[dbc.themes.COSMO],
        external_scripts=[
            "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.4/"
            "MathJax.js?config=TeX-MML-AM_CHTML",
        ],
        suppress_callback_exceptions=True,
        title="Metadamage",
        update_title="Updating...",
    )

    # to allow custom css
    app.scripts.config.serve_locally = True

    #%%

    def get_figure_from_df_and_tab(df_fit_results_filtered, active_tab):
        if active_tab == "fig_fit_results":
            fig = dashboard.figures.plot_fit_results(fit_results,
                                                     df_fit_results_filtered)
        elif active_tab == "fig_histograms":
            fig = dashboard.figures.plot_histograms(fit_results,
                                                    df_fit_results_filtered)
        elif active_tab == "fig_scatter_matrix":
            fig = dashboard.figures.plot_scatter_matrix(
                fit_results, df_fit_results_filtered)
        elif active_tab == "fig_forward_reverse":
            fig = dashboard.figures.plot_forward_reverse(
                fit_results, df_fit_results_filtered)
        else:
            print("got here: get_figure_from_df_and_tab")
        return fig

    def get_main_figure(data_or_df, active_tab="fig_fit_results"):
        if data_or_df is None:
            return dashboard.figures.create_empty_figure(s="")

        if isinstance(data_or_df, list):
            df = pd.DataFrame.from_records(data_or_df)
        elif isinstance(data_or_df, pd.DataFrame):
            df = data_or_df
        else:
            raise AssertionError(
                f"Got wrong type for data_or_df: {type(data_or_df)}")
        return get_figure_from_df_and_tab(df, active_tab)

    def make_tab_from_data(data_or_df=None, active_tab="fig_fit_results"):

        figure = get_main_figure(data_or_df, active_tab)
        main_graph = dcc.Graph(figure=figure, id="main_graph", **graph_kwargs)

        if active_tab == "fig_fit_results" or active_tab == "overview":

            return (dbc.Container([
                dbc.Row(
                    dbc.Col(main_graph, width=12),
                    justify="center",
                ),
                dbc.Row(
                    dbc.Col(bootstrap.card_overview_marker, width=10),
                    justify="center",
                ),
            ], ), )

        elif active_tab == "fig_histograms":
            return (dbc.Container([
                dbc.Row(
                    dbc.Col(main_graph, width=12),
                    justify="center",
                ),
            ], ), )

        elif active_tab == "fig_scatter_matrix":
            return (dbc.Container([
                dbc.Row(
                    dbc.Col(main_graph, width=12),
                    justify="center",
                ),
            ], ), )

        elif active_tab == "fig_forward_reverse":
            return (dbc.Container([
                dbc.Row(
                    dbc.Col(main_graph, width=12),
                    justify="center",
                ),
            ], ), )

        else:
            print("got here: make_tab_from_data")

    #%%

    card_graph = dbc.Card(
        [
            html.Div(
                make_tab_from_data(active_tab="overview"),  # main_graph
                id="main_graph_div",
                className="loader-fade",
            ),
        ],
        body=True,  # spacing before border
        # style={"height": "50vh"},
    )

    #%%

    app.layout = dbc.Container(
        [
            dcc.Store(id="store"),
            html.Br(),
            dbc.Row(
                [
                    dbc.Col(bootstrap.filter_card, width=3),
                    dbc.Col([bootstrap.card_tabs, card_graph], width=6),
                    dbc.Col(bootstrap.card_mismatch_dropdowns_and_graph,
                            width=3),
                ],
                justify="center",
                # className="h-75",
            ),
            html.Hr(),
            dbc.Row(
                [dbc.Col(bootstrap.card_datatable, width=12)],
                justify="center",
                # className="h-25",
            ),
            dbc.Modal(
                [
                    dbc.ModalHeader("Filtering Error"),
                    dbc.ModalBody(
                        "Too restrictive filtering, no points left to plot. "
                        "Please choose a less restrictive filtering."),
                    dbc.ModalFooter(
                        dbc.Button("Close",
                                   id="modal_close_button",
                                   className="ml-auto")),
                ],
                centered=True,
                id="modal",
            ),
        ],
        fluid=True,  # fill available horizontal space and resize fluidly
        # style={"height": "90vh"},
    )

    #%%

    def key_is_in_list_case_insensitive(lst, key):
        return any([key.lower() in s.lower() for s in lst])

    @app.callback(
        Output("dropdown_file_selection", "value"),
        Input("dropdown_file_selection", "value"),
    )
    def update_dropdown_when_Select_All(dropdown_file_selection):
        if key_is_in_list_case_insensitive(dropdown_file_selection,
                                           "Select all"):
            dropdown_file_selection = fit_results.shortnames
        elif key_is_in_list_case_insensitive(dropdown_file_selection,
                                             "Deselect"):
            dropdown_file_selection = dashboard.elements.get_shortnames_each(
                fit_results.shortnames)
        return dropdown_file_selection

    def append_to_list_if_exists(d, key, value):
        if key in d:
            d[key].append(value)
        else:
            d[key] = [value]

    def apply_tax_id_filter(d_filter, tax_id_filter_input):
        if tax_id_filter_input is None or len(tax_id_filter_input) == 0:
            return None

        for tax in tax_id_filter_input:
            if tax in fit_results.all_tax_ids:
                append_to_list_if_exists(d_filter, "tax_ids", tax)
            elif tax in fit_results.all_tax_names:
                append_to_list_if_exists(d_filter, "tax_names", tax)
            elif tax in fit_results.all_tax_ranks:
                append_to_list_if_exists(d_filter, "tax_ranks", tax)
            else:
                raise AssertionError(f"Tax {tax} could not be found. ")

    def apply_tax_id_descendants_filter(d_filter, tax_name,
                                        tax_id_filter_subspecies):
        if tax_name is None:
            return None

        tax_ids = taxonomy.extract_descendant_tax_ids(
            tax_name,
            include_subspecies=include_subspecies(tax_id_filter_subspecies),
        )
        N_tax_ids = len(tax_ids)
        if N_tax_ids != 0:
            if "tax_id" in d_filter:
                d_filter["tax_ids"].extend(tax_ids)
            else:
                d_filter["tax_ids"] = tax_ids

    @app.callback(
        Output("store", "data"),
        Output("modal", "is_open"),
        Input("dropdown_file_selection", "value"),
        Input("tax_id_filter_input", "value"),
        Input("tax_id_plot_button", "n_clicks"),
        Input({
            "type": "dynamic_slider",
            "index": ALL
        }, "value"),
        Input({
            "type": "slider_overview_marker_size",
            "index": ALL
        }, "value"),
        Input({
            "type": "dropdown_overview_marker_transformation",
            "index": ALL
        }, "value"),
        Input("modal_close_button", "n_clicks"),
        State("tax_id_filter_input_descendants", "value"),
        State("tax_id_filter_subspecies", "value"),
        State({
            "type": "dynamic_slider",
            "index": ALL
        }, "id"),
        State("modal", "is_open"),
        # State("tabs", "active_tab"),
    )
    def filter_fit_results(
        dropdown_file_selection,
        tax_id_filter_input,
        tax_id_button,
        slider_values,
        marker_size_max,
        marker_transformation,
        n_clicks_modal,
        tax_id_filter_input_descendants,
        tax_id_filter_subspecies,
        slider_ids,
        modal_is_open,
        # active_tab,
        prevent_initial_call=True,
    ):

        # if modal is open and the "close" button is clicked, close down modal
        if n_clicks_modal and modal_is_open:
            return dash.no_update, False

        # if no files selected
        if not dropdown_file_selection:
            raise PreventUpdate

        with about_time() as at1:

            fit_results.set_marker_size(marker_transformation, marker_size_max)

            d_filter = {"shortnames": dropdown_file_selection}
            slider_names = [id["index"] for id in slider_ids]
            for shortname, values in zip(slider_names, slider_values):
                d_filter[shortname] = values

            apply_tax_id_filter(
                d_filter,
                tax_id_filter_input,
            )

            apply_tax_id_descendants_filter(
                d_filter,
                tax_id_filter_input_descendants,
                tax_id_filter_subspecies,
            )

            df_fit_results_filtered = fit_results.filter(d_filter)

        # print(f"Time taken to filter : {at1.duration_human}")

        # raise modal warning if no results due to too restrictive filtering
        if len(df_fit_results_filtered) == 0:
            return dash.no_update, True

        return df_fit_results_filtered.to_dict("records"), dash.no_update

    #%%

    # def list_is_none_or_empty(l):
    #     return l is None or len(l) == 0

    def get_id_dict(child):
        return child["props"]["id"]

    def find_index_in_children(children, id_type, search_index):
        for i, child in enumerate(children):
            d_id = get_id_dict(child)
            if d_id["type"] == id_type and d_id["index"] == search_index:
                return i

    def get_current_names(current_ids):
        return [x["index"] for x in current_ids if x]

    def slider_is_added(current_names, dropdown_names):
        "Returns True if a new slider is added, False otherwise"
        return set(current_names).issubset(dropdown_names)

    def get_name_of_added_slider(current_names, dropdown_names):
        return list(set(dropdown_names).difference(current_names))[0]

    def get_name_of_removed_slider(current_names, dropdown_names):
        return list(set(current_names).difference(dropdown_names))[0]

    def remove_name_from_children(column, children, id_type):
        " Given a column, remove the corresponding child element from children"
        index = find_index_in_children(children,
                                       id_type=id_type,
                                       search_index=column)
        children.pop(index)

    def get_slider_name(column, low_high):
        if isinstance(low_high, dict):
            low = low_high["min"]
            high = low_high["max"]
        elif isinstance(low_high, (tuple, list)):
            low = low_high[0]
            high = low_high[1]

        if column in dashboard.utils.log_transform_columns:
            low = dashboard.utils.log_transform_slider(low)
            high = dashboard.utils.log_transform_slider(high)

        low = utils.human_format(low)
        high = utils.human_format(high)

        return f"{column}: [{low}, {high}]"

    def make_new_slider(column, id_type, N_steps=100):

        d_range_slider = dashboard.elements.get_range_slider_keywords(
            fit_results,
            column=column,
            N_steps=N_steps,
        )

        return dbc.Container(
            [
                dbc.Row(html.Br()),
                dbc.Row(
                    html.P(
                        get_slider_name(column, d_range_slider),
                        id={
                            "type": "dynamic_slider_name",
                            "index": column
                        },
                    ),
                    justify="center",
                ),
                dbc.Row(
                    dbc.Col(
                        dcc.RangeSlider(
                            id={
                                "type": "dynamic_slider",
                                "index": column
                            },
                            **d_range_slider,
                        ),
                        width=12,
                    ), ),
            ],
            id={
                "type": id_type,
                "index": column
            },
        )

    @app.callback(
        Output("dynamic_slider-container", "children"),
        Input("dropdown_slider", "value"),
        State("dynamic_slider-container", "children"),
        State({
            "type": "dynamic_slider",
            "index": ALL
        }, "id"),
        prevent_initial_call=True,
    )
    def add_or_remove_slider(
        dropdown_names,
        children,
        current_ids,
    ):

        id_type = "dbc"

        current_names = get_current_names(current_ids)

        # add new slider
        if slider_is_added(current_names, dropdown_names):
            column = get_name_of_added_slider(current_names, dropdown_names)
            new_element = make_new_slider(column, id_type=id_type)
            children.append(new_element)

        # remove selected slider
        else:
            column = get_name_of_removed_slider(current_names, dropdown_names)
            remove_name_from_children(column, children, id_type=id_type)

        return children

    @app.callback(
        Output({
            "type": "dynamic_slider_name",
            "index": MATCH
        }, "children"),
        Input({
            "type": "dynamic_slider",
            "index": MATCH
        }, "value"),
        State({
            "type": "dynamic_slider",
            "index": MATCH
        }, "id"),
        prevent_initial_call=True,
    )
    def update_slider_name(dynamic_slider_values, dynamic_slider_name):
        column = dynamic_slider_name["index"]
        name = get_slider_name(column, dynamic_slider_values)
        return name

    #%%

    @app.callback(
        Output("main_graph", "figure"),
        Input("store", "data"),
        Input("tabs", "active_tab"),
    )
    def update_main_graph(data, active_tab):
        if active_tab is None:
            print("update_main_graph got active_tab == None")
        if data is None:
            print("update_main_graph got data == None")

        figure = get_main_figure(data, active_tab)

        # allows the marker size to change without losing the zoom level
        if active_tab == "fig_fit_results":
            figure["layout"]["uirevision"] = True

        return figure

    @app.callback(
        Output("main_graph_div", "children"),
        Input("tabs", "active_tab"),
        Input({
            "type": "button_reset",
            "index": ALL
        }, "n_clicks"),
        State("store", "data"),
    )
    def update_tab_layout(active_tab, button_n, data):
        if active_tab is None:
            print("update_tab_layout got active_tab == None")
        if data is None:
            # print("update_tab_layout got data == None")
            raise PreventUpdate

        return make_tab_from_data(data, active_tab)

    #%%

    @app.callback(
        Output("graph_mismatch", "figure"),
        Input("tabs", "active_tab"),
        Input("dropdown_mismatch_tax_id", "value"),
        Input("dropdown_mismatch_shortname", "value"),
    )
    def update_mismatch_plot(active_tab, dropdown_mismatch_tax_id,
                             dropdown_name):

        if dropdown_mismatch_tax_id is None:
            if active_tab == "fig_histograms":
                s = "Does not work for binned data"
                return dashboard.figures.create_empty_figure(s=s)
            else:
                return dashboard.figures.create_empty_figure()

        try:
            group = fit_results.get_single_count_group(
                shortname=dropdown_name,
                tax_id=dropdown_mismatch_tax_id,
            )
            fit = fit_results.get_single_fit_prediction(
                shortname=dropdown_name,
                tax_id=dropdown_mismatch_tax_id,
            )
            chosen_mismatch_columns = ["C→T", "G→A"]
            fig = dashboard.figures.plot_count_fraction(
                group,
                chosen_mismatch_columns,
                fit,
            )
            return fig

        # when selecting histogram without customdata
        except KeyError:
            raise PreventUpdate

    #%%

    @app.callback(
        Output("data_table", "data"),
        Input("main_graph", "clickData"),
        Input("tabs", "active_tab"),
    )
    def update_data_table(click_data, active_tab):
        if click_data is None:
            if active_tab == "fig_histograms":
                s = "Does not work for binned data (histograms)"
                ds = dashboard.datatable.create_empty_dataframe_for_datatable(
                    s)
            else:
                ds = dashboard.datatable.create_empty_dataframe_for_datatable()
            return ds.to_dict("records")

        try:
            tax_id = fit_results.parse_click_data(click_data, column="tax_id")
            shortname = fit_results.parse_click_data(click_data,
                                                     column="shortname")
            df_fit_results_filtered = fit_results.filter({
                "shortname": shortname,
                "tax_id": tax_id
            })
            return df_fit_results_filtered.to_dict("records")

        # when selecting histogram without customdata
        except KeyError:
            raise PreventUpdate

    #%%

    def get_tax_id_options_based_on_shortname(shortname,
                                              df_string="df_fit_results"):
        """df_string is a string, eg. df_fit_results or  df_counts.
        The 'df_' part is optional
        """
        tax_ids = sorted(
            fit_results.load_df_counts_shortname(
                shortname, columns="tax_id")["tax_id"].unique())
        options = [{"label": i, "value": i} for i in tax_ids]
        return options

    @app.callback(
        Output("dropdown_mismatch_tax_id", "options"),
        Input("dropdown_mismatch_shortname", "value"),
    )
    def update_dropdown_tax_id_options(shortname):
        # if shortname is None:
        # print("update_dropdown_tax_id_options got shortname==None")
        return get_tax_id_options_based_on_shortname(shortname,
                                                     df_string="counts")

    @app.callback(
        Output("dropdown_mismatch_shortname", "value"),
        Output("dropdown_mismatch_tax_id", "value"),
        Input("main_graph", "clickData"),
        State("tabs", "active_tab"),
    )
    def update_dropdowns_based_on_click_data(click_data, active_tab):
        if click_data is not None:
            if active_tab == "fig_histograms":
                # print("update_dropdowns_based_on_click_data got here")
                raise PreventUpdate
            try:
                tax_id = fit_results.parse_click_data(click_data,
                                                      column="tax_id")
                shortname = fit_results.parse_click_data(click_data,
                                                         column="shortname")
                return shortname, tax_id
            except KeyError:
                # print("update_dropdowns_based_on_click_data got KeyError")
                raise PreventUpdate
                # return None, None
        else:
            # print("update_dropdowns_based_on_click_data got click_data == None")
            raise PreventUpdate

    #%%

    def include_subspecies(subspecies):
        if len(subspecies) == 1:
            return True
        return False

    @app.callback(
        Output("tax_id_filter_counts_output", "children"),
        # Input("tax_id_filter_button", "n_clicks"),
        Input("tax_id_filter_input_descendants", "value"),
        Input("tax_id_filter_subspecies", "value"),
    )
    def update_tax_id_filter_counts(tax_name, subspecies):

        if tax_name is None or tax_name == "":
            return f"No specific Tax IDs selected, defaults to ALL."
            # raise PreventUpdate

        tax_ids = taxonomy.extract_descendant_tax_ids(
            tax_name,
            include_subspecies=include_subspecies(subspecies),
        )
        N_tax_ids = len(tax_ids)
        if N_tax_ids == 0:
            return f"Couldn't find any Tax IDs for {tax_name} in NCBI"
        return f"Found {utils.human_format(N_tax_ids)} Tax IDs for {tax_name} in NCBI"

    #%%

    @app.callback(
        Output("filters_dropdown_files", "is_open"),
        Output("filters_toggle_files_button", "outline"),
        Input("filters_toggle_files_button", "n_clicks"),
        State("filters_dropdown_files", "is_open"),
    )
    def toggle_collapse_files(n, is_open):
        # after click
        if n:
            return not is_open, is_open
        # initial setup
        return is_open, True

    @app.callback(
        Output("filters_dropdown_tax_ids", "is_open"),
        Output("filters_toggle_tax_ids_button", "outline"),
        Input("filters_toggle_tax_ids_button", "n_clicks"),
        State("filters_dropdown_tax_ids", "is_open"),
    )
    def toggle_collapse_tax_ids(n, is_open):
        if n:
            return not is_open, is_open
        return is_open, True

    @app.callback(
        Output("filters_dropdown_ranges_button", "is_open"),
        Output("filters_toggle_ranges_button", "outline"),
        Input("filters_toggle_ranges_button", "n_clicks"),
        State("filters_dropdown_ranges_button", "is_open"),
    )
    def toggle_collapse_ranges(n, is_open):
        if n:
            return not is_open, is_open
        return is_open, True

    return app
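As a hedged illustration of how such an app factory is typically consumed (the output directory below is hypothetical; `run_server` is the classic Dash entry point, exposed as `app.run` in newer Dash releases):

if __name__ == "__main__":
    app = get_app(out_dir_default="./data/out", verbose=True)
    app.run_server(host="127.0.0.1", port=8050, debug=False)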
Example #21
def test_callable_handler_dont_have_x(dont):
    t = about_time(lambda: 1)

    with pytest.raises(AttributeError):
        getattr(t, dont)
Example #22
def test_counter_throughput_mode_dont_have_field(field):
    at = about_time(range(2))

    with pytest.raises(AttributeError):
        getattr(at, field)
Example #23
def test_callable_mode_result(call, args, kwargs, expected):
    at = about_time(call, *args, **kwargs)
    assert at.result == expected
Example #24
def test_callable_handler_has_result():
    t = about_time(lambda: 1)
    assert t.result == 1
Example #25
def test_counter_throughput_must_have_fn():
    with pytest.raises(UserWarning):
        about_time(it=[])
Example #26
def test_context_manager_mode_dont_have_field(field):
    with about_time() as at:
        pass

    with pytest.raises(AttributeError):
        getattr(at, field)
Example #27
def test_context_manager_dont_have_x(dont):
    with about_time() as t:
        pass

    with pytest.raises(AttributeError):
        getattr(t, dont)
Example #28
def test_wrong_params_must_complain(value):
    with pytest.raises(UserWarning):
        about_time(value)
Example #29
def test_callable_mode_dont_have_field(field):
    at = about_time(lambda: 1)

    with pytest.raises(AttributeError):
        getattr(at, field)