def get_stack_overflow():
    """The Stack Overflow compontent writes the Questions, Results and a Distribution"""
    schema, results = get_data()

    questions_grid = stack_overflow_questions_grid(schema)
    items = [
        get_stack_overflow_intro(),
        ip.Markdown("""## Stack Overflow Questions 2019"""),
        questions_grid,
        ip.Markdown("## Stack Overflow Results 2019"),
        stack_overflow_results_grid(results, questions_grid),
        respondents_per_country_component(results),
    ]

    return to_output_widget(items)
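
The helper to_output_widget is used throughout these examples but never defined here. A minimal sketch, assuming ipywidgets, of what such a helper might look like (the real implementation in the source project may differ):

import ipywidgets as widgets
from IPython import display


def to_output_widget(items):
    """Capture a list of display objects into a single Output widget (sketch)."""
    out = widgets.Output()
    with out:  # everything displayed inside this context is captured by the widget
        for item in items:
            display.display(item)
    return out
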
Example #2
    def progress_report(self):
        """
        Reports the progress of the inference
        """
        self.get_intermediate_results()

        dead_samples = self.intermediate_results['rejected_points']
        live_samples = self.intermediate_results['live_points']
        likelihood = self.intermediate_results['logLikelihood']
        lnX = self.intermediate_results['lnX']
        if not ((dead_samples is None) or (likelihood is None) or
                (lnX is None)):
            fig = visualization.trace_plot(
                parameter_names=self._active_parameters,
                samples=dead_samples,
                live_samples=live_samples,
                likelihood=likelihood,
                lnX=lnX)
            fig_filepath = os.path.join(self._run_directory,
                                        'progress_report.pdf')
            msg = 'Saving progress report to {}'.format(fig_filepath)
            log.info(msg)
            fig.savefig(fig_filepath)
            if misc.is_notebook():
                ipd.clear_output()
                ipd.display(
                    ipd.Markdown(
                        "\n**Progress report:**"
                        '\nnumber of likelihood evaluations  {}'.format(
                            self._likelihood_evaluations_counter)))
                plt.show()
            else:
                print(msg)
Example #3
    def show(self, format=None):
        """show the data object content in Jupyter

        :param format: format to use (when there is no/wrong suffix), e.g. 'png'
        """
        if not is_ipython:
            logger.warning(
                "Jupyter/IPython was not detected, .show() will only display inside Jupyter"
            )
            return

        from IPython import display

        suffix = self.suffix.lower()
        if format:
            suffix = "." + format

        if suffix in [".jpg", ".png", ".gif"]:
            display.display(display.Image(self.get(), format=suffix[1:]))
        elif suffix in [".htm", ".html"]:
            display.display(display.HTML(self.get(encoding="utf-8")))
        elif suffix in [".csv", ".pq", ".parquet"]:
            display.display(self.as_df())
        elif suffix in [".yaml", ".txt", ".py"]:
            display.display(display.Pretty(self.get(encoding="utf-8")))
        elif suffix == ".json":
            display.display(display.JSON(orjson.loads(self.get())))
        elif suffix == ".md":
            display.display(display.Markdown(self.get(encoding="utf-8")))
        else:
            logger.error(f"unsupported show() format {suffix} for {self.url}")
Example #4
def hypothesis_testing():
    import IPython.display as dp
    notes = """### Assumption Tests
 
|Assumption test| Function |
| --- | --- |
| **Normality**| `scipy.stats.normaltest`|
| **Equal Variance** | `scipy.stats.levene`|


### Hypothesis Tests

| Parametric tests (means) | Function | Nonparametric tests (medians) | Function |
| --- | --- | --- | --- |
| **1-sample t test** |`scipy.stats.ttest_1samp()`|  **1-sample Wilcoxon** |`scipy.stats.wilcoxon`|
| **2-sample t test** |`scipy.stats.ttest_ind()` | **Mann-Whitney U test** |`scipy.stats.mannwhitneyu()`|
| **One-Way ANOVA** | `scipy.stats.f_oneway()` | **Kruskal-Wallis** | `scipy.stats.kruskal` | 

 
 ### Post-Hoc Tests/Calculations
 
 | Post-Hoc Tests/Calculations | Function |
 |--- | --- |
 |**Tukey's Pairwise Comparisons** | `statsmodels.stats.multicomp.pairwise_tukeyhsd`|
 |**Effect Size**| `Cohens_d`|
 |**Statistical Power** | `statsmodels.stats.power`:<br>  `TTestIndPower` , `TTestPower`|
 """
    return dp.Markdown(notes)
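
For context, a minimal sketch (with made-up data) of how the SciPy functions listed in the tables above are typically called:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
group_a = rng.normal(loc=0.0, scale=1.0, size=50)   # synthetic sample A
group_b = rng.normal(loc=0.3, scale=1.0, size=50)   # synthetic sample B

# Assumption checks
print(stats.normaltest(group_a))       # normality (D'Agostino-Pearson)
print(stats.levene(group_a, group_b))  # equal variance

# Parametric vs. nonparametric two-sample tests
print(stats.ttest_ind(group_a, group_b))
print(stats.mannwhitneyu(group_a, group_b))
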
Example #5
def explore(obj):
    '''
    Interactive object explorer

    Args:
        obj: Object to introspect

    This widget displays an object and some information about it (eg: its
    signature, if it's callable) and provides a dropdown to select one of its
    attributes.  The selected attribute is displayed in the same way (including
    a dropdown of the attribute's attributes.)
    '''
    def show_markdown(markdown):
        display.display(display.Markdown(markdown.replace('>', '\\>')))

    flag_predicates = {'callable': callable}
    flag_predicates.update({
        k: v
        for k, v in vars(inspect).items() if k.startswith('is') and callable(v)
    })
    show_markdown('### {typ}'.format(typ=type(obj).__name__))
    display.display(obj)
    flag_names = sorted(k[2:] for k, v in flag_predicates.items() if v(obj))
    if flag_names:
        show_markdown('_({})_'.format(', '.join(flag_names)))
    if callable(obj):
        try:
            sig = inspect.signature(obj)
        except (ValueError, TypeError):  # inspect.signature raises these when no signature is available
            show_markdown('_No signature available_')
            sig = None
        if sig is not None:
            param_attrs = ['name', 'kind', 'default', 'annotation']
            display.display(display.Markdown('##### Parameters'))

            def str_or_null(x):
                return '' if x is inspect._empty else str(x)

            display.display(
                display.HTML(
                    str(
                        html_table(
                            [[str_or_null(getattr(p, a)) for a in param_attrs]
                             for p in sig.parameters.values()],
                            header_row=param_attrs))))
    attr_descriptions = OrderedDict([('', None)])
    retval_key = '<return value>'
    if callable(obj):
        attr_descriptions[retval_key] = retval_key
    attr_descriptions.update(
        ('{val} ({typ})'.format(typ=type(getattr(obj, k)).__name__, val=k), k)
        for k in dir(obj))

    @ipywidgets.interact(attribute=attr_descriptions)
    def explore_attr(attribute):
        if attribute is not None:
            explore(obj() if attribute ==
                    retval_key else getattr(obj, attribute))
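
A usage sketch, assuming the snippet above runs in a notebook cell with its dependencies (inspect, ipywidgets, OrderedDict, display and the html_table helper) already imported:

# Browse an object and drill into its attributes via the generated dropdown.
explore(dict)       # shows the type, flags (e.g. callable) and an attribute dropdown
explore(explore)    # works on any object, including the explorer itself
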
Example #6
 def evidence_report(self, sdigits=4):
     if misc.is_notebook():
         ipd.display(ipd.Markdown("**Evidence report:**"))
         out = r"\log\mathcal{{ Z }} = "
         out += q2tex(self.log_evidence, self.log_evidence_err)
         ipd.display(ipd.Math(out))
     else:
         print('Evidence report')
         print('logZ =', self.log_evidence, '±', self.log_evidence_err)
Example #7
 def _print_plain_override_for_ndarray(arg, p, cycle):
     """caller for pretty, for use in IPython 0.11"""
     import IPython.display as d
     if isinstance(arg, numpy.ndarray):
         if hasattr(arg.dtype, 'fields') and arg.dtype.fields is not None:
             mkd = 'structured array with fields: ' + ', '.join(
                 [j[0] for j in arg.dtype.descr])
         else:
             mkd = "array"
         if any(numpy.array(arg.shape) > 20):
             print("Matrix is too large (" +
                   'x'.join([str(j) for j in arg.shape]) +
                   ") -- using text numpy print")
             print(arg)
         elif len(arg.shape) == 2:
             math_str = render_matrix(arg)
             d.display(
                 d.Markdown("**numpy 2D " + mkd +
                            "** represented as a matrix:"))
             d.display(d.Math(math_str))
         elif len(arg.shape) == 1:
             d.display(
                 d.Markdown("**numpy 1D " + mkd +
                            "** represented as a row vector:"))
             d.display(d.Math(render_matrix(arg.reshape(1, -1))))
         elif len(arg.shape) == 3:
             d.display(
                 d.Markdown("***numpy 3D " + mkd +
                            ",*** represented as a series of matrices:"))
             math_str = r'\begin{bmatrix}' + '\n'
             for j in range(arg.shape[0]):
                 math_str += '\\text{Select slice with outermost dimension set to %d}' % j
                 math_str += r'\\' + '\n'
                 math_str += render_matrix(arg[j, :, :])
                 math_str += r'\\' + '\n'
             math_str += r'\end{bmatrix}' + '\n'
             d.display(d.Math(math_str))
         elif len(arg.shape) > 3:
             d.display(
                 d.Markdown(
                     "***numpy ND array*** $N>3$ dimensions ($" +
                     r'\times'.join(map(str, arg.shape)) +
                     "$), so I'm just giving the text representation:"))
             d.display(str(arg))
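
The function above matches the (obj, p, cycle) signature expected by IPython's plain-text formatter. A hedged sketch of how such an override is typically registered (the hook actually used by the original project is not shown here):

# Sketch (assumption): route ndarrays through _print_plain_override_for_ndarray
# instead of the default repr.
import numpy
from IPython import get_ipython

ip = get_ipython()
if ip is not None:  # only meaningful inside an IPython/Jupyter session
    ip.display_formatter.formatters['text/plain'].for_type(
        numpy.ndarray, _print_plain_override_for_ndarray)
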
Example #8
def print_md(msg: str, color: str = None) -> None:
    """Print message with Markdown support.

    The message supports markdown syntax like **bold** or _italics_.

    :param msg: message to print.
    :param color: name of desired text color, e.g. 'red'
    """
    color_msg = "<span style='color:{}'>{}</span>".format(color, msg)
    ipd.display(ipd.Markdown(color_msg))
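
A usage sketch; the color is injected as an inline HTML span, so any CSS color name works:

# In a notebook cell (assumes `import IPython.display as ipd` as above)
print_md("**Training finished** in _42 s_", color="green")
print_md("Color defaults to None, which the browser simply ignores")
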
Example #9
def macro(code: str) -> Tuple[display.DisplayObject]:
    """
    >>> url = "https://test.com"
    >>> assert macro(url) and macro(''' {}
    ...
    ... '''.format(url))[0].data.strip() == url
    """
    lines = code.splitlines()
    if lines and lines[0].strip():
        if len(lines) == 1 and lines[0][:1].strip():
            type = mimetypes.guess_type(code)[0]
            is_image = type and type.startswith('image')
            disp = partial(display.Image,
                           embed=True) if is_image else display.Markdown
            if fnmatch(code, "* [[]*[]](*)*"):
                url = code.rsplit(')', 1)[0].rsplit('(', 1)[1].split(' ', 1)[0]
                if url and url != '#':
                    return (display.Markdown(data=code), *macro(url))
            if fnmatch(code, 'http*://*'):
                return is_image and display.Image(url=code) or display.IFrame(
                    code, width=600, height=400),
        return display.Markdown(data=code),
    return tuple()
def get_resources():
    items = [
        ip.Markdown("""## Introduction

You can use Python and Voila as shown in the code below to create a web app!
"""),
        # Use https://gist.github.com/jiffyclub/5385501 to format code
        ip.Code(
            """
import IPython.display as ip
ip.display(
    ip.Markdown(
    "For more info watch the ***4 minutes introduction*** to Streamlit"
    ),
    ip.YouTubeVideo("VtchVpoSdoQ")
)
""",
            language="python3",
        ),
        ip.Markdown(
            "For more info watch the ***30 minutes introduction*** to Voila"),
        ip.YouTubeVideo("VtchVpoSdoQ"),
    ]
    return to_output_widget(items)
Example #11
    def posterior_report(self, sdigits=2, **kwargs):
        """
        Displays the best fit values and 1-sigma errors for each active
        parameter. Also produces a corner plot of the samples, which is
        saved to the run directory.

        If running in a Jupyter notebook, a nice LaTeX display is used, and
        the plot is shown.

        Parameters
        ----------
        sdigits : int
            The number of significant digits to be used
        """
        out = ''
        for param, pdict in self.posterior_summary.items():
            if misc.is_notebook():
                # Extracts LaTeX representation from astropy unit object
                out += r"\\ \text{{ {0}: }}\; ".format(param)
                out += q2tex(*map(pdict.get, ['median', 'errup', 'errlo']),
                             sdigits=sdigits)
                out += r"\\"
            else:
                out += r"{0}: ".format(param)
                md, errlo, errup = map(pdict.get, ['median', 'errlo', 'errup'])
                if isinstance(md, apu.Quantity):
                    unit = str(md.unit)
                    md, errlo, errup = map(lambda x: x.value,
                                           [md, errlo, errup])
                else:
                    unit = ""
                v, l, u = misc.adjust_error_intervals(md,
                                                      errlo,
                                                      errup,
                                                      sdigits=sdigits)
                out += r'{0} (-{1})/(+{2}) {3}\n'.format(v, l, u, unit)

        fig = self.corner_plot(**kwargs)
        fig.savefig(os.path.join(self._run_directory, 'corner_plot.pdf'))
        if misc.is_notebook():
            ipd.display(ipd.Markdown("\n**Posterior report:**"))
            plt.show()
            ipd.display(ipd.Math(out))
        else:
            # Restores linebreaks and prints
            print('Posterior report')
            print(out.replace(r'\n', '\n'))
def get_stack_overflow_intro():
    items = [
        ip.Markdown(f"""
## Stack Overflow 2019

You will be analyzing and providing insights from the Stack Overflow 2019 survey

<a href="{stack_overflow.SURVEY_2019_URL}" target="_blank">
<img src="{stack_overflow.IMAGE_2019_URL}" style="width:100%;">
</a>

Results: [{stack_overflow.SURVEY_2019_URL}]({stack_overflow.SURVEY_2019_URL})

Data: [{stack_overflow.DATA_URL}]({stack_overflow.DATA_URL})
""")
    ]
    return to_output_widget(items)
Example #13
def run_markdown(cpp_filename):
    """
    Run the executable built from the given C++ file
    and present its output as Markdown.

    cpp_filename : e.g. 'test' or 'test.cpp'
    """
    basename, ext = os.path.splitext(cpp_filename)
    # https://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output
    # https://stackoverflow.com/questions/35160256/how-do-i-output-lists-as-a-table-in-jupyter-notebook

    # Run executable while capturing output
    result = subprocess.run(
        [os.path.join(os.curdir, basename)],
        stdout=subprocess.PIPE,
        check=True,
    )
    # present output as a markdown
    disp.display(disp.Markdown(result.stdout.decode()))
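
A usage sketch; the build step below is an assumption (the function itself only runs an already-built executable):

# Compile a small C++ file first, then let run_markdown() execute it and render
# whatever it prints (e.g. a Markdown table) in the notebook.
import subprocess

subprocess.run(["g++", "-o", "test", "test.cpp"], check=True)  # assumed build step
run_markdown("test.cpp")
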
Example #14
def prob_combinations():
    import IPython.display as dp

    combinations=\
r"""### Combinations

> ***How many ways can we create a subset $k$ out of $n$ objects?***

- Combinations are unordered.

- **The number of combinations equals the number of variations divided by the number of permutations:**
$$ \large C_{p}^{n}  = \frac{V_{p}^{n}}{P_p} = \frac{n!}{p!(n-p)!} $$


- Also represented as:
$$\displaystyle\binom{n}{k} = \dfrac{P_{k}^{n}}{k!}=\dfrac{ \dfrac{n!}{(n-k)!}}{k!} = \dfrac{n!}{(n-k)!k!}$$

"""
    dp.display(dp.Markdown(combinations))
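
A quick numeric check of the formula above using the standard library (math.comb requires Python 3.8+):

# Verify C(n, k) = n! / ((n - k)! k!) for a small example.
import math

n, k = 8, 3
assert math.comb(n, k) == math.factorial(n) // (math.factorial(n - k) * math.factorial(k))
print(math.comb(n, k))  # 56
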
Example #15
    def progress_report(self):
        """
        Reports the progress of the inference
        """
        # Try to call get_intermediate_results
        try:
            self.get_intermediate_results()
        # If this method is not implemented, show error instead and continue
        except NotImplementedError:
            print("Progress reports can only be made if the "
                  "'get_intermediate_results()'-method is overridden! "
                  "Skipping report.")
        # If this method is implemented, create progress report
        else:
            if mpirank != 0:
                return

            dead_samples = self.intermediate_results['rejected_points']
            live_samples = self.intermediate_results['live_points']
            likelihood = self.intermediate_results['logLikelihood']
            lnX = self.intermediate_results['lnX']

            if dead_samples is not None:
                fig = visualization.trace_plot(
                    parameter_names=self._active_parameters,
                    samples=dead_samples,
                    live_samples=live_samples,
                    likelihood=likelihood, lnX=lnX)
                fig_filepath = os.path.join(self._run_directory,
                                            'progress_report.pdf')
                msg = 'Saving progress report to {}'.format(fig_filepath)
                log.info(msg)
                fig.savefig(fig_filepath)
                if misc.is_notebook():
                    ipd.clear_output()
                    ipd.display(ipd.Markdown(
                        "\n**Progress report:**\nnumber of likelihood "
                        "evaluations  {}".format(
                            self._likelihood_evaluations_counter)))
                    plt.show()
                else:
                    print(msg)
Example #16
def prob_permutations():
    from IPython.display import display,Markdown,HTML

    permutations=\
r"""### Permutations
- **Permutations have to do with arranging objects.**
- Order is important

### Permutations of $n$:
> ***How many ways to arrange $n$ objects?***

$$P(n) = n!$$
- where $n$ = total number of elements

### Permutations without replacement (the $k$-permutation of $n$)
> ***How many ways to select $k$ elements out of a pool of $n$ objects?***

$$ \large P_{k}^{n}= \dfrac{n!}{(n-k)!}$$ 
<center> this is known as a $k$-permutation of $n$. </center>

### Permutations with Replacement:

$$ \large P_{j}^{n} = n^j $$
- where $n$ = total number of elements
- $j$ = number of positions to fill

#### Sometimes called **Variations** with notation:

$$ \large \bar{V}_{j}^{n} = n^j$$


### Permutations with repeated elements
- e.g. arranging the letters of the word TENNESSEE

$$\dfrac{n!}{n_1!n_2!\ldots n_j!}$$


"""
    display(Markdown(permutations))
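
A quick numeric check of the repeated-elements formula using TENNESSEE (9 letters: E appears 4 times, N and S twice each, T once):

# Distinct arrangements of TENNESSEE: n! / (n_E! * n_N! * n_S! * n_T!)
import math
from collections import Counter

word = "TENNESSEE"
arrangements = math.factorial(len(word))
for count in Counter(word).values():
    arrangements //= math.factorial(count)
print(arrangements)  # 9! / (4! * 2! * 2! * 1!) = 3780
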
def stack_overflow_results_grid(
        results, questions_grid: qgrid.QGridWidget) -> widgets.Widget:
    """This component writes the Stack Overflow Developer Survey Questions

    Arguments:
        results {[type]} -- A DataFrame of the Results
        questions_grid {qgrid.QGridWidget} -- The table of questions
    """
    def get_selected_questions(questions_grid):
        return list(questions_grid.get_selected_df()["Column"])

    results_grid = qgrid.show_grid(
        results[get_selected_questions(questions_grid)].head(10),
        column_options=styles.RESULTS_GRID_COL_OPTIONS,
        column_definitions=styles.RESULTS_GRID_COL_DEFS,
    )
    no_results_grid = ip.Markdown(
        "**Select one or more questions in the table above to show the results!**"
    )

    def update_results_grid():
        selected_questions = list(questions_grid.get_selected_df()["Column"])
        if selected_questions:
            results_to_show = results
            results_to_show = results_to_show[selected_questions]
            results_grid.df = results_to_show
        else:
            results_grid.df = pd.DataFrame()

    def questions_grid_handler(change):
        update_results_grid()

    questions_grid.observe(handler=questions_grid_handler,
                           names="_selected_rows")

    return to_output_widget([no_results_grid, results_grid])
def respondents_per_country_component(results):
    """This component writes a bar chart showing number of Respondants per Country

    Arguments:
        results {[type]} -- A DataFrame of the Results
    """
    distributions = ((results[["Country", "Respondent"
                               ]].groupby("Country").count().reset_index()
                      ).sort_values("Respondent").tail(50))
    fig = px.bar(
        distributions,
        x="Respondent",
        y="Country",
        title="Count",
        orientation="h",
        height=800,
        width=1200,
    )
    return to_output_widget([
        ip.Markdown("""### Respondents per Countrys
You can plot using matplot, seaborn, vega lite, plotly and other. Here we have chosen plotly
            """),
        go.FigureWidget(fig),
    ])
def freespaceImageAnalysis(fids,
                           guesses=None,
                           fit=True,
                           bgInput=None,
                           bgPcInput=None,
                           shapes=[None],
                           zeroCorrection=0,
                           zeroCorrectionPC=0,
                           keys=None,
                           fitModule=bump,
                           extraPicDictionaries=None,
                           newAnnotation=False,
                           onlyThisPic=None,
                           pltVSize=5,
                           plotSigmas=False,
                           plotCounts=False,
                           manualColorRange=None,
                           calcTemperature=False,
                           clearOutput=True,
                           dataRange=None,
                           guessTemp=10e-6,
                           trackFitCenter=False,
                           picsPerRep=1,
                           startPic=0,
                           binningParams=None,
                           win=pw.PictureWindow(),
                           transferAnalysisOpts=None,
                           tferBinningParams=None,
                           tferWin=pw.PictureWindow(),
                           extraTferAnalysisArgs={},
                           emGainSetting=300,
                           lastConditionIsBackGround=True,
                           showTferAnalysisPlots=True,
                           show2dFitsAndResiduals=True,
                           plotFitAmps=False,
                           indvColorRanges=False,
                           fitF2D=gaussian_2d.f_notheta,
                           rmHighCounts=True,
                           useBase=True,
                           weightBackgroundByLoading=True,
                           returnPics=False,
                           forceNoAnnotation=False):
    """
    returnPics is False by default in order to conserve memory: the full picture
    arrays of many experiments can take up a lot of RAM and are usually unnecessary.
    """
    fids = [fids] if type(fids) == int else fids
    keys = [None for _ in fids] if keys is None else keys
    sortedStackedPics = {}
    initThresholds = [None]
    picsForBg = []
    bgWeights = []
    isAnnotatedList = []
    for filenum, fid in enumerate(fids):
        if transferAnalysisOpts is not None:
            res = ta.stage1TransferAnalysis(fid,
                                            transferAnalysisOpts,
                                            useBase=useBase,
                                            **extraTferAnalysisArgs)
            (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName,
             initPicCounts, tferPicCounts, repetitions, initThresholds,
             avgPics, tferThresholds, initAtomImages, tferAtomImages,
             basicInfoStr, ensembleHits, groupedPostSelectedPics,
             isAnnotated) = res
            isAnnotatedList.append(isAnnotated)
            # assumes that you only want to look at the first condition.
            for varPics in groupedPostSelectedPics:  # don't remember why 0 works if false...
                picsForBg.append(
                    varPics[-1 if lastConditionIsBackGround else 0])
                bgWeights.append(len(varPics[0]))
            allFSIPics = [
                varpics[0][startPic::picsPerRep]
                for varpics in groupedPostSelectedPics
            ]
            if showTferAnalysisPlots:
                fig, axs = plt.subplots(1, 2)
                mp.makeAvgPlts(axs[0], axs[1], avgPics, transferAnalysisOpts,
                               ['r', 'g', 'b'])
            allFSIPics = [win.window(np.array(pics)) for pics in allFSIPics]
            allFSIPics = ah.softwareBinning(binningParams, allFSIPics)
        elif type(fid) == int:
            ### For looking at either PGC imgs or FSI imgs
            with exp.ExpFile(fid) as file:
                # I think this only makes sense if there is a specific bg pic in the rotation
                picsForBg.append(list(file.get_pics()))
                allFSIPics = file.get_pics()[startPic::picsPerRep]
                _, key = file.get_key()
                if len(np.array(key).shape) == 2:
                    key = key[:, 0]
                file.get_basic_info()
            allFSIPics = win.window(allFSIPics)
            allFSIPics = ah.softwareBinning(binningParams, allFSIPics)
            allFSIPics = np.reshape(
                allFSIPics, (len(key), int(allFSIPics.shape[0] / len(key)),
                             allFSIPics.shape[1], allFSIPics.shape[2]))
        else:
            ### Assumes given pics have the same start pic and increment (picsPerRep).
            # doesn't combine well w/ transfer analysis
            picsForBg.append(fid)
            allFSIPics = fid[startPic::picsPerRep]
            print(
                "Assuming input is list of all pics, then splices to get FSI pics. Old code assumed the given were FSI pics."
            )
            allFSIPics = win.window(allFSIPics)
            allFSIPics = ah.softwareBinning(binningParams, allFSIPics)
            allFSIPics = np.reshape(
                allFSIPics, (len(key), int(allFSIPics.shape[0] / len(key)),
                             allFSIPics.shape[1], allFSIPics.shape[2]))
        # ##############
        if keys[filenum] is not None:
            key = keys[filenum]
        for i, keyV in enumerate(key):
            keyV = misc.round_sig_str(keyV)
            sortedStackedPics[keyV] = np.append(
                sortedStackedPics[keyV], allFSIPics[i],
                axis=0) if (keyV in sortedStackedPics) else allFSIPics[i]
    if lastConditionIsBackGround:
        bgInput, pcBgInput = getBgImgs(
            picsForBg,
            startPic=startPic,
            picsPerRep=picsPerRep,
            rmHighCounts=rmHighCounts,
            bgWeights=bgWeights,
            weightBackgrounds=weightBackgroundByLoading)
    elif bgInput == 'lastPic':
        bgInput, pcBgInput = getBgImgs(
            picsForBg,
            startPic=picsPerRep - 1,
            picsPerRep=picsPerRep,
            rmHighCounts=rmHighCounts,
            bgWeights=bgWeights,
            weightBackgrounds=weightBackgroundByLoading)
    if bgInput is not None:  # previously broke when no background was given
        bgInput = win.window(bgInput)
        bgInput = ah.softwareBinning(binningParams, bgInput)
    if bgPcInput is not None:
        bgPcInput = win.window(bgPcInput)
        bgPcInput = ah.softwareBinning(binningParams, bgPcInput)

    if extraPicDictionaries is not None:
        if type(extraPicDictionaries) == dict:
            extraPicDictionaries = [extraPicDictionaries]
        for dictionary in extraPicDictionaries:
            for keyV, pics in dictionary.items():
                sortedStackedPics[keyV] = (np.append(
                    sortedStackedPics[keyV], pics, axis=0) if keyV
                                           in sortedStackedPics else pics)
    sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]
    sortedKey, sortedStackedPics = ah.applyDataRange(
        dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))
    numVars = len(sortedStackedPics.items())
    if len(np.array(shapes).shape) == 1:
        shapes = [shapes for _ in range(numVars)]
    if guesses is None:
        guesses = [[None for _ in range(4)] for _ in range(numVars)]
    if len(np.array(bgInput).shape) == 2 or bgInput is None:
        bgInput = [bgInput for _ in range(numVars)]
    if len(np.array(bgPcInput).shape) == 2 or bgPcInput is None:
        bgPcInput = [bgPcInput for _ in range(numVars)]

    datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [
        {} for _ in range(9)
    ]
    titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']
    assert (len(sortedKey) > 0)
    for vari, keyV in enumerate(sortedKey):
        keyV = misc.round_sig_str(keyV)
        if vari == 0:
            initKeyv = keyV
        varPics = sortedStackedPics[keyV]
        # 0 is init atom pics for post-selection on atom number... if we wanted to.
        expansionPics = rmHighCountPics(varPics,
                                        7000) if rmHighCounts else varPics
        datalen[keyV] = len(expansionPics)
        expPhotonCountImage = photonCounting(expansionPics,
                                             120)[0] / len(expansionPics)
        bgPhotonCountImage = np.zeros(
            expansionPics[0].shape
        ) if bgPcInput[vari] is None else bgPcInput[vari]
        expAvg = np.mean(expansionPics, 0)
        bgAvg = np.zeros(expansionPics[0].shape) if (
            bgInput[vari] is None
            or len(bgInput[vari]) == 1) else bgInput[vari]

        if bgPhotonCountImage is None:
            print('no bg photon', expAvg.shape)
            bgPhotonCountImage = np.zeros(expPhotonCountImage.shape)
        avg_mbg = expAvg - bgAvg
        avg_mbgpc = expPhotonCountImage - bgPhotonCountImage
        images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]
        hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[
            keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]
        for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):
            if fit:
                # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.
                _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(
                    im,
                    guessSigma_x=5,
                    guessSigma_y=5,
                    showFit=False,
                    guess_x=None
                    if vari == 0 else fitParams2D[initKeyv][imnum][1],
                    guess_y=None
                    if vari == 0 else fitParams2D[initKeyv][imnum][2],
                    fitF=fitF2D)
                fitParams2D[keyV].append(pictureFitParams2d)
                fitErrs2D[keyV].append(pictureFitErrors2d)
                hFitParams[keyV].append(h_params)
                hFitErrs[keyV].append(h_errs)
                vFitParams[keyV].append(v_params)
                vFitErrs[keyV].append(v_errs)
    # conversion from the num of pixels on the camera to microns at the focus of the tweezers
    cf = 16e-6 / 64
    mins, maxes = [[], []]
    imgs_ = np.array(list(images.values()))
    for imgInc in range(4):
        if indvColorRanges:
            mins.append(None)
            maxes.append(None)
        elif manualColorRange is None:
            mins.append(min(imgs_[:, imgInc].flatten()))
            maxes.append(max(imgs_[:, imgInc].flatten()))
        else:
            mins.append(manualColorRange[0])
            maxes.append(manualColorRange[1])
    numVariations = len(images)
    if onlyThisPic is None:
        fig, axs = plt.subplots(numVariations,
                                4,
                                figsize=(20, pltVSize * numVariations))
        if numVariations == 1:
            axs = np.array([axs])
        bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))
    else:
        numRows = int(np.ceil((numVariations + 3) / 4))
        fig, axs = plt.subplots(numRows,
                                4 if numVariations > 1 else 3,
                                figsize=(20, pltVSize * numRows))
        avgPicAx = axs.flatten()[-3]
        avgPicFig = fig
        bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]
        bgFig = fig
    if show2dFitsAndResiduals:
        fig2d, axs2d = plt.subplots(
            *((2, numVariations) if numVariations > 1 else (1, 2)))
    keyPlt = np.zeros(len(images))
    (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp,
     hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp,
     vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D,
     vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]

    for vari, ((keyV, ims), hParamSet, hErr_set, vParamSet, vErr_set,
               paramSet2D, errSet2D) in enumerate(
                   zip(
                       images.items(), *[
                           dic.values() for dic in [
                               hFitParams, hFitErrs, vFitParams, vFitErrs,
                               fitParams2D, fitErrs2D
                           ]
                       ])):
        for which in range(4):
            if onlyThisPic is None:
                (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs,
                 param2d, err2d) = [
                     obj[which]
                     for obj in (ims, axs[vari], titles, mins, maxes,
                                 hParamSet, hErr_set, vParamSet, vErr_set,
                                 paramSet2D, errSet2D)
                 ]
            else:
                which = onlyThisPic
                ax = axs.flatten()[vari]
                (im, title, min_, max_, hparams, hErrs, vparams, vErrs,
                 param2d, err2d) = [
                     obj[which]
                     for obj in (ims, titles, mins, maxes, hParamSet, hErr_set,
                                 vParamSet, vErr_set, paramSet2D, errSet2D)
                 ]
            h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][
                which] = hparams[0], hparams[1], hparams[2] * cf * 1e6
            hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[
                vari][which] = hErrs[0], hErrs[1], hErrs[2] * cf * 1e6
            v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][
                which] = vparams[0], vparams[1], vparams[2] * cf * 1e6
            vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[
                vari][which] = vErrs[0], vErrs[1], vErrs[2] * cf * 1e6
            hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][
                which], vSigma2dErr[vari][which] = [
                    val * cf * 1e6 for val in
                    [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]
                ]

            totalSignal[vari][which] = np.sum(im.flatten())
            keyPlt[vari] = keyV
            res = mp.fancyImshow(fig,
                                 ax,
                                 im,
                                 imageArgs={
                                     'cmap': dark_viridis_cmap,
                                     'vmin': min_,
                                     'vmax': max_
                                 },
                                 hFitParams=hparams,
                                 vFitParams=vparams,
                                 fitModule=fitModule,
                                 flipVAx=True,
                                 fitParams2D=param2d)
            ax.set_title(
                keyV + ': ' + str(datalen[keyV]) + ';\n' + title + ': ' +
                misc.errString(hSigmas[vari][which], hSigmaErrs[vari][which]) +
                r'$\mu m$ sigma, ' +
                misc.round_sig_str(totalSignal[vari][which], 5),
                fontsize=12)
            if show2dFitsAndResiduals:
                X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))
                data_fitted = fitF2D((X, Y), *param2d)
                fitProper = data_fitted.reshape(im.shape[0], im.shape[1])
                ax1 = axs2d[0] if numVariations == 1 else axs2d[0, vari]
                ax2 = axs2d[1] if numVariations == 1 else axs2d[1, vari]
                imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)
                mp.addAxColorbar(fig2d, ax1, imr)
                ax1.contour(np.arange(len(im[0])),
                            np.arange(len(im)),
                            fitProper,
                            4,
                            colors='w',
                            alpha=0.2)
                imr = ax2.imshow(fitProper - im)
                mp.addAxColorbar(fig2d, ax2, imr)
                ax2.contour(np.arange(len(im[0])),
                            np.arange(len(im)),
                            fitProper,
                            4,
                            colors='w',
                            alpha=0.2)
            if onlyThisPic is not None:
                break

    mp.fancyImshow(avgPicFig,
                   avgPicAx,
                   np.mean([img[onlyThisPic] for img in images.values()],
                           axis=0),
                   imageArgs={'cmap': dark_viridis_cmap},
                   flipVAx=True)
    avgPicAx.set_title('Average Over Variations')
    ### Plotting background and photon counted background
    mp.fancyImshow(bgFig,
                   bgAxs[0],
                   bgAvg,
                   imageArgs={'cmap': dark_viridis_cmap},
                   flipVAx=True)
    bgAxs[0].set_title('Background image (' +
                       str(len(picsForBg) / picsPerRep) + ')')
    mp.fancyImshow(bgFig,
                   bgAxs[1],
                   bgPhotonCountImage,
                   imageArgs={'cmap': dark_viridis_cmap},
                   flipVAx=True)
    bgAxs[1].set_title('Photon counted background image (' +
                       str(len(picsForBg) / picsPerRep) + ')')
    fig.subplots_adjust(left=0,
                        right=1,
                        bottom=0.1,
                        hspace=0.2,
                        **({
                            'top': 0.7,
                            'wspace': 0.4
                        } if (onlyThisPic is None) else {
                            'top': 0.9,
                            'wspace': 0.3
                        }))

    disp.display(fig)
    temps, tempErrs, tempFitVs = [], [], []
    if calcTemperature:
        for sigmas, sigmaerrs in zip(
            [hSigmas, vSigmas, hSigma2D, vSigma2D],
            [hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):
            mbgSigmas = np.array([elt[2] for elt in sigmas])
            mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])
            myGuess = [0.0, min((mbgSigmas) * 1e-6), guessTemp]
            temp, fitV, cov = ah.calcBallisticTemperature(
                keyPlt * 1e-3, (mbgSigmas) * 1e-6,
                guess=myGuess,
                sizeErrors=mbgSigmaErrs)
            error = np.sqrt(np.diag(cov))
            temps.append(temp)
            tempErrs.append(error[2])
            tempFitVs.append(fitV)
    numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)
    if numAxisCol != 0:
        fig2, axs = plt.subplots(1, numAxisCol, figsize=(15, 5))
        fig2.subplots_adjust(top=0.75, wspace=0.4)
    colors = ['b', 'k', 'c', 'purple']
    if plotSigmas:
        ax = (axs if numAxisCol == 1 else axs[0])
        stdStyle = dict(marker='o', linestyle='', capsize=3)
        if onlyThisPic is not None:
            ax.errorbar(keyPlt,
                        hSigmas[:, onlyThisPic],
                        hSigmaErrs[:, onlyThisPic],
                        color=colors[0],
                        label='h ' + titles[onlyThisPic],
                        **stdStyle)
            ax.errorbar(keyPlt,
                        hSigma2D[:, onlyThisPic],
                        hSigma2dErr[:, onlyThisPic],
                        color=colors[1],
                        label='2dh ' + titles[onlyThisPic],
                        **stdStyle)
            ax.errorbar(keyPlt,
                        vSigmas[:, onlyThisPic],
                        vSigmaErrs[:, onlyThisPic],
                        color=colors[2],
                        label='v ' + titles[onlyThisPic],
                        **stdStyle)
            ax.errorbar(keyPlt,
                        vSigma2D[:, onlyThisPic],
                        vSigma2dErr[:, onlyThisPic],
                        color=colors[3],
                        label='2dv ' + titles[onlyThisPic],
                        **stdStyle)
        else:
            for whichPic in range(4):
                ax.errorbar(keyPlt,
                            hSigmas[:, whichPic],
                            hSigmaErrs[:, whichPic],
                            color='b',
                            label='h ' + titles[whichPic],
                            **stdStyle)
                ax.errorbar(keyPlt,
                            vSigmas[:, whichPic],
                            vSigmaErrs[:, whichPic],
                            color='c',
                            label='v ' + titles[whichPic],
                            **stdStyle)
        ax.set_ylim(max(0, ax.get_ylim()[0]), min([ax.get_ylim()[1], 5]))
        ax.set_ylabel(r'Fit Sigma ($\mu m$)')

        if calcTemperature:
            # converting time to s, hSigmas in um
            xPoints = np.linspace(min(keyPlt), max(keyPlt)) * 1e-3
            for num, fitV in enumerate(tempFitVs):
                #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')
                ax.plot(xPoints * 1e3,
                        LargeBeamMotExpansion.f(xPoints, *fitV) * 1e6,
                        color=colors[num])
        ax.legend()

    if plotFitAmps:
        ax = (axs if numAxisCol == 1 else axs[0])
        ampAx = ax.twinx()

        if onlyThisPic is not None:
            ampAx.errorbar(keyPlt,
                           h_amp[:, onlyThisPic],
                           hAmpErrs[:, onlyThisPic],
                           label='h ' + titles[onlyThisPic],
                           color='orange',
                           **stdStyle)
            ampAx.errorbar(keyPlt,
                           v_amp[:, onlyThisPic],
                           vAmpErrs[:, onlyThisPic],
                           label='v ' + titles[onlyThisPic],
                           color='r',
                           **stdStyle)
        else:
            for whichPic in range(4):
                ampAx.errorbar(keyPlt,
                               h_amp[:, whichPic],
                               hAmpErrs[:, whichPic],
                               label='h ' + titles[whichPic],
                               color='orange',
                               **stdStyle)
                ampAx.errorbar(keyPlt,
                               v_amp[:, whichPic],
                               vAmpErrs[:, whichPic],
                               label='v ' + titles[whichPic],
                               color='r',
                               **stdStyle)
        [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]
        [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]
        ampAx.set_ylabel(r'Fit h_amps', color='r')

    hTotalPhotons, vTotalPhotons = None, None
    if plotCounts:
        # numAxCol = 1: ax = axs
        # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]
        # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]
        # numAxCol = 3: ax = axs[1]
        if numAxisCol == 1:
            ax = axs
        elif numAxisCol == 2:
            ax = axs[1 if plotSigmas else 0]
        else:
            ax = axs[1]
        # Create axis to plot photon counts
        ax.set_ylabel(r'Integrated signal')
        photon_axis = ax.twinx()
        # This is not currently doing any correction for e.g. the loading rate.
        countToCameraPhotonEM = 0.018577 / (emGainSetting / 200)  # the constant is for EM gain 200.
        countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting / 200)

        if onlyThisPic is not None:
            # calculate number of photons
            hamp = h_amp[:, onlyThisPic] * len(
                expansionPics[0][0]
            )  # Horizontal "un"normalization for the number of columns being averaged.
            vamp = v_amp[:, onlyThisPic] * len(expansionPics[0])
            hsigpx = hSigmas[:, onlyThisPic] / (
                16 / 64)  # Convert from um back to pixels.
            vsigpx = vSigmas[:, onlyThisPic] / (16 / 64)
            htotalCountsPerPic = bump.area_under(hamp, hsigpx)
            vtotalCountsPerPic = bump.area_under(vamp, vsigpx)
            hTotalPhotons = countToScatteredPhotonEM * htotalCountsPerPic
            vTotalPhotons = countToScatteredPhotonEM * vtotalCountsPerPic
            ax.plot(keyPlt,
                    totalSignal[:, onlyThisPic],
                    marker='o',
                    linestyle='',
                    label=titles[onlyThisPic])
            photon_axis.plot(keyPlt,
                             hTotalPhotons,
                             marker='o',
                             linestyle='',
                             color='r',
                             label='Horizontal')
            photon_axis.plot(keyPlt,
                             vTotalPhotons,
                             marker='o',
                             linestyle='',
                             color='orange',
                             label='Vertical')
        else:
            for whichPic in range(4):
                # See above comments
                amp = h_amp[:, whichPic] * len(expansionPics[0][0])
                sig = hSigmas[:, whichPic] / (16 / 64)
                totalCountsPerPic = bump.area_under(amp, sig)
                hTotalPhotons = countToScatteredPhotonEM * totalCountsPerPic
                ax.plot(keyPlt,
                        totalSignal[:, whichPic],
                        marker='o',
                        linestyle='',
                        label=titles[whichPic])
                photon_axis.plot(keyPlt,
                                 hTotalPhotons,
                                 marker='o',
                                 linestyle='',
                                 color=['red', 'orange', 'yellow',
                                        'pink'][whichPic])
        ax.legend()
        photon_axis.legend()
        [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]
        [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]
        photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img',
                               color='r')
    if trackFitCenter:
        #numaxcol = 1: ax = axs
        #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]
        #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]
        #numaxcol = 3: ax = axs[2]
        ax = (axs if numAxisCol == 1 else axs[-1])
        if onlyThisPic is not None:
            #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);
            ax.errorbar(keyPlt,
                        vfitCenter[:, onlyThisPic],
                        vFitCenterErrs[:, onlyThisPic],
                        marker='o',
                        linestyle='',
                        capsize=3,
                        label=titles[onlyThisPic])
            #def accel(t, x0, a):
            #    return x0 + 0.5*a*t**2
            #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])
            #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3
            #fity = accel(fitx, *accelFit)
            #ax.plot(fitx*1e3, fity)
        else:
            for whichPic in range(4):
                ax.errorbar(keyPlt,
                            hfitCenter[:, whichPic],
                            hFitCenterErrs[:, whichPic],
                            marker='o',
                            linestyle='',
                            capsize=3,
                            label=titles[whichPic])
        #accelErr = np.sqrt(np.diag(AccelCov))
        fig2.legend()
        ax.set_ylabel(r'Fit Centers (pix)')
        ax.set_xlabel('time (ms)')

    if numAxisCol != 0:
        disp.display(fig2)

    if not forceNoAnnotation:
        for fid, isAnnotated in zip(fids, isAnnotatedList):
            if not isAnnotated:
                if isinstance(fid, (int, str)):
                    if newAnnotation or not exp.checkAnnotation(
                            fid, force=False, quiet=True, useBase=useBase):
                        exp.annotate(fid, useBase=useBase)
    if clearOutput:
        disp.clear_output()
    if calcTemperature:
        for temp, err, label in zip(temps, tempErrs,
                                    ['Hor', 'Vert', 'Hor2D', 'Vert2D']):
            print(label + ' temperature = ' +
                  misc.errString(temp * 1e6, err * 1e6) + 'uk')

    for fid in fids:
        if type(fid) == int:
            expTitle, _, lev = exp.getAnnotation(fid)
            expTitle = ''.join(
                '#'
                for _ in range(lev)) + ' File ' + str(fid) + ': ' + expTitle
            disp.display(disp.Markdown(expTitle))
            with exp.ExpFile(fid) as file:
                file.get_basic_info()
    if trackFitCenter:
        pass
        #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))
    if transferAnalysisOpts is not None and showTferAnalysisPlots:
        colors, colors2 = misc.getColors(
            len(transferAnalysisOpts.initLocs()) + 2)  #, cmStr=dataColor)
        pltShape = (transferAnalysisOpts.initLocsIn[-1],
                    transferAnalysisOpts.initLocsIn[-2])
        # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)
        mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]],
                              colors,
                              shape=[1, 2])
    returnDictionary = {
        'images': images,
        'fits': hFitParams,
        'errs': hFitErrs,
        'hSigmas': hSigmas,
        'sigmaErrors': hSigmaErrs,
        'dataKey': keyPlt,
        'hTotalPhotons': hTotalPhotons,
        'tempCalc': temps,
        'tempCalcErr': tempErrs,
        'initThresholds': initThresholds[0],
        '2DFit': fitParams2D,
        '2DErr': fitErrs2D,
        'bgPics': picsForBg,
        'dataLength': datalen
    }
    if returnPics:
        returnDictionary['pics'] = sortedStackedPics
    return returnDictionary
Example #20
 def simple(self, obj):
     return display.display(display.Markdown(obj))
Example #21
            'insertedAt', 'updatedAt', 'articleTitle']
corpus = corpus[to_keep]


#### superintendent
!pip install superintendent
!jupyter nbextension enable --py --sys-prefix ipyevents

####
from superintendent import ClassLabeller
import pandas as pd
from IPython import display

labelling_widget = ClassLabeller(
    features=headlines,
    display_func=lambda x: display.display(display.Markdown("# " + x)),
    options=['professional', 'not professional'],
)

labelling_widget

################





obj = corpus.to_json(orient='records')
jdata = json.loads(obj)

#for d in jdata:
def md_print(markdown_text):
    """ Print Markdown text so that it renders correctly in the cell output. """
    IPDisplay.display(IPDisplay.Markdown(markdown_text))
Example #23
def bold(string):
    display(iplay.Markdown(string))
def mdown(x):
    import IPython.display as d
    d.display(d.Markdown(x))
Example #25
def display_md(md: str):
    ip_disp.display(ip_disp.Markdown(md))
Example #26
 def show_markdown(markdown):
     display.display(display.Markdown(markdown.replace('>', '\\>')))
Example #27
def print_md(string):
    """Render a string as Markdown to the notebook's output cell."""
    ipd.display(ipd.Markdown(string))
Example #28
def display_md(md_str: str):
    escaped_str = re.sub('(?<!\\\\)\$', '\$', md_str)
    display(ipd.Markdown(escaped_str))
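
A usage sketch showing the effect of the substitution: an unescaped $ (which IPython's Markdown would treat as a math delimiter) gets a backslash, while an already-escaped one is left alone by the negative lookbehind:

# '$15' becomes '\$15' so it renders as a dollar amount rather than math mode;
# the already escaped '\$10' is left untouched.
display_md("The price rose from \\$10 to $15 overnight.")
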
Example #29
#data = np.random.rand(10,4)
# Columns A, B, C, D
#columns = [chr(x) for x in range(65,69)]
# Create the dataframe
data = [rick, scrook, scrook, smbaer, emarder, markram]

df = pd.DataFrame(data=data, columns=columns)
# Optionally give the dataframe's index a name
#df.index.name = "my_index"
# Create the markdown string
md = tabulate(df, headers='keys', tablefmt='pipe')
# Fix the markdown string; it will not render with an empty first table cell,
# so if the dataframe's index has no name, just place an 'x' there.
md = md.replace('|    |', '| %s |' % (df.index.name if df.index.name else 'x'))
# Create the Markdown object
result = d.Markdown(md)
# Display the markdown object (in a Jupyter code cell)
result
#print('the scores are:',np.min(rick,sharon))
'''
bench = metrics(bench)
pm = metrics(pm)
hss = metrics(hss)
winners = [('rgerkin',rick),('scrook',scrook),('upgoer5_corpus',bench),('the readability of science decr over time', hss), ('peter',pm)]
with open('results.p','wb') as f:
    pickle.dump(winners,f)

winners = sorted([(w[1],w[0]) for w in winners])
print(winners)

'''
def display_markdown(text: str) -> None:
    display.display(display.Markdown(text))