Example #1
    def tess_the_movie(self):
        """
        Opens the link to Ethan Kruse's TESS: The Movie YouTube videos for
        the sector your target is observed in.

        Parameters
        ----------

        Attributes
        ----------
        movie_url : str

        """
        def type_of_script():
            try:
                ipy_str = str(type(get_ipython()))
                if 'zmqshell' in ipy_str:
                    return 'jupyter'
                if 'terminal' in ipy_str:
                    return 'ipython'
                return 'terminal'
            except NameError:
                # get_ipython is not defined outside of an IPython session
                return 'terminal'

        sector = self.obj.source_info.sector
        self.movie_url = self.youtube[sector]

        call_location = type_of_script()

        if (call_location == 'terminal') or (call_location == 'ipython'):
            os.system('python -m webbrowser -t "{0}"'.format(self.movie_url))

        elif (call_location == 'jupyter'):
            from IPython.display import YouTubeVideo
            video_id = self.movie_url.split('=')[-1]  # avoid shadowing the id() builtin
            return YouTubeVideo(id=video_id, width=900, height=500)
Example #2
def unagi():
    """Show random video about unagi on Youtube."""
    video_list = [
        "1sqLCUuMMfo", "p4KFCAX6X4o", "N26pjkM_z4A", "dn88LiPOKMc",
        "XUzsBV1xPNI", "TVEU-Pfj5eA", "7hnYiT23AEU", "4Qav8bdjCeg",
        "iaTr4V17Dwg", "pcVcJZUX0Dc", "lckYtOBTt48", "dgYeXXGYWtg",
        "E_TuYhsjJ_Y", "XTWchA5WlGQ", "xwNUtMIvYO4", "tSloyrM_XOg",
        "-0NsqEbE57Q"
    ]

    youtube_url = "https://www.youtube.com/watch?v="
    youtube_suffix = "?rel=0&controls=0&showinfo=0"
    video_urls = [youtube_url + v + youtube_suffix for v in video_list]

    import numpy as np

    try:
        ip = get_ipython()
        if ip.has_trait('kernel'):
            from IPython.display import YouTubeVideo
            return YouTubeVideo(np.random.choice(video_list),
                                width=560,
                                height=315)
        else:
            import webbrowser
            url = np.random.choice(video_urls)
            webbrowser.open(url, new=0, autoraise=True)
    except NameError:
        import webbrowser
        url = np.random.choice(video_urls)
        webbrowser.open(url, new=0, autoraise=True)
Example #3
def wannabe():
    try:
        from IPython.display import YouTubeVideo, display
        vid = YouTubeVideo("8X-2czaa3WA", autoplay=1, start=4)
        display(vid)
    except ImportError:
        pass
Example #5
def search(query, **kwargs):
    '''Search YouTube and, if any hits come up, return an IPython
    embed object.

    Parameters
    ----------
    query : str
        The query text

    kwargs
        Additional keyword arguments, passed through to
        IPython.display.YouTubeVideo

    See Also
    --------
    IPython.display.YouTubeVideo
    '''
    youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)

    search_response = youtube.search().list(q=query, part="id,snippet",
                                            maxResults=1).execute()

    for result in search_response.get("items", []):
        if result["id"]["kind"] == "youtube#video":
            return YouTubeVideo(result['id']['videoId'], **kwargs)

    warnings.warn('No results found.')
    return None
Example #6
def success_ratio(data_frame):
    
    success_number = data_frame['state'][data_frame['state']=='successful'].count()
    failed_number = data_frame['state'][data_frame['state']=='failed'].count()
    cancelled_number = data_frame['state'][data_frame['state']=='canceled'].count()
    all_number = success_number + failed_number + cancelled_number
    
    data_frame_size = data_frame.shape[0]
    
    if all_number == 0:
        print('Sorry, for this particular setting we haven\'t found any campaign at all!\nYou are sailing some uncharted waters here!')
    elif all_number > 0:
        success_ratio = success_number / all_number
        if data_frame_size < 10:
            print(f'The overall success ratio is: {round(success_ratio * 100, 2)}%')
            print('However, due to lack of sufficient data this is based on a very small number of past campaigns.\nIf you\'d like to check the odds for a bigger sample, please consider changing some of the parameters (e.g. use \'not considered\' or \'other\').')
            print('FYI, here are the similar campaigns we found:')
            display(data_frame.style.format({'percentage_of_money_collected': "{:.2%}"}))
            # print(data_frame)  # change something here so the df displays more nicely
        elif data_frame_size < 50:
            print(f'The overall success ratio is: {round(success_ratio * 100, 2)}%')
            print(f'However, due to lack of sufficient data this is based on a limited number of past campaigns\n({data_frame_size} to be exact).\nIf you\'d like to check the odds for a bigger sample, please consider changing some of the parameters to either \'not considered\' or \'any\'.')
        else:
            print(f'The success ratio for past campaigns similar to yours is:\n{round(success_ratio * 100, 2)}%')
            print(f'This is based on a sample of {data_frame_size} previous campaigns.')

    else:
        print('Something went really wrong, contact the admin!')


    # chiptune easter egg
    if len(pd.unique(data_frame['category'])) > 0:
        if pd.unique(data_frame['category'])[0] == 'Chiptune':
            display(YouTubeVideo('rf_p3-8fTo0', autoplay=1, loop=1))
Example #7
0
def fix_everything():
    """
    Scans over all the data and indicates which errors have been fixed. This
    function is great for stress relief.

    :return: All your problems fixed by Rick
    """
    return YouTubeVideo('dQw4w9WgXcQ', autoplay=1)
Example #8
    def display(self, line, cell=''):
        """
        Display something using IPython's rich display system.

        Parameters
        ----------
        -h, --html    : load HTML file
        -i, --image   : load JPG or PNG image
        -j, --json    : load JSON file
        -l, --latex   : load LaTeX file
        -m, --math    : load LaTeX math expression
        -s, --svg     : load SVG file
        -y, --youtube : load YouTube ID

        Examples
        --------
        %display -i myimage.png

        %display -m '\Delta x + y^3'

        %%display -h \"\"\"
        <ul>
        <li>This</li>
        <li>is</li>
        <li>a list.</li>
        </ul>
        \"\"\"

        Notes
        -----
        %disp is automatically aliased to %display.
        """
        
        opts, cell = self.parse_options('%s\n%s' % (line, cell),
                                        'h:i:j:l:m:s:y:',
                                        ['image=', 'html=', 'json=', 'latex=',
                                         'math=', 'svg=', 'youtube='])
        for opt, arg in opts.items():  # .iteritems() is Python 2 only
            if opt in ['h', 'html']:
                return HTML(arg)
            elif opt in ['i', 'image']:
                return Image(arg)
            elif opt in ['j', 'json']:
                return JSON(arg)
            elif opt in ['l', 'latex']:
                return Latex(arg)
            elif opt in ['m', 'math']:
                return Math(arg)
            elif opt in ['s', 'svg']:
                return SVG(arg)
            elif opt in ['y', 'youtube']:
                return YouTubeVideo(arg)

        # Raise an exception if no options were specified:
        raise ValueError('Format: [option] <file|URI>')
Example #9
def display_youtube_video(url, **kwargs):
    """
    Displays a YouTube video in a Jupyter notebook.

    Args:
        url (string): a link to a YouTube video.
        **kwargs: further arguments for IPython.display.YouTubeVideo

    Returns:
        YouTubeVideo: a video that is displayed in your notebook.
    """
    id_ = url.split("=")[-1]  # assumes a standard watch?v=<id> URL
    return YouTubeVideo(id_, **kwargs)
When testing your model you can also make adjustments to calculate a balanced accuracy. Scikit-learn has the [`balanced_accuracy_score` method](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.balanced_accuracy_score.html), which implements the technique outlined in [this](https://ieeexplore.ieee.org/document/5597285) paper. It essentially defines accuracy as the average recall obtained on each class.
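For intuition, here is a minimal standalone sketch (with toy labels, not data from this tutorial) showing how balanced accuracy penalizes a majority-class classifier that plain accuracy rewards:

from sklearn.metrics import accuracy_score, balanced_accuracy_score

# 8 negatives, 2 positives; the classifier always predicts the majority class
y_true = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
y_pred = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

print(accuracy_score(y_true, y_pred))           # 0.8, deceptively good
print(balanced_accuracy_score(y_true, y_pred))  # 0.5, i.e., chance level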

Let's test an example using the `class_weight='balanced'` approach.

svm_stats = data.predict(algorithm='svm', cv_dict={'type': 'kfolds','n_folds': 5, 'subject_id':subject_id}, **{'class_weight':'balanced', 'kernel':"linear"})


## MVPA Patterns as Biomarkers
Now that we know how to train multivariate patterns, what can we do with them? There has been a lot of interest in their potential to serve as neural biomarkers of psychological states.  If you would like to learn about how these can be used to better understand how we process and experience pain, watch these two videos by Tor Wager, where he summarizes some of the groundbreaking work he has been doing in this space.

YouTubeVideo('LV51_3jHg_c')

YouTubeVideo('3iXh0FzuAjY')

## Additional Resources
If you are feeling like you would like to learn more about some of the details and possibilities of this approach, we encourage you to read some of the many review papers from [Haynes & Rees, 2006](https://www.nature.com/articles/nrn1931), [Naselaris et al., 2011](https://www.sciencedirect.com/science/article/pii/S1053811910010657), [Haxby et al., 2014](https://www.annualreviews.org/doi/full/10.1146/annurev-neuro-062012-170325), [Woo et al, 2017](http://cosanlab.com/static/papers/Woo_2017_NN.pdf).
def embed_video(url, labels=None):
    """Embed a YouTube video with buttons that jump to labeled start times."""
    import ipywidgets as widgets
    from IPython.display import YouTubeVideo, clear_output, display

    labels = labels or {}          # avoid a mutable default argument
    video_id = url.split('=')[-1]  # assumes a standard watch?v= URL
    buttons = []

    def button_clicked(change):
        clear_output()
        display(YouTubeVideo(video_id, start=labels[change.description],
                             autoplay=True))
        display(button_box)

    for name in labels:
        buttons.append(widgets.Button(description=name))
        buttons[-1].on_click(button_clicked)

    button_box = widgets.HBox(buttons)

    display(YouTubeVideo(video_id, autoplay=False))
    display(button_box)
Example #12
    def display(self, width: int = 400, height: int = 300, **kwargs):
        """
        Display the video in a Jupyter notebook with custom options

        Parameters
        ----------
        width: int, default 400
            Width in pixels
        height: int, default 300
            Height in pixels
        kwargs: dict
            See :py:class:`IPython.display.YouTubeVideo` for other kwargs

        Returns
        -------
        v: IPython.display.YouTubeVideo
        """
        from IPython.display import YouTubeVideo

        return YouTubeVideo(self.vid, width=width, height=height, **kwargs)
Example #13
# Ordinary Differential Equations (ODEs) are yet another form of linear dynamical system and a scientific model used in a wide range of problems of the basic form:
#
# ### $$\dot{x} = A x$$
#
# These are equations in which the instantaneous rate of change of $x$ (i.e., $\dot{x}$, the derivative of $x$) depends on $x$ itself.  Many systems can be modeled with these types of equations.
#
# Here is a quick video that introduces the concepts of Differential Equations. The following is a good review of general ODEs.

# In[ ]:

from IPython.display import YouTubeVideo
YouTubeVideo("8QeCQn7uxnE", width=640, height=360, cc_load_policy=True)

# Now consider an ODE as a system of linear equations:
#
# $$\dot{x_t} = A x_t$$
#
# Based on the current $x$ vector at time $t$ and the matrix $A$, we can calculate the derivative $\dot{x}$ at time $t$.  Once we know the derivative, we can increment the time by some small amount $dt$ and calculate a new value of $x$ as follows:
#
# $$x_{t+1} = x_t + \dot{x_t}dt$$
#
# Then we can do the exact same sequence of calculations again for $t+2$. The following function takes the transition matrix ($A$), the starting state vector ($x_0$) and a number of time steps ($N$), and uses the above equations to calculate each state and return all of the $x$ states:
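# A minimal sketch of such a function (the original implementation is not shown here; the names `A`, `x_0`, and `N` follow the text above):

# In[ ]:

import numpy as np

def simulate(A, x_0, N, dt=0.01):
    """Euler-integrate x-dot = A x and return all N+1 states."""
    x = np.asarray(x_0, dtype=float)
    states = [x.copy()]
    for _ in range(N):
        x = x + (A @ x) * dt   # x_{t+1} = x_t + x_dot_t * dt
        states.append(x.copy())
    return np.array(states)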

#
# The following code generates a trajectory of points starting from `x_0`, applying the matrix $A$ to get $x_1$ and then applying $A$ again to see how the system progresses from the start state.
#
Example #14
# # Signal Generation
# *by Luke Chang*
# 
# Measuring in-vivo brain activity from humans is an extraordinary feat. How do scanners work, and what exactly are we measuring? In this course, we will be learning how to analyze functional magnetic resonance imaging (fMRI) data. Before we dive into the analysis methods, it's important to have a basic understanding of what we are measuring. This course primarily focuses on Blood Oxygenated Level Dependent (BOLD) fMRI signals. In this section, we will watch a few short videos by Martin Lindquist and Tor Wager to gain a very high level understanding of (1) magnetic resonance physics, (2) how images are formed from these signals, (3) the relationship between k-space and image space, and (4) BOLD physiology. Gaining a deep understanding of the MR physics and physiological basis for the BOLD fMRI signal is beyond the scope of this course and we refer the interested reader to the excellent [Huettel, Song, & McCarthy (2004) Functional magnetic resonance imaging textbook](https://www.amazon.com/Functional-Magnetic-Resonance-Imaging-Huettel/dp/0878936270/ref=pd_sbs_14_1/144-9493364-1935804?_encoding=UTF8&pd_rd_i=0878936270&pd_rd_r=ac61b1df-17bf-47c5-8db5-25dfa36bcd16&pd_rd_w=J61zv&pd_rd_wg=d1O2i&pf_rd_p=703f3758-d945-4136-8df6-a43d19d750d1&pf_rd_r=PCEXDFT3TQQ4JW7FD8HF&psc=1&refRID=PCEXDFT3TQQ4JW7FD8HF) for a more in-depth conceptual and quantitative overview.
# 
# The lecture for this section can be viewed {download}`here <../images/lectures/Measurement_Signal.pdf>`.

# ## Basic MR Physics

# In[1]:


from IPython.display import YouTubeVideo

YouTubeVideo('XsDXxgjEJVY')


# ## Image Formation

# In[3]:


YouTubeVideo('PxqDjhO9FUs')


# ## K-Space

# In[4]:

*Written by Luke Chang*

Now that we have learned how to estimate a single-subject model, create contrasts, and run a group-level analysis, the next important topic to cover is how we can threshold these group maps. This is not as straightforward as it might seem as we need to be able to correct for multiple comparisons.

In this tutorial, we will cover how we go from modeling brain responses in each voxel for a single participant to making inferences about the group. We will cover the following topics:

- Issues with correcting for multiple comparisons
- Family Wise Error Rate
- Bonferroni Correction
- False Discovery Rate

Let's get started by watching an overview of multiple comparisons by Martin Lindquist.

from IPython.display import YouTubeVideo

YouTubeVideo('AalIM9-5-Pk')

The primary goal in fMRI data analysis is to make inferences about how the brain processes information. These inferences can be in the form of predictions, but most often we are testing hypotheses about whether a particular region of the brain is involved in a specific type of process. This requires rejecting the null hypothesis $H_0$ (i.e., that there is no effect). Null hypothesis testing is traditionally performed by specifying contrasts between different conditions of an experimental design and assessing if these differences between conditions are reliably present across many participants. There are two main types of errors in null-hypothesis testing.

*Type I error*
- $H_0$ is true, but we mistakenly reject it (i.e., False Positive)
- This is controlled by significance level $\alpha$.

*Type II error*
- $H_0$ is false, but we fail to reject it (False Negative)

The probability that a hypothesis test will correctly reject a false null hypothesis is described as the *power* of the test.

Hypothesis testing in fMRI is complicated by the fact that we are running many tests across each voxel in the brain (hundreds of thousands of tests). Selecting an appropriate threshold requires finding a balance between sensitivity (i.e., true positive rate) and specificity (i.e., true negative rate). There are two main approaches to correcting for multiple tests in fMRI data analysis.

**Familywise Error Rate** (FWER) attempts to control the probability of finding *any* false positives. Mathematically, FWER can be defined as the probability of observing at least one false positive: $\text{FWER} = P(\text{False Positives} \geq 1)$.
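As a concrete illustration (a sketch using statsmodels, which the text above does not reference), Bonferroni controls the FWER by testing each of the $m$ voxels at $\alpha/m$, while FDR procedures adapt the threshold to the observed p-value distribution:

import numpy as np
from statsmodels.stats.multitest import multipletests

rng = np.random.default_rng(0)
# stand-in p-values: 100 "active" voxels among 10,000 tests
pvals = np.concatenate([rng.uniform(0, 1e-4, 100), rng.uniform(size=9900)])

reject_bonf, _, _, _ = multipletests(pvals, alpha=0.05, method='bonferroni')
reject_fdr, _, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
print(reject_bonf.sum(), reject_fdr.sum())  # Bonferroni rejects far fewer tests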
Example #16
from IPython.display import YouTubeVideo
# a short video about using NumPy arrays, from Enthought
YouTubeVideo('vWkb7VahaXQ')
# In[18]:

efficiency_at_mpow = efficiency_lam(subs[Rth])
efficiency_at_mp = efficiency_lam(optimal_point)
f"{efficiency_at_mpow*100}% to {efficiency_at_mp*100}%; a {(efficiency_at_mp-efficiency_at_mpow)*100}% gain"

# So now the task at hand is to move beyond this ideal theoretical example to having SPICE find the values we need, and then building a testbench class in Python that will do the data analysis, along with all the SPICE work we just showed, automatically for nearly any DC circuit. AC circuits and circuits that rely on transient effects (think switch-mode power supplies) will have to be analyzed separately when we cross those bridges.

# ## Cannot move on due to the issue in pyspice that prevents sweeping anything but Current and Voltage Sources; see TODO above

# ## Example 2 from "Maximum Power Transfer Theorem for DC Circuits (with Examples)" @ ~4:47 min

# In[19]:

YouTubeVideo('RbII8o49Hvs', width=500, height=400, start=287)

# In[20]:

reset()
net_1 = Net('N1')
net_2 = Net('N2')
net_3 = Net('N3')
net_4 = Net('N4')

# voltage source, bottom left
vs = V(dc_value=100 @ u_V)
vs['p', 'n'] += net_1, gnd
# resistors on the center leg
rleft = R(ref='left', value=4 @ u_Ohm)
rleft[1, 2] += net_1, net_2
Example #18
from IPython.display import YouTubeVideo
YouTubeVideo("p86BPM1GV8M")
Example #19
# 
# <img src="http://imgs.xkcd.com/comics/future_self.png" alt="comic about comments">

# ## 2.3. Application: A physics experiment
# 
# On the Apollo 15 mission to the Moon, astronaut David Scott famously replicated Galileo's physics experiment in which he showed that gravity accelerates objects of different mass at the same rate. Because there is no air resistance for a falling object on the surface of the Moon, even two objects with very different masses and densities should fall at the same rate. David Scott compared a feather and a hammer.
# 
# You can run the following cell to watch a video of the experiment.

# In[ ]:


from IPython.display import YouTubeVideo
# The original URL is:
#   https://www.youtube.com/watch?v=U7db6ZeLR5s
YouTubeVideo("U7db6ZeLR5s")


# Here's the transcript of the video:
# 
# **167:22:06 Scott**: Well, in my left hand, I have a feather; in my right hand, a hammer. And I guess one of the reasons we got here today was because of a gentleman named Galileo, a long time ago, who made a rather significant discovery about falling objects in gravity fields. And we thought where would be a better place to confirm his findings than on the Moon. And so we thought we'd try it here for you. The feather happens to be, appropriately, a falcon feather for our Falcon. And I'll drop the two of them here and, hopefully, they'll hit the ground at the same time. 
# 
# **167:22:43 Scott**: How about that!
# 
# **167:22:45 Allen**: How about that! (Applause in Houston)
# 
# **167:22:46 Scott**: Which proves that Mr. Galileo was correct in his findings.

# **Newton's Law.** Using this footage, we can also attempt to confirm another famous bit of physics: Newton's law of universal gravitation. Newton's laws predict that any object dropped near the surface of the Moon should fall
# 
# $$\frac{1}{2} G \frac{M}{R^2} t^2 \text{ meters}$$
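# Plugging in standard values for the constants (these numbers are not given in the excerpt above): $G \approx 6.674 \times 10^{-11}$, the Moon's mass $M \approx 7.35 \times 10^{22}$ kg, and its radius $R \approx 1.74 \times 10^6$ m:

# In[ ]:

G = 6.674e-11   # gravitational constant, m^3 / (kg s^2)
M = 7.35e22     # mass of the Moon, kg
R = 1.74e6      # radius of the Moon, m
t = 1.2         # an illustrative fall time, seconds

0.5 * G * M / R**2 * t**2   # about 1.17 meters (0.5*G*M/R^2 is roughly 0.81 m/s^2)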
Example #20
![mediation.png](../images/connectivity/mediation.png)

For example, regions can *directly* influence each other, or they can *indirectly* influence each other via a mediating region, or they can be affected similarly by a *shared influence*. These types of figures are often called *graphs*. These types of *graphical* models can be *directed* or *undirected*. Directed graphs imply a causal relationship, where one region A directly influences another region B. Directed graphs or *causal models* are typically described as *effective connectivity*, while undirected graphs in which the relationship is presumed to be bidirectional are what we typically describe as *functional connectivity*.

In this tutorial, we will work through examples on:
 - Seed-based functional connectivity
 - Psychophysiological interactions
 - Principal Components Analysis
 - Graph Theory
 
Let's start by watching a short overview of connectivity by Martin Lindquist.

from IPython.display import YouTubeVideo

YouTubeVideo('J0KX_rW0hmc')

Now, let's dive in a little bit deeper into the specific details of functional connectivity.

YouTubeVideo('OVAQujut_1o')

## Functional Connectivity
### Seed Voxel Correlations

One relatively simple way to calculate functional connectivity is to compute the temporal correlation between two regions of interest (ROIs). Typically, this is done by extracting the temporal response from a *seed voxel* or the average response within a *seed region*. Then this time course is regressed against all other voxels in the brain to produce a whole brain map of anywhere that shares a similar time course to the seed.
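Conceptually (a minimal numpy sketch with assumed array shapes, separate from the tutorial code that follows), this is just a Pearson correlation between the seed time course and every voxel's time course:

import numpy as np

def seed_correlation(data, seed_ts):
    """data: (n_timepoints, n_voxels) array; seed_ts: (n_timepoints,) seed time course."""
    data_z = (data - data.mean(axis=0)) / data.std(axis=0)
    seed_z = (seed_ts - seed_ts.mean()) / seed_ts.std()
    return data_z.T @ seed_z / len(seed_ts)  # Pearson r for each voxel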

Let's try it ourselves with an example subject from the Pinel Localizer dataset. First, let's import the modules we need for this tutorial and set our paths.

%matplotlib inline

import os
Example #21
# <headingcell level=1>

# Multidimensional Kalman Filter

# <headingcell level=2>

# for a Constant Acceleration Model (CA)

# <markdowncell>

# Situation covered: You have a Position Sensor (e.g. a Vision System) and try to calculate velocity ($\dot x$, $\dot y$ and $\dot z$) as well as position ($x$, $y$ and $z$) of a ball in 3D space.

# <codecell>

YouTubeVideo("tIIJME8-au8")

# <headingcell level=2>

# State Vector - Constant Acceleration

# <markdowncell>

# Constant Acceleration Model for Motion in 3D
# 
# $$x = \begin{bmatrix} x \\ y \\ z \\ \dot x \\ \dot y \\ \dot z \\ \ddot x \\ \ddot y \\ \ddot z \end{bmatrix}$$
# 
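# <markdowncell>

# As a minimal illustration (not part of the original notebook), the matching state-transition matrix $F$ for a time step $dt$, so that $x_{k+1} = F x_k$ under constant acceleration, can be built like this:

# <codecell>

import numpy as np

dt = 0.1  # assumed time step
I3, Z3 = np.eye(3), np.zeros((3, 3))
# position += velocity*dt + 0.5*acceleration*dt^2; velocity += acceleration*dt
F = np.block([[I3, dt * I3, 0.5 * dt**2 * I3],
              [Z3, I3,      dt * I3],
              [Z3, Z3,      I3]])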

# <markdowncell>

# Formal Definition:
Example #22
a[1].set_xlabel('Time', fontsize=18)
a[1].set_title('Signal convolved with HRF kernel', fontsize=18)

# If you are interested in a more detailed overview of convolution in the time domain, I encourage you to watch this [video](https://youtu.be/9Hk-RAIzOaw) by Mike X Cohen. For more details about convolution and the HRF function, see this [overview](https://practical-neuroimaging.github.io/on_convolution.html) using python examples.

# ### Oscillations
#
# Ok, now let's move on to studying time-varying signals that have the shape of oscillating waves.
#
# Let's watch a short video by Mike X Cohen to get some more background on sine waves. Don't worry too much about the MATLAB code, as we will work through similar Python examples in this notebook.

# In[12]:

from IPython.display import YouTubeVideo

YouTubeVideo('9RvZXZ46FRQ')

# Oscillations can be described mathematically as:
#
# $A\sin(2 \pi ft + \theta)$
#
# where $f$ is the frequency, or the speed of the oscillation, described in the number of cycles per second ($Hz$). Amplitude $A$ refers to the height of the waves, which is half the distance from the peak to the trough. Finally, $\theta$ describes the phase angle offset, which is in radians.
#
# Here we will plot a simple sine wave.  Try playing with the different parameters (i.e., amplitude, frequency, & theta) to gain an intuition of how they each impact the shape of the wave.

# In[20]:

from numpy import sin, pi, arange

sampling_freq = 500
time = arange(-1, 1 + 1 / sampling_freq, 1 / sampling_freq)
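# A plausible continuation (the parameter values here are assumed, not from the original):

amplitude = 5
freq = 5      # cycles per second (Hz)
theta = 0     # phase offset, radians

signal = amplitude * sin(2 * pi * freq * time + theta)

import matplotlib.pyplot as plt
plt.plot(time, signal)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.show()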
Example #23
from IPython.display import Image

Embed = Image('http://scienceview.berkeley.edu/view/images/newview.jpg')

# if kwarg `url` is given, the embedding is assumed to be false
SoftLinked = Image(url='http://scienceview.berkeley.edu/view/images/newview.jpg')

# In each case, embed can be specified explicitly with the `embed` kwarg
# ForceEmbed = Image(url='http://scienceview.berkeley.edu/view/images/newview.jpg', embed=True)

Embed

SoftLinked

from IPython.display import YouTubeVideo
# a talk about IPython at Sage Days at U. Washington, Seattle.
# Video credit: William Stein.
YouTubeVideo('1j_HxD4iLn8')

from IPython.display import HTML
from base64 import b64encode

video = open("animation.m4v", "rb").read()
video_encoded = b64encode(video).decode('ascii')  # bytes -> str for embedding
video_tag = '<video controls alt="test" src="data:video/x-m4v;base64,{0}">'.format(video_encoded)
HTML(data=video_tag)

from IPython.display import HTML

s = """<table>
<tr>
<th>Header 1</th>
<th>Header 2</th>
</tr>
</table>"""  # remaining table rows are not present in this snippet
HTML(s)

"""Original file is located at
    https://colab.research.google.com/notebooks/welcome.ipynb

<img alt="Colaboratory logo" height="45px" src="https://colab.research.google.com/img/colab_favicon.ico" align="left" hspace="10px" vspace="0px">

<h1>Welcome to Colaboratory!</h1>

Colaboratory is a free Jupyter notebook environment that requires no setup and runs entirely in the cloud.

With Colaboratory you can write and execute code, save and share your analyses, and access powerful computing resources, all for free from your browser.
"""

#@title Introducing Colaboratory
#@markdown This 3-minute video gives an overview of the key features of Colaboratory:
from IPython.display import YouTubeVideo
YouTubeVideo('inN8seMm7UI', width=600, height=400)
"""## Getting Started

The document you are reading is a  [Jupyter notebook](https://jupyter.org/), hosted in Colaboratory. It is not a static page, but an interactive environment that lets you write and execute code in Python and other languages.

For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
"""

seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
"""To execute the code in the above cell, select it with a click and then either press the ▷ button to the left of the code, or use the keyboard shortcut "⌘/Ctrl+Enter".

All cells modify the same global state, so variables that you define by executing a cell can be used in other cells:
"""

seconds_in_a_week = 7 * seconds_in_a_day
Example #25
plt.ylabel(r'$p(x|k)$')
plt.title("Student's $t$ Distribution")

plt.legend(bbox_to_anchor=(1.25, 1.0))
plt.show()

# In[ ]:

## Let's do an example, thanks to Prof. Cimbala

# In[2]:

from IPython.display import YouTubeVideo
# Prof. Cimbala's example video
YouTubeVideo('pVk3w9aaSSo')

# In[11]:

# measured quantities:
x_mean = 8.240  # kOhm
Sx = 0.314  # kOhm
N = 20  # samples

# for the 95% confidence level
confidence_level = 0.95  # 95%
alpha = 1 - confidence_level
degrees_of_freedom = N - 1

# In[12]:
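# A plausible continuation (not shown in the original): use the t-distribution
# to turn Sx and N into a confidence interval around x_mean.
from scipy import stats

t_crit = stats.t.ppf(1 - alpha / 2, degrees_of_freedom)  # two-tailed critical value
margin = t_crit * Sx / N**0.5                            # precision of the mean
print(f"x = {x_mean:.3f} +/- {margin:.3f} kOhm ({confidence_level:.0%} confidence)")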
Example #26
dframe1.idxmax()

dframe1.max()

dframe1
# We can also do a cumulative sum
dframe1.cumsum()

# A very useful feature is describe, which provides summary statistics
describe = dframe1.describe()

# We can also get information on correlation and covariance

# For more info on correlation and covariance, check out the video below!
from IPython.display import YouTubeVideo
YouTubeVideo('xGbpuFNR1ME')

# Now let's check correlation and covariance on some stock prices!

# Pandas can get info off the web
import pandas as pd
import pandas_datareader as pdweb  # workaround, pandas ver 0.24.2
import datetime
pd.show_versions()

# Get the closing prices
prices = pdweb.DataReader(["CVX", "BP", "XOM"], "yahoo",
                          start=datetime.datetime(2015, 1, 1),
                          end=datetime.datetime(2018, 1, 1))["Adj Close"]
prices

#Show preview
prices.head()
Example #27
- **Library plans** (LIB): Starting from a set of files containing the construction data of the applicators (what we call an **applicator library**), we take the model of the one implanted in the patient and superimpose it on the image acquired of the patient with the applicator in place. In principle it is no longer necessary to see the channels the source travels through, much less to know the first dwell position. Knowing the affine transformation that relates the library's reference frame to that of the acquired image is enough to define the paths the source will travel. In principle, we can only use this approach with rigid applicators.

- **Hybrid plans** (Hybrid): There is also the possibility of a mixed reconstruction: partly direct reconstruction, partly with the help of libraries.

This last type is the one we will use in our case for the reconstruction, and therefore for the image annotation process.

### Treatment planning system (TPS)

The planning system we use is Oncentra Brachy, from the company Elekta. The software integrates several modules that cover treatment planning from the acquisition of the patient's images to the export of the file describing the positions the radioactive source must travel along the channels, and the time it must dwell at each stop to deposit the required dose. Applicator reconstruction is done through two modules: Catheter reconstruction, for direct reconstruction, and Applicator placement, where the applicators included in the library are chosen and positioned. A hybrid reconstruction would use both modules.

## But what does this have to do with segmentation?
In principle, the goal of the reconstruction is not to represent the whole applicator, but only the channels the source will travel through. But since we have the applicator positioned on the image, slice-by-slice segmentation reduces to intersecting the applicator models with each CT/MRI slice.

## Differences from conventional segmentation

In an anatomical segmentation module, an expert delineates the structure slice by slice, or relies on pre-trained AI tools. By contrast, to segment an applicator using applicator libraries, we place its model over the image sequence. One advantage of having a three-dimensional model is that if we interpolate between two images of the sequence to gain axial resolution, the segmentation in that new slice is already known.

## Reconstruction process with the developed tool

### Locating the channels of the intracavitary part

YouTubeVideo('FL8_dLCzlzA', width=515, height=290)

### Positioning the intracavitary part

YouTubeVideo('Kd5etPcEsRM', width=515, height=290)

### Needle reconstruction

YouTubeVideo('6aw-_IdMBuI', width=515, height=290)
Example #28
Jugs of Water - SOLUTION
Problem Statement
You have a five-gallon jug and a three-gallon jug, and an unlimited supply of water (but no measuring cups). How would you come up with exactly four gallons of water?

Solution
This problem has a cameo in the movie Die Hard 3. The solution is below:

In [3]:
from IPython.display import Image
Image(url='http://mindyourdecisions.com/blog/wp-content/uploads/2013/02/water-jug-riddle-1.png')
Out[3]:
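For completeness, here is a quick sanity check of the classic solution in code (not part of the original notebook): fill the 5-gallon jug, pour into the 3-gallon jug, empty the 3, transfer the remaining 2 gallons, refill the 5, and top off the 3.

five, three = 5, 0                    # fill the five-gallon jug
pour = min(five, 3 - three)           # pour into the three-gallon jug (leaves 2)
five, three = five - pour, three + pour
three = 0                             # empty the three-gallon jug
five, three = 0, five                 # move the 2 gallons into the small jug
five = 5                              # refill the five-gallon jug
pour = min(five, 3 - three)           # top off the three-gallon jug (pours 1)
five, three = five - pour, three + pour
assert five == 4                      # exactly four gallons remain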

Hopefully your interviews are not as stressful as this:

In [5]:
from IPython.display import YouTubeVideo
YouTubeVideo('BVtQNK_ZUJg')
Out[5]:
Yippee Ki Yay


Example #29
# coding: utf-8

# # Kalman Filter for Bike Lean Angle Estimation
#
# You've probably seen this on MotoGP, where the camera mounted on the bike stays exactly horizontal, even when the bike leans. This is not as easy as it seems.

# In[725]:

from IPython.display import YouTubeVideo

# In[726]:

YouTubeVideo('-p2ndhw-kfQ', width=720, height=390)

# The first try would be to use the gravitational force and just point the camera to the ground.
#
# This doesn't work on bikes, because they lean at exactly the angle needed to balance the gravitational force against the centrifugal force.
#
# ![Bike Lean](https://upload.wikimedia.org/wikipedia/en/8/87/BikeLeanForces3.PNG)
#
# One has to use two different sensors:
#
# 1. a rotation-rate (gyroscope) sensor for the lean angle
# 2. an acceleration sensor for the gravitational force
#
# Both sensors have to be fused to estimate the lean angle. This is done with a Kalman Filter. We are using [Sympy](http://www.sympy.org/de/) to develop this filter.
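# Before the Kalman derivation, here is a minimal complementary-filter sketch of the fusion idea (an illustration, not the notebook's filter): trust the integrated gyro rate on short time scales and the accelerometer angle on long ones.

# In[ ]:

def fuse_lean_angle(angle, gyro_rate, accel_angle, dt=0.01, k=0.98):
    """One fusion step; k close to 1 weights the gyro more heavily."""
    return k * (angle + gyro_rate * dt) + (1.0 - k) * accel_angle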

# In[727]:

import numpy as np
from sympy import Symbol, symbols, Matrix, sin, cos, acos, pi
Example #30
    def tess_the_movie(self):
        """
        Opens the link to Ethan Kruse's TESS: The Movie YouTube videos for
        the sector your target is observed in.

        Parameters
        ----------

        Attributes
        ----------
        movie_url : str

        """
        def type_of_script():
            try:
                ipy_str = str(type(get_ipython()))
                if 'zmqshell' in ipy_str:
                    return 'jupyter'
                if 'terminal' in ipy_str:
                    return 'ipython'
                return 'terminal'
            except NameError:
                # get_ipython is not defined outside of an IPython session
                return 'terminal'

        sector = self.obj.source_info.sector

        base = "https://www.youtube.com/results?search_query="
        query = "TESS+the+movie+sector+{0}+ethankruse".format(sector)

        # scrape the search results page; the script-tag index is fragile
        # and depends on YouTube's current markup
        soup = BeautifulSoup(requests.get(base + query).text,
                             "html.parser").find_all('script')[26]

        items = soup.text
        items = items.split('\n')[1].split('title')

        good_sector = False

        for subitem in items:
            j = subitem.find('Sector')
            if j > 0 and 'TESS: The Movie' in subitem:

                sect = subitem[j:j + 100].split(',')[0].split(' ')[-1]

                if int(sect) == int(sector):
                    i = subitem.find('/watch?v')
                    ext = subitem[i:i + 100].split('"')[0]
                    good_sector = True
                    break

        if good_sector:
            self.movie_url = 'https://www.youtube.com{0}'.format(ext)

            call_location = type_of_script()

            if (call_location == 'terminal') or (call_location == 'ipython'):
                os.system('python -m webbrowser -t "{0}"'.format(
                    self.movie_url))

            elif (call_location == 'jupyter'):
                from IPython.display import YouTubeVideo
                video_id = self.movie_url.split('=')[-1]  # avoid shadowing the id() builtin
                return YouTubeVideo(id=video_id, width=900, height=500)

        else:
            print('No movie is available yet.')
            return