    def __init__(self, *args, **kwargs):
        '''

        Keywords:
            filename (str) : a path to a Jupyter notebook (".ipynb") file

        '''
        nbformat = import_required(
            'nbformat',
            'The Bokeh notebook application handler requires Jupyter Notebook to be installed.'
        )
        nbconvert = import_required(
            'nbconvert',
            'The Bokeh notebook application handler requires Jupyter Notebook to be installed.'
        )

        if 'filename' not in kwargs:
            raise ValueError('Must pass a filename to NotebookHandler')
        filename = kwargs['filename']

        with open(filename) as f:
            nb = nbformat.read(f, nbformat.NO_CONVERT)
            exporter = nbconvert.PythonExporter()
            source, meta = exporter.from_notebook_node(nb)
            kwargs['source'] = source

        super(NotebookHandler, self).__init__(*args, **kwargs)
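
The conversion those handlers perform can be exercised on its own. A minimal sketch, assuming nbformat and nbconvert are installed; the notebook path is hypothetical:

import nbformat
import nbconvert

# Read the notebook as-is (no version conversion), then export its code
# cells to a single Python source string, just as the handler above does.
with open("example.ipynb") as f:  # hypothetical path
    nb = nbformat.read(f, nbformat.NO_CONVERT)

source, _meta = nbconvert.PythonExporter().from_notebook_node(nb)
print(source)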
Example #2
    def __init__(self, *args, **kwargs):
        
        nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')
        nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')
        
        if 'filename' not in kwargs:
            raise ValueError('Must pass a filename to NotebookHandler')

        with open(kwargs['filename']) as f:
            nb = nbformat.read(f, nbformat.NO_CONVERT)
            exporter = nbconvert.PythonExporter()
            source, meta = exporter.from_notebook_node(nb)
            kwargs['source'] = source

        super(NotebookHandler, self).__init__(*args, **kwargs)
Example #3
    def _make_prop_dict(self) -> pd.DataFrame:
        """ Returns a dataframe containing all the properties of all the submodels of the model being
        analyzed. Used as datasource to show attributes.

        """
        pd = import_required(
            "pandas",
            "Structure graphs require Pandas (http://pandas.pydata.org) to be installed"
        )
        df = pd.DataFrame()
        for x in self._graph.nodes(data=True):
            M = self._model.select_one(dict(id=x[0]))
            Z = pd.DataFrame(self._obj_props_to_df2(M))
            Z["id"] = x[0]
            Z["model"] = str(M)
            Z["values"] = Z["values"].map(lambda x: str(x))
            Z["types"] = Z["types"].map(lambda x: str(x))
            df = df.append(Z)
        return df
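
Both snippets above rely on the same lazy-import pattern: ``import_required`` is called inside the method so the optional dependency is only needed when the feature is actually used, and a readable error is raised otherwise. A minimal sketch of that pattern; ``summarize_csv`` is a hypothetical helper, not part of Bokeh:

from bokeh.util.dependencies import import_required

def summarize_csv(path):
    # pandas is imported only when the function is called; if it is missing,
    # import_required raises RuntimeError with the message given below.
    pd = import_required(
        "pandas",
        "summarize_csv requires pandas (http://pandas.pydata.org) to be installed",
    )
    return pd.read_csv(path).describe()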
Example #4
''' The data in world_cities.csv was taken from GeoNames ``cities5000.zip``
downloaded from

.. code-block:: none

    http://www.geonames.org/export/

on Tuesday September 15, 2015.

Under the ``CC-BY`` license (Creative Commons Attribution license).

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'world_cities sample data requires Pandas (http://pandas.pydata.org) to be installed')

from . import _data_dir

try:
    data = pd.read_csv(_data_dir("world_cities.csv"))
except (IOError, OSError):
    raise RuntimeError('Could not load file "world_cities.csv". Please execute bokeh.sampledata.download()')
Example #5
'''

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required

pd = import_required(
    'pandas',
    'population sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

from . import _data_dir


def load_population():
    csv_file = _data_dir("WPP2012_SA_DB03_POPULATION_QUINQUENNIAL.csv")
    df = pd.read_csv(csv_file, encoding="CP1250")
    df = df[df.Sex != "Both"]
    df = df.drop(["VarID", "Variant", "MidPeriod", "SexID", "AgeGrpSpan"],
                 axis=1)
    df = df.rename(columns={"Time": "Year"})
    df.Value *= 1000
    return df
Example #6
    client_time       datetime64[ns]
    device                    object
    event_name                object
    gender                    object
    city                      object
    latitude                 float64
    longitude                float64
    state                     object
    zip_code                   int64
    marital_status            object
    session_id                object
"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'project_funding sample data requires Pandas (http://pandas.pydata.org) to be installed')

from collections import defaultdict
import json
import os

from pandas.io.json import json_normalize
from six import iteritems
from six.moves.urllib.request import URLopener

def denormalize_column_names(parsed_data):
    """Attempts to remove the column hierarchy if possible when parsing from json.

    Args:
        parsed_data (:class:`pandas.DataFrame`): df parsed from json data using
            :func:`pandas.io.json.json_normalize`.
def test_required_success():
    assert dep.import_required('sys', 'yep') is not None
""" Provide U.S. marriage and divorce statistics between 1867 and 2014

Data from the CDC's National Center for Health Statistics (NCHS) database
(http://www.cdc.gov/nchs/).

Data organized by Randal S. Olson (http://www.randalolson.com)

"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required

pd = import_required(
    "pandas", "us_marriages_divorces sample data requires Pandas (http://pandas.pydata.org) to be installed"
)

from os.path import dirname, join

data = pd.read_csv(join(dirname(__file__), "us_marriages_divorces.csv"))

# Fill in missing data with a simple linear interpolation
data = data.interpolate(method="linear", axis=0).ffill().bfill()
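
The interpolation step above can be seen on a toy frame; a minimal sketch with made-up values (the column names are illustrative):

import numpy as np
import pandas as pd

# Interior NaNs are filled by linear interpolation; ffill/bfill then cover
# any NaNs that remain at the start or end of the frame.
toy = pd.DataFrame({"Year": [1867, 1868, 1869, 1870],
                    "Marriages": [9.7, np.nan, np.nan, 9.1]})
print(toy.interpolate(method="linear", axis=0).ffill().bfill())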
"""

"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
    'sea surface temperature sample data requires Pandas (http://pandas.pydata.org) to be installed')

from os.path import dirname, join

sea_surface_temperature = pd.read_csv(join(dirname(__file__), 'sea_surface_temperature.csv.gz'),
                                      parse_dates=True, index_col=0)
sea_surface_temperature = sea_surface_temperature.rename(columns={'temperature (celsius)': 'temperature'})
sea_surface_temperature.index.name = 'time'
Example #10
'''
This module provides access to probly.csv and numberly.csv

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'perceptions sample data requires Pandas (http://pandas.pydata.org) to be installed')

from os.path import dirname, join

probly = pd.read_csv(join(dirname(__file__), 'probly.csv'))
numberly = pd.read_csv(join(dirname(__file__), 'numberly.csv'))
''' Provide a pandas DataFrame instance of four of the datasets from gapminder.org.

These are read in from csv files that have been downloaded from Bokeh's
sample data on S3. But the original code that generated the csvs from the
raw gapminder data is available at the bottom of this file.
'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required(
    'pandas',
    'gapminder sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

from os.path import join
import sys

from . import _data_dir

data_dir = _data_dir()

datasets = [
    'fertility',
    'life_expectancy',
    'population',
    'regions',
]

for dataset in datasets:
    filename = join(data_dir, 'gapminder_%s.csv' % dataset)
    try:
        # Sketch of the loop body: expose each dataset (e.g. ``fertility``)
        # as a module-level attribute, mirroring the other sample-data modules.
        setattr(sys.modules[__name__], dataset, pd.read_csv(filename))
    except (IOError, OSError):
        raise RuntimeError('Could not load file "%s". Please execute bokeh.sampledata.download()' % filename)
Example #12
"""

"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required(
    'pandas',
    'autompg2 sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

from os.path import dirname, join

autompg2 = pd.read_csv(join(dirname(__file__), 'auto-mpg2.csv'))


def capitalize_words(string):
    return " ".join([word.capitalize() for word in string.split(" ")])


autompg2["manufacturer"] = autompg2["manufacturer"].map(capitalize_words)
autompg2["model"] = autompg2["model"].map(capitalize_words)
autompg2["drv"] = autompg2["drv"].replace({
    "f": "front",
    "r": "rear",
    "4": "4x4"
})
''' Provide U.S. marriage and divorce statistics between 1867 and 2014

Data from the CDC's National Center for Health Statistics (NCHS) database
(http://www.cdc.gov/nchs/).

Data organized by Randal S. Olson (http://www.randalolson.com)

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'us_marriages_divorces sample data requires Pandas (http://pandas.pydata.org) to be installed')

from os.path import dirname, join

data = pd.read_csv(
    join(dirname(__file__), 'us_marriages_divorces.csv'))

# Fill in missing data with a simple linear interpolation
data = data.interpolate(method='linear', axis=0).ffill().bfill()
'''
This module provides access to probly.csv and numberly.csv

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required(
    'pandas',
    'perceptions sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

from os.path import dirname, join

probly = pd.read_csv(join(dirname(__file__), 'probly.csv'))
numberly = pd.read_csv(join(dirname(__file__), 'numberly.csv'))
Example #15
'''

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'population sample data requires Pandas (http://pandas.pydata.org) to be installed')

from . import _data_dir

def load_population():
    csv_file = _data_dir("WPP2012_SA_DB03_POPULATION_QUINQUENNIAL.csv")
    df = pd.read_csv(csv_file, encoding="CP1250")
    df = df[df.Sex != "Both"]
    df = df.drop(["VarID", "Variant", "MidPeriod", "SexID", "AgeGrpSpan"], axis=1)
    df = df.rename(columns={"Time": "Year"})
    df.Value *= 1000
    return df
Example #16
from os.path import dirname, join

from bokeh.models import Range1d
from bokeh.plotting import figure, show, output_file
from bokeh.util.dependencies import import_required

pandas = import_required('pandas', 'get it suckah')

colormap = {
    'chemical explosion': 'orange',
    'quarry blast': 'green',
    'earthquake': 'violet',
    'sonic boom': 'magenta',
    'explosion': 'blue',
    'landslide': 'brown',
    'mining explosion': 'black'
}

title = "QUAKIN' IN MAH BOOTS"
graph = figure(title=title)
graph.xaxis.axis_label = 'Magnitude'
graph.yaxis.axis_label = 'Depth'

quakes = pandas.read_csv(join(dirname(__file__), 'resource/earthquakes.csv'))
graph.circle(quakes['mag'],
             quakes['depth'],
             color=[colormap[x] for x in quakes['type']],
             fill_alpha=0.2,
             size=10)
graph.y_range = Range1d(700, -10)
Example #17
# Bokeh imports
from bokeh.util.dependencies import import_required

import_required("django", "django is required by bokeh.server.django")
import_required("channels", "The package channels is required by bokeh.server.django and must be installed")

# Bokeh imports
from .apps import DjangoBokehConfig
from .routing import autoload, directory, document
from .static import static_extensions

default_app_config = "bokeh.server.django.DjangoBokehConfig"
Example #18
    elements['CPK']                  (convention for molecular modeling color)
    elements['electronic configuration']
    elements['electronegativity']    (units: Pauling)
    elements['atomic radius']        (units: pm)
    elements['ionic radius']         (units: pm)
    elements['van der waals radius'] (units: pm)
    elements['ionization energy']    (units: kJ/mol)
    elements['electron affinity']    (units: kJ/mol)
    elements['phase']                (standard state: solid, liquid, gas)
    elements['bonding type']
    elements['melting point']        (units: K)
    elements['boiling point']        (units: K)
    elements['density']              (units: g/cm^3)
    elements['type']                 (see below)
    elements['year discovered']
    elements['group']
    elements['period']

element types: actinoid, alkali metal, alkaline earth metal, halogen, lanthanoid, metal, metalloid, noble gas, nonmetal, transition metalloid

"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required

pd = import_required("pandas", "periodic_table sample data requires Pandas (http://pandas.pydata.org) to be installed")

from os.path import dirname, join

elements = pd.read_csv(join(dirname(__file__), "elements.csv"))
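
A minimal sketch of querying the columns listed above, assuming the ``elements`` frame loaded here; "noble gas" is one of the element types named in the docstring:

# Select the noble gases and inspect their phase-change temperatures (Kelvin).
noble_gases = elements[elements["type"] == "noble gas"]
print(noble_gases[["melting point", "boiling point", "phase"]])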
Example #19
"""

"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'autompg sample data requires Pandas (http://pandas.pydata.org) to be installed')

from os.path import dirname, join

autompg = pd.read_csv(join(dirname(__file__), 'auto-mpg.csv'))

    def test_success(self):
        assert dep.import_required('sys', 'yep') is not None
Example #21
    def test_success(self) -> None:
        assert dep.import_required('sys', 'yep') is not None

    def test_fail(self):
        with pytest.raises(RuntimeError) as excinfo:
            dep.import_required('bleepbloop', 'nope')
        assert 'nope' in str(excinfo.value)
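
The two tests document the contract: on success ``import_required`` returns the imported module, and on failure it raises ``RuntimeError`` carrying the supplied message. A minimal standalone sketch of the same behaviour (``bleepbloop`` is a deliberately bogus package name, as in the test above):

from bokeh.util.dependencies import import_required

# Success: the stdlib module is importable, so the module object is returned.
sys_mod = import_required('sys', 'sys should always be importable')
print(sys_mod.version_info)

# Failure: a nonexistent package raises RuntimeError whose text contains the message.
try:
    import_required('bleepbloop', 'nope')
except RuntimeError as err:
    print(err)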
Example #23
import six

if six.PY2:
    raise ImportError("bokeh.server.django requires Python 3.x")

from bokeh.util.dependencies import import_required

import_required("django", "django is required by bokeh.server.django")
import_required("channels", "channels is required by bokeh.server.django")

from .apps import DjangoBokehConfig
default_app_config = "bokeh.server.django.DjangoBokehConfig"

from .routing import document, autoload, directory
Example #24
    elements['electronegativity']    (units: Pauling)
    elements['atomic radius']        (units: pm)
    elements['ionic radius']         (units: pm)
    elements['van der waals radius'] (units: pm)
    elements['ionization energy']    (units: kJ/mol)
    elements['electron affinity']    (units: kJ/mol)
    elements['phase']                (standard state: solid, liquid, gas)
    elements['bonding type']
    elements['melting point']        (units: K)
    elements['boiling point']        (units: K)
    elements['density']              (units: g/cm^3)
    elements['type']                 (see below)
    elements['year discovered']
    elements['group']
    elements['period']

element types: actinoid, alkali metal, alkaline earth metal, halogen, lanthanoid, metal, metalloid, noble gas, nonmetal, transition metalloid

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required(
    'pandas',
    'periodic_table sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

from os.path import dirname, join

elements = pd.read_csv(join(dirname(__file__), 'elements.csv'))
def test_required_fail():
    with pytest.raises(RuntimeError) as excinfo:
        dep.import_required('bleepbloop', 'nope')
    assert 'nope' in str(excinfo.value)
Example #26
""" The data in airports.json is a subset of US airports with field
elevations > 1500 meters. The query result was taken from

.. code-block:: none

    http://services.nationalmap.gov/arcgis/rest/services/GlobalMap/GlobalMapWFS/MapServer/10/query

on October 15, 2015.
"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required

pd = import_required("pandas", "airports sample data requires Pandas (http://pandas.pydata.org) to be installed")

import json
import os

from . import _data_dir

with open(os.path.join(_data_dir(), "airports.json"), "r") as data_file:
    content = data_file.read()
    airports = json.loads(content)
    schema = [["attributes", "nam"], ["attributes", "zv3"], ["geometry", "x"], ["geometry", "y"]]
    data = pd.io.json.json_normalize(airports["features"], meta=schema)
    data.rename(columns={"attributes.nam": "name", "attributes.zv3": "elevation"}, inplace=True)
    data.rename(columns={"geometry.x": "x", "geometry.y": "y"}, inplace=True)
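
The flattening step in that example can be illustrated on a hand-written record shaped like the airport features; the field values below are made up, and ``pd.io.json.json_normalize`` is the older pandas spelling used above:

import pandas as pd

# Nested dicts become dot-separated column names, which the example then renames.
features = [{"attributes": {"nam": "EXAMPLE FIELD", "zv3": 1600},
             "geometry": {"x": -105.0, "y": 40.0}}]
flat = pd.io.json.json_normalize(features)
print(flat.columns.tolist())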
Example #27
"""

"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'browsers sample data requires Pandas (http://pandas.pydata.org) to be installed')

from os.path import join, dirname

_data_dir = dirname(__file__)

# http://gs.statcounter.com/#browser_version-ww-monthly-201311-201311-bar
_csv_path = join(_data_dir, "browsers_nov_2013.csv")
browsers_nov_2013 = pd.read_csv(_csv_path, names=["Version", "Share"], skiprows=1)

_versions = browsers_nov_2013.Version.map(lambda x: x.rsplit(" ", 1))
browsers_nov_2013["Browser"] = _versions.map(lambda x: x[0])
browsers_nov_2013["VersionNumber"] = _versions.map(lambda x: x[1] if len(x) == 2 else "0")

# https://github.com/alrra/browser-logos
_browsers = ["Chrome", "Firefox", "Safari", "Opera", "IE"]
icons = {}

for browser in _browsers:
    icon_path = join(_data_dir, "icons", browser.lower() + "_32x32.png")

    with open(icon_path, "rb") as icon:
        icons[browser] = icon.read()
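
How the ``Version`` strings are split above: ``rsplit(" ", 1)`` separates the trailing version number from the browser name, and names with no number fall back to "0". A tiny illustration with made-up values:

# A name-plus-number string splits into two parts; a bare name yields one,
# which is why the snippet above defaults the version number to "0".
print("Chrome 31.0".rsplit(" ", 1))   # ['Chrome', '31.0']
print("Other".rsplit(" ", 1))         # ['Other']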
''' Provide US Unemployment rate data by year, from 1948 to 2013

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required(
    'pandas',
    'unemployment1948 sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

from os.path import dirname, join

data = pd.read_csv(join(dirname(__file__), 'unemployment1948.csv'))
Example #29
    device                    object
    event_name                object
    gender                    object
    city                      object
    latitude                 float64
    longitude                float64
    state                     object
    zip_code                   int64
    marital_status            object
    session_id                object
"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required(
    'pandas',
    'project_funding sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

import os

from six.moves.urllib.request import URLopener

from bokeh.charts.utils import df_from_json

DATA_URL = "https://raw.githubusercontent.com/localytics/data-viz-challenge/master/data.json"
DOWNLOAD_NAME = 'project_funding.json'
CSV_NAME = 'project_funding.csv'

# Get absolute path relative to script
data_dir = os.path.dirname(os.path.realpath(__file__))
json_file_path = os.path.join(data_dir, DOWNLOAD_NAME)
Example #30
""" Daylight hours from http://www.sunrisesunset.com

"""
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'daylight sample data requires Pandas (http://pandas.pydata.org) to be installed')

import re
import datetime
import requests

from six.moves import xrange
from os.path import join, abspath, dirname

url = "http://sunrisesunset.com/calendar.asp"

r0 = re.compile("<[^>]+>|&nbsp;|[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")

def fetch_daylight_hours(lat, lon, tz, dst, year):
    """Fetch daylight hours from sunrisesunset.com for a given location.

       Parameters
       ----------
       lat  : float
           Location's latitude.
       lon  : float
           Location's longitude.
       tz   : int or float
Example #31
    Agriculture
    Architecture
    Art and Performance
    Biology
    Business
    Communications and Journalism
    Computer Science
    Education
    Engineering
    English
    Foreign Languages
    Health Professions
    Math and Statistics
    Physical Sciences
    Psychology
    Public Administration
    Social Sciences and History

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required(
    'pandas',
    'degrees sample data requires Pandas (http://pandas.pydata.org) to be installed'
)

from os.path import dirname, join

xyvalues = pd.read_csv(
    join(dirname(__file__), "percent-bachelors-degrees-women-usa.csv"))
Example #32
''' Provide US Unemployment rate data by year, from 1948 to 2013

'''
from __future__ import absolute_import

from bokeh.util.dependencies import import_required
pd = import_required('pandas',
              'unemployment1948 sample data requires Pandas (http://pandas.pydata.org) to be installed')

from os.path import dirname, join

data = pd.read_csv(join(dirname(__file__), 'unemployment1948.csv'))