Example #1
def generate_reports():
    py.sign_in("juz19", "gns0PM7FQ368i6A8tNOZ")
    try:
        if not os.path.exists('challengedata'):
            os.makedirs('challengedata')
    except OSError as e:
        print('Failed to create directory: challengedata. ' + str(e))
        return
    averages = {}
    url_by_week = {}
    for sub in os.listdir(os.getcwd() + '/challengedata'):
        if sub.startswith('celpp_week'):
            rmsd, week_num, protocol = readJson(sub)
            print(week_num)
            if not rmsd:
                continue
            averages[week_num] = stats(rmsd)
            url_by_week[week_num] = py.plot(box_plot(rmsd, week_num),
                                            filename='Box Plot - ' +
                                            week_num.split('_')[1][4:],
                                            auto_open=False)
            html_string = '''
			<html>
				<head>
					<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
					<style>body{ margin:0 100; background:whitesmoke; }</style>
				</head>
				<body>
					<h1>Week ''' + week_num.split(
                '_')[1][4:] + ''' Visualization of RMSD for Smina</h1>

					<!-- *** Section 1 *** -->
					<h2>Section 1: RMSDs for All Targets in Week ''' + week_num.split(
                    '_')[1][4:] + '''</h2>
					<iframe width="1000" height="550" frameborder="0" seamless="seamless" scrolling="no" \
			src="''' + url_by_week[week_num] + '''.embed?width=800&height=550"></iframe>
					
				</body>
			</html>'''

            try:
                if not os.path.exists('visual'):
                    os.makedirs('visual')
                if not os.path.exists('visual/' + week_num):
                    os.makedirs('visual/' + week_num)

                f = open('visual/' + week_num + '/report.html', 'w')
                f.write(html_string)
                f.close()
            except IOError as e:
                print('Failed to create report.html. ' + str(e))
                break
    if averages:
        generate_summary(averages, week_num, url_by_week)
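
# readJson, stats, box_plot and generate_summary are helper functions defined
# elsewhere in this project. As a rough illustration only (an assumption, not
# the project's actual code), box_plot could build the plotly figure like this:
def box_plot(rmsd, week_num):
    import plotly.graph_objs as go
    week = week_num.split('_')[1][4:]  # same week label used above
    trace = go.Box(y=rmsd, name='Week ' + week)
    return go.Figure(data=[trace], layout=go.Layout(title='RMSD for Week ' + week))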
Example #2
import plotly.plotly as py
import plotly.tools as plotly_tools
import plotly.graph_objs as go
import plotly.offline as offline

import os
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline

from scipy.stats import gaussian_kde

from IPython.display import HTML

py.sign_in("juz19", "gns0PM7FQ368i6A8tNOZ")


def fetch_visual_file():
	global wd
	wd = str(os.getcwd())

	print('All Files will go into the celpp folder')

	cred = (wd + '/credentials.txt')
	try:  # attempt to open the credentials file needed for the ftp connection
		print('Trying to open credentials.txt')
		fo = open(cred, 'r')
		fo.close()
	except IOError:  # write the credentials file if it does not already exist
		print('Writing credentials.txt file')
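		# assumed completion (a sketch, not the original code): write a
		# placeholder credentials.txt so the later ftp code has a file to read;
		# the field layout this project actually expects is not shown here
		fo = open(cred, 'w')
		fo.write('username\npassword\n')  # hypothetical placeholder contents
		fo.close()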
Example #3
from flask import Flask, render_template, request
import plotly
import pandas as pd

import plotly.plotly as py
import plotly.tools as plotly_tools
from plotly.graph_objs import *

import json
import os
py.sign_in("ACodeMZ", "FC5PBOKLqkR0GSFE94e9")
app = Flask(__name__)

#  Read the csv into a pandas DataFrame
data = pd.read_csv('FluNetInteractiveReport.csv')

#  Provide users with the raw data from their selected time frames
#       Option 1: Start date to end date
#           ex: Jan 1 2012 - Mar 1 2013
#       Option 2: Start date to end date with annual frequency
#           ex: Jan 1 - Mar 1 for the past 5 years

# If user selects "Inclusive" - deliver continuous filtered data from the start week of the start year through the end week of the end year
def time_filter_inclusive(start_year, start_week, end_year, end_week):
    # find start row
    temp_df = data.loc[data['Year'] == start_year]
    start_row = temp_df.loc[temp_df['Week'] == start_week]
    # find end row
    temp_df = data.loc[data['Year'] == end_year]
    end_row = temp_df.loc[temp_df['Week'] == end_week + 1]
    # assumed completion (the original snippet is cut off here): return the
    # continuous block of rows between the two positions located above
    return data.loc[start_row.index[0]:end_row.index[0]]
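
# hypothetical usage (the Year/Week column names come from the CSV above; the
# specific weeks are invented for illustration):
subset = time_filter_inclusive(2012, 1, 2013, 9)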

Example #4
import plotly.plotly as py
import plotly.tools as plotly_tools
from plotly.graph_objs import *

import os
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
from matplotlib.finance import quotes_historical_yahoo  # removed in matplotlib >= 2.2; requires a legacy matplotlib
import matplotlib.pyplot as plt
import numpy as np

from scipy.stats import gaussian_kde

from IPython.display import HTML

py.sign_in("jackp", "XXXX")

### Step 1: Generate 2 graphs and 2 tables for report with Plotly Python client

#### First graph: 2014 Apple stock data with moving average

# Let's grab Apple stock data using the matplotlib finance module (http://matplotlib.org/api/finance_api.html) from 2014, then take a moving average with a numpy convolution (http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html).


x = []
y = []
ma = []


def moving_average(interval, window_size):
    # body completed from the identical helper in Example #10 below
    window = np.ones(int(window_size)) / float(window_size)
    return np.convolve(interval, window, 'same')
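
# quick smoke test (data invented for illustration): a window of 3 over
# [1, 2, 3, 4, 5] yields array([1., 2., 3., 4., 3.]); the first and last
# entries show the edge effect of the 'same' convolution
print(moving_average([1, 2, 3, 4, 5], 3))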
Example #5
# Plot what we already have!

import plotly
plotly.tools.set_credentials_file(username='******', api_key='kveygo902q')

import plotly.plotly as py
import plotly.tools as plotly_tools
from plotly.graph_objs import *

py.sign_in(username='******', api_key='kveygo902q')



# cluster DAD
from datetime import datetime

import cufflinks as cf
import matplotlib.pyplot as plt
import seaborn as sns

CL = 0
print("this cluster: top level")

fig, ax = plt.subplots(figsize=(10, 10))
teals = sns.light_palette("#18cfed", as_cmap=True, reverse=True)
brightgreens = sns.light_palette("#03f90c", as_cmap=True, reverse=True)

cmaps = ("Blues_r","Reds_r","Greens_r","Purples_r","Reds_r","Greys_r","Blues_r","Reds_r","Greens_r","Oranges_r","Purples_r","Greys_r")


sns.cubehelix_palette(as_cmap=True, reverse=True, start=.1, rot=0, dark=.3)
trafico = trafico.set_index(['ESTACION'], append=True)
trafico = trafico.unstack('FDIA')
trafico = trafico.T
trafico = trafico.reset_index()
trafico.rename(columns={'level_0': 'FHORA'}, inplace=True)

trafico['FDIA'] = trafico['FDIA'].str.cat(trafico["FHORA"], sep=" ")
trafico.drop(['FHORA'], axis=1, inplace=True)
trafico['FDIA'] = trafico['FDIA'].apply(
    lambda x: datetime.strptime(x, '%d/%m/%y %H:%M'))
trafico.rename(columns={'FDIA': 'FECHA'}, inplace=True)
trafico.sort_index(inplace=True)
trafico2 = trafico
# TRAFICO2 is kept as a tracking variable

# This dataframe can be grouped by day or by hour, using the sum or the mean.
# The idea is to run whatever queries are needed against the whole dataframe.
trafico.reset_index(inplace=True)
trafico = trafico.resample('D', on='FECHA').mean()
trafico.drop(['index'], axis=1, inplace=True)
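
# hypothetical variant of the grouping described above (not in the original):
# hourly totals instead of daily means, using the tracking copy that still
# has FECHA as a regular column
trafico_hora = trafico2.resample('H', on='FECHA').sum()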
del (catalogo, datos_descargados, estaciones, trafico1, trafico2)
# =============================================================================
# Sending to plotly (the credentials must be entered)
# =============================================================================

plotly.tools.set_credentials_file(username='******', api_key='XXX')
py.sign_in('XXX', 'XXX')

cf.set_config_file(offline=False, world_readable=True, theme='ggplot')
trafico.iplot(kind='scatter', filename='enero-febrero media de trafico por dia')
import plotly
plotly.tools.set_credentials_file(username='******', api_key='r0oOvo7yWphavCbFEoMG')
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.tools as plotly_tools
from plotly.graph_objs import *
py.sign_in("adityac564", "r0oOvo7yWphavCbFEoMG")

import re
import tweepy
from tweepy import OAuthHandler
from textblob import TextBlob

class TwitterClient(object):
    '''
    Generic Twitter Class for sentiment analysis.
    '''
    def __init__(self):
        '''
        Class constructor or initialization method.
        '''
        # keys and tokens from the Twitter Dev Console
        consumer_key = 'Dl6Xl0Xhxxxxxxxxxxxxx'
        consumer_secret = 'flrjl36HbaLj3xxxxxxxxxxxxxxx'
        access_token = '809633084370886657-He6xxxxxxxxxxxxxxx'
        access_token_secret = 'Cv37nNYgIDBvdn0KBiVSxxxxxxxxxxxxxx'
 
        # attempt authentication
        try:
            # create OAuthHandler object
            self.auth = OAuthHandler(consumer_key, consumer_secret)
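            # assumed completion (standard tweepy pattern; the original snippet
            # is cut off here): attach the access token and build the client
            self.auth.set_access_token(access_token, access_token_secret)
            self.api = tweepy.API(self.auth)
        except tweepy.TweepError:
            print("Error: Authentication Failed")
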
from datetime import date as dt_date

import pandas
import plotly.plotly as py
import plotly.tools as plotly_tools
from plotly.graph_objs import *

import os
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
from matplotlib.finance import quotes_historical_yahoo
import matplotlib.pyplot as plt

from scipy.stats import gaussian_kde

from IPython.display import HTML

py.sign_in("jackp", "XXXX")


date1 = dt_date(2014, 1, 1)
date2 = dt_date(2014, 12, 12)

tickers = ['AAPL', 'GE', 'IBM', 'KO', 'MSFT', 'PEP']
prices = []
for ticker in tickers:
    quotes = quotes_historical_yahoo(ticker, date1, date2)
    prices.append([q[1] for q in quotes])


# We have all the stock prices in a list of lists - use the code snippet below to convert this into a Pandas dataframe.
df = pandas.DataFrame(prices).transpose()
df.columns = tickers
from datetime import time as dt_tm
from datetime import date as dt_date

import pandas
import plotly.plotly as py
import plotly.tools as plotly_tools
from plotly.graph_objs import *

import os
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
from matplotlib.finance import quotes_historical_yahoo
import matplotlib.pyplot as plt

from scipy.stats import gaussian_kde

from IPython.display import HTML

py.sign_in("jackp", "XXXX")

date1 = dt_date(2014, 1, 1)
date2 = dt_date(2014, 12, 12)

tickers = ['AAPL', 'GE', 'IBM', 'KO', 'MSFT', 'PEP']
prices = []
for ticker in tickers:
    quotes = quotes_historical_yahoo(ticker, date1, date2)
    prices.append([q[1] for q in quotes])

# We have all the stock prices in a list of lists - use the code snippet below to convert this into a Pandas dataframe.
df = pandas.DataFrame(prices).transpose()
df.columns = tickers
df.head()
Example #10
import plotly.plotly as py
import plotly.tools as plotly_tools
from plotly.graph_objs import *

import os
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
from matplotlib.finance import quotes_historical_yahoo
import matplotlib.pyplot as plt
import numpy as np
from datetime import date as dt_date

from scipy.stats import gaussian_kde

from IPython.display import HTML

py.sign_in("yabebal", "amestkilo163291")

#Let's grab Apple stock data using the matplotlib finance model from
#2014, then take a moving average with a numpy convolution.
x = []
y = []
ma = []

def moving_average(interval, window_size):
    window = np.ones(int(window_size))/float(window_size)
    return np.convolve(interval, window, 'same')

date1 = dt_date(2014, 1, 1)
date2 = dt_date(2014, 12, 12)
quotes = quotes_historical_yahoo('AAPL', date1, date2)
if len(quotes) == 0:
    raise SystemExit  # assumed completion: the original snippet is cut off here