Code example #1
File: example2.py  Project: Cadair/ginga
def main(options, args):
    
    logger = log.get_logger("ginga", options=options)

    # create a new plot with default tools, using figure
    fig = figure(x_range=[0,600], y_range=[0,600], plot_width=600, plot_height=600,
                 toolbar_location=None)

    viewer = ib.CanvasView(logger)
    viewer.set_figure(fig)

    def load_file(path):
        image = AstroImage(logger)
        image.load_file(path)
        viewer.set_image(image)

    def load_file_cb(attr_name, old_val, new_val):
        #print(attr_name, old_val, new_val)
        load_file(new_val)

    # add an entry widget and configure it with the callback
    dstdir = options.indir
    path_w = TextInput(value=dstdir, title="File:")
    path_w.on_change('value', load_file_cb)

    if len(args) > 0:
        load_file(args[0])

    # put the path widget and viewer in a layout and add to the document
    curdoc().add_root(vplot(fig, path_w))
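
All of these examples revolve around the same three-argument signature that Bokeh passes to on_change handlers: (attribute name, old value, new value). As a reference point, here is a minimal, self-contained sketch of the TextInput pattern used above, assuming only Bokeh itself (no ginga) and a server app run with bokeh serve; the Div status widget is added purely for illustration:

from bokeh.io import curdoc
from bokeh.models import Div, TextInput

status = Div(text="no file selected")

def load_file_cb(attr_name, old_val, new_val):
    # Bokeh calls this with ('value', previous text, new text)
    status.text = "would load: {}".format(new_val)

path_w = TextInput(value="", title="File:")
path_w.on_change('value', load_file_cb)

curdoc().add_root(path_w)
curdoc().add_root(status)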
Code example #2
def plot():
    # Set up data
    N = 200
    x = np.linspace(0, 4*np.pi, N)
    y = np.sin(x)
    source = ColumnDataSource(data=dict(x=x, y=y))


    # Set up plots
    plot = Figure(plot_height=400, plot_width=400, title="my sine wave",
                  tools="crosshair,pan,reset,resize,save,wheel_zoom",
                  x_range=[0, 4*np.pi], y_range=[-2.5, 2.5])

    plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)


    # Set up widgets
    text = TextInput(title="title", value='my sine wave')
    offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
    amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0)
    phase = Slider(title="phase", value=0.0, start=0.0, end=2*np.pi)
    freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1)


    # Set up callbacks
    def update_title(attrname, old, new):
        plot.title = text.value

    text.on_change('value', update_title)

    def update_data(attrname, old, new):

        # Get the current slider values
        a = amplitude.value
        b = offset.value
        w = phase.value
        k = freq.value

        # Generate the new curve
        x = np.linspace(0, 4*np.pi, N)
        y = a*np.sin(k*x + w) + b

        source.data = dict(x=x, y=y)

    for w in [offset, amplitude, phase, freq]:
        w.on_change('value', update_data)


    # Set up layouts and add to document
    inputs = VBoxForm(children=[text, offset, amplitude, phase, freq])
    fullformat = HBox(children=[inputs, plot], width=800)

    return fullformat, []
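
The function above only builds and returns the layout; how it gets attached to a document depends on the surrounding framework (the empty list in the return value suggests an embedding helper). A hedged sketch of the usual standalone wiring, assuming a plain bokeh serve app:

from bokeh.io import curdoc

fullformat, _ = plot()
curdoc().add_root(fullformat)
curdoc().title = "my sine wave"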
Code example #3
File: example1.py  Project: Cadair/ginga
def main(options, args):
    
    logger = log.get_logger("ginga", options=options)

    TOOLS = "pan,wheel_zoom,box_select,tap"
    
    # create a new plot with default tools, using figure
    fig = figure(x_range=[0,600], y_range=[0,600], plot_width=600, plot_height=600,
                 tools=TOOLS)

    viewer = ib.CanvasView(logger)
    viewer.set_figure(fig)

    ## box_select_tool = fig.select(dict(type=BoxSelectTool))
    ## box_select_tool.select_every_mousemove = True
    #tap_tool = fig.select_one(TapTool).renderers = [cr]

    # open a session to keep our local document in sync with server
    session = push_session(curdoc())

    #curdoc().add_periodic_callback(update, 50)

    def load_file(path):
        image = AstroImage(logger)
        image.load_file(path)
        viewer.set_image(image)

    def load_file_cb(attr_name, old_val, new_val):
        #print(attr_name, old_val, new_val)
        load_file(new_val)

    # add an entry widget and configure it with the callback
    dstdir = options.indir
    path_w = TextInput(value=dstdir, title="File:")
    path_w.on_change('value', load_file_cb)

    curdoc().add_root(vplot(fig, path_w))

    if len(args) > 0:
        load_file(args[0])

    # open the document in a browser
    session.show() 

    # run forever
    session.loop_until_closed() 
Code example #4

# initialize controls
# choose between original and arc length parametrization
parametrization_input = CheckboxGroup(labels=['show original parametrization',
                                              'show arc length parametrization'],
                                      active=[0, 1])
parametrization_input.on_click(parametrization_change)
# slider controlling the current parameter t
t_value_input = Slider(title="parameter t", name='parameter t', value=arc_settings.t_value_init,
                       start=arc_settings.t_value_min, end=arc_settings.t_value_max,
                       step=arc_settings.t_value_step)
t_value_input.on_change('value', t_value_change)
# text input for the x component of the curve
x_component_input = TextInput(value=arc_settings.x_component_input_msg, title="curve x")
x_component_input.on_change('value', curve_change)
# text input for the y component of the curve
y_component_input = TextInput(value=arc_settings.y_component_input_msg, title="curve y")
y_component_input.on_change('value', curve_change)
# dropdown menu for selecting one of the sample curves
sample_curve_input = Dropdown(label="choose a sample function pair or enter one below",
                              menu=arc_settings.sample_curve_names)
sample_curve_input.on_click(sample_curve_change)


# initialize plot
toolset = "crosshair,pan,reset,resize,save,wheel_zoom"
# Generate a figure container
plot = Figure(plot_height=400, plot_width=400, tools=toolset,
              title="Arc length parametrization",
              x_range=[arc_settings.x_min_view, arc_settings.x_max_view],
Code example #5
def update(attr, old, new):

    init_time = datetime.now()

    main_doc.clear()
    main_doc.add_root(layout)
    #main_doc.theme = 'dark_minimal'

    title = ticker0.value

    print('-I- selected item is : ', title)

    #     sentiment = ticker1.value
    reviews = reviews_ms1.copy()
    reviews = reviews[reviews.title == title]
    reviews = reviews.drop_duplicates()
    reviews['id'] = range(len(reviews))

    ## sentence making
    full_comments = []
    full_title = []
    l1 = int(len(reviews) / 2)
    c1, t1, id1 = sentence_finder(reviews, 0, l1)
    c2, t2, id2 = sentence_finder(reviews, l1, len(reviews))
    full_comments = c1 + c2
    full_title = t1 + t2
    full_id = id1 + id2

    reviews_old = reviews[['id', 'comments']]
    reviews_old.columns = ['id', 'comments_full']

    def stripper(s):
        return s.strip()

    reviews = pd.DataFrame()
    reviews['comments'] = full_comments
    reviews['comments'] = reviews['comments'].apply(stripper)
    reviews['title'] = full_title
    reviews['id'] = full_id
    reviews = reviews[reviews.comments != '']
    reviews = reviews.drop_duplicates(subset=['comments'])
    reviews = reviews.merge(reviews_old, on='id', how='left')

    title_words = spacy_tokenizer(reviews.title.iloc[0].replace("'",
                                                                "")).split()

    def title_word_remover(s):
        for t in title_words:
            s = s.replace(t, '')
        return s

    ## For finding bigrams
    data = reviews.comments.values.tolist()
    # Remove Emails
    data = [re.sub(r'\S*@\S*\s?', '', sent) for sent in data]
    # Remove new line characters
    data = [re.sub(r'\s+', ' ', sent) for sent in data]
    # Remove distracting single quotes
    data = [re.sub(r"'", "", sent) for sent in data]
    data_words = list(sent_to_words(data))

    # Build the bigram and trigram models
    bigram = gensim.models.Phrases(
        data_words, min_count=4,
        threshold=50)  # a higher threshold yields fewer phrases.
    # trigram = gensim.models.Phrases(bigram[data_words], threshold=50)
    # Faster way to get a sentence clubbed as a trigram/bigram
    bigram_mod = gensim.models.phrases.Phraser(bigram)
    # trigram_mod = gensim.models.phrases.Phraser(trigram)
    data_words1 = [' '.join(bigram_mod[d]) for d in data_words]
    reviews['comments'] = data_words1

    ## stemming, lemmatising and stop word removal
    reviews['cleaned_comments'] = reviews.comments.apply(spacy_tokenizer)

    ## sentiment finding
    senti = sentimental_analysis.find_sentiment(reviews)
    reviews['sentiment_pred'] = senti

    ## finding all nouns in the full reviews
    all_nouns = []
    for i in tqdm(range(len(reviews))):
        all_nouns = all_nouns + noun_finder(reviews.cleaned_comments.iloc[i])

    ## Nouns and their count with weight
    noun_df = pd.DataFrame(
        pd.Series(all_nouns).astype('str').value_counts().reset_index())
    noun_df.columns = ['noun', 'count']
    noun_df['weight'] = 0
    noun_df.head()

    print('-I- finding the weight and updating it in df ---------------------')

    ## finding the weight and updating it in df
    for text in tqdm(reviews.cleaned_comments):
        doc = nlp(text)
        noun_adj_pairs = []

        for i, token in enumerate(doc):
            bi_words = str(token).split('_')
            if ((token.pos_ != 'ADJ') & (len(bi_words) == 1)):
                continue
            if ((len(bi_words) == 2)):
                if ((nlp(bi_words[0])[0].pos_ == 'ADJ') &
                    (nlp(bi_words[1])[0].pos_ in ('NOUN', 'PROPN')) &
                    (~pd.Series(bi_words[1]).isin(title_words)[0])):
                    noun_adj_pairs.append((bi_words[0], bi_words[1]))
                    try:
                        noun_df.loc[noun_df.noun == str(bi_words[1]),
                                    'weight'] = noun_df.loc[
                                        noun_df.noun == str(bi_words[1]),
                                        'weight'].iloc[0] + 1
                    except:
                        noun_df = noun_df.append(
                            pd.DataFrame(
                                {
                                    'noun': [bi_words[1]],
                                    'count': [1],
                                    'weight': [1]
                                },
                                index=[len(noun_df)]))
                elif ((token.pos_ in ('NOUN', 'PROPN')) &
                      (nlp(bi_words[0])[0].pos_ in ('NOUN', 'PROPN')) &
                      (nlp(bi_words[1])[0].pos_ in ('NOUN', 'PROPN')) &
                      (~pd.Series(bi_words[0]).isin(title_words)[0]) &
                      (~pd.Series(bi_words[1]).isin(title_words)[0])):
                    #             elif((nlp(bi_words[0])[0].pos_ in ('NOUN','PROPN')) & (nlp(bi_words[1])[0].pos_ in ('NOUN','PROPN'))):
                    noun_df.loc[
                        noun_df.noun == str(token),
                        'weight'] = noun_df.loc[noun_df.noun == str(token),
                                                'weight'].iloc[0] + 1
                continue

            if ((pd.Series([str(token)]).isin(positive)[0]) |
                (pd.Series([str(token)]).isin(negative)[0])):
                for j in range(i + 1, min(i + 6, len(doc))):
                    #                 if (doc[j].pos_ in ('NOUN','PROPN')):
                    if ((doc[j].pos_ in ('NOUN', 'PROPN')) &
                        (len(str(doc[j]).split('_')) != 2)):
                        noun_adj_pairs.append((token, doc[j]))
                        noun_df.loc[noun_df.noun == str(doc[j]),
                                    'weight'] = noun_df.loc[
                                        noun_df.noun == str(doc[j]),
                                        'weight'].iloc[0] + 1
                        break

    ## removing words that appear in the title from the nouns, to find the top topics (topic nouns)
    noun_df = noun_df[~noun_df.noun.isin(
        spacy_tokenizer(reviews.title.iloc[0].replace("'", "")).split())]
    noun_df = noun_df.sort_values(by='weight', ascending=False)
    noun_df = noun_df.iloc[0:20, ]
    reviews.to_csv('./CRM_bokeh_app/static/temp.csv', index=False)

    topic_df = pd.DataFrame()
    topic_df['topics'] = noun_df['noun']
    print('-I- topic sentimental distribution finding---')
    pos_r = []
    neg_r = []
    neu_r = []
    full_l = []
    for t in tqdm(topic_df.topics):
        temp = reviews.copy()
        temp['item_presence'] = temp.apply(
            lambda row: topic_presence(row['cleaned_comments'], t), axis=1)
        temp = temp[temp.item_presence != -1]
        full_l.append(len(temp))
        pos_r.append(len(temp[temp.sentiment_pred == 'positive']))
        neg_r.append(len(temp[temp.sentiment_pred == 'negative']))
        neu_r.append(len(temp[temp.sentiment_pred == 'neutral']))


#     topic_df['length'] = full_l
    topic_df['positive'] = pos_r
    topic_df['negative'] = neg_r
    topic_df['neutral'] = neu_r

    final_time = datetime.now()
    print('-I- Time taken for update is = ', str(final_time - init_time))

    global ticker2
    global radio_button_group
    global text_input

    ticker2 = Select(title='Topic',
                     value='all',
                     options=['all'] + list(noun_df.noun))
    ticker2.on_change('value', update2)
    radio_button_group = RadioButtonGroup(
        labels=['all', 'positive', 'negative', 'neutral'], active=0)
    radio_button_group.on_change('active', update2)

    text_input = TextInput(value="", title="Custom topic search:")
    text_input.on_change("value", update2)

    t_r = column([row([ticker2, text_input]), radio_button_group])

    z = plot_senti(reviews, title)
    z1 = plot_topic(noun_df, title)
    z2 = column([
        row([
            plot_senti_stack(topic_df.sort_values(by='positive'), 1),
            plot_senti_stack(topic_df.sort_values(by='negative'), 2)
        ]),
        plot_senti_stack(topic_df.sort_values(by='neutral'), 3)
    ])
    z = column([row([z, z1]), z2])

    z1 = row([plot_senti(reviews, 'all topics'), summarizer(reviews)])
    z2 = plot_rows(reviews)
    z2 = column([z1, z2])
    z3 = column(column([z, t_r], name='needed1'),
                column([z2], name='review1'),
                name='needed')

    main_doc.add_root(z3)
Code example #6
def update_slider(attrname, old, new):
    n_samples = int(samples_slider.value)

    newdata = get_ends(n_samples)
    source_cut.data = dict(newdata.to_dict('list'))

    plot.x_range.factors = newdata['aspects'].tolist() # this was missing
    plot.y_range.start = min(source_cut.data['importance'])
    plot.y_range.end = max(source_cut.data['importance'])

data_table = DataTable(source=source_cut, columns=columns, width=700, height=500)

dataset_select.on_change('value', update_dataset)
exponent_slider.on_change('value', update_dataset)
ratings_box.on_change('value', update_dataset)
samples_slider.on_change('value', update_slider)

# Set up layout
selects = row(dataset_select)
inputs = column(selects, widgetbox(exponent_slider, ratings_box, samples_slider))
table = widgetbox(data_table)

tab1 = Panel(child=table, title="Data")
tab2 = Panel(child=plot, title="Bar Plot")
tabs = Tabs(tabs=[tab1, tab2])
lay = layout([[inputs,tabs],]) 

# Add to document
curdoc().add_root(lay)
curdoc().title = "Keyword Extraction"
Code example #7
File: main.py  Project: harrispilton/fieldcycling
def modify_doc(doc):
    """ Contains the application, including all callbacks
        TODO: could the callbacks be outsourced?
    :param doc:
    :type doc:
    """
    logger.debug('modify_doc has been called')
    def get_data_frames(ie,):
        """ Called one time initially, and then every time the experiment number is changed by the slider
        :param ie: experiment number
        :type ie: int
        :returns: dataframe from stella datafile and dataframe with tau and phi and fitted values
        :rtype: list of 2 pandas dataframes
        """
        logger.debug('get_dataframe with ie={}'.format(ie))
        fid = polymer.getfid(ie) #read FID or series of FIDs for selected experiment
        try:
            tau = polymer.get_tau_axis(ie) #numpy array containing the taus for experiment ie
            try:
                startpoint=fid_slider.range[0] #lower integration bound
                endpoint = fid_slider.range[1] #upper integration bound
            except NameError:
                # fid_slider not initialized for first plot. Use default values:
                startpoint=int(0.05*polymer.getparvalue(ie,'BS'))
                endpoint = int(0.1*polymer.getparvalue(ie,'BS'))
                logger.debug('fid_slider not initialized for first plot. Use default values {} and {}.'.format(startpoint, endpoint))
                
            polymer.addparameter(ie,'fid_range',(startpoint,endpoint)) #add integration range to parameters to make it accessible
            phi = get_mag_amplitude(fid, startpoint, endpoint,
                                    polymer.getparvalue(ie,'NBLK'),
                                    polymer.getparvalue(ie,'BS')) # list containing averaged fid amplitudes (which is proportional to a magnetization phi)
            df = pd.DataFrame(data=np.c_[tau, phi], columns=['tau', 'phi']) # DataFrames are nice
            df['phi_normalized'] = (df['phi'] - df['phi'].iloc[0] ) / (df['phi'].iloc[-1] - df['phi'].iloc[1] ) #Normalize magnetization,
            #Note: in the normalized magnetization the magnetization build-up curves and magnetization decay curves look alike
            #Note: this makes it easier for fitting as everything looks like 1 * exp(-R/time) in first order
            polymer.addparameter(ie,'df_magnetization',df) # make the magnetization dataframe accessible as parameter
            fit_option = 2 #mono exponential, 3 parameter fit
            p0=[1.0, polymer.getparvalue(ie,'T1MX')**-1*2, 0] #choose start parameters for fitting an exponential decay
            df, popt = magnetization_fit(df, p0, fit_option) # use leastsq to find optimal parameters
            polymer.addparameter(ie,'popt(mono_exp)',popt) # add fitting parameters for later access
            logger.info('fitfunction(t) = {} * exp(- {} * t) + {}'.format(*popt)) # print the fitting parameters to console (for convenience)
        except KeyError:
            logger.warning('no relaxation experiment found')
            tau=np.zeros(1)
            phi=np.zeros(1)
            df = pd.DataFrame(data=np.c_[tau, phi], columns=['tau', 'phi'])
            df['phi_normalized'] = np.zeros(1)
            df['fit_phi'] = np.zeros(1)
        return fid, df

    def calculate_mag_dec(attr, old, new, start_ie=None):
        ''' Called from the callback for the experiment chooser.
            Loads the selected experiment and visualizes it in plots p1 and p2:
            gets the experiment number from the slider,
            writes source_fid.data from the fid of the polymer object,
            writes source_mag_dec.data from the dataframe.
            '''
        ie = experiment_slider.value   #get experiment number from the slider
        logger.debug('calculate mag_dec for ie={}'.format(ie))
        fid, df = get_data_frames(ie)
        source_fid.data=ColumnDataSource.from_df(fid) #convert fid to bokeh format
        source_mag_dec.data = ColumnDataSource.from_df(df)

    def plot_par():
        ''' Creates plot for the parameters 
            Called with every update from the callback'''
        logger.debug('creating plot for the parameters')

        # read data according to the selection of select_x/y
        xs = par_df[select_xaxis.value].values
        ys = par_df[select_yaxis.value].values
        # read titles according to the names of select_x/y
        x_title = select_xaxis.value.title()
        y_title = select_yaxis.value.title()

        # remark: many attributes in a bokeh plot cannot be modified after initialization
        #         for example p4.x_axis_type='datetime' does not work. keywords are a
        #         workaround to pass all optional arguments initially
        # set optional keyword arguments, kw, for figure()
        kw = dict() #initialize
        if select_xaxis.value in discrete:
            kw['x_range'] = sorted(set(xs))
        if select_yaxis.value in discrete:
            kw['y_range'] = sorted(set(ys))
        if select_yaxis.value in time:
            kw['y_axis_type'] = 'datetime'
        if select_xaxis.value in time:
            kw['x_axis_type'] = 'datetime'
        kw['title']="%s vs %s" % (x_title, y_title)
        # create figure using optional keywords kw
        p4 = figure(plot_height=300, plot_width=600, tools='pan,box_zoom,reset',
                    **kw)
        # set axis label
        p4.xaxis.axis_label = x_title
        p4.yaxis.axis_label = y_title

        # strings at x axis ticks need a lot of space. solution: rotate label orientation
        if select_xaxis.value in discrete:
            p4.xaxis.major_label_orientation = np.pi / 4 # rotates labels...

        # standard size of symbols
        sz = 9
        # custom size of symbols due to select_size
        if select_size.value != 'None':
            groups = pd.qcut(pd.to_numeric(par_df[select_size.value].values), len(SIZES))
            sz = [SIZES[xx] for xx in groups.codes]

        # standard color
        c = "#31AADE"        
        # custom color due to select_color
        if select_color.value != 'None':
            groups = pd.qcut(pd.to_numeric(par_df[select_color.value]).values, len(COLORS))
            c = [COLORS[xx] for xx in groups.codes]

        # create the plot using circles
        p4.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)
        return p4 #return the plot
    
    def callback_update_plot_1(attr, old, new):
        ''' Callback for update of figure 1 in parameters tab '''
        tabs.tabs[1].child.children[1] = plot_par()
        print(tabs.tabs[1].child.children[1])
        logger.debug('Parameter plot updated')
#        p4 = plot_par()

    def callback_update_p3():
        logger.debug('update plot 3')
        p3 = fit_mag_decay_all(polymer,par_df)
        return p3

    def callback_update_experiment(attr, old, new):
        """ Callback for the experiment chooser
        """
        ie = experiment_slider.value
        logger.debug('Callback experiment update, ie={}'.format(ie))
        fid_slider.end = polymer.getparvalue(ie,'BS')
        try:
            fid_slider.range=polymer.getparvalue(ie,'fid_range')
        except:
            startpoint = int(0.05 * polymer.getparvalue(ie,'BS'))
            endpoint = int(0.1 * polymer.getparvalue(ie,'BS'))
            fid_slider.range=(startpoint,endpoint)
        calculate_mag_dec(attr,old,new)
        
    def callback_load_more_data(attr,old,new):
        ''' callback for loading of data '''
        # TODO: implement
        logger.debug('callback for loading of data ')
        logger.error('Not implemented!')
        path=pathbox.value.strip()
        file=filebox.value.strip()
        if file=="*.sdf":
            logger.info('callback for loading data. filename: {}'.format(file))
            allsdf=filter(lambda x: x.endswith('.sdf'),os.listdir(path))
            for f in allsdf:
                sdf_list.append(sdf.StelarDataFile(f,path))
        else:
            sdf_list.append(sdf.StelarDataFile(file,path))
        
        filenames=[x.file() for x in sdf_list]
        filenames_df=pd.DataFrame(data=filenames,columns=['file'])
        table_source.data=ColumnDataSource.from_df(filenames_df)

    def callback_export_data(attr,old,new):
        logger.debug('callback_export_data has been called ')
        logger.error('Not implemented!')
        pass
    
    def callback_write_table_to_file(attr,old,new): ##FIXME
        logger.debug('callback_write_table_to_file has been called ')
        logger.error('Not implemented!')
        pass
#        path=export_text.value.strip()
#        exportdata=export_source.data
#        CustomJS(args=dict(source=export_source),
#                 code=open(join(dirname(__file__), "export_csv.js")).read())

    def callback_update_parameters():
        ''' callback for the button
            function to call when the button is clicked;
            updates the parameters of polymer, since they can change during evaluation '''
        logger.debug('callback for button (update parameter).')
        par_df, columns, discrete, continuous, time, quantileable = polymer.scan_parameters()
        select_xaxis.options=columns
        select_yaxis.options=columns
        select_size.options=['None']+quantileable
        select_color.options=['None']+quantileable

    logger.info('Starting the script')
    ### This is the start of the script ###
    ### The callbacks are above ###

    #load data:
    # TODO: how to handle multiple datafiles?
    # New Tab for each datafile?
    # dropdown selection to choose datafile
    # complete new start of process? (probably not prefered)

    polymer = load_data('glyzerin_d3_300K.sdf')
    nr_experiments = polymer.get_number_of_experiments()
    start_ie = 1     # initially set ie = 1
    par_df, columns, discrete, continuous, time, quantileable = polymer.scan_parameters(20)
    # for the initial call get the dataframes without callback
    # they are being updated in following callbacks
    fid, df = get_data_frames(start_ie)
    source_fid = ColumnDataSource(data=ColumnDataSource.from_df(fid))
    source_mag_dec = ColumnDataSource(data=ColumnDataSource.from_df(df))
    # initially creates the plots p1 and p2
    p1, p2 = create_plot_1_and_2(source_fid, source_mag_dec)
    
    ### initiates widgets, which will call the callback on change ###
    # initiate slider to choose experiment
    experiment_slider = Slider(start=1, end=nr_experiments, value=1, step=1,callback_policy='mouseup', width=800) #select experiment by value
    # initiate slider for the range in which fid shall be calculated
    # select the interval from which the magnetization is calculated from the fid
    fid_slider = RangeSlider(start=1,end=polymer.getparvalue(start_ie,'BS'),
                             range=polymer.getparvalue(start_ie,'fid_range'),
                             step=1,callback_policy='mouseup', width=400)

    # fit magnetization decay for all experiments
    p3 = fit_mag_decay_all(polymer, par_df)
    # refit mag dec with updated ranges after button push
    button_refit = Button(label='Update',button_type="success")
    button_refit.on_click(callback_update_p3)

    # initialize empty source for experiment slider
    source = ColumnDataSource(data=dict(value=[]))
    # 'data' is the attribute. it's a field in source, which is a ColumnDataSource
    # initiate callback_update_experiment which is the callback
    source.on_change('data',callback_update_experiment) #source for experiment_slider
    experiment_slider.callback = CustomJS(args=dict(source=source),code="""
        source.data = { value: [cb_obj.value] }
    """)#unfortunately this customjs is needed to throttle the callback in current version of bokeh

    # initialize empty source for fid slider, same as above
    source2 = ColumnDataSource(data=dict(range=[], ie=[]))
    source2.on_change('data',calculate_mag_dec)
    fid_slider.callback=CustomJS(args=dict(source=source2),code="""
        source.data = { range: cb_obj.range }
    """)#unfortunately this customjs is needed to throttle the callback in current version of bokeh

    # same for the update button
    button_scan = Button(label='Scan Parameters',button_type="success")
    button_scan.on_click(callback_update_parameters)
    
    # here come the callbacks for x, y, size, color
    select_xaxis = Select(title='X-Axis', value='ZONE', options=columns)
    select_xaxis.on_change('value', callback_update_plot_1)

    select_yaxis = Select(title='Y-Axis', value='TIME', options=columns)
    select_yaxis.on_change('value', callback_update_plot_1)

    select_size = Select(title='Size', value='None', options=['None'] + quantileable)
    select_size.on_change('value', callback_update_plot_1)

    select_color = Select(title='Color', value='None', options=['None'] + quantileable)
    select_color.on_change('value', callback_update_plot_1)

    controls_p4 = widgetbox([button_scan, select_xaxis,select_yaxis,select_color,select_size], width=150)
    #p4 = plot_par()
    layout_p4 = row(controls_p4,plot_par())
    logger.debug('layout for parameter plot created')

    ####
    #### TODO: write file input
    #### TODO: select files to import
    #### TODO: discard imported files
    ####

    # load more data:
    table_source=ColumnDataSource(data=dict())
    sdf_list=[polymer]
    # TODO: This is current plan, to save the different dataframes in a list, right?
    filenames=[x.file() for x in sdf_list]
    files_df=pd.DataFrame(data=filenames,columns=['file'])
    table_source.data=ColumnDataSource.from_df(files_df)
    t_columns = [
        TableColumn(field='file', title='Path / Filename'),
        #TableColumn(field='file', title='Filename'),
        ]
    table=DataTable(source=table_source,columns=t_columns)
    pathbox=TextInput(title="Path",value=os.path.curdir)
    filebox=TextInput(title="Filename",value="*.sdf")
    pathbox.on_change('value',callback_load_more_data)
    filebox.on_change('value',callback_load_more_data)
    layout_input=column(pathbox,filebox,table)

    # Data Out: export data from figures
    #         & export parameters

    export_source=ColumnDataSource(data=dict())
    export_columns=[]
    output_table=DataTable(source=export_source,columns=export_columns)
    export_slider = Slider(start=1, end=4, value=3, step=1,callback_policy='mouseup', width=200) #do we need mouseup on this?
    export_slider.on_change('value',callback_export_data)
    export_text = TextInput(title="Path",value=os.path.curdir)
    export_button = Button(label='Export to csv',button_type="success") # FIXME Callback  doesn't work yet
    export_button.on_click(callback_write_table_to_file)
 
    layout_output=row(column(export_slider,export_text,export_button),output_table)
    print('after layout_output')
    

    # set the layout of the tabs
    layout_p1 = column(experiment_slider, p1,
                       row(
                           column(fid_slider,p2),
                           column(button_refit, p3)
                           ),
                       )
    tab_relaxation = Panel(child = layout_p1, title = 'Relaxation')
    tab_parameters = Panel(child = layout_p4, title = 'Parameters')
    tab_input = Panel(child = layout_input, title = 'Data In')
    tab_output = Panel(child = layout_output, title = 'Data Out')

    # initialize tabs object with 3 tabs
    tabs = Tabs(tabs = [tab_relaxation, tab_parameters,
                        tab_input, tab_output])
    print('tabs')
    doc.add_root(tabs)
    doc.add_root(source) # i need to add source to detect changes
    doc.add_root(source2)
    print('tab tab')
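
Note on the CustomJS snippets above: the dummy ColumnDataSource plus CustomJS exists only to throttle the slider callbacks so the data frames are not recomputed on every pixel of slider movement. In later Bokeh releases (roughly 1.2 onward) the same effect is available without the workaround via the value_throttled attribute; a hedged sketch:

from bokeh.models import Slider

experiment_slider = Slider(start=1, end=10, value=1, step=1, width=800)

def callback_update_experiment(attr, old, new):
    print("slider released at", new)

# fires only once, when the mouse button is released
experiment_slider.on_change('value_throttled', callback_update_experiment)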
Code example #8
def plot():

    # FIGURES AND X-AXIS
    fig1 = Figure(title = 'Energy',  plot_width = WIDTH, plot_height = HEIGHT, tools = TOOLS)

    timeticks = DatetimeTickFormatter(formats=dict(seconds =["%b%d %H:%M:%S"],
                                                   minutes =["%b%d %H:%M"],
                                                   hours =["%b%d %H:%M"],
                                                   days  =["%b%d %H:%M"],
                                                   months=["%b%d %H:%M"],
                                                   years =["%b%d %H:%M %Y"]))
    fig1.xaxis.formatter = timeticks

    # INPUT WIDGETS
    collection_list = CONN[DB].collection_names(include_system_collections=False)
    gliders = sorted([platformID for platformID in collection_list if len(platformID)>2])
    gliders = Select(title = 'PlatformID', value = gliders[0], options = gliders)
    prev_glider = Button(label = '<')
    next_glider = Button(label = '>')
    glider_controlbox = HBox(children = [gliders, prev_glider, next_glider])

    max_amphr = TextInput(title='Max AmpHrs', value='1040')
    deadby_date = TextInput(title='Deadby Date', value='')
    data_controlbox = HBox(max_amphr, deadby_date,  width = 300)

    control_box = HBox(glider_controlbox,
                       data_controlbox)

    # DATA VARS
    coulombs_raw = ColumnDataSource(dict(x=[],y=[]))
    coulombs_ext = ColumnDataSource(dict(x=[],y=[]))
    coulombs_per = ColumnDataSource(dict(x=[],y=[]))

    # AXIS setup
    fig1.yaxis.axis_label = 'Coulombs (AmpHr)'
    fig1.extra_y_ranges = {'usage': Range1d(start=0, end=1200)}


    # PLOT OBJECTS
    fig1.line(  'x', 'y', source = coulombs_raw, legend = 'm_coulombs_amphr_total', color = 'blue')
    fig1.circle('x', 'y', source = coulombs_raw, legend = 'm_coulombs_amphr_total', color = 'blue')
    fig1.line(  'x', 'y', source = coulombs_ext, legend = 'projected',              color = 'red')
    #fig1.cross('x', 'y', source = coulombs_ext, legend = 'projected',  size=10,     color = 'red')
    fig1.renderers.append(Span(name = 'maxamp_span',      location = int(max_amphr.value),  dimension = 'width',  line_color= 'green', line_dash='dashed', line_width=2))
    fig1.renderers.append(Span(name = 'maxamp_intersect', location = 1000*time.time(),      dimension = 'height', line_color= 'green', line_dash='dashed', line_width=2))

    fig1.legend[0].location = 'top_left'
    fig1.legend[0].legend_padding = 30

    # CALLBACK FUNCS
    def update_coulombs(attrib,old,new):
        g = gliders.value

        coulombs_raw.data   = load_sensor(g, 'm_coulomb_amphr_total')
        #coulombs_per.data  = moving_usage(coulombs_raw.data)
        update_projection(None,None,None)


    def update_projection(attrib,old,new):
        g = gliders.value
        try:
            fig1.select('maxamp_span')[0].location = int(max_amphr.value)
            coulombs_ext.data, deadby_date.value = calc_deadby_date(g, int(max_amphr.value))
            fig1.select('maxamp_intersect')[0].location = coulombs_ext.data['x'][-1]
        except Exception as e:
            print('update_projection error',type(e),e)

    #GLIDER SELECTS
    def glider_buttons(increment):
        ops = gliders.options
        new_index = ops.index(gliders.value) + increment
        if new_index >= len(ops):
            new_index = 0
        elif new_index < 0:
            new_index = len(ops)-1
        gliders.value = ops[new_index]
    def next_glider_func():
        glider_buttons(1)
    def prev_glider_func():
        glider_buttons(-1)

    gliders.on_change('value', update_coulombs)
    next_glider.on_click(next_glider_func)
    prev_glider.on_click(prev_glider_func)

    max_amphr.on_change('value', update_projection)

    update_coulombs(None,None,None)

    return vplot(control_box, fig1)
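
The DatetimeTickFormatter(formats=dict(...)) constructor used above is the pre-1.0 Bokeh API. In current Bokeh releases the per-scale formats are passed directly as keyword arguments (recent 3.x versions take single strings, while some older versions expect lists of strings); a hedged sketch of the equivalent formatter:

from bokeh.models import DatetimeTickFormatter

timeticks = DatetimeTickFormatter(
    seconds="%b%d %H:%M:%S",
    minutes="%b%d %H:%M",
    hours="%b%d %H:%M",
    days="%b%d %H:%M",
    months="%b%d %H:%M",
    years="%b%d %H:%M %Y",
)
fig1.xaxis.formatter = timeticks   # fig1 as defined in the example above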
Code example #9
File: taylor_server.py  Project: 0-T-0/bokeh
    order = int(new)
    update_data()

def on_text_value_change(attr, old, new):
    try:
        global expr
        expr = sy.sympify(new, dict(x=xs))
    except (sy.SympifyError, TypeError, ValueError) as exception:
        dialog.content = str(exception)
        dialog.visible = True
    else:
        update_data()

dialog = Dialog(title="Invalid expression")

slider = Slider(start=1, end=20, value=order, step=1, title="Order:")
slider.on_change('value', on_slider_value_change)

text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', on_text_value_change)

inputs = HBox(children=[slider, text])
layout = VBox(children=[inputs, plot, dialog])
update_data()
document.add_root(layout)
session.show(layout)

if __name__ == "__main__":
    print("\npress ctrl-C to exit")
    session.loop_until_closed()
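
The Dialog widget used above for error reporting was removed from Bokeh long ago, but the sympify-and-validate pattern itself still applies. A self-contained sketch, assuming a plain bokeh serve app and using a Div for the error text instead:

import numpy as np
import sympy as sy
from bokeh.io import curdoc
from bokeh.models import Div, TextInput

x = sy.Symbol('x')
error_div = Div(text="")

def on_text_value_change(attr, old, new):
    try:
        expr = sy.sympify(new)
        f = sy.lambdify(x, expr, 'numpy')
        f(np.linspace(0, 4 * np.pi, 5))   # smoke-test the expression
        error_div.text = ""
    except (sy.SympifyError, TypeError, ValueError, NameError) as exception:
        error_div.text = "Invalid expression: {}".format(exception)

text = TextInput(value="sin(x)", title="Expression:")
text.on_change('value', on_text_value_change)

curdoc().add_root(text)
curdoc().add_root(error_div)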
Code example #10
reset_button.on_click(lambda x: reset_ButtonClick())
autoscale_button.on_click(lambda x: autoscale_ButtonClick())
channel_button[0].on_click(lambda x: channel_ButtonClick(1))
channel_button[1].on_click(lambda x: channel_ButtonClick(2))
channel_button[2].on_click(lambda x: channel_ButtonClick(3))
channel_button[3].on_click(lambda x: channel_ButtonClick(4))
pulse_capture_button.on_click(lambda x: pulseCapture_ButtonClick())
auto_save_button.on_click(lambda x: autoSave_ButtonClick())
load_button.on_click(lambda x: load_ButtonClick())
force_save_button.on_click(lambda x: forceSave_ButtonClick())
save_PC_button.on_click(lambda x: savePC_ButtonClick())

acq_period_select.on_change("value", acqPeriod_SelectChange)
channel_name_select1.on_change("value", chanNameSelect1_SelectChange)

force_save_filename_input.on_change("value", forceSaveFilename_InputChange)
channel_name_input.on_change("value", channelName_InputChange)
time_range_input.on_change("value", timeRange_InputChange)
trigger_level_input.on_change("value", triggerLevel_InputChange)
save_filepath_input.on_change("value", saveFilepath_InputChange)
load_file_input.on_change("value", loadFile_InputChange)
save_filepath_PC_input.on_change("value", saveFilepath_InputChange)      #
save_filename_PC_input.on_change("value", forceSaveFilename_InputChange) #reusing callbacks


#set up layout
doc.title = "Intensity Tracker"

grid = layout([
        [channel_button[0],channel_button[1],channel_button[2],channel_button[3]], 
        [all_on_button, all_off_button, reset_button, autoscale_button], 
Code example #11
class MetaModelVisualization(object):
    """
    Top-level container for the Meta Model Visualization.

    Attributes
    ----------
    prob : Problem
        Name of variable corresponding to Problem Component
    meta_model : MetaModel
        Name of empty Meta Model Component object reference
    resolution : int
        Number used to calculate width and height of contour plot
    is_structured_meta_model : Bool
        Boolean used to signal whether the meta model is structured or unstructured
    slider_source : ColumnDataSource
        Data source containing dictionary of sliders
    contour_training_data_source : ColumnDataSource
        Data source containing dictionary of training data points
    bottom_plot_source : ColumnDataSource
        Data source containing data for the bottom subplot
    bottom_plot_scatter_source : ColumnDataSource
        Data source containing scatter point data for the bottom subplot
    right_plot_source : ColumnDataSource
        Data source containing data for the right subplot
    right_plot_scatter_source : ColumnDataSource
        Data source containing scatter point data for the right subplot
    contour_plot_source : ColumnDataSource
        Data source containing data for the contour plot
    input_names : list
        List of input data titles as strings
    output_names : list
        List of output data titles as strings
    training_inputs : dict
        Dictionary of input training data
    x_input_select : Select
        Bokeh Select object containing a list of inputs for the x axis
    y_input_select : Select
        Bokeh Select object containing a list of inputs for the y axis
    output_select : Select
        Bokeh Select object containing a list of inputs for the outputs
    x_input_slider : Slider
        Bokeh Slider object containing a list of input values for the x axis
    y_input_slider : Slider
        Bokeh Slider object containing a list of input values for the y axis
    slider_dict : dict
        Dictionary of slider names and their respective slider objects
    predict_inputs : dict
        Dictionary containing training data points to predict at.
    num_inputs : int
        Number of inputs
    num_outputs : int
        Number of outputs
    limit_range : array
        Array containing the range of each input
    scatter_distance : TextInput
        Text input for user to enter custom value to calculate distance of training points around
        slice line
    right_alphas : array
        Array of points containing alpha values for right plot
    bottom_alphas : array
        Array of points containing alpha values for bottom plot
    dist_range : float
        Value taken from scatter_distance used for calculating distance of training points around
        slice line
    x_index : int
        Value of x axis column
    y_index : int
        Value of y axis column
    output_variable : int
        Value of output axis column
    sliders_and_selects : layout
        Layout containing the sliders and select elements
    doc_layout : layout
        Contains first row of plots
    doc_layout2 : layout
        Contains second row of plots
    Z : array
        A 2D array containing contour plot data
    """

    def __init__(self, model, resolution=50, doc=None):
        """
        Initialize parameters.

        Parameters
        ----------
        model : MetaModelComponent
            Reference to meta model component
        resolution : int
            Value used to calculate the size of contour plot meshgrid
        doc : Document
            The bokeh document to build.
        """
        self.prob = Problem()
        self.resolution = resolution
        logging.getLogger("bokeh").setLevel(logging.ERROR)

        # If the surrogate model coming in is unstructured
        if isinstance(model, MetaModelUnStructuredComp):
            self.is_structured_meta_model = False

            # Create list of input names, check if it has more than one input, then create list
            # of outputs
            self.input_names = [name[0] for name in model._surrogate_input_names]
            if len(self.input_names) < 2:
                raise ValueError('Must have more than one input value')
            self.output_names = [name[0] for name in model._surrogate_output_names]

            # Create reference for unstructured component
            self.meta_model = MetaModelUnStructuredComp(
                default_surrogate=model.options['default_surrogate'])

        # If the surrogate model coming in is structured
        elif isinstance(model, MetaModelStructuredComp):
            self.is_structured_meta_model = True

            self.input_names = [name for name in model._var_rel_names['input']]

            if len(self.input_names) < 2:
                raise ValueError('Must have more than one input value')

            self.output_names = [name for name in model._var_rel_names['output']]

            self.meta_model = MetaModelStructuredComp(
                distributed=model.options['distributed'],
                extrapolate=model.options['extrapolate'],
                method=model.options['method'],
                training_data_gradients=model.options['training_data_gradients'],
                vec_size=1)

        # Pair input list names with their respective data
        self.training_inputs = {}

        self._setup_empty_prob_comp(model)

        # Setup dropdown menus for x/y inputs and the output value
        self.x_input_select = Select(title="X Input:", value=[x for x in self.input_names][0],
                                     options=[x for x in self.input_names])
        self.x_input_select.on_change('value', self._x_input_update)

        self.y_input_select = Select(title="Y Input:", value=[x for x in self.input_names][1],
                                     options=[x for x in self.input_names])
        self.y_input_select.on_change('value', self._y_input_update)

        self.output_select = Select(title="Output:", value=[x for x in self.output_names][0],
                                    options=[x for x in self.output_names])
        self.output_select.on_change('value', self._output_value_update)

        # Create sliders for each input
        self.slider_dict = {}
        self.predict_inputs = {}
        for title, values in self.training_inputs.items():
            slider_data = np.linspace(min(values), max(values), self.resolution)
            self.predict_inputs[title] = slider_data
            # Calculates the distance between slider ticks
            slider_step = slider_data[1] - slider_data[0]
            slider_object = Slider(start=min(values), end=max(values), value=min(values),
                                   step=slider_step, title=str(title))
            self.slider_dict[title] = slider_object

        self._slider_attrs()

        # Length of inputs and outputs
        self.num_inputs = len(self.input_names)
        self.num_outputs = len(self.output_names)

        # Precalculate the problem bounds.
        limits = np.array([[min(value), max(value)] for value in self.training_inputs.values()])
        self.limit_range = limits[:, 1] - limits[:, 0]

        # Positional indices
        self.x_index = 0
        self.y_index = 1
        self.output_variable = self.output_names.index(self.output_select.value)

        # Data sources are filled with initial values
        # Slider Column Data Source
        self.slider_source = ColumnDataSource(data=self.predict_inputs)

        # Contour plot Column Data Source
        self.contour_plot_source = ColumnDataSource(data=dict(
            z=np.random.rand(self.resolution, self.resolution)))
        self.contour_training_data_source = ColumnDataSource(
            data=dict(x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))

        # Bottom plot Column Data Source
        self.bottom_plot_source = ColumnDataSource(data=dict(
            x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
        self.bottom_plot_scatter_source = ColumnDataSource(data=dict(
            bot_slice_x=np.repeat(0, self.resolution), bot_slice_y=np.repeat(0, self.resolution)))

        # Right plot Column Data Source
        self.right_plot_source = ColumnDataSource(data=dict(
            x=np.repeat(0, self.resolution), y=np.repeat(0, self.resolution)))
        self.right_plot_scatter_source = ColumnDataSource(data=dict(
            right_slice_x=np.repeat(0, self.resolution),
            right_slice_y=np.repeat(0, self.resolution)))

        # Text input to change the distance of reach when searching for nearest data points
        self.scatter_distance = TextInput(value="0.1", title="Scatter Distance")
        self.scatter_distance.on_change('value', self._scatter_input)
        self.dist_range = float(self.scatter_distance.value)

        # Grouping all of the sliders and dropdowns into one column
        sliders = [value for value in self.slider_dict.values()]
        sliders.extend(
            [self.x_input_select, self.y_input_select, self.output_select, self.scatter_distance])
        self.sliders_and_selects = row(
            column(*sliders))

        # Layout creation
        self.doc_layout = row(self._contour_data(), self._right_plot(), self.sliders_and_selects)
        self.doc_layout2 = row(self._bottom_plot())

        if doc is None:
            doc = curdoc()

        doc.add_root(self.doc_layout)
        doc.add_root(self.doc_layout2)
        doc.title = 'Meta Model Visualization'

    def _setup_empty_prob_comp(self, metamodel):
        """
        Take data from surrogate ref and pass it into new surrogate model with empty Problem model.

        Parameters
        ----------
        metamodel : MetaModelComponent
            Reference to meta model component

        """
        # Check for structured or unstructured
        if self.is_structured_meta_model:
            # Loop through the input names
            for idx, name in enumerate(self.input_names):
                # Check for no training data
                try:
                    # Append the input data/titles to a dictionary
                    self.training_inputs[name] = metamodel.inputs[idx]
                    # Also, append the data as an 'add_input' to the model reference
                    self.meta_model.add_input(name, 0.,
                                              training_data=metamodel.inputs[idx])
                except TypeError:
                    msg = "No training data present for one or more parameters"
                    raise TypeError(msg)

            # Add the outputs to the model reference
            for idx, name in enumerate(self.output_names):
                self.meta_model.add_output(
                    name, 0.,
                    training_data=metamodel.training_outputs[name])

        else:
            for name in self.input_names:
                try:
                    self.training_inputs[name] = {
                        title for title in metamodel.options['train:' + str(name)]}
                    self.meta_model.add_input(
                        name, 0.,
                        training_data=[
                            title for title in metamodel.options['train:' + str(name)]])
                except TypeError:
                    msg = "No training data present for one or more parameters"
                    raise TypeError(msg)

            for name in self.output_names:
                self.meta_model.add_output(
                    name, 0.,
                    training_data=[
                        title for title in metamodel.options['train:' + str(name)]])

        # Add the subsystem and setup
        self.prob.model.add_subsystem('interp', self.meta_model)
        self.prob.setup()

    def _slider_attrs(self):
        """
        Assign data to slider objects and callback functions.

        Parameters
        ----------
        None

        """
        for name, slider_object in self.slider_dict.items():
            # Checks if there is a callback previously assigned and then clears it
            if len(slider_object._callbacks) == 1:
                slider_object._callbacks.clear()

            # Check if the name matches the 'x input' title
            if name == self.x_input_select.value:
                # Set the object and add an event handler
                self.x_input_slider = slider_object
                self.x_input_slider.on_change('value', self._scatter_plots_update)

            # Check if the name matches the 'y input' title
            elif name == self.y_input_select.value:
                # Set the object and add an event handler
                self.y_input_slider = slider_object
                self.y_input_slider.on_change('value', self._scatter_plots_update)
            else:
                # If it is not an x or y input then just assign it the event handler
                slider_object.on_change('value', self._update)

    def _make_predictions(self, data):
        """
        Run the data parameter through the surrogate model which is given in prob.

        Parameters
        ----------
        data : dict
            Dictionary containing training points.

        Returns
        -------
        array
            np.stack of predicted points.
        """
        # Create dictionary with an empty list
        outputs = {name: [] for name in self.output_names}

        # Parse dict into shape [n**2, number of inputs] list
        inputs = np.empty([self.resolution**2, self.num_inputs])
        for idx, values in enumerate(data.values()):
            inputs[:, idx] = values.flatten()

        # Check for structured or unstructured
        if self.is_structured_meta_model:
            # Assign each row of the data coming in to a tuple. Loop through the tuple, and append
            # the name of the input and value.
            for idx, tup in enumerate(inputs):
                for name, val in zip(data.keys(), tup):
                    self.prob[self.meta_model.name + '.' + name] = val
                self.prob.run_model()
                # Append the predicted value(s)
                for title in self.output_names:
                    outputs[title].append(
                        np.array(self.prob[self.meta_model.name + '.' + title]))

        else:
            for idx, tup in enumerate(inputs):
                for name, val in zip(data.keys(), tup):
                    self.prob[self.meta_model.name + '.' + name] = val
                self.prob.run_model()
                for title in self.output_names:
                    outputs[title].append(
                        float(self.prob[self.meta_model.name + '.' + title]))

        return stack_outputs(outputs)

    def _contour_data_calcs(self):
        """
        Parse input data into a dictionary to be predicted at.

        Parameters
        ----------
        None

        Returns
        -------
        dict
            Dictionary of training data to be predicted at.
        """
        # Create initial data array of training points
        resolution = self.resolution
        x_data = np.zeros((resolution, resolution, self.num_inputs))

        self._slider_attrs()

        # Broadcast the inputs to every row of x_data array
        x_data[:, :, :] = np.array(self.input_point_list)

        # Find the x/y input titles and match their index positions
        for idx, (title, values) in enumerate(self.slider_source.data.items()):
            if title == self.x_input_select.value:
                self.xlins_mesh = values
                x_index_position = idx
            if title == self.y_input_select.value:
                self.ylins_mesh = values
                y_index_position = idx

        # Make meshgrid from the x/y inputs to be plotted
        X, Y = np.meshgrid(self.xlins_mesh, self.ylins_mesh)
        # Move the x/y inputs to their respective positions in x_data
        x_data[:, :, x_index_position] = X
        x_data[:, :, y_index_position] = Y

        pred_dict = {}
        for idx, title in enumerate(self.slider_source.data):
            pred_dict.update({title: x_data[:, :, idx]})

        return pred_dict

    def _contour_data(self):
        """
        Create a contour plot.

        Parameters
        ----------
        None

        Returns
        -------
        Bokeh Image Plot
        """
        resolution = self.resolution
        # Output data array initialization
        y_data = np.zeros((resolution, resolution, self.num_outputs))
        self.input_point_list = [point.value for point in self.slider_dict.values()]

        # Pass the dict to make predictions and then reshape the output to
        # (resolution, resolution, number of outputs)
        y_data[:, :, :] = self._make_predictions(self._contour_data_calcs()).reshape(
            (resolution, resolution, self.num_outputs))
        # Use the output variable to pull the correct column of data from the predicted
        # data (y_data)
        self.Z = y_data[:, :, self.output_variable]
        # Reshape it to be 2D
        self.Z = self.Z.reshape(resolution, resolution)

        # Update the data source with new data
        self.contour_plot_source.data = dict(z=[self.Z])

        # Min to max of training data
        self.contour_x_range = xlins = self.xlins_mesh
        self.contour_y_range = ylins = self.ylins_mesh

        # Color bar formatting
        color_mapper = LinearColorMapper(
            palette="Viridis11", low=np.amin(self.Z), high=np.amax(self.Z))
        color_bar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(), label_standoff=12,
                             location=(0, 0))

        # Contour Plot
        self.contour_plot = contour_plot = figure(
            match_aspect=False,
            tooltips=[(self.x_input_select.value, "$x"), (self.y_input_select.value, "$y"),
                      (self.output_select.value, "@z")], tools='')
        contour_plot.x_range.range_padding = 0
        contour_plot.y_range.range_padding = 0
        contour_plot.plot_width = 600
        contour_plot.plot_height = 500
        contour_plot.xaxis.axis_label = self.x_input_select.value
        contour_plot.yaxis.axis_label = self.y_input_select.value
        contour_plot.min_border_left = 0
        contour_plot.add_layout(color_bar, 'right')
        contour_plot.x_range = Range1d(min(xlins), max(xlins))
        contour_plot.y_range = Range1d(min(ylins), max(ylins))
        contour_plot.image(image='z', source=self.contour_plot_source, x=min(xlins), y=min(ylins),
                           dh=(max(ylins) - min(ylins)), dw=(max(xlins) - min(xlins)),
                           palette="Viridis11")

        # Adding training data points overlay to contour plot
        if self.is_structured_meta_model:
            data = self._structured_training_points()
        else:
            data = self._unstructured_training_points()

        if len(data):
            # Add training data points overlay to contour plot
            data = np.array(data)
            if self.is_structured_meta_model:
                self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
                                                              z=self.meta_model.training_outputs[
                                                              self.output_select.value].flatten())
            else:
                self.contour_training_data_source.data = dict(x=data[:, 0], y=data[:, 1],
                                                              z=self.meta_model._training_output[
                                                              self.output_select.value])

            training_data_renderer = self.contour_plot.circle(
                x='x', y='y', source=self.contour_training_data_source,
                size=5, color='white', alpha=0.50)

            self.contour_plot.add_tools(HoverTool(renderers=[training_data_renderer], tooltips=[
                (self.x_input_select.value + " (train)", '@x'),
                (self.y_input_select.value + " (train)", '@y'),
                (self.output_select.value + " (train)", '@z'), ]))

        return self.contour_plot

    def _right_plot(self):
        """
        Create the right side subplot to view the projected slice.

        Parameters
        ----------
        None

        Returns
        -------
        Bokeh figure
        """
        # List of the current positions of the sliders
        self.input_point_list = [point.value for point in self.slider_dict.values()]

        # Find the title of the y input and match it with the data
        y_idx = self.y_input_select.value
        y_data = self.predict_inputs[y_idx]
        # Find the position of the x_input slider
        x_value = self.x_input_slider.value

        # Rounds the x_data to match the predict_inputs value
        subplot_value_index = np.where(
            np.around(self.predict_inputs[self.x_input_select.value], 5) ==
            np.around(x_value, 5))[0]

        # Make slice in Z data at the point calculated before and add it to the data source
        z_data = self.Z[:, subplot_value_index].flatten()

        x = z_data
        y = self.slider_source.data[y_idx]

        # Update the data source with new data
        self.right_plot_source.data = dict(x=x, y=y)

        # Create and format figure
        self.right_plot_fig = right_plot_fig = figure(
            plot_width=250, plot_height=500,
            title="{} vs {}".format(y_idx, self.output_select.value), tools="pan")
        right_plot_fig.xaxis.axis_label = self.output_select.value
        right_plot_fig.yaxis.axis_label = y_idx
        right_plot_fig.xaxis.major_label_orientation = math.pi / 9
        right_plot_fig.line(x='x', y='y', source=self.right_plot_source)
        right_plot_fig.x_range.range_padding = 0.1
        right_plot_fig.y_range.range_padding = 0.02

        # Determine distance and alpha opacity of training points
        if self.is_structured_meta_model:
            data = self._structured_training_points(compute_distance=True, source='right')
        else:
            data = self._unstructured_training_points(compute_distance=True, source='right')

        self.right_alphas = 1.0 - data[:, 2] / self.dist_range

        # Training data scatter plot
        scatter_renderer = right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None,
                                                  fill_color='#000000',
                                                  fill_alpha=self.right_alphas.tolist())

        right_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
            (self.output_select.value + " (train)", '@x'),
            (y_idx + " (train)", '@y'),
        ]))
        right_plot_fig.scatter(x=data[:, 3], y=data[:, 1], line_color=None, fill_color='#000000',
                               fill_alpha=self.right_alphas.tolist())

        span_width = self.dist_range * (max(y_data) - min(y_data))

        # Set the right_plot data source to new values
        self.right_plot_scatter_source.data = dict(
            right_slice_x=np.repeat(x_value, self.resolution), right_slice_y=y_data,
            left_dashed=[i - span_width for i in np.repeat(x_value, self.resolution)],
            right_dashed=[i + span_width for i in np.repeat(x_value, self.resolution)])

        self.contour_plot.line(
            'right_slice_x', 'right_slice_y', source=self.right_plot_scatter_source,
            color='black', line_width=2)
        self.contour_plot.line(
            'left_dashed', 'right_slice_y', line_dash='dashed',
            source=self.right_plot_scatter_source, color='black', line_width=2)
        self.contour_plot.line(
            'right_dashed', 'right_slice_y', line_dash='dashed',
            source=self.right_plot_scatter_source, color='black', line_width=2)

        return self.right_plot_fig

    def _bottom_plot(self):
        """
        Create the bottom subplot to view the projected slice.

        Parameters
        ----------
        None

        Returns
        -------
        Bokeh figure
        """
        # List of the current positions of the sliders
        self.input_point_list = [point.value for point in self.slider_dict.values()]

        # Find the title of the x input and match it with the data
        x_idx = self.x_input_select.value
        x_data = self.predict_inputs[x_idx]
        # Find the position of the y_input slider
        y_value = self.y_input_slider.value

        # Rounds the y_data to match the predict_inputs value
        subplot_value_index = np.where(
            np.around(self.predict_inputs[self.y_input_select.value], 5) ==
            np.around(y_value, 5))[0]

        # Make slice in Z data at the point calculated before and add it to the data source
        z_data = self.Z[subplot_value_index, :].flatten()

        x = self.slider_source.data[x_idx]
        y = z_data

        # Update the data source with new data
        self.bottom_plot_source.data = dict(x=x, y=y)

        # Create and format figure
        self.bottom_plot_fig = bottom_plot_fig = figure(
            plot_width=550, plot_height=250,
            title="{} vs {}".format(x_idx, self.output_select.value), tools="")
        bottom_plot_fig.xaxis.axis_label = x_idx
        bottom_plot_fig.yaxis.axis_label = self.output_select.value
        bottom_plot_fig.line(x='x', y='y', source=self.bottom_plot_source)
        bottom_plot_fig.x_range.range_padding = 0.02
        bottom_plot_fig.y_range.range_padding = 0.1

        # Determine distance and alpha opacity of training points
        if self.is_structured_meta_model:
            data = self._structured_training_points(compute_distance=True)
        else:
            data = self._unstructured_training_points(compute_distance=True)

        self.bottom_alphas = 1.0 - data[:, 2] / self.dist_range

        # Training data scatter plot
        scatter_renderer = bottom_plot_fig.scatter(x=data[:, 0], y=data[:, 3], line_color=None,
                                                   fill_color='#000000',
                                                   fill_alpha=self.bottom_alphas.tolist())

        bottom_plot_fig.add_tools(HoverTool(renderers=[scatter_renderer], tooltips=[
            (x_idx + " (train)", '@x'),
            (self.output_select.value + " (train)", '@y'),
        ]))

        span_width = self.dist_range * (max(x_data) - min(x_data))

        # Set the bottom_plot data source to new values
        self.bottom_plot_scatter_source.data = dict(
            bot_slice_x=x_data, bot_slice_y=np.repeat(y_value, self.resolution),
            upper_dashed=[i + span_width for i in np.repeat(y_value, self.resolution)],
            lower_dashed=[i - span_width for i in np.repeat(y_value, self.resolution)])

        self.contour_plot.line(
            'bot_slice_x', 'bot_slice_y', source=self.bottom_plot_scatter_source, color='black',
            line_width=2)
        self.contour_plot.line(
            'bot_slice_x', 'upper_dashed', line_dash='dashed',
            source=self.bottom_plot_scatter_source, color='black', line_width=2)
        self.contour_plot.line(
            'bot_slice_x', 'lower_dashed', line_dash='dashed',
            source=self.bottom_plot_scatter_source, color='black', line_width=2)

        return self.bottom_plot_fig

    def _unstructured_training_points(self, compute_distance=False, source='bottom'):
        """
        Calculate the training points and return an array containing the position and alpha.

        Parameters
        ----------
        compute_distance : bool
            If true, compute the distance of training points from surrogate line.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        array
            The array of training points and their alpha opacity with respect to the surrogate line
        """
        # Input training data and output training data
        x_training = self.meta_model._training_input
        training_output = np.squeeze(stack_outputs(self.meta_model._training_output), axis=1)

        # Index of input/output variables
        x_index = self.x_input_select.options.index(self.x_input_select.value)
        y_index = self.y_input_select.options.index(self.y_input_select.value)
        output_variable = self.output_names.index(self.output_select.value)

        # Vertically stack the x/y inputs and then transpose them
        infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
        if not compute_distance:
            return infos

        points = x_training.copy()

        # Normalize so each dimension spans [0, 1]
        points = np.divide(points, self.limit_range)
        dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
        scaled_x0 = np.divide(self.input_point_list, self.limit_range)

        # Query the nearest neighbors tree for the closest points to the scaled x0 array
        # Nearest points to x slice
        if x_training.shape[1] < 3:

            tree = cKDTree(points)
            # Query the nearest neighbors tree for the closest points to the scaled x0 array
            dists, idxs = tree.query(
                scaled_x0, k=len(x_training), distance_upper_bound=self.dist_range)

            # kdtree query always returns requested k even if there are not enough valid points
            idx_finite = np.where(np.isfinite(dists))
            dists = dists[idx_finite]
            idxs = idxs[idx_finite]

        else:
            dists, idxs = self._multidimension_input(scaled_x0, points, source=source)

        # data contains:
        # [x_value, y_value, ND-distance, func_value]

        data = np.zeros((len(idxs), 4))
        for dist_index, j in enumerate(idxs):
            data[dist_index, 0:2] = infos[j, :]
            data[dist_index, 2] = dists[dist_index]
            data[dist_index, 3] = training_output[j, output_variable]

        return data

    def _structured_training_points(self, compute_distance=False, source='bottom'):
        """
        Calculate the training points and return an array containing the position and alpha.

        Parameters
        ----------
        compute_distance : bool
            If true, compute the distance of training points from surrogate line.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        array
            The array of training points and their alpha opacity with respect to the surrogate line
        """
        # Create tuple of the input parameters
        input_dimensions = tuple(self.meta_model.inputs)

        # Input training data and output training data
        x_training = np.array([z for z in product(*input_dimensions)])
        training_output = self.meta_model.training_outputs[self.output_select.value].flatten()

        # Index of input/output variables
        x_index = self.x_input_select.options.index(self.x_input_select.value)
        y_index = self.y_input_select.options.index(self.y_input_select.value)

        # Vertically stack the x/y inputs and then transpose them
        infos = np.vstack((x_training[:, x_index], x_training[:, y_index])).transpose()
        if not compute_distance:
            return infos

        points = x_training.copy()

        # Normalize so each dimension spans [0, 1]
        points = np.divide(points, self.limit_range)
        self.dist_limit = np.linalg.norm(self.dist_range * self.limit_range)
        scaled_x0 = np.divide(self.input_point_list, self.limit_range)
        # Query the nearest neighbors tree for the closest points to the scaled x0 array
        # Nearest points to x slice

        if x_training.shape[1] < 3:
            x_tree, x_idx = self._two_dimension_input(scaled_x0, points, source=source)
        else:
            x_tree, x_idx = self._multidimension_input(scaled_x0, points, source=source)

        # format for 'data'
        # [x_value, y_value, ND-distance_(x or y), func_value]

        n = len(x_tree)
        data = np.zeros((n, 4))
        for dist_index, j in enumerate(x_idx):
            data[dist_index, 0:2] = infos[j, :]
            data[dist_index, 2] = x_tree[dist_index]
            data[dist_index, 3] = training_output[j]

        return data

    def _two_dimension_input(self, scaled_points, training_points, source='bottom'):
        """
        Calculate the distance of training points to the surrogate line.

        Parameters
        ----------
        scaled_points : array
            Array of normalized slider positions.
        training_points : array
            Array of input training data.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        x_tree : array
            One-dimensional array of distances to the points that are within the dist range.
        idxs : array
            Indices of the closest points that are within the dist range.
        """
        # Column of the input
        if source == 'right':
            col_idx = self.y_input_select.options.index(self.y_input_select.value)
        else:
            col_idx = self.x_input_select.options.index(self.x_input_select.value)

        # Delete the axis of input from source to predicted 1D distance
        x = np.delete(scaled_points, col_idx, axis=0)
        x_training_points = np.delete(training_points, col_idx, axis=1).flatten()

        # Tree of point distances
        x_tree = np.abs(x - x_training_points)

        # Only return points that are within our distance-viewing parameter.
        idx = np.where(x_tree <= self.dist_range)
        x_tree = x_tree[idx]
        return x_tree, idx[0]

    def _multidimension_input(self, scaled_points, training_points, source='bottom'):
        """
        Calculate the distance of training points to the surrogate line.

        Parameters
        ----------
        scaled_points : array
            Array of normalized slider positions.
        training_points : array
            Array of input training data.
        source : str
            Which subplot the method is being called from.

        Returns
        -------
        dists : array
            Distances of the closest points that are within the dist range.
        idxs : array
            Indices of the closest points that are within the dist range.
        """
        # Column of the input
        if source == 'right':
            col_idx = self.y_input_select.options.index(self.y_input_select.value)

        else:
            col_idx = self.x_input_select.options.index(self.x_input_select.value)

        # Delete the axis of input from source to predicted distance
        x = np.delete(scaled_points, col_idx, axis=0)
        x_training_points = np.delete(training_points, col_idx, axis=1)

        # Tree of point distances
        x_tree = cKDTree(x_training_points)

        # Query the nearest neighbors tree for the closest points to the scaled array
        dists, idx = x_tree.query(x, k=len(x_training_points),
                                  distance_upper_bound=self.dist_range)

        # kdtree query always returns requested k even if there are not enough valid points
        idx_finite = np.where(np.isfinite(dists))
        dists_finite = dists[idx_finite]
        idx = idx[idx_finite]
        return dists_finite, idx

    # Event handler functions
    def _update_all_plots(self):
        self.doc_layout.children[0] = self._contour_data()
        self.doc_layout.children[1] = self._right_plot()
        self.doc_layout2.children[0] = self._bottom_plot()

    def _update_subplots(self):
        self.doc_layout.children[1] = self._right_plot()
        self.doc_layout2.children[0] = self._bottom_plot()

    def _update(self, attr, old, new):
        self._update_all_plots()

    def _scatter_plots_update(self, attr, old, new):
        self._update_subplots()

    def _scatter_input(self, attr, old, new):
        # Text input update function of dist range value
        self.dist_range = float(new)
        self._update_all_plots()

    def _x_input_update(self, attr, old, new):
        # Checks that x and y inputs are not equal to each other
        if new == self.y_input_select.value:
            raise ValueError("Inputs should not equal each other")
        else:
            self.x_input_select.value = new
            self._update_all_plots()

    def _y_input_update(self, attr, old, new):
        # Checks that x and y inputs are not equal to each other
        if new == self.x_input_select.value:
            raise ValueError("Inputs should not equal each other")
        else:
            self.y_input_select.value = new
            self._update_all_plots()

    def _output_value_update(self, attr, old, new):
        self.output_variable = self.output_names.index(new)
        self._update_all_plots()
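
A minimal, standalone sketch (not part of the class above) of the cKDTree query pattern used in _unstructured_training_points and _multidimension_input: with distance_upper_bound set, query() still returns the requested k entries and pads the missing ones with infinite distances, so only the finite entries are kept. The sample points below are made up for illustration.

import numpy as np
from scipy.spatial import cKDTree

points = np.random.rand(20, 2)                 # stand-in for normalized training inputs
tree = cKDTree(points)
dists, idxs = tree.query(np.array([0.5, 0.5]), k=len(points),
                         distance_upper_bound=0.2)

# entries beyond the distance bound come back as inf; keep only the finite ones
finite = np.isfinite(dists)
dists, idxs = dists[finite], idxs[finite]
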
Code example #12
0
# initialize controls
# buttons for choosing a sample function
sample_function_type = RadioButtonGroup(labels=fs.function_names, active=fs.function_init)

# here one can choose arbitrary input function
default_function_input = TextInput(value=fs.function_input_init)
default_function_period_start = TextInput(title='period start', value=fs.timeinterval_start_init)
default_function_period_end = TextInput(title='period end', value=fs.timeinterval_end_init)

# slider controlling degree of the fourier series
degree = Slider(title="degree", name='degree', value=fs.degree_init, start=fs.degree_min,
                end=fs.degree_max, step=fs.degree_step)

# initialize callback behaviour
degree.on_change('value', degree_change)
default_function_input.on_change('value',
                                 type_input_change)  # todo write default functions for any callback, like above
default_function_period_start.on_change('value', type_input_change)
default_function_period_end.on_change('value', type_input_change)
sample_function_type.on_change('active', type_input_change)

# initialize plot
toolset = "crosshair,pan,reset,resize,save,wheel_zoom"
# Generate a figure container
plot = Figure(plot_height=fs.resolution,
              plot_width=fs.resolution,
              tools=toolset,
              title="Fourier Series Approximation",
              x_range=[fs.x_min, fs.x_max],
              y_range=[fs.y_min, fs.y_max]
              )
# Plot the line by the x,y values in the source property
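
The snippet above is cut off after the figure is created. A hedged, hypothetical continuation (the source variable and the layout below are illustrative, not taken from the original) would attach a data source to a line glyph and register the controls and plot with the document:

from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource

source = ColumnDataSource(data=dict(x=[], y=[]))   # hypothetical source for the approximation
plot.line('x', 'y', source=source, line_width=2)

curdoc().add_root(column(sample_function_type, default_function_input,
                         default_function_period_start, default_function_period_end,
                         degree, plot))
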
Code example #13
0
	def plotting(self):



		#Tools = [hover, TapTool(), BoxZoomTool(), BoxSelectTool(), PreviewSaveTool(), ResetTool()]
		TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,hover,previewsave"


		tab_plots = []
		#output_file("test.html")
		self.all_elements = []
		self.elements_comparison = []

		for attr_id, i in zip(self.attribute_ids, range(len(self.attribute_ids))):
			
			"""
			create plots for each datafile and put them in a tab.
			"""
			list_of_datasets = getattr(self, attr_id)
			y_axis_units = [x["y_unit"] for x in list_of_datasets]
			x_axis_units = [x["x_unit"] for x in list_of_datasets]

			figure_obj = figure(plot_width = 1000, plot_height = 800, y_axis_type = "log",
			title = attr_id, tools = TOOLS)
			#figure_obj.axes.major_label_text_font_size("12pt")
			#figure_obj.major_label_text_font_size("12pt")
			
			setattr(self, attr_id+"_"+"figure_obj",figure_obj)

			figure_obj.yaxis.axis_label = y_axis_units[0]
			figure_obj.xaxis.axis_label = x_axis_units[0]

			if not all(x == y_axis_units[0] for x in y_axis_units):
				for unit, data in zip(y_axis_units, list_of_datasets): 
					if not unit == y_axis_units[0]:
						figure_obj.extra_y_ranges =  {"foo": Range1d(start = np.amin(data["data"]["y"]),
						end = np.amax(data["data"]["y"]))}
						figure_obj.add_layout(LogAxis(y_range_name = "foo", axis_label = unit), "right")
						break

			if not all(x == x_axis_units[0] for x in x_axis_units):
				for unit, data in zip(x_axis_units, list_of_datasets): 
					if not unit == x_axis_units[0]:
						figure_obj.extra_x_ranges =  {"bar": Range1d(start = np.amin(data["data"]["x"]),
						end = np.amax(data["data"]["x"]))}
						figure_obj.add_layout(LinearAxis(x_range_name = "bar", axis_label = unit), "above")
						break



			figure_obj.xaxis.axis_label = list_of_datasets[0]["x_unit"]
			colour_list = Spectral11 + RdPu9 + Oranges9
			colour_indices = [0, 2, 8, 10, 12, 14, 20, 22, 1, 3, 9, 11, 13, 15]

			list_of_elements = []

			for dataset, color_index in zip(list_of_datasets, colour_indices):

				self.all_elements.append(dataset["sample element"]) #strip isotope number 
				color = colour_list[color_index]

				source = ColumnDataSource(data = dataset["data"]) #Datastructure for source of plotting

				setattr(self, attr_id+"_"+dataset["sample element"]+"_source", source) #Source element generalized for all plotting				


				list_of_elements.append(dataset["sample element"])

				figure_obj.line("x", "y", source = getattr(self, attr_id+"_"+dataset["sample element"]
								+"_source"), line_width = 2, line_color = color, 
								legend = dataset["sample element"], name = dataset["sample element"],
								 )

			hover = figure_obj.select_one(HoverTool).tooltips = [("element", "@element"), ("(x,y)", "($x, $y)")]

			radio_group = RadioGroup(labels = list_of_elements, active=0)

			"""
			Need to fetch default variables from input file and replace DEFAULT

			Block of code produces the layout of buttons and callbacks
			"""

			
			#Calculations on the dataset
			text_input_rsf = TextInput(value = "default", title = "RSF (at/cm^3): ")
			do_integral_button = Button(label = "Calibration Integral")
			smoothing_button = Button(label = "Smoothing on selected curve")

			text_input_sputter = TextInput(value = "default", title = "Sputter speed: float unit")
			text_input_crater_depth = TextInput(value = "default", title = "Depth of crater in: float")
			

			radio_group.on_change("active", lambda attr, old, new: None)

			text_input_xval_integral = TextInput(value = "0", title = "x-value for calibration integral ")
			text_input_yval_integral = TextInput(value = "0", title = "y-value for calibration integral ")

			#Save files for later use
			save_flexDPE_button = Button(label = "Save element for FlexPDE")
			save_all_flexDPE_button = Button(label = "Save all elements for FlexPDE")


			#Pointers to methods on click / change handlers
			do_integral_button.on_click(lambda identity = self.attribute_ids[i], radio = radio_group, 
										x_box = text_input_xval_integral, y_box = text_input_yval_integral: 
										self.integrate(identity, radio, x_box, y_box))

			smoothing_button.on_click(lambda identity = self.attribute_ids[i], radio = radio_group: 
									self.smoothing(identity, radio) )

			save_flexDPE_button.on_click(lambda identity = self.attribute_ids[i], radio = radio_group: 
										self.write_to_flexPDE(identity, radio))

			save_all_flexDPE_button.on_click(lambda identity = self.attribute_ids[i], radio = radio_group:
											self.write_all_to_flexPDE(identity, radio))

			text_input_rsf.on_change("value", lambda attr, old, new, radio = radio_group, 
								identity = self.attribute_ids[i], text_input = text_input_rsf, which = "rsf":
								self.update_data(identity, radio, text_input, new, which))


			text_input_sputter.on_change("value", lambda attr, old, new, radio = radio_group, 
								identity = self.attribute_ids[i], text_input = text_input_sputter, which = "sputter":
								self.update_data(identity, radio, text_input, new, which))

			text_input_crater_depth.on_change("value", lambda attr, old, new, radio = radio_group, 
								identity = self.attribute_ids[i], text_input = text_input_crater_depth, which = "crater_depth":
								self.update_data(identity, radio, text_input, new, which))


			#Initialization of actual plotting. 
			tab_plots.append(Panel(child = hplot(figure_obj, 
										   vform(radio_group, save_flexDPE_button, save_all_flexDPE_button), 
										   vform(text_input_rsf, smoothing_button, text_input_sputter, text_input_crater_depth),
										   vform(text_input_xval_integral, text_input_yval_integral, do_integral_button)),
										   title = attr_id))


		"""
		Check to see if one or more element exists in the samples and creat a comparison plot for each 
		of those elements.
		"""
		
		for element in self.all_elements:
			checkers = list(self.all_elements)
			checkers.remove(element)
			if element in checkers and not element in self.elements_comparison:
				self.elements_comparison.append(element)

		"""create plots for each element that is to be compared """
	
		for comparison_element in self.elements_comparison: 

			colour_list = Spectral11 + RdPu9 + Oranges9
			colour_indices = [0, 2, 8, 10, 12, 14, 20, 22, 1, 3, 9, 11, 13, 15]
			figure_obj = figure(plot_width = 1000, plot_height = 800, y_axis_type = "log", title = comparison_element, tools = TOOLS)
			#figure_obj.xaxis.major_label_text_font_size("12pt")
			#figure_obj.yaxis.major_label_text_font_size("12pt")
			

			y_axis_units = []
			x_axis_units = []

			comparison_datasets = []


			for attr_id, color_index in zip(self.attribute_ids, colour_indices):

				list_of_datasets = getattr(self, attr_id)

				for dataset in list_of_datasets:

					if dataset["sample element"] == comparison_element:
						comparison_datasets.append(dataset)
						y_axis_units.append(dataset["y_unit"])
						x_axis_units.append(dataset["x_unit"])

			figure_obj.xaxis.axis_label = comparison_datasets[-1]["x_unit"]
			figure_obj.yaxis.axis_label = comparison_datasets[-1]["y_unit"]

			if not all(x == y_axis_units[-1] for x in y_axis_units):
				for unit, data in zip(y_axis_units, comparison_datasets): 
					if not unit == y_axis_units[-1]:
						figure_obj.extra_y_ranges =  {"foo": Range1d(start = np.amin(data["data"]["y"]),
						end = np.amax(data["data"]["y"]))}
						figure_obj.add_layout(LogAxis(y_range_name = "foo", axis_label = unit), "right")
						break

			if not all(x == x_axis_units[-1] for x in x_axis_units):
				for unit, data in zip(x_axis_units, comparison_datasets): 
					if not unit == x_axis_units[-1]:
						figure_obj.extra_x_ranges =  {"bar": Range1d(start = np.amin(data["data"]["x"]),
						end = np.amax(data["data"]["x"]))}
						figure_obj.add_layout(LinearAxis(x_range_name = "bar", axis_label = unit), "above")
						break


			for attr_id, color_index in zip(self.attribute_ids, colour_indices):

				list_of_datasets = getattr(self, attr_id)

				for dataset in list_of_datasets:

					if dataset["sample element"] == comparison_element:
						color = colour_list[color_index]

						"""
						Logic that ensures that plots get put with corresponding axes.
						"""
						if dataset["x_unit"] != x_axis_units[-1] or dataset["y_unit"] != y_axis_units[-1]:

							if dataset["x_unit"] != x_axis_units[-1] and dataset["y_unit"] != y_axis_units[-1]:

								figure_obj.line("x", "y", source = getattr(self, attr_id+"_"+dataset["sample element"]+"_source"), line_width = 2, 
								line_color = color, legend = attr_id, x_range_name = "bar", y_range_name = "foo")

							elif dataset["x_unit"] != x_axis_units[-1]:

								figure_obj.line("x", "y", source = getattr(self, attr_id+"_"+dataset["sample element"]+"_source"), line_width = 2, 
								line_color = color, legend = attr_id, x_range_name = "bar")

							else: 

								figure_obj.line("x", "y", source = getattr(self, attr_id+"_"+dataset["sample element"]+"_source"), line_width = 2, 
								line_color = color, legend = attr_id, y_range_name = "foo")

						else: 
							figure_obj.line("x", "y", source = getattr(self, attr_id+"_"+dataset["sample element"]+"_source"), line_width = 2, 
							line_color = color, legend = attr_id)
						


			tab_plots.append(Panel(child = figure_obj, title = comparison_element))	

		tabs = Tabs(tabs = tab_plots)

		session = push_session(curdoc())
		session.show()
		session.loop_until_closed()
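
The click and change handlers above bind their per-tab objects through lambda default arguments. A minimal sketch of the same pattern (the Button labels and the handle function are made up for illustration):

from bokeh.models import Button

def handle(identity):
    print("clicked", identity)

buttons = []
for attr_id in ["sample_A", "sample_B"]:
    btn = Button(label=attr_id)
    # without identity=attr_id the lambda would close over the loop variable
    # and every button would end up acting on the last attr_id in the loop
    btn.on_click(lambda identity=attr_id: handle(identity))
    buttons.append(btn)
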
Code example #14
0
File: sliders_app.py Project: rbtr/bokeh
        "PARAMS: offset: %s amplitude: %s", offset.value,
        amplitude.value
    )

    source.data = dict(x=x, y=y)

update_data()

def input_change(attrname, old, new):
    """Executes whenever the input form changes.

    It is responsible for updating the plot, or anything else you want.

    Args:
        attrname : the attr that changed
        old : old value of attr
        new : new value of attr
        """
    update_data()
    plot.title = text.value

# Text box event registration
text.on_change('value', input_change)

# Slider event registration
for w in [offset, amplitude, phase, freq]:
    w.on_change('value', input_change)

# put ourselves in the document
curdoc().add(hbox)
Code example #15
0
File: main.py Project: amcdawes/coincidence
# other widgets (not all are used yet)
phase = Slider(title="phase", value=0.0, start=0.0, end=5.0, step=0.1)
points = Slider(title="data points", value=20, start=0, end=500, step=1)
statsA = Paragraph(text="100", width=400, height=40)
statsB = Paragraph(text="100", width=400, height=40)
g2 = Paragraph(text="100", width=400, height=80)
g2_2d = Paragraph(text="100", width=400, height=40)


# Set up callbacks
def send_command(attrname, old, new):
    # not implemented yet
    # TODO turn into a raw command area for sending any device command
    plot.title.text = command.value

command.on_change('value', send_command)

last_time = time.time()

# start out keeping 20 data points
datapoints = 20

def update_data():
    # TODO: store data in a stream for charting vs time
    # this function is called every 100 ms (set below if you want to change it)

    # keep track of time interval for accurate counting calculations
    global last_time
    T = time.time() - last_time
    last_time = time.time()
    #print(T)
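
The comment above says update_data runs every 100 ms, but the truncated snippet does not show the registration. A hedged sketch of the usual Bokeh server call (period in milliseconds) would be:

from bokeh.io import curdoc

# assumed wiring, not shown in the original snippet
curdoc().add_periodic_callback(update_data, 100)
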
Code example #16
0
File: main.py Project: priya-vijay/NicheLife
text = TextInput(title="Map Name", value="NicheLife Map")
feature1 = Slider(title="Subway Accessibility", value=0.5, start=0, end=1, step=0.1)
feature2 = Slider(title="Safety", value=0.5, start=0, end=1, step=0.1)
feature3 = Slider(title="Public Satisfaction", value=0.5, start=0, end=1, step=0.1)
feature4 = Slider(title="Restaurants", value=0.5, start=0, end=1, step=0.1)
feature5 = Slider(title="Grocery Stores", value=0.5, start=0, end=1, step=0.1)
feature6 = Slider(title="Nightlife", value=0.5, start=0, end=1, step=0.1)
price = Select(title="Show Affordability", options=["Yes", "No"])


# Set up callbacks
def update_title(attrname, old, new):
    p.title = text.value


text.on_change("value", update_title)


def update_data(attrname, old, new):
    # Get the current slider values
    f1user = feature1.value
    f2user = feature2.value
    f3user = feature3.value
    f4user = feature4.value
    f5user = feature5.value
    f6user = feature6.value
    showprice = price.value

    # Calculate score based on user input
    qivals = getscore([f1user, f2user, f3user, f4user, f5user, f6user])
Code example #17
0
                       value=10,
                       step=1)
salary_slider.on_change('value', lambda attr, old, new: update())

city = Select(title="Location",
              value="All",
              options=open('city.txt').read().split('\n'))
city.on_change('value', lambda attr, old, new: update())

company = Select(title="Company",
                 value="All",
                 options=open('company.txt').read().split('\n'))
company.on_change('value', lambda attr, old, new: update())

keyword = TextInput(title="Keywords")
keyword.on_change('value', lambda attr, old, new: update())


def select_jobs():
    company_val = str(company.value)
    city_val = str(city.value)
    kwd_val = str(keyword.value)

    selected = data[data.Salary_Upper_Bound >= salary_slider.value]

    if (company_val != "All"):
        selected = selected[selected.Company.str.contains(company_val) == True]
    if (city_val != "All"):
        selected = selected[selected.City.str.contains(city_val) == True]
    if (kwd_val != ""):
        selected = selected[selected.Description.str.contains(kwd_val) == True]
Code example #18
0
File: main.py Project: alxsoares/zjsxzy_in_js
    # absolute-value curve
    source_absolute.data = source_absolute.from_df(
        pd.DataFrame({'ts': dataframe['absolute']}))

    # word table
    data = pd.read_csv("%s/%s_%s_words.csv" % (DATA_DIR, word, threshold))
    source_table.data = {'word': data['word'], 'distance': data['distance']}


years_selections = [str(year) for year in range(2010, 2018)]
year_select = Select(value="2013",
                     title="开始年份",
                     width=200,
                     options=years_selections)
year_select.on_change("value", lambda attr, old, new: update_data())
slider = TextInput(title="阈值", value="0.3")
# slider = Slider(title="阈值", start=0.0, end=1.0, value=0.3, step=0.1)
slider.on_change('value', lambda attr, old, new: update_data())
text = TextInput(title="关键词(例如:MPA、房地产、通胀)", value=u'楼市')
text.on_change('value', lambda attr, old, new: update_data())

update_data()

# Set up layouts and add to document
inputs = widgetbox(text, slider, year_select)
table = widgetbox(data_table)

curdoc().add_root(
    row(inputs, table, plot_absolute, plot, plot_weighted, width=800))
curdoc().title = u"关键词历史热度"
Code example #19
0
class Market():
    ###########################################################################
    # PLOT WIDGET
    ###########################################################################
    # +----------------------------+
    # | +-----------+ +----------+ |
    # | | start_btn | | end_btn  | |
    # | +-----------+ +----------+ |
    # +----------------------------+
    # |                            |
    # |          plot              |
    # |                            |
    # +----------------------------+
    ###########################################################################

    def __init__(self,
                 start=(date.today() - relativedelta(years=3)),
                 end=date.today()):
        self.start = str(start)
        self.end = str(end)
        self.sp500 = core.market.SP500('dataset/tickers.h5')
        symbols = self.sp500.store['info']['symbol'].values.tolist()
        # log.info("{}".format(symbols))
        # Select tick button
        title = "Company tick"
        self.select_tick = Select(title=title,
                                  value=symbols[0],
                                  options=symbols)
        # Inputs buttons
        self.start_date = TextInput(value=self.start, title='start')
        self.end_date = TextInput(value=self.end, title='end')
        # layout
        self.plot_layout = self.candle_plot(symbols[0])

        self.start_date.on_change('value', self.on_start_date_change)
        self.end_date.on_change('value', self.on_end_date_change)
        self.select_tick.on_change('value', self.on_tick_selection_change)
        self.layout = layout(
            [[self.select_tick, self.start_date, self.end_date],
             [self.plot_layout]])

    def candle_plot(self, ticker, index_name='date'):
        self.df = self.sp500.get_ticker_stocks(ticker, self.start, self.end)
        self.df = plot.normalize_name(self.df)
        self.df = self.df.set_index(index_name)
        index = self.df.index.get_level_values(index_name)
        stock_data = {index_name: index}

        for val in self.df.columns.values:
            stock_data[val] = self.df[val]
        # log.info(stock_data)

        source = ColumnDataSource(data=dict(stock_data))
        hover = HoverTool(tooltips=[('date', '@date{%F}'),
                                    ('adj close', '$@adj_close{%0.2f}'),
                                    ('adj open', '$@adj_open{%0.2f}'),
                                    ('volume', '@volume{0.00 a}'),
                                    ('open', '@open{%0.2f}'),
                                    ('close', '@close{%0.2f}')],
                          formatters={
                              'date': 'datetime',
                              'adj_close': 'printf',
                              'adj_open': 'printf',
                              'open': 'printf',
                              'close': 'printf'
                          },
                          mode='vline')
        inc = self.df['close'] > self.df['open']
        dec = self.df['open'] > self.df['close']
        w = 12 * 60 * 60 * 1000  # half day in ms
        p = figure(x_axis_type="datetime",
                   plot_width=1600,
                   title=ticker,
                   tools="xwheel_zoom, xpan, reset, save",
                   active_drag="xpan")

        p.add_tools(hover)
        p.toolbar.logo = None
        p.grid.grid_line_alpha = 0.3
        p.xaxis.major_label_orientation = pi / 4
        p.xaxis.axis_label = 'Date'
        p.yaxis.axis_label = 'Price'

        p.line(index_name, 'adj_close', color='#A6CEE3', source=source)
        p.line(index_name, 'adj_open', color='#FB9A99', source=source)
        p.segment(index_name,
                  'high',
                  index_name,
                  'low',
                  color="white",
                  source=source)
        p.vbar(index[inc],
               w,
               self.df['open'][inc],
               self.df['close'][inc],
               fill_color="#D5E1DD",
               line_color="white")
        p.vbar(index[dec],
               w,
               self.df['open'][dec],
               self.df['close'][dec],
               fill_color="#F2583E",
               line_color="white")
        p.legend.location = "top_left"
        p.background_fill_color = "black"
        columns = [
            TableColumn(field="date", title="date", formatter=DateFormatter())
        ]
        for key in stock_data.keys():
            if key != 'date':
                columns.append(TableColumn(field=key, title=key))

        # Layout
        return column(p, DataTable(source=source, columns=columns, width=1600))

    # Callbacks
    def on_tick_selection_change(self, attr, old, new):
        log.debug('VALUE: old {} | new {}'.format(old, new))
        self.layout.children[1] = self.candle_plot(new)

    def on_start_date_change(self, attr, old, new):
        log.debug('VALUE: old {} | new {}'.format(old, new))
        self.start = new
        self.layout.children[1] = self.candle_plot(self.select_tick.value)

    def on_end_date_change(self, attr, old, new):
        log.debug('VALUE: old {} | new {}'.format(old, new))
        self.end = new
        self.layout.children[1] = self.candle_plot(self.select_tick.value)
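
A hedged usage sketch for the class above; running it as a Bokeh server application is an assumption, not something stated in the snippet:

from bokeh.io import curdoc

market = Market()                  # defaults to the last three years of data
curdoc().add_root(market.layout)   # launch with: bokeh serve --show <app directory>
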
Code example #20
0
File: pyTree.py Project: GianArauz/pyTree
    # Filtering both DataFrames and creating the CDSs in this callback
    cb_cds = ColumnDataSource(df[μ].copy())

    # Updating old CDSs data with new CDSs data
    cds.data.update(cb_cds.data)

    # Creating title with the number filtered-in trees of the given species
    fig.title.text = f'There are {trees} {species} in Barcelona.'


# %% ATTACHING BOKEH CALLBACKS ================================================
# =============================================================================

# Attaching the Text Input widget to the Text Input callback
ti_wg.on_change('value', ti_callback)

# %% CREATING BOKEH LAYOUT ====================================================
# =============================================================================

# Creating a layout combining the slider and the figure
layout = column(ti_wg, fig)

# Adding the layout to the current document
curdoc().add_root(layout)

# Defining a title for our current document
curdoc().title = 'pyTree dashboard'

# %% INVOKING BOKEH SERVER ====================================================
# =============================================================================
Code example #21
0
File: main.py Project: guillochon/catexplorer
        p.y_range.start = max(nys) + y_buf
        p.y_range.end = min(nys) - y_buf

    mld = {}
    mldf = {}
    for nd in nds:
        mld.setdefault('xs',[]).append(nd['x'] if len(nd['x']) else [0.0])
        mld.setdefault('ys',[]).append(nd['y'] if len(nd['y']) else [0.0])
        mld.setdefault('colors',[]).append(plotting.bandcolorf(nd['band']) if len(nd['x']) else 'white')
        mld.setdefault('lws',[]).append(2)

        mldf.setdefault('xs',[]).extend(nd['x'] if len(nd['x']) else [])
        mldf.setdefault('ys',[]).extend(nd['y'] if len(nd['y']) else [])
        mldf.setdefault('colors',[]).extend([plotting.bandcolorf(nd['band']) for x in range(len(nd['x']))])
        
    mlobs.data_source.data = mld
    circobs.data_source.data = mldf

def bandcb(attrname, old, new):
    callback()

def namecb(attrname, old, new):
    callback()

namefield.on_change('value', namecb)
bandfield.on_change('value', bandcb)
button.on_click(callback)

# put the button and plot in a layout and add to the document
curdoc().add_root(column(p, row(namefield, bandfield), button))
Code example #22
0
File: stock-test.py Project: sersei/Log-Plot
toolset = "box_zoom,resize,pan,reset,save,xwheel_zoom"
plot_zoomed = Figure(plot_width=1000, plot_height=500, x_axis_type="datetime",tools=toolset, lod_factor=100,lod_interval=1000)
plot_zoomed.line('x', 'y',source=source_zoomed, color='navy', alpha=0.5)
plot = Figure(plot_width=PLOT_WIDTH, plot_height=250, x_axis_type="datetime",toolbar_location=None,lod_factor=100,lod_interval=1000)
plot.line('x', 'y',source=source_small, color='navy', alpha=0.5)
plot.y_range.start=Min_Y
plot.y_range.end=Max_Y
glyph = Quad(left="left", right="right", top="top", bottom="bottom", fill_color="#b3de69", fill_alpha=0.1)
plot.add_glyph(sourceQuad, glyph)

RangeStartX=0
RangeEndX=1
text_start_x = TextInput(title="X range start", name='x_range_start', value="0")
text_end_x = TextInput(title="X range end", name='x_range_end', value="1")
text_start_x.on_change('value', update_data)
text_end_x.on_change('value', update_data)

toolset = "box_zoom,resize,pan,reset,save,x_wheel_zoom"

plot_zoomed.x_range.callback = CustomJS(args=dict(xrange=plot_zoomed.x_range,start_x=text_start_x,end_x=text_end_x),code="""
var start = xrange.get("start");
var end = xrange.get("end");
start_x.set("value",start.toString());
end_x.set("value",end.toString());
start_x.trigger('change');
end_x.trigger('change');
""")

curdoc().add_root(VBoxForm(children=[plot_zoomed,plot], width=1000))
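
The CustomJS above relies on the Backbone-style get/set calls and the Range.callback property, both removed in later Bokeh releases. A hedged sketch of equivalent wiring with js_on_change, assuming a recent Bokeh version:

from bokeh.models import CustomJS

sync_range = CustomJS(args=dict(start_x=text_start_x, end_x=text_end_x), code="""
    start_x.value = cb_obj.start.toString();
    end_x.value = cb_obj.end.toString();
""")
plot_zoomed.x_range.js_on_change('start', sync_range)
plot_zoomed.x_range.js_on_change('end', sync_range)
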
Code example #23
0
	def plotting(self):

		if self.debug:
			self.debug_file = open("debug_output.txt", "w")
			self.debug_file.write("Initialized plotting subroutine \n")
			 

		TOOLS="pan,wheel_zoom,box_zoom,reset,hover,previewsave"

		tab_plots = []
		self.all_elements = []
		self.elements_comparison = []

		for filename in self.filenames:
			if "ITO" in filename:
				tab_plots.append(self.mass_plotting(filename))
				continue
	
			data_dict = self.data_generation(filename)
			self.data_test(data_dict)

			name_check = data_dict["gen_info"]["DATA FILES"]
			attr_id = name_check[1][4][:-3] + "_" + name_check[2][2]
			self.attribute_ids.append(attr_id)

			attr_extra_y_ranges = False
			attr_extra_x_ranges = False

			local_source_line = []

			"""
			create plots for each datafile and put them in a tab.
			"""

			y_axis_units = [x["y_unit"] for x in data_dict["data"]]
			x_axis_units = [x["x_unit"] for x in data_dict["data"]]

			figure_obj = figure(plot_width = 1000, plot_height = 800, y_axis_type = "log",
			title = attr_id, tools = TOOLS)
			#figure_obj.axes.major_label_text_font_size("12pt")
			#figure_obj.major_label_text_font_size("12pt")

			hover = figure_obj.select(dict(type = HoverTool))
			hover.tooltips = [
							("Element:", "@element"),
							("(x, y):", "($x, $y)")]

			self.figure_data.append((figure_obj, data_dict))
		
			figure_obj.yaxis.axis_label = y_axis_units[0]
			figure_obj.xaxis.axis_label = x_axis_units[0]

			if not all(x == y_axis_units[0] for x in y_axis_units):
				for unit, dataset in zip(y_axis_units, data_dict["data"]): 
					if not unit == y_axis_units[0]:
						
						attr_extra_y_ranges = True  # record that an extra y-axis was added

						if self.debug:
							  
							self.debug_file.write("Added extra y-axis for file_id: %s, element: %s | New length %g \n" 
								%(attr_id, dataset["sample_element"], len(figure_obj.yaxis)))
							 

						figure_obj.extra_y_ranges =  {"foo": Range1d(start = np.amin(dataset["y"]),
						end = np.amax(dataset["y"]))}
						figure_obj.add_layout(LogAxis(y_range_name = "foo", axis_label = unit), "right")
						break

			if not all(x == x_axis_units[0] for x in x_axis_units):
				for unit, dataset in zip(x_axis_units, data_dict["data"]): 
					if not unit == x_axis_units[0]:
						
						attr_extra_x_ranges = True  # record that an extra x-axis was added
						
						if self.debug:
							  
							self.debug_file.write("Added extra x-axis for file_id: %s, element: %s. | New length %g \n" 
								%(attr_id, dataset["sample_element"], len(figure_obj.yaxis)))
							 
			
						figure_obj.extra_x_ranges =  {"bar": Range1d(start = np.amin(dataset["x"]),
						end = np.amax(dataset["x"]))}
						figure_obj.add_layout(LinearAxis(x_range_name = "bar", axis_label = unit), "above")
						break

			figure_obj.xaxis.axis_label = x_axis_units[0]
			colour_list = Spectral11 + RdPu9 + Oranges9
			colour_indices = [0, 2, 8, 10, 12, 14, 20, 22, 1, 3, 9, 11, 13, 15]


			list_of_elements = []
			source_list = []
			line_list = []

			for dataset, color_index in zip(data_dict["data"], colour_indices):

				self.all_elements.append(dataset["sample_element"]) #strip isotope number 
				color = colour_list[color_index]

				source = ColumnDataSource(data = dataset) #Datastructure for source of plotting

				self.source_test(source)

				list_of_elements.append(dataset["sample_element"])
				line_glyph = figure_obj.line("x", "y", source = source, 
							line_width = 2,
							line_color = color, 
							legend = dataset["sample_element"])

				if self.debug:
					self.debug_file.write("Create line object on figure %s  at %s \n" %(id(figure_obj), id(line_glyph)))
					 

				line_list.append(line_glyph)
				source_list.append(source)

			local_source_line.append([[source, line] for source, line in zip(source_list, line_list)])
			self.source_line.append(local_source_line)

			#Calculations on the dataset
			text_input_rsf = TextInput(value = "default", title = "RSF or SF (at/cm^3): ")
			do_integral_button = Button(label = "Calibration integral")
			smoothing_button = Button(label = "smth selct elem")
			matplot_button = Button(label = "Create matplotlib fig")

			text_input_sputter = TextInput(value = "default", title = "Sputter speed: number unit")
			text_input_crater_depth = TextInput(value = "default", title = "Depth of crater in: number unit")
			


			radio_group = RadioGroup(labels = list_of_elements, active=0)


			text_input_xval_integral = TextInput(value = "0", title = "x-delimiter ")
			text_input_dose = TextInput(value = "0", title = "Dose[cm^-2] ")

			#Save files for later use
			save_flexDPE_button = Button(label = "Save element for FlexPDE")
			save_all_flexDPE_button = Button(label = "Save all elements for FlexPDE")
			save_textfile_button = Button(label = "Save data in textfile")

			#Pointers to methods on click / change handlers
			radio_group.on_change("active", lambda attr, old, new: None)

			matplot_button.on_click(lambda source_list = source_list:
										self.matplotlib_export(source_list))
			
			do_integral_button.on_click(lambda 
											source_list = source_list, 
											line_list = line_list, 
											source_line = self.source_line,
											figure_data = self.figure_data,
											data_dict = data_dict,
											radio = radio_group,
											x_box = text_input_xval_integral, 
											dose = text_input_dose,
											extra_y_ranges = attr_extra_y_ranges: 
										self.integrate(data_dict, source_list, line_list, source_line, figure_data, radio, x_box, dose, extra_y_ranges))

			smoothing_button.on_click(lambda 
										source_list = source_list,
										radio = radio_group, 
										data_dict = data_dict,
										x_box = text_input_xval_integral: 
									self.smoothing(source_list, data_dict, radio, x_box) )

			save_flexDPE_button.on_click(lambda 
											source_list = source_list,
											attrname = attr_id,
											radio = radio_group: 
										self.write_to_flexPDE(source_list, attrname, radio))

			save_all_flexDPE_button.on_click(lambda 
												source_list = source_list, 
												attrname = attr_id:
												self.write_all_to_flexPDE(source_list, attrname))

			save_textfile_button.on_click(lambda 
											data_dict = data_dict, 
											source_list = source_list,
											attrname = attr_id,
											radio = radio_group:
											self.write_new_datafile(data_dict, source_list, attrname,radio))


			text_input_rsf.on_change("value", lambda attr, old, new, 
												radio = radio_group, 
												data_dict = data_dict,
												figure = figure_obj,
												source_list = source_list,
												text_input = text_input_rsf,
												line_list = line_list,
												which = "rsf":
												self.update_data(line_list, data_dict, source_list, figure, radio, text_input, new, which))


			text_input_sputter.on_change("value", lambda attr, old, new, 
													radio = radio_group, 
													data_dict = data_dict,
													figure = figure_obj,
													source_list = source_list, 
													text_input = text_input_sputter,
													which = "sputter":
													self.update_data(data_dict, source_list, figure, radio, text_input, new, which))

			text_input_crater_depth.on_change("value", lambda attr, old, new, 
														radio = radio_group, 
														data_dict = data_dict,
														source_list = source_list,
														figure = figure_obj,
														text_input = text_input_crater_depth, 
														which = "crater_depth":
														self.update_data(data_dict, source_list, figure, radio, text_input, new, which))


			#Initialization of actual plotting. 
			tab_plots.append(Panel(child = hplot(figure_obj, 
										   vform(
										   vform(radio_group, save_flexDPE_button, save_all_flexDPE_button, save_textfile_button, matplot_button), 
										   vform(text_input_rsf, smoothing_button, text_input_sputter, text_input_crater_depth)
										   ),
										   vform(text_input_xval_integral, text_input_dose, do_integral_button)),
										   title = attr_id))



		"""
		Check to see if one or more elements exist in the samples and create a comparison plot
		for each of those elements.
		"""
		
		for element in self.all_elements:
			checkers = list(self.all_elements)
			checkers.remove(element)
			if element in checkers and not element in self.elements_comparison:
				self.elements_comparison.append(element)

		"""create plots for each element that is to be compared """
	
		for comparison_element in self.elements_comparison: 

			figure_obj = figure(plot_width = 1000, plot_height = 800, y_axis_type = "log", title = comparison_element, tools = TOOLS)
			#figure_obj.xaxis.major_label_text_font_size("12pt")
			#figure_obj.yaxis.major_label_text_font_size("12pt")
			
			y_axis_units = []
			x_axis_units = []

			comparison_datasets = []

			for data_dict_iter in self.column(self.figure_data, 1):

				for dataset in data_dict_iter["data"]:

					if dataset["sample_element"] == comparison_element:
						comparison_datasets.append(dataset)
						y_axis_units.append(dataset["y_unit"])
						x_axis_units.append(dataset["x_unit"])

			figure_obj.xaxis.axis_label = comparison_datasets[-1]["x_unit"]
			figure_obj.yaxis.axis_label = comparison_datasets[-1]["y_unit"]

			if not all(x == y_axis_units[-1] for x in y_axis_units):
				for unit, data in zip(y_axis_units, comparison_datasets): 
					if not unit == y_axis_units[-1]:
						figure_obj.extra_y_ranges =  {"foo": Range1d(start = np.amin(data["y"]),
						end = np.amax(data["y"]))}
						figure_obj.add_layout(LogAxis(y_range_name = "foo", axis_label = unit), "right")
						break

			if not all(x == x_axis_units[-1] for x in x_axis_units):
				for unit, data in zip(x_axis_units, comparison_datasets): 
					if not unit == x_axis_units[-1]:
						figure_obj.extra_x_ranges =  {"bar": Range1d(start = np.amin(data["x"]),
						end = np.amax(data["x"]))}
						figure_obj.add_layout(LinearAxis(x_range_name = "bar", axis_label = unit), "above")
						break

			active_sources = []
			for data_dict, source_line_nested, attr_id, color_index  in zip(self.column(self.figure_data, 1), self.source_line,  self.attribute_ids,  colour_indices):

				for dataset, source_lis_coup, in zip(data_dict["data"], source_line_nested[0]):
					
					source_local = source_lis_coup[0]
					active_sources.append(source_local)

					self.source_test(source_local)
					self.source_dataset_test(source_local, dataset)

					if dataset["sample_element"] == comparison_element:
						color = colour_list[color_index]

						"""
						Logic that ensures that plots get put with corresponding axes.
						"""
						if dataset["x_unit"] != x_axis_units[-1] or dataset["y_unit"] != y_axis_units[-1]:

							if dataset["x_unit"] != x_axis_units[-1] and dataset["y_unit"] != y_axis_units[-1]:
								name_check = data_dict["gen_info"]["DATA FILES"]
								attr_id = name_check[1][4][:-3] + "_" + name_check[2][2]

								figure_obj.line("x", "y", source = source_local,
								line_width = 2, 
								line_color = color, 
								legend = attr_id,
								x_range_name = "bar", 
								y_range_name = "foo")

							elif dataset["x_unit"] != x_axis_units[-1]:

								figure_obj.line("x", "y", source = source_local,
								line_width = 2, 
								line_color = color, 
								legend = attr_id, 
								x_range_name = "bar")

							else: 

								figure_obj.line("x", "y", source = source_local,
								line_width = 2, 
								line_color = color, 
								legend = attr_id, 
								y_range_name = "foo")

						else: 
							figure_obj.line("x", "y", source = source_local,
							line_width = 2, 
							line_color = color, 
							legend = attr_id)


			matplot_button = Button(label = "Create matplotlib fig")
			save_all_flexDPE_button = Button(label = "Save all elements for FlexPDE")

			matplot_button.on_click(lambda source_list = active_sources:
							self.matplotlib_export(source_list))	

			save_all_flexDPE_button.on_click(lambda 
									source_list = active_sources, 
									attrname = comparison_element:
									self.write_all_to_flexPDE(source_list, attrname))


			tab_plots.append(Panel(child = hplot(figure_obj, vform(save_all_flexDPE_button, matplot_button)), 
				title = comparison_element))	


		tabs = Tabs(tabs = tab_plots)
		#curdoc().add_root(tabs)
		session = push_session(curdoc())
		session.show()
		session.loop_until_closed()
Code example #24
0
File: browse.py Project: JasperYH/MatchAnnot
geneSource = ColumnDataSource(data=dict(Gene=[], Cluster=[]))

df = pd.DataFrame()
boundaryDF = pd.DataFrame()
colorDF = pd.DataFrame()
outDF = pd.DataFrame()


Console = PreText(
    text="Console:\nStart visualize by entering \nannotations, pickle file and gene.\nPress Enter to submit.\n",
    width=250,
    height=100,
)
p = createPlot(df, boundaryDF)

Gene.on_change("value", updateGene)
Full.on_change("value", updateFP)
Partial.on_change("value", updateFP)
Alpha.on_change("value", updateFP)
Cluster.on_change("value", updateGroup)
Save.on_change("value", saveFasta)
Width.on_change("value", updateWidth)

dataColumns = [TableColumn(field="Gene", title="Gene"), TableColumn(field="Cluster", title="Cluster")]
data_table = DataTable(source=geneSource, columns=dataColumns, width=200, height=1200)

paramSource = ColumnDataSource(
    data=dict(
        Parameter=[
            "annotation",
            "format",
Code example #25
0
File: main.py Project: Punchwes/linkedin-for-SCU
        here=np.loadtxt('here5.csv',str,delimiter='\n')
        here=list(here)

        here_height=df_counts['numbers']
        y_here_height=here_height/2.0

        source.data=dict(
            x=here[:10],
            y=y_here_height[:10],
            height=here_height[:10]
            )
        plot.tools=[hover]
        plot.xaxis.major_label_orientation=np.pi/3
        plot.x_range.factors=here[:10]
        new_height=here_height[:10].values
        plot.y_range.end=new_height[0]+200
    #plot.rect(x=axis_map[x_axis.value],y=height_map[x_axis.value],height=height_map[x_axis.value]*2.0)
    

#plot.x_range.on_change('_bounds_as_factors',update)
x_axis.on_change('value',update)
cities.on_change('value',update)
companies.on_change('value',update)
industries.on_change('value',update)

#filters=VBox(x_axis)
#tot=HBox(filters,plot)
#show(tot)
curdoc().add_root(HBox(inputs,plot))

Code example #26
# initialize data source
source_curve = ColumnDataSource(data=dict(x=[], y=[]))
source_point = ColumnDataSource(data=dict(x=[], y=[]))
source_sector = ColumnDataSource(data=dict(x=[], y=[]))
source_lines = ColumnDataSource(data=dict(x_start=[], y_start=[], x_end=[], y_end=[]))
source_text = ColumnDataSource(data=dict(area=[]))

# initialize controls
# slider controlling the current parameter t
t_value_input = Slider(title="parameter t", name='parameter t', value=leibnitz_settings.t_value_init,
                       start=leibnitz_settings.t_value_min, end=leibnitz_settings.t_value_max,
                       step=leibnitz_settings.t_value_step)
t_value_input.on_change('value', t_value_change)
# text input for the x component of the curve
x_component_input = TextInput(value=leibnitz_settings.x_component_input_msg, title="curve x")
x_component_input.on_change('value', curve_change)
# text input for the y component of the curve
y_component_input = TextInput(value=leibnitz_settings.y_component_input_msg, title="curve y")
y_component_input.on_change('value', curve_change)
# dropdown menu for selecting one of the sample curves
sample_curve_input = Dropdown(label="choose a sample function pair or enter one below",
                              menu=leibnitz_settings.sample_curve_names)
sample_curve_input.on_change('value', sample_curve_change)

# initialize plot
toolset = "crosshair,pan,reset,save,wheel_zoom"
# Generate a figure container
plot = Figure(plot_height=400, plot_width=400, tools=toolset,
              title="Leibnitz sector formula",
              x_range=[leibnitz_settings.x_min_view, leibnitz_settings.x_max_view],
              y_range=[leibnitz_settings.y_min_view, leibnitz_settings.y_max_view])
Code example #27
def plot():

    # FIGURES AND X-AXIS
    fig1 = Figure(title = 'Dive Profile',  plot_width = WIDTH, plot_height = HEIGHT, tools = TOOLS)
    fig2 = Figure(title = 'Dive Controls', plot_width = WIDTH, plot_height = HEIGHT, tools = TOOLS, x_range=fig1.x_range)
    fig3 = Figure(title = 'Attitude',      plot_width = WIDTH, plot_height = HEIGHT, tools = TOOLS, x_range=fig1.x_range)
    figs = gridplot([[fig1],[fig2],[fig3]])

    # Formatting x-axis
    timeticks = DatetimeTickFormatter(formats=dict(seconds =["%b%d %H:%M:%S"],
                                                   minutes =["%b%d %H:%M"],
                                                   hourmin =["%b%d %H:%M"],
                                                   hours =["%b%d %H:%M"],
                                                   days  =["%b%d %H:%M"],
                                                   months=["%b%d %H:%M"],
                                                   years =["%b%d %H:%M %Y"]))
    fig1.xaxis.formatter = timeticks
    fig2.xaxis.formatter = timeticks
    fig3.xaxis.formatter = timeticks

    # removing gridlines
    fig1.xgrid.grid_line_color = None
    fig1.ygrid.grid_line_color = None
    fig2.xgrid.grid_line_color = None
    fig2.ygrid.grid_line_color = None
    fig3.xgrid.grid_line_color = None
    fig3.ygrid.grid_line_color = None

    # INPUT WIDGETS
    collection_list = CONN[DB].collection_names(include_system_collections=False)
    gliders = sorted([platformID for platformID in collection_list if len(platformID)>2])
    gliders = Select(title = 'PlatformID', value = gliders[0], options = gliders)
    prev_glider = Button(label = '<')
    next_glider = Button(label = '>')
    glider_controlbox = HBox(children = [gliders, prev_glider, next_glider], height=80)

    chunkations = Select(title = 'Chunkation', value = 'segment', options = ['segment', '24hr', '30days', '-ALL-'])
    chunk_indicator = TextInput(title = 'index', value = '0')
    prev_chunk = Button(label = '<')
    next_chunk = Button(label = '>')
    chunk_ID   = PreText(height=80)
    chunk_controlbox = HBox(chunkations,
                            HBox(chunk_indicator, width=25),
                            prev_chunk, next_chunk,
                            chunk_ID,
                            height = 80)

    control_box = HBox(glider_controlbox,
                        chunk_controlbox)

    # DATA VARS
    deadby_date = ''
    depth    = ColumnDataSource(dict(x=[],y=[]))
    vert_vel = ColumnDataSource(dict(x=[],y=[]))

    mbpump   = ColumnDataSource(dict(x=[],y=[]))
    battpos  = ColumnDataSource(dict(x=[],y=[]))
    pitch    = ColumnDataSource(dict(x=[],y=[]))

    mfin      = ColumnDataSource(dict(x=[],y=[]))
    cfin      = ColumnDataSource(dict(x=[],y=[]))
    mroll     = ColumnDataSource(dict(x=[],y=[]))
    mheading = ColumnDataSource(dict(x=[],y=[]))
    cheading = ColumnDataSource(dict(x=[],y=[]))

    # AXIS setup
    colors = COLORS[:]

    fig1.y_range.flipped = True
    fig1.yaxis.axis_label = 'm_depth (m)'
    fig1.extra_y_ranges = {'vert_vel': Range1d(start=-50, end=50),
                           'dummy':    Range1d(start=0, end=100)}
    fig1.add_layout(place = 'right',
                    obj = LinearAxis(y_range_name = 'vert_vel',
                                     axis_label   = 'vertical velocity (cm/s)'))
    fig1.add_layout(place = 'left',
                    obj = LinearAxis(y_range_name = 'dummy',
                                     axis_label   = ' '))
    fig1.yaxis[1].visible = False
    fig1.yaxis[1].axis_line_alpha = 0
    fig1.yaxis[1].major_label_text_alpha = 0
    fig1.yaxis[1].major_tick_line_alpha = 0
    fig1.yaxis[1].minor_tick_line_alpha = 0


    fig2.yaxis.axis_label = 'pitch (deg)'
    fig2.y_range.start, fig2.y_range.end = -40,40
    fig2.extra_y_ranges = {'battpos': Range1d(start=-1, end = 1),
                           'bpump':   Range1d(start=-275, end=275)}
    fig2.add_layout(place = 'right',
                    obj = LinearAxis(y_range_name = 'battpos',
                                     axis_label = 'battpos (in)'))
    fig2.add_layout(place = 'left',
                    obj = LinearAxis(y_range_name = 'bpump',
                                     axis_label   = 'bpump (cc)'))
    fig2.yaxis[1].visible = False # necessary for spacing. later gets set to true


    fig3.yaxis.axis_label = 'fin/roll (deg)'
    fig3.y_range.start, fig3.y_range.end = -30, 30
    fig3.extra_y_ranges = {'heading': Range1d(start=0, end=360), #TODO dynamic avg centering
                           'dummy':   Range1d(start=0, end=100)}
    fig3.add_layout(place = 'right',
                    obj = LinearAxis(y_range_name = 'heading',
                                     axis_label   = 'headings (deg)'))
    fig3.add_layout(place = 'left',
                    obj = LinearAxis(y_range_name = 'dummy',
                                     axis_label   = ' '))
    fig3.yaxis[1].visible = False
    fig3.yaxis[1].axis_line_alpha = 0
    fig3.yaxis[1].major_label_text_alpha = 0
    fig3.yaxis[1].major_tick_line_alpha = 0
    fig3.yaxis[1].minor_tick_line_alpha = 0

    # PLOT OBJECTS
    fig1.line(  'x', 'y', source = depth,    legend = 'm_depth',     color = 'red')
    fig1.circle('x', 'y', source = depth,    legend = 'm_depth',     color = 'red')
    fig1.line(  'x', 'y', source = vert_vel, legend = 'vert_vel',    color = 'green',     y_range_name = 'vert_vel')
    fig1.circle('x', 'y', source = vert_vel, legend = 'vert_vel',    color = 'green',     y_range_name = 'vert_vel')
    fig1.renderers.append(Span(location = 0, dimension = 'width',    y_range_name = 'vert_vel',
                               line_color= 'green', line_dash='dashed', line_width=1))

    fig2.line(  'x', 'y', source = pitch,   legend = "m_pitch",    color = 'indigo')
    fig2.circle('x', 'y', source = pitch,   legend = "m_pitch",    color = 'indigo')
    fig2.line(  'x', 'y', source = battpos, legend = 'm_battpos',  color = 'magenta',   y_range_name = 'battpos')
    fig2.circle('x', 'y', source = battpos, legend = 'm_battpos',  color = 'magenta',   y_range_name = 'battpos')
    fig2.line(  'x', 'y', source = mbpump,  legend = "m_'bpump'",  color = 'blue',      y_range_name = 'bpump')
    fig2.circle('x', 'y', source = mbpump,  legend = "m_'bpump'",  color = 'blue',      y_range_name = 'bpump')
    fig2.renderers.append(Span(location = 0, dimension = 'width',
                               line_color= 'black', line_dash='dashed', line_width=1))
    fig3.line(  'x', 'y', source = mfin,       legend = 'm_fin',     color = 'cyan')
    fig3.circle('x', 'y', source = mfin,       legend = 'm_fin',     color = 'cyan')
    fig3.line(  'x', 'y', source = cfin,       legend = 'c_fin',     color = 'orange')
    fig3.circle('x', 'y', source = cfin,       legend = 'c_fin',     color = 'orange')
    fig3.line(  'x', 'y', source = mroll,      legend = 'm_roll',    color = 'magenta')
    fig3.circle('x', 'y', source = mroll,      legend = 'm_roll',    color = 'magenta')
    fig3.line(  'x', 'y', source = mheading,   legend = 'm_heading', color = 'blue',    y_range_name = 'heading')
    fig3.circle('x', 'y', source = mheading,   legend = 'm_heading', color = 'blue',    y_range_name = 'heading')
    fig3.line(  'x', 'y', source = cheading,   legend = 'c_heading', color = 'indigo',  y_range_name = 'heading')
    fig3.circle('x', 'y', source = cheading,   legend = 'c_heading', color = 'indigo',  y_range_name = 'heading')
    fig3.renderers.append(Span(location = 0, dimension = 'width',    y_range_name = 'default',
                               line_color= 'black', line_dash='dashed', line_width=1))

    # CALLBACK FUNCS
    def update_data(attrib,old,new):
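        # Clear every data source, reload it for the selected glider and chunk,
        # then convert radian sensor channels to degrees for display.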
        g = gliders.value
        chnk = chunkations.value
        chindex = abs(int(chunk_indicator.value))

        depth.data    = dict(x=[],y=[])
        vert_vel.data = dict(x=[],y=[])
        mbpump.data   = dict(x=[],y=[])
        battpos.data  = dict(x=[],y=[])
        pitch.data    = dict(x=[],y=[])

        mfin.data     = dict(x=[],y=[])
        cfin.data     = dict(x=[],y=[])
        mroll.data    = dict(x=[],y=[])
        mheading.data = dict(x=[],y=[])
        cheading.data = dict(x=[],y=[])


        depth.data,startend   = load_sensor(g, 'm_depth', chnk, chindex)

        if chnk == 'segment':
            xbd = startend[2]
            chunk_ID.text = '{} {} \n{} ({}) \nSTART: {} \nEND:   {}'.format(g, xbd['mission'],
                                                                             xbd['onboard_filename'], xbd['the8x3_filename'],
                                                                             e2ts(xbd['start']), e2ts(xbd['end']))
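            # If this segment holds no usable depth samples, automatically step
            # to the adjacent chunk instead of drawing an empty plot.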
            if len(set(depth.data['x']))<=1 and attrib == 'chunk':
                if old > new:
                    next_chunk.clicks += 1
                else:
                    prev_chunk.clicks += 1
                return
            elif len(set(depth.data['x']))<=1 and chunk_indicator.value == 0:
                chunk_indicator.value = 1

        elif chnk in ['24hr', '30days']:
            chunk_ID.text = '{} \nSTART: {} \nEND:   {}'.format(g, e2ts(startend[0]), e2ts(startend[1]))
        elif chnk == '-ALL-':
            chunk_ID.text = '{} \nSTART: {} \nEND:   {}'.format(g,e2ts(depth.data['x'][0] /1000),
                                                                  e2ts(depth.data['x'][-1]/1000))


        vert_vel.data  = calc_vert_vel(depth.data)

        mbpump.data,_     = load_sensor(g, 'm_de_oil_vol', chnk, chindex)
        if len(mbpump.data['x']) > 1:
            #for yax in fig2.select('mbpump'):
            #    yax.legend = 'm_de_oil_vol'
            pass
        else:
            mbpump.data,_     = load_sensor(g, 'm_ballast_pumped', chnk, chindex)
            #for yax in fig2.select('mbpump'):
            #    yax.legend = 'm_ballast_pumped'
        battpos.data,_ = load_sensor(g, 'm_battpos',    chnk, chindex)
        pitch.data,_   = load_sensor(g, 'm_pitch',      chnk, chindex)
        pitch.data['y'] = [math.degrees(y) for y in pitch.data['y']]

        mfin.data,_     = load_sensor(g, 'm_fin',     chnk, chindex)
        cfin.data,_     = load_sensor(g, 'c_fin',     chnk, chindex)
        mroll.data,_    = load_sensor(g, 'm_roll',    chnk, chindex)
        mheading.data,_ = load_sensor(g, 'm_heading', chnk, chindex)
        cheading.data,_ = load_sensor(g, 'c_heading', chnk, chindex)
        mfin.data['y']     = [math.degrees(y) for y in mfin.data['y']]
        cfin.data['y']     = [math.degrees(y) for y in cfin.data['y']]
        mheading.data['y'] = [math.degrees(y) for y in mheading.data['y']]
        cheading.data['y'] = [math.degrees(y) for y in cheading.data['y']]
        mroll.data['y']    = [math.degrees(y) for y in mroll.data['y']]

        fig1.yaxis[1].visible = True
        fig2.yaxis[1].visible = True
        fig3.yaxis[1].visible = True


    #GLIDER SELECTS
    def glider_buttons(increment):
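        # Step through the available glider IDs, wrapping around at either end.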
        ops = gliders.options
        new_index = ops.index(gliders.value) + increment
        if new_index >= len(ops):
            new_index = 0
        elif new_index < 0:
            new_index = len(ops)-1
        gliders.value = ops[new_index]
        chunkation_update(None, None, None) #reset chunk indicator and clicks
    def next_glider_func():
        glider_buttons(1)
    def prev_glider_func():
        glider_buttons(-1)
    def update_glider(attrib,old,new):
        chunk_indicator.value = '0'
        #update_data(None,None,None)


    gliders.on_change('value', update_glider)
    next_glider.on_click(next_glider_func)
    prev_glider.on_click(prev_glider_func)


    # CHUNK SELECTS
    def chunkation_update(attrib,old,new):
        chunk_indicator.value = '0'
        prev_chunk.clicks = 0
        next_chunk.clicks = 0
        update_data(None,None,None)
        if new == '-ALL-':
            chunk_indicator.value = '-'

    def chunk_func():
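        # The chunk index is the difference between the two buttons' click
        # counters, clamped at zero so "next" stops at the most recent chunk.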
        chunkdiff = prev_chunk.clicks - next_chunk.clicks
        if chunkdiff < 0:
            prev_chunk.clicks = 0
            next_chunk.clicks = 0
            chunkdiff = 0
        print (chunkdiff)
        chunk_indicator.value = str(chunkdiff)

    def chunk_indicator_update(attrib,old,new):
        try:
            if abs(int(old)-int(new))>1: #manual update, triggers new non-manual indicator update, ie else clause below
                prev_chunk.clicks = int(new)
                next_chunk.clicks = 0
            else:
                update_data('chunk',int(old),int(new))
            print("UPDATE", old, new)
        except Exception as e:
            print(type(e),e, old, new)

    chunkations.on_change('value', chunkation_update)
    chunk_indicator.on_change('value', chunk_indicator_update)
    next_chunk.on_click(chunk_func)
    prev_chunk.on_click(chunk_func)

    update_data(None,None,None)

    return vplot(control_box, figs)
Code example #28
                                                  solution_select.value))

    data_table_source.data = new_data_table_source.data
    store_now.data = new_store_now.data
    store_future.data = new_store_future.data
    ecom_zipcode_all.data = new_ecom_zipcode_all.data


# Bokeh Widgets

from bokeh.io import output_file, show
from bokeh.layouts import widgetbox
from bokeh.models.widgets import TextInput, Slider, CheckboxGroup, DatePicker, DataTable, Div

state_choose = TextInput(value="MA", title="Choose State")
state_choose.on_change('value', update)

radius_select = TextInput(value='5', title='Select Radius (Miles)')
radius_select.on_change('value', update)

solution_select = TextInput(value='0', title='Select Solution')
solution_select.on_change('value', update)

tran_type_select = CheckboxGroup(labels=['Sale', 'Return'],
                                 active=[0, 1],
                                 name='Transaction Type')
tran_type_select.on_change('active', update)

min_date_select = DatePicker(title='Start Date',
                             value=min_date1,
                             min_date=min_date1,
Code example #29
File: taylor_server.py Project: jbcrail/bokeh

def on_text_value_change(attr, old, new):
    try:
        global expr
        expr = sy.sympify(new, dict(x=xs))
    except (sy.SympifyError, TypeError, ValueError) as exception:
        dialog.content = str(exception)
        dialog.visible = True
    else:
        update_data()


dialog = Dialog(title="Invalid expression")

slider = Slider(start=1, end=20, value=order, step=1, title="Order", callback_policy="mouseup")
slider.on_change("value", on_slider_value_change)

text = TextInput(value=str(expr), title="Expression:")
text.on_change("value", on_text_value_change)

inputs = WidgetBox(children=[slider, text], width=400)
layout = Column(children=[inputs, plot, dialog])
update_data()
document.add_root(layout)
session.show(layout)

if __name__ == "__main__":
    print("\npress ctrl-C to exit")
    session.loop_until_closed()
Code example #30
File: test.py Project: satishjhaldiyal/mohrs_circle
    R = float(new)
    x = R * np.cos(t)
    y = R * np.sin(t)
    source.data = dict(x=x, y=y)


def x_input_handler(attr, old, new):
    x = R * np.cos(t) + float(new)
    y = R * np.sin(t)
    source.data = dict(x=x, y=y)


def y_input_handler(attr, old, new):
    x = R * np.cos(t)
    y = R * np.sin(t) + float(new)
    source.data = dict(x=x, y=y)


R_input = TextInput(value="1", title="R:")
x_input = TextInput(value="0", title="x:")
y_input = TextInput(value="0", title="y:")

R_input.on_change("value", R_input_handler)
x_input.on_change("value", x_input_handler)
y_input.on_change("value", y_input_handler)

inputs = column(R_input, x_input, y_input)

curdoc().add_root(row(inputs, plot, width=800))
curdoc().title = "circle"
Code example #31
File: ui.py Project: yyzreal/nlp-architect
def _create_ui_components() -> (Figure, ColumnDataSource):  # pylint: disable=too-many-statements
    global asp_table_source, asp_filter_src, op_table_source, op_filter_src
    global stats, aspects, tabs, lexicons_dropdown
    stats = pd.DataFrame(columns=["Quantity", "Score"])
    aspects = pd.Series([])

    def new_col_data_src():
        return ColumnDataSource({"file_contents": [], "file_name": []})

    large_text = HTMLTemplateFormatter(template="""<div><%= value %></div>""")

    def data_column(title):
        return TableColumn(field=title,
                           title='<span class="header">' + title + "</span>",
                           formatter=large_text)

    asp_table_columns = [
        data_column("Term"),
        data_column("Alias1"),
        data_column("Alias2"),
        data_column("Alias3"),
    ]
    op_table_columns = [
        data_column("Term"),
        data_column("Score"),
        data_column("Polarity")
    ]

    asp_table_source = empty_table("Term", "Alias1", "Alias2", "Alias3")
    asp_filter_src = empty_table("Term", "Alias1", "Alias2", "Alias3")
    asp_src = new_col_data_src()

    op_table_source = empty_table("Term", "Score", "Polarity", "Polarity")
    op_filter_src = empty_table("Term", "Score", "Polarity", "Polarity")
    op_src = new_col_data_src()

    asp_table = DataTable(
        source=asp_table_source,
        selectable="checkbox",
        columns=asp_table_columns,
        editable=True,
        width=600,
        height=500,
    )
    op_table = DataTable(
        source=op_table_source,
        selectable="checkbox",
        columns=op_table_columns,
        editable=True,
        width=600,
        height=500,
    )

    asp_examples_box = _create_examples_table()
    op_examples_box = _create_examples_table()
    asp_layout = layout([[asp_table, asp_examples_box]])
    op_layout = layout([[op_table, op_examples_box]])
    asp_tab = Panel(child=asp_layout, title="Aspect Lexicon")
    op_tab = Panel(child=op_layout, title="Opinion Lexicon")
    tabs = Tabs(tabs=[asp_tab, op_tab], width=700, css_classes=["mytab"])

    lexicons_menu = [("Open", "open"), ("Save", "save")]
    lexicons_dropdown = Dropdown(
        label="Edit Lexicons",
        button_type="success",
        menu=lexicons_menu,
        width=140,
        height=31,
        css_classes=["mybutton"],
    )

    train_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
    train_dropdown = Dropdown(
        label="Extract Lexicons",
        button_type="success",
        menu=train_menu,
        width=162,
        height=31,
        css_classes=["mybutton"],
    )

    inference_menu = [("Parsed Data", "parsed"), ("Raw Data", "raw")]
    inference_dropdown = Dropdown(
        label="Classify",
        button_type="success",
        menu=inference_menu,
        width=140,
        height=31,
        css_classes=["mybutton"],
    )

    text_status = TextInput(value="Select training data",
                            title="Train Run Status:",
                            css_classes=["statusText"])
    text_status.visible = False

    train_src = new_col_data_src()
    infer_src = new_col_data_src()

    with open(join(SOLUTION_DIR, "dropdown.js")) as f:
        args = dict(
            clicked=lexicons_dropdown,
            asp_filter=asp_filter_src,
            op_filter=op_filter_src,
            asp_src=asp_src,
            op_src=op_src,
            tabs=tabs,
            text_status=text_status,
            train_src=train_src,
            infer_src=infer_src,
            train_clicked=train_dropdown,
            infer_clicked=inference_dropdown,
            opinion_lex_generic="",
        )
        code = f.read()

    args["train_clicked"] = train_dropdown
    train_dropdown.js_on_change("value", CustomJS(args=args, code=code))

    args["train_clicked"] = inference_dropdown
    inference_dropdown.js_on_change("value", CustomJS(args=args, code=code))

    args["clicked"] = lexicons_dropdown
    lexicons_dropdown.js_on_change("value", CustomJS(args=args, code=code))

    def update_filter_source(table_source, filter_source):
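        # Copy only the rows currently selected in the table (minus the index
        # column) into the filter source consumed by the CustomJS callbacks.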
        df = table_source.to_df()
        sel_inx = sorted(table_source.selected.indices)
        df = df.iloc[sel_inx, 1:]
        new_source = ColumnDataSource(df)
        filter_source.data = new_source.data

    def update_examples_box(data, examples_box, old, new):
        examples_box.source.data = {"Examples": []}
        unselected = list(set(old) - set(new))
        selected = list(set(new) - set(old))
        if len(selected) <= 1 and len(unselected) <= 1:
            examples_box.source.data.update({
                "Examples":
                [str(data.iloc[unselected[0], i])
                 for i in range(4, 24)] if len(unselected) != 0 else
                [str(data.iloc[selected[0], i]) for i in range(4, 24)]
            })

    def asp_selected_change(_, old, new):
        global asp_filter_src, asp_table_source, aspects_data
        update_filter_source(asp_table_source, asp_filter_src)
        update_examples_box(aspects_data, asp_examples_box, old, new)

    def op_selected_change(_, old, new):
        global op_filter_src, op_table_source, opinions_data
        update_filter_source(op_table_source, op_filter_src)
        update_examples_box(opinions_data, op_examples_box, old, new)

    def read_csv(file_src, headers=False, index_cols=False, readCSV=True):
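        # file_src may carry either a plain base64 payload or a JS data-URL;
        # decode it into a DataFrame unless readCSV is False, in which case
        # the source is returned unchanged.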
        if readCSV:
            raw_contents = file_src.data["file_contents"][0]

            if len(raw_contents.split(",")) == 1:
                b64_contents = raw_contents
            else:
                # remove the prefix that JS adds
                b64_contents = raw_contents.split(",", 1)[1]
            file_contents = base64.b64decode(b64_contents)
            return pd.read_csv(
                io.BytesIO(file_contents),
                encoding="ISO-8859-1",
                keep_default_na=False,
                na_values={None},
                engine="python",
                index_col=index_cols,
                header=0 if headers else None,
            )
        return file_src

    def read_parsed_files(file_content, file_name):
        try:
            # remove the prefix that JS adds
            b64_contents = file_content.split(",", 1)[1]
            file_content = base64.b64decode(b64_contents)
            with open(SENTIMENT_OUT / file_name, "w") as json_file:
                data_dict = json.loads(file_content.decode("utf-8"))
                json.dump(data_dict, json_file)
        except Exception as e:
            print(str(e))

    # pylint: disable=unused-argument
    def train_file_callback(attr, old, new):
        global train_data
        SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)
        train = TrainSentiment(parse=True, rerank_model=None)
        if len(train_src.data["file_contents"]) == 1:
            train_data = read_csv(train_src, index_cols=0)
            file_name = train_src.data["file_name"][0]
            raw_data_path = SENTIMENT_OUT / file_name
            train_data.to_csv(raw_data_path, header=False)
            print("Running_SentimentTraining on data...")
            train.run(data=raw_data_path)
        else:
            f_contents = train_src.data["file_contents"]
            f_names = train_src.data["file_name"]
            raw_data_path = SENTIMENT_OUT / train_src.data["file_name"][
                0].split("/")[0]
            if not os.path.exists(raw_data_path):
                os.makedirs(raw_data_path)
            for f_content, f_name in zip(f_contents, f_names):
                read_parsed_files(f_content, f_name)
            print("Running_SentimentTraining on data...")
            train.run(parsed_data=raw_data_path)

        text_status.value = "Lexicon extraction completed"

        with io.open(AcquireTerms.acquired_aspect_terms_path, "r") as fp:
            aspect_data_csv = fp.read()
        file_data = base64.b64encode(str.encode(aspect_data_csv))
        file_data = file_data.decode("utf-8")
        asp_src.data = {
            "file_contents": [file_data],
            "file_name": ["nameFile.csv"]
        }

        out_path = LEXICONS_OUT / "generated_opinion_lex_reranked.csv"
        with io.open(out_path, "r") as fp:
            opinion_data_csv = fp.read()
        file_data = base64.b64encode(str.encode(opinion_data_csv))
        file_data = file_data.decode("utf-8")
        op_src.data = {
            "file_contents": [file_data],
            "file_name": ["nameFile.csv"]
        }

    def show_analysis() -> None:
        global stats, aspects, plot, source, tabs
        plot, source = _create_plot()
        events_table = _create_events_table()

        # pylint: disable=unused-argument
        def _events_handler(attr, old, new):
            _update_events(events_table, events_type.active)

        # Toggle display of in-domain / All aspect mentions
        events_type = RadioButtonGroup(
            labels=["All Events", "In-Domain Events"], active=0)

        analysis_layout = layout([[plot], [events_table]])

        # events_type display toggle disabled
        # analysis_layout = layout([[plot],[events_type],[events_table]])

        analysis_tab = Panel(child=analysis_layout, title="Analysis")
        tabs.tabs.insert(2, analysis_tab)
        tabs.active = 2
        events_type.on_change("active", _events_handler)
        source.selected.on_change("indices", _events_handler)  # pylint: disable=no-member

    # pylint: disable=unused-argument
    def infer_file_callback(attr, old, new):

        # run inference on input data and current aspect/opinion lexicons in view
        global infer_data, stats, aspects

        SENTIMENT_OUT.mkdir(parents=True, exist_ok=True)

        df_aspect = pd.DataFrame.from_dict(asp_filter_src.data)
        aspect_col_list = ["Term", "Alias1", "Alias2", "Alias3"]
        df_aspect = df_aspect[aspect_col_list]
        df_aspect.to_csv(SENTIMENT_OUT / "aspects.csv",
                         index=False,
                         na_rep="NaN")

        df_opinion = pd.DataFrame.from_dict(op_filter_src.data)
        opinion_col_list = ["Term", "Score", "Polarity", "isAcquired"]
        df_opinion = df_opinion[opinion_col_list]
        df_opinion.to_csv(SENTIMENT_OUT / "opinions.csv",
                          index=False,
                          na_rep="NaN")

        solution = SentimentSolution()

        if len(infer_src.data["file_contents"]) == 1:
            infer_data = read_csv(infer_src, index_cols=0)
            file_name = infer_src.data["file_name"][0]
            raw_data_path = SENTIMENT_OUT / file_name
            infer_data.to_csv(raw_data_path, header=False)
            print("Running_SentimentInference on data...")
            text_status.value = "Running classification on data..."
            stats = solution.run(
                data=raw_data_path,
                aspect_lex=SENTIMENT_OUT / "aspects.csv",
                opinion_lex=SENTIMENT_OUT / "opinions.csv",
            )
        else:
            f_contents = infer_src.data["file_contents"]
            f_names = infer_src.data["file_name"]
            raw_data_path = SENTIMENT_OUT / infer_src.data["file_name"][
                0].split("/")[0]
            if not os.path.exists(raw_data_path):
                os.makedirs(raw_data_path)
            for f_content, f_name in zip(f_contents, f_names):
                read_parsed_files(f_content, f_name)
            print("Running_SentimentInference on data...")
            text_status.value = "Running classification on data..."
            stats = solution.run(
                parsed_data=raw_data_path,
                aspect_lex=SENTIMENT_OUT / "aspects.csv",
                opinion_lex=SENTIMENT_OUT / "opinions.csv",
            )

        aspects = pd.read_csv(SENTIMENT_OUT / "aspects.csv",
                              encoding="utf-8")["Term"]
        text_status.value = "Classification completed"
        show_analysis()

    # pylint: disable=unused-argument
    def asp_file_callback(attr, old, new):
        global aspects_data, asp_table_source
        aspects_data = read_csv(asp_src, headers=True)
        # Replaces None values by empty string
        aspects_data = aspects_data.fillna("")
        new_source = ColumnDataSource(aspects_data)
        asp_table_source.data = new_source.data
        asp_table_source.selected.indices = list(range(len(aspects_data)))

    # pylint: disable=unused-argument
    def op_file_callback(attr, old, new):
        global opinions_data, op_table_source, lexicons_dropdown, df_opinion_generic
        df = read_csv(op_src, headers=True)
        # Replaces None values by empty string
        df = df.fillna("")
        # Placeholder for generic opinion lexicons from the given csv file
        df_opinion_generic = df[df["isAcquired"] == "N"]
        # Update the argument value for the callback customJS
        lexicons_dropdown.js_property_callbacks.get("change:value")[0].args[
            "opinion_lex_generic"] = df_opinion_generic.to_dict(orient="list")
        opinions_data = df[df["isAcquired"] == "Y"]
        new_source = ColumnDataSource(opinions_data)
        op_table_source.data = new_source.data
        op_table_source.selected.indices = list(range(len(opinions_data)))

    # pylint: disable=unused-argument
    def txt_status_callback(attr, old, new):
        print("Previous label: " + old)
        print("Updated label: " + new)

    text_status.on_change("value", txt_status_callback)

    asp_src.on_change("data", asp_file_callback)
    # pylint: disable=no-member
    asp_table_source.selected.on_change("indices", asp_selected_change)

    op_src.on_change("data", op_file_callback)
    op_table_source.selected.on_change("indices", op_selected_change)  # pylint: disable=no-member

    train_src.on_change("data", train_file_callback)
    infer_src.on_change("data", infer_file_callback)

    return layout(
        [[_create_header(train_dropdown, inference_dropdown, text_status)],
         [tabs]])
Code example #32
File: temp.py Project: baileymichele/Owlet-code
def date_of_file(filename):
    day = filename[11:19]
    datetime_format = date(int(day[0:4]), int(day[4:6]), int(
        day[6:])) + timedelta(days=1)
    return datetime_format


with open("two_reds/column_names.txt") as f:
    content = f.readlines()
content = content[0].split(',')

text_dsn = TextInput(title="DSN:")
dsn_button = Button(label="Update DSN", button_type="success")
curdoc().add_root(row(text_dsn))


def dsn_text_update(attrname, old, new):
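    # After a DSN is entered, plot its most recent day of data and hand off to
    # set_up_widgets; this callback then detaches itself from the text input.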
    all_dates = get_file_names(text_dsn.value)
    most_recent = all_dates[-1]  # is it already sorted???
    df = dsn_date(most_recent)
    # df must not be empty
    p, source, source1, source2, source3, source4, source5 = plot_data(
        df, text_dsn.value)
    text_dsn.remove_on_change('value', dsn_text_update)
    set_up_widgets(p, source, source1, source2, source3, source4, source5, df,
                   all_dates, text_dsn)


text_dsn.on_change('value', dsn_text_update)
Code example #33
    u_val = u_fun(x_val, y_val)
    v_val = v_fun(x_val, y_val)

    return x_val, y_val, u_val, v_val, hx


# initialize controls
# text input for input of the vector function [fx(x,y),fy(x,y)]
u_input = TextInput(value=curveintegral_settings.sample_functions[
    curveintegral_settings.init_fun_key][0],
                    title="fx(x,y):")
v_input = TextInput(value=curveintegral_settings.sample_functions[
    curveintegral_settings.init_fun_key][1],
                    title="fy(x,y):")

u_input.on_change('value', function_change)
v_input.on_change('value', function_change)

# text input for input of the parametrized curve [cx(t),cy(t)]
cx_input = TextInput(value=curveintegral_settings.sample_curves[
    curveintegral_settings.init_curve_key][0],
                     title="cx(t):")
cy_input = TextInput(value=curveintegral_settings.sample_curves[
    curveintegral_settings.init_curve_key][1],
                     title="cy(t):")
# slider controlling the parameter t
parameter_input = Slider(title="t",
                         value=curveintegral_settings.parameter_input_init,
                         start=curveintegral_settings.parameter_min,
                         end=curveintegral_settings.parameter_max,
                         step=curveintegral_settings.parameter_step)
Code example #34
File: temp.py Project: baileymichele/Owlet-code
def set_up_widgets(p, source, source1, source2, source3, source4, source5, df,
                   all_dates, text_dsn):
    '''Set up widgets needed after an initial dsn is entered'''
    dsn = text_dsn.value
    # Set up widgets
    text_title = TextInput(title="Title:", value="{} Data".format(dsn))
    text_save = TextInput(title="Save As:", value=dsn)

    max_for_dsn = df.timestamp.max()
    min_day = date_of_file(
        all_dates[0])  # works if sorted, needs to be formatted...
    max_day = date_of_file(all_dates[-1])

    plus_one = max_for_dsn + timedelta(days=1)
    calendar = DatePicker(title="Day:",
                          value=date(plus_one.year, plus_one.month,
                                     plus_one.day),
                          max_date=max_day,
                          min_date=min_day)
    button = Button(label="Update", button_type="success")

    columns = [
        TableColumn(field="day",
                    title="Date",
                    formatter=DateFormatter(format="%m/%d/%Y")),
        TableColumn(field="hr",
                    title="Avg HR",
                    formatter=NumberFormatter(format="0.0")),
        TableColumn(field="o2",
                    title="Avg O2",
                    formatter=NumberFormatter(format="0.0")),
        TableColumn(field="temp",
                    title="Avg Temp",
                    formatter=NumberFormatter(format="0.0"))
    ]

    # table_title = Div(text="""Daily Averages:""", width=200)
    # daily_avg = get_daily_avgs(df_full)
    # data =  {
    #     'day' : daily_avg.index,
    #     'hr' : daily_avg.heart_rate_avg,
    #     'o2' : daily_avg.oxygen_avg,
    #     'temp' : daily_avg.skin_temperature
    # }
    # table_source = ColumnDataSource(data=data)
    # data_table = DataTable(source=table_source, columns=columns, width=280, height=180, index_position=None)
    # export_png(data_table, filename="table.png")
    # save_table = Button(label='Save Daily Averages Table', button_type="primary")

    # Set up callbacks
    def update_title(attrname, old, new):
        p.title.text = text_title.value

    def update_save(attrname, old, new):
        p.tools[0].save_name = text_save.value

    def update():
        text_dsn.value = text_dsn.value.strip(" ")  # Get rid of extra space
        # Make sure time is valid

        update_data(p,
                    source,
                    source1,
                    source2,
                    source3,
                    source4,
                    source5,
                    df,
                    text_dsn.value,
                    all_dates,
                    date=str(calendar.value))

        # Title/save update dsn
        text_title.value = text_dsn.value + " Data"
        text_save.value = text_dsn.value

    def save():
        export_png(data_table,
                   filename="{}_averages.png".format(text_dsn.value))

    text_title.on_change('value', update_title)
    text_save.on_change('value', update_save)

    button.on_click(update)
    button.js_on_click(CustomJS(args=dict(p=p), code="""p.reset.emit()"""))

    # save_table.on_click(save)

    # Set up layouts and add to document
    inputs = widgetbox(text_title, text_save, calendar,
                       button)  #, table_title, data_table, save_table)

    curdoc().add_root(row(inputs, p, width=1300))
Code example #35
            P2_experiment.append(float(j[3]))

      
        P0_experiment_source.data = dict(x=t, y=P0_experiment)
        P1_experiment_source.data = dict(x=t, y=P1_experiment)
        P2_experiment_source.data = dict(x=t, y=P2_experiment)
    
    except:
        print "could not open file, doesnt exist"

    
    



file_name.on_change('value', update_exp_data)
path.on_change('value', update_exp_data)

def update_data(attrname, old, new):
    global t 

    delta = 2 * math.pi * 17.5 * 10 ** 3

    # Get the current slider values
    eta = etta.value
    pi_time = Pi_time.value
    nbar = n_bar.value
    
    omega = math.pi / (pi_time * 10 ** (-6))
    lamda = 1.0*(eta*omega)**2/delta
    kai = 1.0*(eta*omega/delta)**2
Code example #36
class SlidersApp(HBox):
    """The main app, where parameters and controllers are defined."""
    extra_generated_classes = [["SlidersApp", "SlidersApp", "HBox"]]
    ## read the BTC price data
    raw_price = pd.read_csv(os.path.join(input_folder, "price.csv"), names=['time', 'price'], 
                       index_col='time', parse_dates=[0], 
                       date_parser=lambda x: datetime.strptime(x, "%Y-%m-%dT%H:%M:%S"))
    raw_price['time_index'] = raw_price.index
    raw_price.drop_duplicates(subset='time_index', take_last=True, inplace=True)
    del raw_price['time_index']
    ## downsample to 12h data
    price_data = pd.DataFrame(raw_price.resample('12h', how='ohlc').ix[:, 3])
    price_data.columns = ['price']    
    ## data source
    source = Instance(ColumnDataSource)
    ## inputs
    inputs = Instance(VBoxForm)
    text = Instance(TextInput)
    threshold = Instance(Slider)
    ## plots
    plot = Instance(Plot)

    @classmethod
    def create(cls):
        """One-time creation of app's objects.

        This function is called once, and is responsible for
        creating all objects (plots, datasources, etc)
        """
        ## create the obj of the app
        obj = cls()
        obj.inputs = VBoxForm()
        ## create input widgets
        obj.make_inputs()
        ## create data source
        obj.make_source()       
        ##
        obj.make_plots()
        ## layout
        obj.set_children()
        return obj

    def make_inputs(self):
        self.text = TextInput(
            title="Title", name='title', value='BTC chart with news tag'
        )
        self.threshold = Slider(
            title="Threshold", name='threshold',
            value=0.0, start=-1.0, end=1.0, step=0.1
        )

    def make_source(self):
        self.source = ColumnDataSource(data=self.df)           

    def make_plots(self):
        toolset = "crosshair,pan,reset,resize,save,wheel_zoom"
        ## fixed time index limit (epoch)
        start_time = (datetime.strptime("1/1/12 16:30", "%d/%m/%y %H:%M") - datetime(1970,1,1)).total_seconds()*1000
        end_time = (datetime.strptime("1/5/15 16:30", "%d/%m/%y %H:%M") - datetime(1970,1,1)).total_seconds()*1000
        ## Generate a figure container        
        plot = figure(title_text_font_size="12pt",
                      plot_height=600,
                      plot_width=1100,
                      tools=toolset,
                      title=self.text.value,
                      # title="BTC chart with news",
                      x_axis_type="datetime",
                      x_range=[start_time, end_time],
                      y_range=[0, 1300]
        )        
        plot.below[0].formatter.formats = dict(years=['%Y'], months=['%b %Y'], days=['%d %b %Y'])
        ## the price line plot
        plot.line(
            self.price_data.index, self.price_data['price'],
            # color='#A6CEE3',
            legend='BTC Price'
        )      
        ## the news tag plot
        plot.circle('time', 'price',
                    source=self.source,
                    fill_color=self.source.data['color'],
                    legend="News", 
                    size=8,
        )
        self.plot = plot

    def set_children(self):
        self.children = [self.inputs, self.plot]
        self.inputs.children = [self.text, self.threshold]

    def setup_events(self):
        """Attaches the on_change event to the value property of the widget.

        The callback is set to the input_change method of this app.
        """
        super(SlidersApp, self).setup_events()
        # if not self.text:
            # return
        # Text box event registration
        if self.text:
            self.text.on_change('value', self, 'input_change')
        # Slider event registration
        if self.threshold:
            self.threshold.on_change('value', self, 'input_change')

    def input_change(self, obj, attrname, old, new):
        """Executes whenever the input form changes.

        It is responsible for updating the plot, or anything else you want.

        Args:
            obj : the object that changed
            attrname : the attr that changed
            old : old value of attr
            new : new value of attr
        """
        self.update_data()
        curdoc().add(self)

    def update_data(self):
        """Called each time that any watched property changes.

        This updates the sin wave data with the most recent values of the
        sliders. This is stored as two numpy arrays in a dict into the app's
        data source property.
        """
        self.make_source()
        self.make_plots()
        self.set_children()
        # x = news_price.index
        # y = news_price['price']
        # logging.debug(
        #     "Threshold: %f" % self.threshold.value
        # )
        ## plug back to source of the obj
        # self.source.data = dict(x=x, y=y, color=news_price['color'])

    @property
    def df(self):
        return get_data(self.threshold.value)
Code example #37
File: main.py Project: yanboshu/paperweekly_bot
    current = df[df.content.str.contains(
        search_text.value.strip()) == True]  # df[""]
    source.data = {
        'group_name': current.group_name,
        'group_user_name': current.group_user_name,
        'content': current.content,
        'user_img': current.user_img,
        'createdAt': current.createdAt,
    }


search_text = TextInput(title="search")
#search_text_val = search_text.value.strip()
#slider = Slider(title="start time", start=10000, end=250000, value=150000, step=1000)
#slider.on_change('value', lambda attr, old, new: update())
search_text.on_change('value', lambda attr, old, new: update())
'''
button = Button(label=u"Search", button_type="success")
button.callback = CustomJS(args=dict(source=source),
                           code=open(join(dirname(__file__), "download.js")).read())

'''

columns = [
    TableColumn(field="group_name", title="Group Name"),
    TableColumn(field="group_user_name", title="Group User Name"),
    TableColumn(field="content", title="Content"),
    TableColumn(field="user_img", title="User Img"),
    TableColumn(field="createdAt", title="createdAt"),
    #TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
]
Code example #38
                     ])
    df = df[1:]
    df['failure'] = [1 if b == 1 or b == '1' else 0 for b in df.failure]
    y = df.failure
    x = df.drop('failure', axis=1)
    x_train_original, x_test_original, y_train_original, y_test_original = train_test_split(
        x, y, test_size=N)
    clf = svm.LinearSVC(loss='l2', penalty='l1', dual=False)
    clf.fit(x_train_original, y_train_original)
    predictions = clf.predict(x_test_original)
    print("Accuracy =", accuracy_score(y_test_original, predictions))
    print(np.unique(predictions))
    tn, fp, fn, tp = confusion_matrix(y_test_original, predictions).ravel()
    print("True Negative:", tn)
    print("False Positive:", fp)
    print("False Negative:", fn)
    print("True Positive:", tp)
    #newdict = {'values':[tn,fp,tn,fn], 'names':['TP','FP','TN',"FN"]
    newdict = {'values': ['TP', 'FP', 'FN', 'TN'], 'timing': [tp, fp, fn, tn]}
    data.update(newdict)
    print(data)


input.on_change('value', update_points)

layout = column(row(input, width=400), row(bar))

#show(layout)

curdoc().add_root(layout)
Code example #39
def main():
    df = pd.read_csv("nlp_entities.csv")
    per_df = pd.read_csv("Person.csv")
    pla_df = pd.read_csv("Place.csv")
    org_df = pd.read_csv("Org.csv")

    entities = ["Person", "Place", "Org"]
    relations = [["Person_Org","Person_Place","Person_Person"],["Place_Person","Place_Org","Place_Place"],["Org_Person","Org_Place","Org_Org"]]
    dfs = [per_df, pla_df,org_df]

    button_group = RadioButtonGroup(labels=entities, active=0)
    slider = Slider(title="by Index", value=0, start=0, end=len(per_df.index)-1, step=1)
    text = TextInput(title="by Name:", value='')
    select1 = Select(title="y axis:", value="Person", options=["Person", "Place", "Org"])

    files = per_df.iloc[0]["Documents"]
    d = []
    y_n = []
    headlines = []
    datestr = []
    if files == "None":
        files = []
    else:
        files = ast.literal_eval(files)
        for f in files:
            v_d = df.loc[df["filename"] == f]["date"].values
            datestr.append(v_d[0])
            d.append(datetime.strptime(v_d[0],"%m/%d/%Y"))
            v_p = df.loc[df["filename"] == f]["Person"].values
            y_n.append(len(ast.literal_eval(v_p[0])))
            v_h = df.loc[df["filename"] == f]["headlines"].values
            headlines.append(v_h[0])
    dates = np.array(d)
    n = np.array(y_n)
    headlines = np.array(headlines)
    name = per_df.iloc[0]["Person"]

    # create the timeline plot for the searched entity
    s_source = ColumnDataSource(data=dict(x = dates, y = n, f = files, h =headlines, d = datestr))
    TOOLS = "box_select,pan,box_zoom,reset"
    f1 = figure(tools=TOOLS, plot_width=600, plot_height=450, title="Timeline for '" + name + "'",
                x_axis_type='datetime', x_axis_label='Time', y_range=[-50,250],
                y_axis_label='number in total', output_backend="webgl")
    c1 = f1.circle('x', 'y', source=s_source, size=10, color="red", alpha=0.5)
    hover1 = HoverTool(
        tooltips=[
            ("document: ", "@f"),
            ("date", "@d"),
            ("headline: ", "@h")
        ]
    )
    f1.add_tools(hover1)
    f1.x_range = Range1d(datetime.strptime('01/01/2002',"%m/%d/%Y"),datetime.strptime('12/31/2004',"%m/%d/%Y"))

    def update_y(attr,old,new):
        y = select1.value
        new_y = []
        for f in s_source.data["f"]:
            v = df.loc[df["filename"] == f][y].values
            new_y.append(len(ast.literal_eval(v[0])))
        new_y = np.array(new_y)
        s_source.data["y"] = new_y
        return

    select1.on_change("value",update_y)

    # create interactive network graph for timeline plot
    s1 = ColumnDataSource(data=dict(xs=[], ys=[]))
    s2 = ColumnDataSource(data=dict(vx=[], vy=[], labels=[], color=[]))
    n1 = figure(plot_width=500, plot_height=500, x_axis_type=None, y_axis_type=None,
                outline_line_color=None, tools="pan,box_zoom,reset,save",title="Related Entities of Selected Files",
                output_backend="webgl")
    n1.multi_line('xs', 'ys', line_color="blue", source=s1, alpha=0.3)
    c2 = n1.circle('vx', 'vy', size=20, line_color="black", fill_color='color', source=s2, alpha=0.5)
    n1.text('vx', 'vy', text='labels', text_color="black", text_font_size="10px", text_align="center",
            text_baseline="middle", source=s2)

    # update network graph when the files are selected
    def update_network1(attr, old, new):
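        # Union the entity nodes/edges of every selected document, add links
        # between co-selected documents, then rebuild the network graph sources.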
        inds = new['1d']['indices']
        nodes = []
        edges = []
        t_t_edges = []
        selected = []
        if inds == []:
            s1.data = dict(xs=[], ys=[])
            s2.data = dict(vx=[], vy=[], labels=[], color=[])
            return
        for i in inds:
            f = s_source.data["f"][i]
            v = df.loc[df["filename"] == f]["all_nodes"].values
            n = ast.literal_eval(v[0])
            selected.append(f)
            nodes = list(set(nodes).union(n))
            e = ast.literal_eval(df.loc[df["filename"] == f]["all_edges"].values[0])
            edges = list(set(edges).union(e))
            t_t_e = ast.literal_eval(df.loc[df["filename"] == f]["file_neighbors_edge"].values[0])
            t_t_edges = list(set(t_t_edges).union(t_t_e))
        t_t = list(itertools.combinations(selected, 2))
        for t in t_t:
            if (t in t_t_edges) or ((t[1],t[0])in t_t_edges):
                edges.append(t)
        new_dict = create_graph(nodes, edges, selected)
        s1.data = new_dict["s1"]
        s2.data = new_dict["s2"]
        return

    c1.data_source.on_change("selected", update_network1)

    # create person-place network graph
    v1 = per_df.at[0, "Person_Place"]
    edges_1 = ast.literal_eval(v1)
    nodes_1 = [name]
    for e in edges_1:
        nodes_1.append(e[1])

    g1 = create_graph(nodes_1,edges_1,[name])
    s3 = ColumnDataSource(data=g1["s1"])
    s4 = ColumnDataSource(data=g1["s2"])
    n2 = figure(plot_width=400, plot_height=400, x_axis_type=None, y_axis_type=None,
                outline_line_color=None, tools="pan,box_zoom,reset,save", title="Person_Place Relationships for Person: " + name,
                output_backend="webgl")
    n2.multi_line('xs', 'ys', line_color="blue", source=s3, alpha=0.3)
    c3 = n2.circle('vx', 'vy', size=20, line_color="black", fill_color='color', source=s4, alpha=0.5)
    n2.text('vx', 'vy', text='labels', text_color="black", text_font_size="8px", text_align="center",
            text_baseline="middle", source=s4)

    # create person-org network graph
    v2 = per_df.at[0, "Person_Org"]
    edges_2 = ast.literal_eval(v2)
    nodes_2 = [name]
    for e in edges_2:
        nodes_2.append(e[1])

    g2 = create_graph(nodes_2, edges_2, [name])
    s5 = ColumnDataSource(data=g2["s1"])
    s6 = ColumnDataSource(data=g2["s2"])
    n3 = figure(plot_width=400, plot_height=400, x_axis_type=None, y_axis_type=None,
                outline_line_color=None, tools="pan,box_zoom,reset,save", title="Person_Org Relationships for Person: " + name,
                output_backend="webgl")
    n3.multi_line('xs', 'ys', line_color="blue", source=s5, alpha=0.3)
    c4 = n3.circle('vx', 'vy', size=20, line_color="black", fill_color='color', source=s6, alpha=0.5)
    n3.text('vx', 'vy', text='labels', text_color="black", text_font_size="8px", text_align="center",
            text_baseline="middle", source=s6)

    # create person-person network graph
    v3 = per_df.at[0, "Person_Person"]
    edges_3 = ast.literal_eval(v3)
    nodes_3 = [name]
    for e in edges_3:
        nodes_3.append(e[1])

    g3 = create_graph(nodes_3, edges_3, [name])
    s7 = ColumnDataSource(data=g3["s1"])
    s8 = ColumnDataSource(data=g3["s2"])
    n4 = figure(plot_width=400, plot_height=400, x_axis_type=None, y_axis_type=None,
                outline_line_color=None, tools="pan,box_zoom,reset,save",
                title="Person_Person Relationships for Person: " + name,
                output_backend="webgl")
    n4.multi_line('xs', 'ys', line_color="blue", source=s7, alpha=0.3)
    c5 = n4.circle('vx', 'vy', size=20, line_color="black", fill_color='color', source=s8, alpha=0.5)
    n4.text('vx', 'vy', text='labels', text_color="black", text_font_size="8px", text_align="center",
            text_baseline="middle", source=s8)


    #update visualizations when button group changes
    def button_group_update(attr, old, new):
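        # Switching entity type rescales the index slider and re-renders via
        # whichever of the slider or the text box currently drives the selection.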
        b = button_group.active
        entity = entities[b]
        d = dfs[b]
        t = text.value
        #change slider
        slider.end = len(d.index) - 1
        if (slider.value>slider.end):
            slider.value = slider.end

        if t == '':
            slider_update(attr, old, new)
        else:
            text_update(attr, old, new)

    button_group.on_change("active", button_group_update)

    def slider_update(attr,old,new):
        text.value = ''

        b = button_group.active
        entity = entities[b]
        d = dfs[b]
        s = slider.value

        # clear the visualizations
        sources = [[s1, s2], [s3, s4], [s5, s6], [s7, s8]]
        networks = [n1, n2, n3, n4]
        s_source.data = dict(x=[], y=[], f=[], h=[], d=[])
        for i in range(4):
            sources[i][0].data = dict(xs=[], ys=[])
            sources[i][1].data = dict(vx=[], vy=[], labels=[], color=[])

        name = d.iloc[s][entity]

        # update co-occurrence relationships
        for i in range(3):
            v = d.at[s, relations[b][i]]
            edges = ast.literal_eval(v)
            nodes = [name]
            for e in edges:
                nodes.append(e[1])
            g = create_graph(nodes, edges, [name])
            sources[i + 1][0].data = g["s1"]
            sources[i + 1][1].data = g["s2"]
            networks[i + 1].title.text = relations[b][i] + " Relationships for " + entity + ": " + name

        #update timeline plot
        files = d.iloc[s]["Documents"]
        dates = []
        y_n = []
        headlines = []
        datestr = []
        select = select1.value
        if files == "None":
            files = []
            f1.title.text = "No files found for '" + name + "'"
        else:
            files = ast.literal_eval(files)
            for f in files:
                v_d = df.loc[df["filename"] == f]["date"].values
                datestr.append(v_d[0])
                dates.append(datetime.strptime(v_d[0], "%m/%d/%Y"))
                v_p = df.loc[df["filename"] == f][select].values
                y_n.append(len(ast.literal_eval(v_p[0])))
                v_h = df.loc[df["filename"] == f]["headlines"].values
                headlines.append(v_h[0])
        dates = np.array(dates)
        n = np.array(y_n)
        headlines = np.array(headlines)
        s_source.data = dict(x=dates, y=n, f=files, h=headlines, d=datestr)
        f1.title.text = "Timeline for '" + name + "'"
        return

    slider.on_change("value", slider_update)

    def text_update(attr,old,new):
        b = button_group.active
        entity = entities[b]
        print(entity)
        d = dfs[b]
        t = text.value
        slider.value = 0

        # clear the visualizations
        sources = [[s1, s2], [s3, s4], [s5, s6], [s7, s8]]
        networks = [n1, n2, n3, n4]
        s_source.data = dict(x=[], y=[], f=[], h=[], d=[])
        for i in range(4):
            sources[i][0].data = dict(xs=[], ys=[])
            sources[i][1].data = dict(vx=[], vy=[], labels=[], color=[])

        name = t
        if t in np.array(d[entity]):
            s = d[d[entity] == t].index.tolist()[0]
        else:
            f1.title.text = "No such " + entity + " in the datasets"
            for x in networks:
                x.title.text = "No such " + entity + " in the datasets"
            return

        # update co-occurrence relationships
        for i in range(3):
            v = d.at[s, relations[b][i]]
            edges = ast.literal_eval(v)
            nodes = [name]
            for e in edges:
                nodes.append(e[1])
            g = create_graph(nodes, edges, [name])
            sources[i + 1][0].data = g["s1"]
            sources[i + 1][1].data = g["s2"]
            networks[i + 1].title.text = relations[b][i] + " Relationships for " + entity + ": " + name

        # update timeline plot
        files = d.iloc[s]["Documents"]
        dates = []
        y_n = []
        headlines = []
        datestr = []
        select = select1.value
        if files == "None":
            files = []
            f1.title.text = "No files found for '" + name + "'"
        else:
            files = ast.literal_eval(files)
            for f in files:
                v_d = df.loc[df["filename"] == f]["date"].values
                datestr.append(v_d[0])
                dates.append(datetime.strptime(v_d[0], "%m/%d/%Y"))
                v_p = df.loc[df["filename"] == f][select].values
                y_n.append(len(ast.literal_eval(v_p[0])))
                v_h = df.loc[df["filename"] == f]["headlines"].values
                headlines.append(v_h[0])
        dates = np.array(dates)
        n = np.array(y_n)
        headlines = np.array(headlines)
        s_source.data = dict(x=dates, y=n, f=files, h=headlines, d=datestr)
        f1.title.text = "Timeline for '" + name + "'"
        return

    text.on_change("value",text_update)

    widgets = widgetbox(button_group, slider, text)
    layout = column(widgets, row(n2, n3, n4), select1, row(f1, n1))

    curdoc().add_root(layout)
    show(layout)
コード例 #40
0
        clf = KNeighborsClassifier(n_neighbors=Kn.value, p=Kd.value)
        n_feats = f.shape[1]
        y = []
        for i in range(n_feats):
            b_trs = b_tr.drop([i], axis=1)
            b_tss = b_ts.drop([i], axis=1)
            clf.fit(b_trs, y_tr)
            y.append(clf.score(b_tss, y_ts))
    source.data = dict(x=x, y=y)


controls = [Test, Plot, model, Kn, Kd, Rn, Rmd, Rmf]

for control in controls[1:]:
    control.on_change('value', lambda attr, old, new: update(f))
Test.on_change('value', lambda attr, old, new: call())
inputs = column(*controls, width=320, height=1000)
inputs.sizing_mode = "fixed"
l = layout([
    [desc],
    [inputs, plot],
], sizing_mode="scale_both")

#plot = figure(y_range=(0, 2),plot_width=400, plot_height=400)
#plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)

#layout=column(plot,n)
#n.on_change('value', lambda attr, old, new: call())
call()
update(f)
curdoc().add_root(l)
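The loop above scores a KNN classifier with one feature dropped at a time; the snippet's `f`, `b_tr`/`b_ts` and label arrays come from code outside this fragment. A self-contained sketch of the same leave-one-feature-out idea (the data split and names here are illustrative assumptions, not the app's variables):

import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

def drop_one_feature_scores(X: pd.DataFrame, y, n_neighbors=5, p=2):
    """Accuracy of a KNN model with each feature removed in turn."""
    X_tr, X_ts, y_tr, y_ts = train_test_split(X, y, test_size=0.3, random_state=0)
    scores = []
    for col in X.columns:
        clf = KNeighborsClassifier(n_neighbors=n_neighbors, p=p)
        clf.fit(X_tr.drop(columns=[col]), y_tr)
        scores.append(clf.score(X_ts.drop(columns=[col]), y_ts))
    return scores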
コード例 #41
0
                              )

T_select = Slider(start=0.0,
                  end=1000.0,
                  step=1.0,
                  value=10.0,
                  title='T'
                  )

r0_select = TextInput(value='1.00', title='r0')
drdt0_select = TextInput(value='0.00', title='dr/dt_0')
theta0_select = TextInput(value='0.00', title='theta0')
dthetadt0_select = TextInput(value='1.00', title='dtheta/dt_0')

# Update the plot when yields are changed
mass_select.on_change('value', update)
mass_attractive_center_select.on_change('value', update)
exponent_select.on_change('value', update)

n_steps_ode_select.on_change('value', update)
solver_method_select.on_change('value', update)
T_select.on_change('value', update)

r0_select.on_change('value', update)
drdt0_select.on_change('value', update)
theta0_select.on_change('value', update)
dthetadt0_select.on_change('value', update)

div = Div(text='', width=150, height=50)
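The `update` callback wired to all of these widgets is not part of this fragment. A minimal sketch of what such a callback could look like, assuming an inverse-square central force and a ColumnDataSource named `source` for the orbit (both assumptions, not the original code):

import numpy as np
from scipy.integrate import solve_ivp

def update(attr, old, new):
    # read initial conditions from the TextInput/Slider widgets defined above
    r0 = float(r0_select.value)
    drdt0 = float(drdt0_select.value)
    theta0 = float(theta0_select.value)
    dthetadt0 = float(dthetadt0_select.value)
    T = float(T_select.value)

    def rhs(t, s):
        # s = [r, dr/dt, theta, dtheta/dt]; attractive 1/r**2 force with unit constants (assumed)
        r, dr, th, dth = s
        return [dr, r * dth**2 - 1.0 / r**2, dth, -2.0 * dr * dth / r]

    sol = solve_ivp(rhs, (0.0, T), [r0, drdt0, theta0, dthetadt0],
                    t_eval=np.linspace(0.0, T, 2000))
    # convert to Cartesian coordinates and push to the (assumed) data source
    source.data = dict(x=sol.y[0] * np.cos(sol.y[2]), y=sol.y[0] * np.sin(sol.y[2]))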
コード例 #42
0
ファイル: main.py プロジェクト: sourabhsc/NULIRG_fit_app
         size=25,
         color="red",
         alpha=1.0)

    p = gridplot([[p1, p2, p6], [p3, p4, p5], [p7]])

    return row(
        p,
        column(widgetbox(button),
               widgetbox(table_values(ulirg, x_plot, y_plot)), widgetbox(pre)))


def update(attr, old, new):

    layout.children[1] = row(create_figure())


ulirg = Select(title='Select ULIRG:', value="1", options=ulirg_num)
ulirg.on_change('value', update)

x_plot = TextInput(value="60", title="x_value:")
x_plot.on_change('value', update)

y_plot = TextInput(value="75", title="y_value:")
y_plot.on_change('value', update)

controls = column(widgetbox([ulirg, x_plot, y_plot]), width=250)
layout = row(controls, create_figure())

curdoc().add_root(layout)
curdoc().title = "LYA ULIRG plotting "
コード例 #43
0
scalemax = Slider(title="Singles Scale maximum",
                  value=10.0,
                  start=-100.0,
                  end=100.0,
                  step=1)
statsA = Paragraph(text="100", width=400, height=40)
statsB = Paragraph(text="100", width=400, height=40)


# Set up callbacks
def send_command(attrname, old, new):
    #TODO turn into a raw command area for sending any device command
    plot.title.text = command.value


command.on_change('value', send_command)

last_time = time.time()


def update_data():
    # TODO: store data in a stream for charting vs time

    global last_time
    T = time.time() - last_time
    last_time = time.time()
    #print(T)

    # get data:
    if realInstrument:
        data = inst.get_data("CH2")  # select channel later on
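The fragment defines `update_data()` but the scheduling call is cut off; in a Bokeh server app a polling loop like this is normally registered as a periodic callback, for example (the interval is an assumption):

from bokeh.io import curdoc

curdoc().add_periodic_callback(update_data, 100)  # poll the instrument every 100 ms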
コード例 #44
0
ファイル: mc_bokeh.py プロジェクト: steve98654/bokeh-demos
class StockApp(VBox):
    extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
    jsmodel = "VBox"

    Y = Instance(ColumnDataSource)

    # plots
    hist1 = Instance(Plot)
    hist2 = Instance(Plot)
    hist3 = Instance(Plot)

    # data source
    source = Instance(ColumnDataSource)
    risk_source = Instance(ColumnDataSource)

    # layout boxes
    mainrow = Instance(HBox)
    ticker1_box = Instance(HBox)
    ticker2_box = Instance(HBox)
    ticker3_box = Instance(HBox)
    ticker4_box = Instance(HBox)
    ticker5_box = Instance(HBox)
    second_row = Instance(HBox)
    histrow = Instance(HBox)

    # inputs
    ticker1  = String(default="1.2*(1.1-x)")
    ticker1p = String(default="-1.2")
    ticker2 = String(default="4.0")
    ticker2p = String(default="0.0")
    ticker3 = String(default="500")
    ticker3_1 = String(default="252")
    ticker3_2 = String(default="0.01")
    ticker4 = String(default="100")
    ticker4_1 = String(default="1.01")
    ticker4_2 = String(default="Milstein")
    button = String(default="")
    ticker1_select = Instance(TextInput)
    ticker1p_select = Instance(TextInput)
    ticker2_select = Instance(TextInput)
    ticker2p_select = Instance(TextInput)
    ticker3_select = Instance(TextInput)
    ticker3_1_select = Instance(TextInput)
    ticker3_2_select = Instance(TextInput)
    ticker4_select = Instance(TextInput)
    ticker4_1_select = Instance(TextInput)
    ticker4_2_select = Instance(Select)
    button_select = Instance(TextInput)
    input_box = Instance(VBoxForm)

    def __init__(self, *args, **kwargs):
        super(StockApp, self).__init__(*args, **kwargs)
        self._dfs = {}

    @classmethod
    def create(cls):
        """
        This function is called once, and is responsible for
        creating all objects (plots, datasources, etc)
        """
        # create layout widgets
        obj = cls()
        obj.mainrow = HBox()
        obj.ticker1_box = HBox(width=500)
        obj.ticker2_box = HBox(width=500)
        obj.ticker3_box = HBox(width=467)
        obj.ticker4_box = HBox(width=500)
        obj.ticker5_box = HBox(width=500)
        obj.second_row = HBox()
        obj.histrow = HBox()
        obj.input_box = VBoxForm(width=600)

        # create input widgets
        obj.make_inputs()

        # outputs
        #obj.make_source()
        obj.main_mc(252,500,0.01,'Milstein',1.01)
        obj.make_plots()

        # layout
        obj.set_children()
        return obj

    def make_inputs(self):

        self.ticker1_select = TextInput(
            name='ticker1',
            title='Drift Function:',
            value='1.2*(1.1-x)',
        )
        self.ticker1p_select = TextInput(
            name='ticker1p',
            title='Drift Derivative:',
            value='-1.2',
        )
        self.ticker2_select = TextInput(
            name='ticker2',
            title='Volatility Function:',
            value='4.0',
        )
        self.ticker2p_select = TextInput(
            name='ticker2p',
            title='Volatility Derivative:',
            value='0.0',
        )
        self.ticker3_select = TextInput(
            name='ticker3',
            title='Number of Paths:',
            value='500'
        )
        self.ticker3_1_select = TextInput(
            name='ticker3_1',
            title='Number of Points:',
            value='252'
        )
        self.ticker3_2_select = TextInput(
            name='ticker3_2',
            title='Time Step:',
            value='0.01'
        )
        self.ticker4_select = TextInput(
            name='ticker4',
            title='Histogram Line:',
            value='100'
        )
        self.ticker4_1_select = TextInput(
            name='ticker4_1',
            title='Initial Value:',
            value='1.01'
        )
        self.ticker4_2_select = Select(
            name='ticker4_2',
            title='MC Scheme:',
            value='Milstein',
            options=['Euler','Milstein', 'Pred/Corr']
        )
        self.button_select = TextInput(
            name='button',
            title='Type any word containing "run" to run Simulation ',
            value = ''
        )

    
    def make_source(self):
        self.source = ColumnDataSource(data=self.Y)

    def main_mc(self,num_pts,num_paths, delta_t, method, Y0):
        def a(x):
            return eval(self.ticker1)
        def ap(x):
            return eval(self.ticker1p)
        def b(x): 
            return eval(self.ticker2)
        def bp(x):
            return eval(self.ticker2p)

        rpaths = np.random.normal(0, delta_t, size=(num_pts,num_paths))
        Y = np.array([[Y0]*num_paths]) 
        dt_vec = np.array([delta_t]*num_paths)

        if method == 'Milstein':
            for i in xrange(0,num_pts):
                tY = Y[-1,:]
                dW = rpaths[i,:]
                Y = np.vstack([Y, tY + a(tY)*dt_vec + b(tY)*dW + 0.5*b(tY)*bp(tY)*(dW*dW-dt_vec)])

        elif method == 'Pred/Corr':
            # Predictor corrector method is taken from equation 2.6 in this paper:
            # http://www.qfrc.uts.edu.au/research/research_papers/rp222.pdf
            rpaths2 = np.random.normal(0, delta_t, size=(num_pts,num_paths))

            for i in xrange(0,num_pts):
                tY = Y[-1,:]
                Ybar = tY + a(tY)*dt_vec + b(tY)*rpaths[i,:]
                abar_before = a(tY) - 0.5*b(tY)*bp(tY)  
                abar_after = a(Ybar) - 0.5*b(Ybar)*bp(Ybar)  
                Y = np.vstack([Y, tY + 0.5*(abar_before + abar_after)*dt_vec + 0.5*(b(tY)+b(Ybar))*rpaths2[i,:]])

        else:  # default to Euler Scheme 
            for i in xrange(0,num_pts):
                tY = Y[-1,:]
                Y = np.vstack([Y, tY + a(tY)*dt_vec + b(tY)*rpaths[i,:]])
    
        return Y  # return simulated paths 
    
    def path_plot(self):
        num_paths_plot = min(50,int(self.ticker3))
        hist_point = int(self.ticker4)
        #print 'Hist Point ', hist_point
        Y = self.Y.as_matrix()
        pltdat = Y[:,0:num_paths_plot]
        mY, MY = min(Y[hist_point,:]), max(Y[hist_point,:])

        plt.plot(pltdat, alpha=0.1, linewidth=1.8)
        sns.tsplot(pltdat.T,err_style='ci_band', ci=[68,95,99], alpha=1, \
                linewidth = 2.5, color='indianred')
        #sns.tsplot(pltdat.T,err_style='ci_band', ci=[68,95,99,99.99999], alpha=1, \
        #        linewidth = 2.5, condition='Mean Path', color='indianred')
        plt.plot([hist_point, hist_point], [0.99*mY, 1.01*MY], 'k-',label='Time Series Histogram')
        plt.xlabel('Time Step')
        plt.ylabel('Price')
        #plt.legend()

        p = mpl.to_bokeh()
        p.title = 'Mean Path (Red), MC Paths (Background) and Density Line (Black)'
        p.title_text_font_size= str(TITLE_SIZE)+'pt'

        return p

    def hist_den_plot(self):
        Y = self.Y.as_matrix()
        hist_point = int(self.ticker4)
        delta_t = float(self.ticker3_2)

        data = Y[hist_point,:]
        sns.distplot(data, color='k', hist_kws={"color":"b"}, norm_hist=True)
        #sns.distplot(data, color='k', hist_kws={"color":"b"})
        plt.hist(data)
        plt.title('Distribution at T = ' + str(np.round(delta_t*hist_point,4)) + ' with Mean: ' +str(np.round(np.mean(data),4)) + ' and Std Dev: ' + str(np.round(np.std(data),4)))
        plt.xlabel('Price Bins')
        plt.ylabel('Bin Count')
       
        p = mpl.to_bokeh()
        p.title_text_font_size= str(TITLE_SIZE)+'pt'

        return p

    def mc_results(self):
        # Compute Monte Carlo results 
        Y = self.Y.as_matrix()
        Y0 = float(self.ticker4_1)
        hist_point = int(self.ticker4)
        num_paths = int(self.ticker3)

        center_point = np.mean(Y[hist_point,:])
        stkgrid = np.linspace(0.5*center_point,1.5*center_point,100)
        meanlst = np.array([])
        stdlst  = np.array([])
        paylst  = np.array([])

        for stk in stkgrid:
            meanlst = np.append(meanlst, np.mean(payoff(Y[hist_point,:],stk)))
            stdlst = np.append(stdlst,np.std(payoff(Y[hist_point,:],stk))/np.sqrt(num_paths))

        plt.plot(stkgrid,meanlst+2*stdlst, 'g-')
        plt.plot(stkgrid,meanlst-2*stdlst,'g-',label='2-Sig Error')
        plt.plot(stkgrid,meanlst+stdlst,'r-')
        plt.plot(stkgrid,meanlst-stdlst,'r-',label='1-Sig Error')
        plt.plot(stkgrid,meanlst,'b',label='Mean')
        plt.title('MC Option Price (Blue) with 1-Sig (Red) and 2-Sig (Green) Errors')
        plt.xlabel('Strike')
        plt.ylabel('Value')

        p = mpl.to_bokeh()
        p.title_text_font_size= str(TITLE_SIZE)+'pt'

        return p

    def hist_plot(self):
        histdf = pd.DataFrame(np.random.randn(100, 4), columns=list('ABCD'))
        #pltdf = self.source.to_df().set_index('date').dropna()
        #qlow, qhigh = mquantiles(pltdf[ticker],prob=[0.01,0.99]) 
        #tdf = pltdf[ticker]
        #histdf = tdf[((tdf > qlow) & (tdf < qhigh))]
        hist, bins = np.histogram(histdf, bins=50)
        width = 0.7 * (bins[1] - bins[0])
        center = (bins[:-1] + bins[1:]) / 2
        start = bins.min()
        end = bins.max()
        top = hist.max()

        p = figure(
            title=' Histogram',
            plot_width=600, plot_height=400,
            tools="",
            title_text_font_size="16pt",
            x_range=[start, end],
            y_range=[0, top],
            x_axis_label = ' Bins',
            y_axis_label = 'Bin Count' 
        )
        p.rect(center, hist / 2.0, width, hist)
        return p

    def make_plots(self):

        self.hist_plots()

    def hist_plots(self):
        self.hist1 = self.path_plot()
        self.hist2 = self.hist_den_plot()
        self.hist3 = self.mc_results()

    def set_children(self):
        self.children = [self.mainrow, self.second_row]
        self.mainrow.children = [self.input_box, self.hist1]
        self.second_row.children = [self.hist2, self.hist3]
        self.input_box.children = [self.ticker1_box, self.ticker2_box, self.ticker3_box,self.ticker4_box,self.ticker5_box]
        self.ticker1_box.children =[self.ticker1_select, self.ticker1p_select]
        self.ticker2_box.children =[self.ticker2_select, self.ticker2p_select]
        self.ticker3_box.children =[self.ticker3_select, self.ticker3_1_select, self.ticker3_2_select]
        self.ticker4_box.children =[self.ticker4_select, self.ticker4_1_select, self.ticker4_2_select]
        self.ticker5_box.children =[self.button_select]

    def input_change(self, obj, attrname, old, new):
        if obj == self.ticker4_2_select:
            self.ticker4_2 = new
        if obj == self.ticker4_1_select:
            self.ticker4_1 = new
        if obj == self.ticker4_select:
            self.ticker4 = new
        if obj == self.ticker3_2_select:
            self.ticker3_2 = new
        if obj == self.ticker3_1_select:
            self.ticker3_1 = new
        if obj == self.ticker3_select:
            self.ticker3 = new
        if obj == self.ticker2p_select:
            self.ticker2p = new
        if obj == self.ticker2_select:
            self.ticker2 = new
        if obj == self.ticker1p_select:
            self.ticker1p = new
        if obj == self.ticker1_select:
            self.ticker1 = new
        if obj == self.button_select:
            self.button = new 
            if 'run' in self.button:
                self.make_source()
                self.make_plots()
                self.set_children()
                curdoc().add(self)

        #self.make_source()
        #self.make_plots()
        #self.set_children()
        #curdoc().add(self)

    def setup_events(self):
        super(StockApp, self).setup_events()
        if self.ticker1_select:
            self.ticker1_select.on_change('value', self, 'input_change')
        if self.ticker1p_select:
            self.ticker1p_select.on_change('value', self, 'input_change')
        if self.ticker2_select:
            self.ticker2_select.on_change('value', self, 'input_change')
        if self.ticker2p_select:
            self.ticker2p_select.on_change('value', self, 'input_change')
        if self.ticker3_select:
            self.ticker3_select.on_change('value', self, 'input_change')
        if self.ticker3_1_select:
            self.ticker3_1_select.on_change('value', self, 'input_change')
        if self.ticker3_2_select:
            self.ticker3_2_select.on_change('value', self, 'input_change')
        if self.ticker4_select:
            self.ticker4_select.on_change('value', self, 'input_change')
        if self.ticker4_1_select:
            self.ticker4_1_select.on_change('value', self, 'input_change')
        if self.ticker4_2_select:
            self.ticker4_2_select.on_change('value', self, 'input_change')
        if self.button_select:
            self.button_select.on_change('value',self, 'input_change')

    @property
    def Y(self):
        tmpdf = pd.DataFrame(self.main_mc(int(self.ticker3_1),int(self.ticker3),float(self.ticker3_2),self.ticker4_2,float(self.ticker4_1)))
        tmpdf.columns = ['Col_' + str(i) for i in xrange(len(tmpdf.columns))]
        #print tmpdf
        return tmpdf
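`mc_results()` above calls a `payoff()` helper that is not included in this snippet. A plausible definition, assuming a European call payoff (the payoff actually used by the app may differ):

import numpy as np

def payoff(prices, strike):
    """Vectorized European call payoff max(S - K, 0)."""
    return np.maximum(np.asarray(prices) - strike, 0.0)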
コード例 #45
0
ファイル: Test6.py プロジェクト: skianzad/MagicPen
Speed = Slider(title="Speed", value=250, start=100, end=250)
Delay = Slider(title="Delay", value=1, start=1, end=100)

CurveList = [("Sin", "C1"), ("Poly", "C2"), ("Abs", "C3"), ("inv/inf", "C5"),
             ("Frq", "C6"), ("Custom", "C4")]
dropdown = Dropdown(label="Curve Lists", button_type="warning", menu=CurveList)

button = Button(label="Run ", button_type="success")


def update_title(attrname, old, new):
    plot.title.text = text.value
    x = np.linspace(-4 * np.pi, 4 * np.pi, N)


text.on_change('value', update_title)

div = Div(width=1000)


def change_output(attr, old, new):
    global Del
    global Sp
    Del = Delay.value
    Sp = Speed.value


def display_event(div):
    return CustomJS(args=dict(div=div),
                    code="""
    console.log("run")
コード例 #46
0
def load_page(plate):
    '''
    Load new page
    '''
    global well_id 
    well_id = (0, 0)
    
    global sample 
    sample = plate[well_id]
    
    # Button to upload local file
    global file_source
    file_source = ColumnDataSource(data=dict(file_contents = [], file_name = []))
    file_source.on_change('data', file_callback)
    try:
        output_filename = file_source.data['file_name'] + '-out.csv'
    except Exception:
        output_filename = 'output.csv'
    global upload_button
    upload_button = Button(label="Upload local file", button_type="success", width=200, height=30)
    upload_button.js_on_click(CustomJS(args=dict(file_source=file_source),
                               code=open(join(dirname(__file__), "upload.js")).read()))
    
    # Text boxes for setting fit parameters
    global bottom_set_text
    bottom_set_text = TextInput(value='', title="Set initial value for Fmin", width=200, height=50)
    bottom_set_text.on_change('value', parameter_set_callback)

    global top_set_text
    top_set_text = TextInput(value='', title="Set initial value for Fmax", width=200, height=50)
    top_set_text.on_change('value', parameter_set_callback)
    
    global slope_set_text
    slope_set_text = TextInput(value='', title="Set initial value for a", width=200, height=50)
    slope_set_text.on_change('value', parameter_set_callback)
    
    # Radio button group for setting plate type
    global plate_type_buttons
    global plate_type
    plate_type_buttons = RadioButtonGroup(labels=['96 well', '384 well'], 
                                          width=200, height=25, active=plate_type)
    plate_type_buttons.on_change('active', plate_type_callback)
    
    # Radio button group for setting data layout
    global plate_layout_buttons
    global plate_layout
    plate_layout_buttons = RadioButtonGroup(labels=['by row', 'by column'],
                                           width=200, height=25, active=plate_layout)
    plate_layout_buttons.on_change('active', plate_layout_callback)
    
    # Checkbox groups for fixing fit parameters
    global fix_bottom_checkbox
    fix_bottom_checkbox = CheckboxButtonGroup(labels=['Fix min fluorescence (Fmin)'],
                                              width=200, height=30)
    fix_bottom_checkbox.on_change('active', parameter_set_callback)
    
    global fix_top_checkbox
    fix_top_checkbox = CheckboxButtonGroup(labels=['Fix max fluorescence (Fmax)'], width=200, height=30)
    fix_top_checkbox.on_change('active', parameter_set_callback)
    
    global fix_slope_checkbox
    fix_slope_checkbox = CheckboxButtonGroup(labels=['Fix curve shape parameter (a)'], width=200, height=30)
    fix_slope_checkbox.on_change('active', parameter_set_callback)
    
    # Slider for selecting data to fit
    global df
    xmin = df[df.columns[0]].values[0]
    xstep = df[df.columns[0]].values[1] - xmin
    xstart = sample.data['x'].values[0]
    xend = sample.data['x'].values[-1]
    xmax = df[df.columns[0]].values[-1]
    
    global range_slider
    range_slider = RangeSlider(start=xmin, end=xmax, value=(xstart, xend),
                    step=xstep,
                    title='Fine tune temperature range', width=550)
    range_slider.on_change('value', slider_callback)
    
    # Scatter plot for fitting individual samples
    global sample_source
    sample_source = ColumnDataSource(data=dict(x=sample.data.x, y=sample.data.y, 
                                        fit=sample.y_fit, residuals=sample.residuals))
    global sample_scatter
    plot_tools = 'wheel_zoom, pan, reset, save'
    sample_scatter = figure(title="Boltzman sigmoidal fit", x_axis_label='Temperature ('+degree_sign+'C)',
                            y_axis_label="Fluoresence intensity", plot_width=600, 
                            plot_height=300, tools=plot_tools)
    sample_scatter.circle(x='x', y='y', color='grey', size=8, alpha=0.6, source=sample_source)
    sample_scatter.line(x='x', y='fit', color='black', line_width=2, 
                        alpha=1.0, source=sample_source)
    sample_scatter.title.text = sample.name + ' fit'
    
    # Scatter plot for residuals of individual sample fit
    global residual_scatter
    residual_scatter = figure(title="Fit residuals", x_axis_label='Temperature ('+degree_sign+'C)',
                              y_axis_label="Residual", plot_width=600, 
                              plot_height=200, tools='wheel_zoom,pan,reset')
    residual_scatter.yaxis.formatter = BasicTickFormatter(precision=2, use_scientific=True)
    residual_scatter.circle('x', 'residuals', size=8, source=sample_source, 
                            color='grey', alpha=0.6)
    
    # Heatmap for displaying all Tm values in dataset
    global plate_source
    letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']
    w, n, t, e = [], [], [], []
    if plate_type_buttons.active == 1:
        rows = 16
        columns = 24
    else:
        rows = 8
        columns = 12
    
    for i in range(rows):
        for j in range(columns):
            w.append(letters[i]+str(j+1))
            try:
                n.append(plate[(i, j)].name)
                t.append(plate[(i, j)].v50_fit)
                e.append(plate[(i, j)].v50_err)
            except:
                n.append('')
                t.append(np.nan)
                e.append(np.nan)
                
    xname = [x[1:] for x in w]
    yname = [y[0] for y in w]
    
    plate_source = ColumnDataSource(dict(w=w, n=n, t=t, e=e, xname=xname, yname=yname)) 
    plate_columns = [
        TableColumn(field='w', title='Well ID'),
        TableColumn(field='n', title='Sample name'),
        TableColumn(field='t', title='Tm ('+degree_sign+'C)'),
        TableColumn(field='e', title='Error ('+degree_sign+'C)'),
    ]
    
    plate_map_hover = HoverTool(tooltips="""
        <div>
                <div>
                        <span style="font-size: 14px; font-weight: bold; ">@n:</span>
                        <span style="font-size: 14px; font-weight: bold; ">@t</span>
                </div>
        </div>
        """
    )
    
    if plate_type_buttons.active == 1:
        plate_map = figure(title="Plate view", x_axis_location="above", height=400, width=620, 
                       tools=["save, tap, reset", plate_map_hover], 
                       x_range=[str(x+1) for x in range(0, columns)]+['', 'Tm ('+degree_sign+'C)'],
                       y_range=letters[:rows][::-1])
    else:
        plate_map = figure(title="Plate view", x_axis_location="above", height=400, width=620, 
                       tools=["save, tap, reset", plate_map_hover], 
                       x_range=[str(x+1) for x in range(0, columns)]+['Tm ('+degree_sign+'C)'], 
                       y_range=letters[:rows][::-1])
        
    taptool = plate_map.select(type=TapTool)
    plate_map.on_event(Tap, plate_select)
    
    global mapper
    mapper = linear_cmap(field_name='t', palette=RdBu[8], low=min(t), high=max(t))
    
    global color_bar
    color_bar = ColorBar(color_mapper=mapper['transform'], width=10, height=250, name='Tm ('+degree_sign+'C)')
    plate_map.add_layout(color_bar, 'right')
    
    plate_map.grid.grid_line_color = None
    plate_map.axis.axis_line_color = None
    plate_map.axis.major_tick_line_color = None
    plate_map.axis.major_label_text_font_size = "10pt"
    plate_map.axis.major_label_standoff = 0
    plate_map.rect('xname', 'yname', .95, .95, source=plate_source,
            color=mapper, line_color='black', line_width=1)
    
    # Table listing all Tm values in dataset
    global plate_table
    plate_table = DataTable(source=plate_source, columns=plate_columns, width=500,
                            height=500, selectable=True, editable=True)
    plate_table.source.selected.on_change('indices', table_select)
    
    # Table showing fitting parameters for current sample
    global sample_table_source
    sample_table_source = ColumnDataSource(data=dict(l=['Fit value', 'Std. error'],
                                                     b=[sample.bottom_fit, sample.bottom_err],
                                                     t=[sample.top_fit, sample.top_err],
                                                     v=[sample.v50_fit, sample.v50_err],
                                                     s=[sample.slope_fit, sample.slope_err])) 
    sample_table_columns = [
        TableColumn(field='l', title=''),
        TableColumn(field='b', title='Fmin'),
        TableColumn(field='t', title='Fmax'),
        TableColumn(field='v', title='Tm ('+degree_sign+'C)'),
        TableColumn(field='s', title='a')
    ]
    global sample_table
    sample_table = DataTable(source=sample_table_source, columns=sample_table_columns, width=600,
                            height=200, selectable=False, editable=False)
   
    # Button to re-fit all with current parameter settings
    global refit_all_button
    refit_all_button = Button(label="Re-fit all samples", 
                              button_type='danger', width=200, height=30)
    refit_all_button.on_click(refit_all_callback)
    
    # Button to download Tm table to csv file
    global download_button
    download_button = Button(label="Download table to CSV", 
                             button_type="primary", width=200, height=30)
    download_button.js_on_click(CustomJS(args=dict(source=plate_source, file_name=output_filename), 
                                        code=open(join(dirname(__file__), "download.js")).read()))

    # Button to copy Tm table to clipboard
    global copy_button
    copy_button = Button(label="Copy table to clipboard", button_type="primary", 
                         width=200, height=30)
    copy_button.js_on_click(CustomJS(args=dict(source=plate_source),
                               code=open(join(dirname(__file__), "copy.js")).read()))

    # page formatting
    desc = Div(text=open(join(dirname(__file__), "description.html")).read(), width=1200)
    main_row = row(column(plate_type_buttons, plate_layout_buttons, upload_button, 
                          fix_bottom_checkbox, bottom_set_text, fix_top_checkbox, 
                          top_set_text, fix_slope_checkbox, slope_set_text, refit_all_button,
                          download_button, copy_button),
                   column(sample_scatter, residual_scatter, range_slider, sample_table),
                   column(plate_map, plate_table))
        
    sizing_mode = 'scale_width'
    l = layout([
        [desc],
        [main_row]
    ], sizing_mode=sizing_mode)
    
    update()
    curdoc().clear()
    curdoc().add_root(l)
    curdoc().title = "DSF"
コード例 #47
0
ファイル: taylor_server.py プロジェクト: xnx/bokeh
def on_slider_value_change(attr, old, new):
    global order
    order = int(new)
    update_data()

def on_text_value_change(attr, old, new):
    try:
        global expr
        expr = sy.sympify(new, dict(x=xs))
    except (sy.SympifyError, TypeError, ValueError) as exception:
        dialog.content = str(exception)
        dialog.visible = True
    else:
        update_data()

dialog = Dialog(title="Invalid expression")

slider = Slider(start=1, end=20, value=order, step=1, title="Order",callback_policy='mouseup')
slider.on_change('value', on_slider_value_change)

text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', on_text_value_change)

inputs = WidgetBox(children=[slider, text],width=400)
layout = Column(children=[inputs, plot, dialog])
update_data()
document.add_root(layout)
session.show(layout)

if __name__ == "__main__":
    print("\npress ctrl-C to exit")
    session.loop_until_closed()
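`update_data()` itself is outside this fragment. A minimal sketch of how the truncated Taylor approximation could be evaluated from `expr` and `order` with SymPy (the names and the expansion point 0 are assumptions):

import numpy as np
import sympy as sy

def taylor_values(expr, order, xs):
    x = sy.Symbol('x')
    approx = sy.series(expr, x, 0, order + 1).removeO()  # Taylor polynomial about x = 0
    f = sy.lambdify(x, approx, modules='numpy')
    # multiply by ones so a constant approximation still broadcasts to the grid shape
    return f(xs) * np.ones_like(np.asarray(xs), dtype=float)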
コード例 #48
0
ファイル: main.py プロジェクト: alxsoares/zjsxzy_in_js
    TableColumn(field="name", title=u"组合"),
    TableColumn(field="net value", title=u"单位净值"),
    TableColumn(field="year return", title=u"今年以来业绩"),
    TableColumn(field="total return", title=u"成立以来业绩"),
    TableColumn(field="max drawdown", title=u"最大回撤"),
    TableColumn(field="sharpe", title=u"夏普率"),
    TableColumn(field="volatility", title=u"波动率")
]

data_table = DataTable(source=source, columns=columns, width=900, height=500)

today = datetime.datetime.now()
yesterday = today - datetime.timedelta(1)
yesterday = yesterday.strftime("%Y-%m-%d")
time_text = TextInput(value=yesterday, title="提取时间", width=300)
time_text.on_change('value', lambda attr, old, new: update_excel())

portfolio_select = Select(value=portfolio_selection[0],
                          title="组合",
                          width=300,
                          options=portfolio_selection)
portfolio_select.on_change('value', lambda attr, old, new: update_plot())

update_excel()
update_plot()

update_time_text = TextInput(value=today.strftime("%Y-%m-%d"),
                             title=u"更新日期",
                             width=300)
update_button = Button(label=u"更新数据", width=300, button_type="success")
update_row = row(update_time_text, update_button)
コード例 #49
0
contour_g = my_bokeh_utils.Contour(plot, line_color='red', line_width=2, legend='g(x,y) = 0')
# Plot corresponding tangent vector
quiver_isolevel = my_bokeh_utils.Quiver(plot, fix_at_middle=False, line_width=2, color='black')
# Plot corresponding tangent vector
quiver_constraint = my_bokeh_utils.Quiver(plot, fix_at_middle=False, line_width=2, color='red')
# Plot mark at position on constraint function
plot.cross(x='x', y='y', color='red', size=10, line_width=2, source=source_mark)

# object that detects, if a position in the plot is clicked on
interactor = my_bokeh_utils.Interactor(plot)
# adds callback function to interactor, if position in plot is clicked, call on_selection_change
interactor.on_click(on_selection_change)

# text input window for objective function f(x,y) to be optimized
f_input = TextInput(value=lagrange_settings.f_init, title="f(x,y):")
f_input.on_change('value', f_changed)

# dropdown menu for selecting one of the sample functions
sample_fun_input_f = Dropdown(label="choose a sample function f(x,y) or enter one below",
                              menu=lagrange_settings.sample_f_names)
sample_fun_input_f.on_click(sample_fun_input_f_changed)

# text input window for side condition g(x,y)=0
g_input = TextInput(value=lagrange_settings.g_init, title="g(x,y):")
g_input.on_change('value', g_changed)

# dropdown menu for selecting one of the sample functions
sample_fun_input_g = Dropdown(label="choose a sample function g(x,y) or enter one below",
                              menu=lagrange_settings.sample_g_names)
sample_fun_input_g.on_click(sample_fun_input_g_changed)
コード例 #50
0
ファイル: bug_script.py プロジェクト: liekunyang/SIMS
    #for mytool in Tools:
    #	mytool.plot = figure_obj

    #figure_obj.tools = Tools

    figure_obj.line("x",
                    "y",
                    source=two[0],
                    line_width=2,
                    line_color=colour_list[3])
    figure_obj.line("x",
                    "y",
                    source=two[1],
                    line_width=2,
                    line_color=colour_list[1])

    text = TextInput(title="title", value='my sine wave')
    radio = RadioGroup(labels=["0", "1"], active=0)

    text.on_change('value',
                   lambda attr, old, new, radio=radio, sources=two:
                   update_title(new, radio, sources))

    tabs.append(
        Panel(child=hplot(figure_obj, vform(text, radio)), title="two by two"))

tabs = Tabs(tabs=tabs)
session = push_session(curdoc())
session.show()
session.loop_until_closed()
コード例 #51
0
    x_val, y_val = np.meshgrid(xx, yy)
    # evaluate function at sample points
    u_val = u_fun(x_val, y_val)
    v_val = v_fun(x_val, y_val)

    return x_val, y_val, u_val, v_val, hx


# initialize controls
# text input for input of the vector function [fx(x,y),fy(x,y)]
u_input = TextInput(value=curveintegral_settings.sample_functions[curveintegral_settings.init_fun_key][0],
                    title="fx(x,y):")
v_input = TextInput(value=curveintegral_settings.sample_functions[curveintegral_settings.init_fun_key][1],
                    title="fy(x,y):")

u_input.on_change('value', function_change)
v_input.on_change('value', function_change)

# text input for input of the parametrized curve [cx(t),cy(t)]
cx_input = TextInput(value=curveintegral_settings.sample_curves[curveintegral_settings.init_curve_key][0],
                     title="cx(t):")
cy_input = TextInput(value=curveintegral_settings.sample_curves[curveintegral_settings.init_curve_key][1],
                     title="cy(t):")
# slider controlling the parameter t
parameter_input = Slider(title="t",
                         value=curveintegral_settings.parameter_input_init,
                         start=curveintegral_settings.parameter_min,
                         end=curveintegral_settings.parameter_max,
                         step=curveintegral_settings.parameter_step)

cx_input.on_change('value', curve_change)
コード例 #52
0
def country_tab(list_sp_objs):
    def make_data_set(speeches, country, type_display):
        overall_counter = Counter()
        word_counter = dict()
        dict_of_selected_counters_inp = search_mentions(country)
        dict_of_selected_counters_out = search_is_mentioned_by(country)
        tot_mentions = Counter()
        tot_mentioned_by = Counter()

        for k, val in dict_of_selected_counters_inp.items():
            tot_mentions += Counter(dict(dict_of_selected_counters_inp[k]))

        for k, val in dict_of_selected_counters_inp.items():
            tot_mentioned_by += Counter(dict(dict_of_selected_counters_out[k]))

        sp_country = []
        for s in speeches:
            if s.country == country:
                sp_country.append(s)

        # sp_country = speeches[idx]
        # counts = defaultdict(int)
        for sp in list(sp_country):
            overall_counter += sp.word_frequency

        most_common_words = list(dict(overall_counter.most_common(10)).keys())
        most_common_counter = Counter()
        for mcw in most_common_words:
            most_common_counter[mcw] = overall_counter[mcw]
        word = list(dict(most_common_counter).keys())
        counts = list(dict(most_common_counter).values())

        for w in most_common_words:
            word_counter[w] = Counter()
            for sp in sp_country:
                if sp.word_frequency[w]:
                    add = sp.word_frequency[w]
                else:
                    add = 0
                word_counter[w][sp.year] += add

        years = range(1970, 2016, 1)
        selected_data = dict()
        for w, cnter in word_counter.items():
            selected_data[w] = []
            for yr in years:
                if yr in cnter:
                    count = cnter[yr]
                else:
                    count = float('nan')
                selected_data[w].append(count)
        # print(selected_data)

        multi_counts = [val for val in selected_data.values()]
        multi_years = [list(years)] * len(multi_counts)
        colors = word_colors[:len(multi_counts)]
        labels = most_common_words
        data = {
            'counts': multi_counts,
            'years': multi_years,
            'colors': colors,
            'labels': labels
        }

        if type_display == 'mentions':
            prepared_map_data = make_map_data(tot_mentions)
        else:
            prepared_map_data = make_map_data(tot_mentioned_by)

        return ColumnDataSource(data), ColumnDataSource(prepared_map_data)

    def update(attr, old, new):
        country_code = list(country_dic.keys())[list(
            country_dic.values()).index(country_input.value)]
        # print('updating ', country_input.value, country_code, dropdown.value)
        (word_frequency_to_plot,
         map_data) = make_data_set(list_sp_objs, country_code, dropdown.value)
        # print(country_input.value, word_frequency_to_plot)
        src.data.update(word_frequency_to_plot.data)
        map_src.data.update(map_data.data)

    def search_mentions(input_country, output_countries=list_country_codes):
        specific_mentions_dict = yearwise_data(input_country, 'mentions')

        return specific_mentions_dict

    def search_is_mentioned_by(input_country,
                               output_countries=list_country_codes):
        specific_mentioned_by_dict = yearwise_data(input_country,
                                                   'is_mentioned_by')

        return specific_mentioned_by_dict

    def yearwise_data(inp_country, m):
        '''Function to get yearly data on either mentions or is mentioned by
        data for a specific country.
        @param inp_country: country input.
        @param out_country: country of interest
        @param m: 'mentions' or 'is_mentioned_by'
        Example: After receiving an input country 'IND', to find the number of
        mentions of 'USA' in speeches by 'IND'.
        -Here, dict_of_interest is the mentions dict of IND
        >>>yearwise_data('IND','USA','mentions')
        <returns two arrays: years and mentions
        >>>[1971,1975,1995,2000],[1,2,2,1]

        The same thing can be applied to an is_mentioned_by dict
        '''
        # [x for x in dict_searched_country[1989] if x[0]=='COL']
        # print(inp_country,out_country,m)
        input_file = Path.cwd().joinpath(f'data/{m}.pickle')
        # Path.cwd().joinpath('data/members_dic.pkl')

        with input_file.open('rb') as pkl_file:
            mentions_dict = pickle.load(pkl_file)
            try:
                dict_of_int = mentions_dict[inp_country]
            except KeyError:
                print("Check the country again!")
                return None, None

        return dict_of_int

    def make_plot(src):
        p = figure(plot_height=600, title='Most used words')
        # print('SRC', src['years'], src['counts'])
        # print(src.daa['labels'])
        p.multi_line('years',
                     'counts',
                     color='colors',
                     legend='labels',
                     source=src)
        p.xaxis.axis_label = 'Year'
        p.yaxis.axis_label = 'Word count'
        # print(selected_countries)
        # for country in selected_countries:
        #     p.line('years', country, source=src)

        return p

    def make_map_data(country_counter1):
        # 1 A mentions B , 2 A is mentioned by B
        unzipped = list(zip(*country_counter1))
        countries = list(dict(country_counter1).keys())
        country_counts = list(dict(country_counter1).values())

        data = dict()
        data['country'] = countries
        data['counts'] = country_counts

        k = list(country_shapes.keys())
        country_xs = [country_shapes[i]['lats'] for i in k]
        country_ys = [country_shapes[i]['lons'] for i in k]
        country_names = [country_shapes[i]['name'] for i in k]
        # country_rates = list(range(len(country_names)))

        country_rates = [float('NaN')] * len(country_names)
        country_inds = {country_shapes[j]['ID']: i for i, j in enumerate(k)}

        for i in range(len(data['country'])):
            try:
                country_rates[country_inds[data['country']
                                           [i]]] = data['counts'][i]
            except KeyError:
                # country codes without a matching shape are skipped
                pass

        src_map = dict(
            x=country_xs,
            y=country_ys,
            name=country_names,
            rate=country_rates,
        )

        return src_map

    def make_map(src_map, country, type_display):
        color_mapper = LogColorMapper(palette=palette)
        TOOLS = "pan,wheel_zoom,reset,hover,save"

        p = figure(
            plot_width=1150,
            plot_height=800,
            title='World Map',
            tools=TOOLS,
            x_axis_location=None,
            y_axis_location=None,
            tooltips=[
                ("Name", "@name"),
                ("Mentions", "@rate")  # ,
                # ("(Long, Lat)", "($x, $y)")
            ])
        p.grid.grid_line_color = None
        p.hover.point_policy = "follow_mouse"
        p.x_range = Range1d(start=-180, end=180)
        p.y_range = Range1d(start=-90, end=90)

        p.grid.grid_line_color = None

        p.patches('x',
                  'y',
                  source=src_map,
                  fill_color={
                      'field': 'rate',
                      'transform': color_mapper
                  },
                  fill_alpha=0.7,
                  line_color="white",
                  line_width=0.5)

        return (p)

    country_input = TextInput(value="India", title="Label:")
    country_code = list(country_dic.keys())[list(country_dic.values()).index(
        country_input.value)]
    country_input.on_change('value', update)

    # For the dropdown
    menu = [("Mentions", "mentions"), ("Is mentioned by", "is_mentioned_by")]
    dropdown = Dropdown(label="Type of display",
                        button_type="primary",
                        value='mentions',
                        menu=menu)
    dropdown.on_change('value', update)

    word_colors = Category20_16
    word_colors.sort()

    src, map_src = make_data_set(list_sp_objs, country_code, dropdown.value)

    p = make_plot(src)
    map_plot = make_map(map_src, country_input, dropdown.value)
    # Put controls in a single element
    controls = widgetbox(country_input)

    # Create a row layout
    layout = row(
        column(controls, dropdown, p),
        map_plot,
    )

    # Make a tab with the layout
    tab = Panel(child=layout, title='Country referencing')

    return tab
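A Panel returned by `country_tab(...)` would typically be combined with other tabs and attached to the document; a usage sketch (the `speeches` argument is an assumed list of speech objects, not data shown here):

from bokeh.io import curdoc
from bokeh.models.widgets import Tabs

tabs = Tabs(tabs=[country_tab(speeches)])
curdoc().add_root(tabs)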
コード例 #53
0
ファイル: sliders.py プロジェクト: Chetver/bokeh
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)


# Set up widgets
text = TextInput(title="title", value='my sine wave')
offset = Slider(title="offset", value=0.0, start=-5.0, end=5.0, step=0.1)
amplitude = Slider(title="amplitude", value=1.0, start=-5.0, end=5.0)
phase = Slider(title="phase", value=0.0, start=0.0, end=2*np.pi)
freq = Slider(title="frequency", value=1.0, start=0.1, end=5.1)


# Set up callbacks
def update_title(attrname, old, new):
    plot.title.text = text.value

text.on_change('value', update_title)

def update_data(attrname, old, new):

    # Get the current slider values
    a = amplitude.value
    b = offset.value
    w = phase.value
    k = freq.value

    # Generate the new curve
    x = np.linspace(0, 4*np.pi, N)
    y = a*np.sin(k*x + w) + b

    source.data = dict(x=x, y=y)
コード例 #54
0
def correlations_tab(ts):

    # Making dataset using the autocorrelation function,
    # input is a timeseries and default maximum lag of 10
    def make_dataset_autocorr(ts, col, lag):

        df_to_plot_autocorr = auto_corr(ts[col], lag)

        return ColumnDataSource(df_to_plot_autocorr)

    # Making autocorrelation plot
    def make_autocorrplot(source):
        ttp = [("Lag", "$x"), ("Autocorrelation", "$y")]

        plot_autocorr = figure(plot_height=400, plot_width=600, tooltips=ttp,
                               title="Autocorrelation",
                               x_axis_label="Lag",
                               y_axis_label="Autocorrelation",
                               tools="hover, pan, zoom_in, zoom_out, \
                                   reset, save")
        plot_autocorr.circle('lags', 'auto_corrs', source=source)
        plot_autocorr.line('lags', 'auto_corrs', source=source, line_width=0.5)
        plot_autocorr.title.text_font_size = '14pt'
        plot_autocorr.xaxis.axis_label_text_font_size = '12pt'
        plot_autocorr.xaxis.axis_label_text_font_style = 'bold'
        plot_autocorr.yaxis.axis_label_text_font_size = '12pt'
        plot_autocorr.yaxis.axis_label_text_font_style = 'bold'

        return plot_autocorr

    # Set up callbacks to interactively update data when different
    # time series and lag values are selected, and letting the user
    # set the title
    def update_title(attrname, old, new):
        plot_autocorr.title.text = text_autocorr.value

    def update_data_autocorr(attrname, old, new):
        new_source_autocorr = make_dataset_autocorr(ts,
                                                    ts_select_autocorr.value,
                                                    lag=lag_select_autocorr.
                                                    value)
        source_autocorr.data.update(new_source_autocorr.data)

    # Set up widgets.
    # Input for plot titles
    text_autocorr = TextInput(title="Title", value='Autocorrelation')
    text_autocorr.on_change('value', update_title)
    # Select time series for Autocorrelation
    ts_available = ts.columns.tolist()
    print(ts_available)
    ts_available.remove('time')
    ts_select_autocorr = Select(value=ts_available[0], title='Time Series',
                                options=ts_available)
    ts_select_autocorr.on_change('value', update_data_autocorr)
    # Select lag value up to a max of
    # the length of the time series
    max_lag = ts.shape[0]
    lag_select_autocorr = Slider(start=10, end=max_lag, step=1, value=10,
                                 title='Lag')
    lag_select_autocorr.on_change('value', update_data_autocorr)

    # Initial state and plotting.
    source_autocorr = make_dataset_autocorr(ts, ts_available[0], 10)

    plot_autocorr = make_autocorrplot(source_autocorr)

    # Set up layouts and add to document.
    # Put controls in a single element.
    controls_autocorr = WidgetBox(text_autocorr, ts_select_autocorr,
                                  lag_select_autocorr)

    # making dataset using the correlation function
    # inputs are two different timeseries and default maximum lag of 10
    def make_dataset_corr(ts, col1, col2, lag):

        df_to_plot_corr = corr(ts[col1], ts[col2], lag)

        return ColumnDataSource(df_to_plot_corr)

    # Make line plot for correlation
    def make_corrplot(source):
        ttp = [("Lag", "$x"), ("Autocorrelation", "$y")]

        plot_corr = figure(plot_height=400, plot_width=600, tooltips=ttp,
                           title="Correlation",
                           x_axis_label="Lag", y_axis_label="Correlation",
                           tools="hover, pan, zoom_in, zoom_out, reset, save")
        plot_corr.circle('lags', 'corrs', source=source)
        plot_corr.line('lags', 'corrs', source=source, line_width=0.5)
        plot_corr.title.text_font_size = '14pt'
        plot_corr.xaxis.axis_label_text_font_size = '12pt'
        plot_corr.xaxis.axis_label_text_font_style = 'bold'
        plot_corr.yaxis.axis_label_text_font_size = '12pt'
        plot_corr.yaxis.axis_label_text_font_style = 'bold'

        return plot_corr

    # Set up callbacks to interactively update data when different
    # time series and lag values are selected, and letting the user
    # set the title
    def update_title(attrname, old, new):
        plot_corr.title.text = text_corr.value

    def update_data_corr(attrname, old, new):
        new_source_corr = make_dataset_corr(ts, ts_select_corr.value,
                                            ts_select2_corr.value,
                                            lag=lag_select_corr.value)
        source_corr.data.update(new_source_corr.data)

    # Set up widgets.
    # Input for plot titles
    text_corr = TextInput(title="Title", value='Correlation')
    text_corr.on_change('value', update_title)
    # Select time series for Autocorrelation
    ts_available = ts.columns.tolist()
    ts_available.remove('time')
    ts_select_corr = Select(value=ts_available[0], title='Time Series 1',
                            options=ts_available)
    ts_select_corr.on_change('value', update_data_corr)
    ts_select2_corr = Select(value=ts_available[0],
                             title='Time Series 2 (shifted)',
                             options=ts_available)
    ts_select2_corr.on_change('value', update_data_corr)
    # Select lag value up to a max of
    # the length of the time series
    max_lag = ts.shape[0]
    lag_select_corr = Slider(start=10, end=max_lag, step=1, value=10,
                             title='Lag')
    lag_select_corr.on_change('value', update_data_corr)

    # Initial state and plotting.
    source_corr = make_dataset_corr(ts, ts_available[0], ts_available[0], 10)

    plot_corr = make_corrplot(source_corr)

    # Set up layouts and add to document.
    # Put controls in a single element.
    controls_corr = WidgetBox(text_corr, ts_select_corr,
                              ts_select2_corr, lag_select_corr)

    # Create a row layout
    grid = gridplot([[controls_autocorr, plot_autocorr],
                     [controls_corr, plot_corr]],
                    plot_width=500, plot_height=500)

    # Make a tab with the layout.
    tab = Panel(child=grid, title='Autocorrelation and Correlation')
    return tab
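`make_dataset_autocorr()` and `make_dataset_corr()` rely on `auto_corr()`/`corr()` helpers that are not part of this snippet. A plausible pandas-based sketch of the autocorrelation helper, returning the 'lags'/'auto_corrs' columns the plot expects (the implementation is an assumption):

import pandas as pd

def auto_corr(series, lag=10):
    lags = list(range(1, lag + 1))
    auto_corrs = [series.autocorr(lag=k) for k in lags]
    return pd.DataFrame({'lags': lags, 'auto_corrs': auto_corrs})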
コード例 #55
0
from bokeh.models.widgets import Button, TextInput

label_saccade_button = Button(label='saccade')
label_saccade_button.on_click(label_saccade_cb)

label_pursuit_button = Button(label='pursuit')
label_pursuit_button.on_click(label_pursuit_cb)

label_fixation_button = Button(label='fixation')
label_fixation_button.on_click(label_fixation_cb)

remove_button = Button(label='remove')
remove_button.on_click(remove_cb)

trial_text = TextInput(value=str(trialNum))
trial_text.on_change('value', trial_text_cb)

nextTrial_button = Button(label='+trial')
nextTrial_button .on_click(next_trial_cb)

prevTrial_button= Button(label='-trial')
prevTrial_button.on_click(prev_trial_cb)

from bokeh.layouts import row
buttonBox = row(label_saccade_button, label_pursuit_button, label_fixation_button, remove_button)
trialSelectBox = row(prevTrial_button, trial_text, nextTrial_button)
#, sizing_mode='scale_width'

###########################################################################
# Get the HTML description header
from os.path import dirname, join
コード例 #56
0
ファイル: risk_app.py プロジェクト: steve98654/bokeh-demos
class StockApp(VBox):
    extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
    jsmodel = "VBox"

    # text statistics
    pretext = Instance(DataTable)

    # plots
    line_plot1 = Instance(Plot)
    hist1 = Instance(Plot)

    # data source
    source = Instance(ColumnDataSource)
    risk_source = Instance(ColumnDataSource)

    # layout boxes
    mainrow = Instance(HBox)
    histrow = Instance(HBox)
    statsbox = Instance(VBox)

    # inputs
    ticker1 = String(default="INTC")
    ticker2 = String(default="Daily Prices")
    ticker3 = String(default='63')
    ticker4 = String(default='2010-01-01')
    ticker5 = String(default='2015-08-01')
    ticker1_select = Instance(Select)
    ticker2_select = Instance(Select)
    ticker3_select = Instance(TextInput)
    ticker4_select = Instance(TextInput)
    ticker5_select = Instance(TextInput)
    input_box = Instance(VBoxForm)

    def __init__(self, *args, **kwargs):
        super(StockApp, self).__init__(*args, **kwargs)
        self._dfs = {}

    @classmethod
    def create(cls):
        """
        This function is called once, and is responsible for
        creating all objects (plots, datasources, etc)
        """
        # create layout widgets
        obj = cls()
        obj.mainrow = HBox()
        obj.histrow = HBox()
        obj.statsbox = VBox()
        obj.input_box = VBoxForm()

        # create input widgets
        obj.make_inputs()

        # outputs
        #obj.pretext = PreText(text="", width=300)
        obj.pretext = DataTable(width=300)
        obj.make_source()
        obj.make_plots()
        obj.make_stats()

        # layout
        obj.set_children()
        return obj

    def make_inputs(self):

        self.ticker1_select = Select(
            name='ticker1',
            title='Portfolio:',
            value='MSFT',
            options = ['INTC', 'Tech Basket', 'IBB', 'IGOV']
        )
        self.ticker2_select = Select(
            name='ticker2',
            title='Risk/Performance Metric:',
            value='Price',
            options=['Daily Prices', 'Daily Returns', 'Daily Cum Returns', 'Max DD Percentage', 'Percentage Up Days', 'Rolling 95% VaR', 'Rolling Ann. Volatility', 'Rolling Worst Dly. Loss', 'Ann. Sharpe Ratio']
        )
        self.ticker3_select = TextInput(
            name='ticker3',
            title='Window Size:',
            value='63'
        )
        self.ticker4_select = TextInput(
            name='ticker4',
            title='Start Date:',
            value='2010-01-01'
        )
        self.ticker5_select = TextInput(
            name='ticker5',
            title='End Date:',
            value='2015-08-01'
        )

    @property
    def selected_df(self):
        pandas_df = self.df
        selected = self.source.selected['1d']['indices']
        if selected:
            pandas_df = pandas_df.iloc[selected, :]
        return pandas_df

    def make_source(self):
        if self.ticker2 == 'Daily Prices':
            self.source = ColumnDataSource(data=self.df)
        elif self.ticker2 == 'Daily Returns':
            self.source = ColumnDataSource(data=daily_returns_df(self.df))
        elif self.ticker2 == 'Daily Cum Returns':
            self.source = ColumnDataSource(data=daily_cum_returns_df(daily_returns_df(self.df), int(self.ticker3)))
        elif self.ticker2 == 'Rolling Ann. Volatility':
            self.source = ColumnDataSource(data=annualized_volatility_df(self.df, int(self.ticker3)))
        elif self.ticker2 == 'Rolling Worst Dly. Loss':
            self.source = ColumnDataSource(data=worst_daily_loss_df(self.df, int(self.ticker3)))
        elif self.ticker2 == 'Rolling 95% VaR':
            self.source = ColumnDataSource(data=VaR_df(self.df, int(self.ticker3), 0.95))
        elif self.ticker2 == 'Ann. Sharpe Ratio':
            self.source = ColumnDataSource(data=sharpe_ratio_df(self.df, int(self.ticker3)))
        elif self.ticker2 == 'Max DD Percentage':
            self.source = ColumnDataSource(data=draw_down_df(self.df, int(self.ticker3)))
        elif self.ticker2 == 'Percentage Up Days':
            self.source = ColumnDataSource(data=per_up_days_df(self.df, int(self.ticker3)))
        else:
            self.source = ColumnDataSource(data=self.df)

    def line_plot(self, ticker, x_range=None):

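        # Rolling metrics get the trailing-window length appended to the plot title.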
        if self.ticker2 in ['Daily Prices', 'Daily Returns']:
            tltstr = self.ticker1 + ' ' + self.ticker2
        else:
            tltstr = (self.ticker1 + ' ' + self.ticker2 +
                      ' with ' + self.ticker3 + ' Day Trailing Window')

        p = figure(
            title=tltstr,
            x_range=x_range,
            x_axis_type='datetime',
            plot_width=1200, plot_height=400,
            title_text_font_size="16pt",
            tools="pan,box_zoom,wheel_zoom,reset",
            x_axis_label='Date',
            y_axis_label=self.ticker2
        )

        p.line(
            'date', ticker,
            line_width=2,
            line_join='bevel',
            source=self.source,
            nonselection_alpha=0.02
        )
        return p

    def hist_plot(self, ticker):
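        # Clip the series to its 1%/99% quantiles so a handful of outliers does not
        # stretch the histogram bins.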
        pltdf = self.source.to_df().set_index('date').dropna()
        qlow, qhigh = mquantiles(pltdf[ticker], prob=[0.01, 0.99])
        tdf = pltdf[ticker]
        histdf = tdf[(tdf > qlow) & (tdf < qhigh)]
        hist, bins = np.histogram(histdf, bins=50)
        width = 0.7 * (bins[1] - bins[0])
        center = (bins[:-1] + bins[1:]) / 2
        start = bins.min()
        end = bins.max()
        top = hist.max()

        p = figure(
            title=self.ticker1 + ' ' + self.ticker2 + ' Histogram',
            plot_width=600, plot_height=400,
            tools="",
            title_text_font_size="16pt",
            x_range=[start, end],
            y_range=[0, top],
            x_axis_label=self.ticker2 + ' Bins',
            y_axis_label='Bin Count'
        )
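        # Each bar is a rect centred on the bin midpoint at half the count, so it spans
        # from zero up to the bin count.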
        p.rect(center, hist / 2.0, width, hist)
        return p

    def make_plots(self):
        ticker1 = self.ticker1
        ticker2 = self.ticker2

        self.line_plot1 = self.line_plot(ticker1)
        self.hist_plots()

    def hist_plots(self):
        ticker1 = self.ticker1
        ticker2 = self.ticker2
        self.hist1 = self.hist_plot(ticker1)
        #self.hist2 = self.hist_plot(ticker2)

    def set_children(self):
        self.children = [self.mainrow, self.line_plot1]
        self.mainrow.children = [self.input_box, self.hist1, self.pretext]
        self.input_box.children = [self.ticker1_select, self.ticker2_select, self.ticker3_select, self.ticker4_select, self.ticker5_select]
        #self.statsbox.children = [self.pretext]

    def input_change(self, obj, attrname, old, new):
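        # Work out which widget fired and copy its new value onto the matching ticker property.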
        if obj == self.ticker5_select:
            self.ticker5 = new
        if obj == self.ticker4_select:
            self.ticker4 = new
        if obj == self.ticker3_select:
            self.ticker3 = new
        if obj == self.ticker2_select:
            self.ticker2 = new
        if obj == self.ticker1_select:
            self.ticker1 = new

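        # Rebuild the source, stats table, and plots, then re-add the app to the document
        # so the changes propagate to the client.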
        self.make_source()
        self.make_stats()
        self.make_plots()
        self.set_children()
        curdoc().add(self)

    def setup_events(self):
        super(StockApp, self).setup_events()
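        # Widgets and the source may not exist yet when events are first wired up,
        # so guard each registration.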
        if self.source:
            self.source.on_change('selected', self, 'selection_change')
        if self.ticker1_select:
            self.ticker1_select.on_change('value', self, 'input_change')
        if self.ticker2_select:
            self.ticker2_select.on_change('value', self, 'input_change')
        if self.ticker3_select:
            self.ticker3_select.on_change('value', self, 'input_change')
        if self.ticker4_select:
            self.ticker4_select.on_change('value', self, 'input_change')
        if self.ticker5_select:
            self.ticker5_select.on_change('value', self, 'input_change')

    def make_stats(self):
        ## Build up a list of Summary stats for the time series 
        statsdf = self.source.to_df().dropna()
        stats = statsdf.describe()
        stats.index = ['Data Point Count', 'Mean', 'Standard Deviation', 'Minimum', '25%-ile',
                       '50%-ile', '75%-ile', 'Maximum']
        stats.loc['Skewness', :] = my_round(my_skewness(statsdf[self.ticker1].values), 4)
        stats.loc['Excess Kurtosis', :] = my_round(my_ex_kurtosis(statsdf[self.ticker1].values), 4)
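        # Replace any remaining NaNs with a sentinel value so every row still renders in the DataTable.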
        stats = stats.fillna(-10)
        stats.index.name = "stats"
        columns = [TableColumn(field='stats', title='Time Series Statistic'),
                   TableColumn(field=self.ticker1, title="Value",
                               formatter=NumberFormatter(format='0.0000'))]
        self.pretext.columns = columns
        self.pretext.source = ColumnDataSource(data=stats)

    def selection_change(self, obj, attrname, old, new):
        self.make_stats()
        self.hist_plots()
        self.set_children()
        curdoc().add(self)

    @property
    def df(self):
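        # Pull the full history for the selected portfolio and slice it to the chosen
        # start/end dates.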
        return get_data(self.ticker1).truncate(before=self.ticker4, after=self.ticker5)