def gene_structure_plot(self, exton_records, utr_records, DNM_xy, trans_count):
    """Build an HTML <div> containing a plotly gene-structure plot.

    Parameters
    ----------
    exton_records : list of segment pairs [(x0, y0, pos0), (x1, y1, pos1)]
        Exon segments, drawn as thick royal-blue lines.
    utr_records : same shape as exton_records
        UTR segments, drawn as thinner gray lines.
    DNM_xy : same shape; element [0][2] is a comma-separated annotation
        string whose third field is the DNM type (used to pick the color).
    trans_count : int
        Number of transcripts; controls the figure height.

    Returns
    -------
    str
        A plotly HTML div, or a fallback message div when plotting fails.
    """
    # Figure height grows stepwise with the number of transcripts.
    if trans_count <= 0:
        trans_len = 200
    elif trans_count <= 3:
        trans_len = 200
    elif trans_count <= 4:
        trans_len = 300
    elif trans_count <= 10:
        trans_len = 500
    elif trans_count <= 13:
        trans_len = 600
    else:
        trans_len = trans_count * 30

    trace_coding = []
    exton_num = 0

    def _segment_trace(item, **extra):
        # One line segment with a '[start-end]' hover label.
        return Scatter(
            visible=True,
            hoverinfo='text',
            showlegend=False,
            mode='lines',
            x=[item[0][0], item[1][0]],
            y=[item[0][1], item[1][1]],
            text='[' + str(item[0][2]) + '-' + str(item[1][2]) + ']',
            **extra)

    # Exons: thick royal-blue segments.  Only the last one carries the
    # legend name (the two branches of the original loop were otherwise
    # identical, so they are unified here).
    last = len(exton_records) - 1
    for index, item in enumerate(exton_records):
        extra = {'name': 'Exon'} if index == last else {}
        trace_coding.append(
            _segment_trace(
                item,
                line=dict(color='rgb(65,105,225)',
                          shape='linear',
                          width=10,
                          simplify=True),
                **extra))
        exton_num += 1

    # UTRs: thinner gray segments.
    for item in utr_records:
        trace_coding.append(
            _segment_trace(
                item,
                line=dict(color='gray', shape='linear', width=5,
                          simplify=True)))
        exton_num += 1

    # DNMs: colored by mutation type (third field of the annotation string).
    for item in DNM_xy:
        dnm_type = item[0][2].split(',')[2]  # renamed: was shadowing builtin `type`
        c = self.get_DNM_color(dnm_type)
        trace_coding.append(
            Scatter(
                visible=True,
                hoverinfo='text',
                showlegend=False,
                mode='lines',
                x=[item[0][0], item[1][0]],
                y=[item[0][1], item[1][1]],
                legendgroup=c,
                name=c,
                text=str(item[0][2]),
                hoverlabel={'font': {'size': 10}},
                line=dict(color=c, shape='linear', width=12, simplify=True)))

    layouts = go.Layout(
        height=trans_len,
        width=800,
        hovermode='closest',
        # Distance between the axis labels and the edges of the canvas.
        margin=go.Margin(l=130, r=0, t=10, pad=0),
        xaxis=dict(showgrid=False,
                   zeroline=False,
                   showline=False,
                   showticklabels=False,
                   autorange=True),
        yaxis=dict(titlefont=dict(family='Arial, sans-serif',
                                  size=20,
                                  color='lightgrey'),
                   showticklabels=True,
                   tickangle=360,
                   tickfont=dict(family='Arial, serif',
                                 size=12,
                                 color='black'),
                   exponentformat='e',
                   showexponent='All'))
    figs = go.Figure(data=trace_coding, layout=layouts)
    try:
        return plotly.offline.plot(figs, show_link=False, output_type="div")
    except Exception:
        # Fall back to a placeholder when plotly cannot render the figure
        # (was a bare `except:`, which also swallowed KeyboardInterrupt).
        return ('<div>There is no corresponding data published yet, '
                'we will update it when such data available. </div>')
def plot(pca_results, plot_counter):
    """Render an interactive 3D scatter plot of the first three PCs.

    Parameters
    ----------
    pca_results : dict
        Expects keys 'pca' (fitted object exposing `components_`),
        'var_explained' (three axis titles), 'sample_metadata' (DataFrame),
        and optionally 'color_by', 'color_type' and 'signature_metadata'.
    plot_counter : callable
        Returns the figure number (as a string) for the caption.
    """
    # Unpack results.
    pca = pca_results['pca']
    var_explained = pca_results['var_explained']
    sample_metadata = pca_results['sample_metadata']
    color_by = pca_results.get('color_by')
    color_type = pca_results.get('color_type')
    color_column = sample_metadata[color_by] if color_by else None
    colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99',
              '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a',
              '#ffff99', '#b15928']
    # Hover text: sample name plus its metadata key/value pairs.
    # (Was '{key}: {value}'.format(**locals()) — fragile; now explicit.)
    sample_titles = [
        '<b>{}</b><br>'.format(index) + '<br>'.join(
            '<i>{}</i>: {}'.format(key, value)
            for key, value in rowData.items())
        for index, rowData in sample_metadata.iterrows()
    ]

    if not color_by:
        # Single uncolored trace.
        marker = dict(size=15)
        trace = go.Scatter3d(x=pca.components_[0],
                             y=pca.components_[1],
                             z=pca.components_[2],
                             mode='markers',
                             hoverinfo='text',
                             text=sample_titles,
                             marker=marker)
        data = [trace]
    elif color_type == 'continuous':
        # Single trace colored by a continuous variable.
        marker = dict(size=15, color=color_column, colorscale='Viridis',
                      showscale=True)
        trace = go.Scatter3d(x=pca.components_[0],
                             y=pca.components_[1],
                             z=pca.components_[2],
                             mode='markers',
                             hoverinfo='text',
                             text=sample_titles,
                             marker=marker)
        data = [trace]
    elif color_type == 'categorical':
        # One trace per category.
        unique_categories = color_column.unique()
        data = []
        for i, category in enumerate(unique_categories):
            if color_by == 'Sample Group':
                # Signature comparison: color the two compared groups.
                group_A, group_B = [
                    x.split(' vs ')
                    for x in pca_results['signature_metadata'].keys()
                ][0]
                if category == group_A:
                    category_color = 'blue'
                elif category == group_B:
                    category_color = 'red'
                else:
                    category_color = 'black'
            else:
                # Cycle the palette so >12 categories cannot IndexError.
                category_color = colors[i % len(colors)]
            # Indices of the samples belonging to this category.
            category_indices = [
                j for j, sample_category in enumerate(color_column)
                if sample_category == category
            ]
            trace = go.Scatter3d(
                x=pca.components_[0][category_indices],
                y=pca.components_[1][category_indices],
                z=pca.components_[2][category_indices],
                mode='markers',
                hoverinfo='text',
                text=[sample_titles[x] for x in category_indices],
                name=category,
                marker=dict(size=15, color=category_color))
            data.append(trace)

    colored = '' if str(color_by) == 'None' else 'Colored by {}'.format(
        color_by)
    layout = go.Layout(
        title='<b>PCA Analysis | Scatter Plot</b><br><i>{}</i>'.format(
            colored),
        hovermode='closest',
        margin=go.Margin(l=0, r=0, b=0, t=50),
        width=900,
        scene=dict(xaxis=dict(title=var_explained[0]),
                   yaxis=dict(title=var_explained[1]),
                   zaxis=dict(title=var_explained[2])))
    fig = go.Figure(data=data, layout=layout)

    # Plot
    iplot(fig)

    # Add Figure Legend.  (The original trailing .format(**locals()) was a
    # no-op on a brace-free string and has been removed.)
    display(Markdown(
        '** Figure ' + plot_counter() +
        ' | Principal Component Analysis results. ** The figure displays an '
        'interactive, three-dimensional scatter plot of the first three '
        'Principal Components (PCs) of the data. Each point represents an '
        'RNA-seq sample. Samples with similar gene expression profiles are '
        'closer in the three-dimensional space. If provided, sample groups '
        'are indicated using different colors, allowing for easier '
        'interpretation of the results.'))
enc_format="jpeg", ) GRAPH_PLACEHOLDER = dcc.Graph( id="interactive-image", figure={ "data": [], "layout": { "autosize": True, "paper_bgcolor": "#272a31", "plot_bgcolor": "#272a31", "margin": go.Margin(l=40, b=40, t=26, r=10), "xaxis": { "range": (0, 1527), "scaleanchor": "y", "scaleratio": 1, "color": "white", "gridcolor": "#43454a", "tickwidth": 1, }, "yaxis": { "range": (0, 1200), "color": "white", "gridcolor": "#43454a", "tickwidth": 1, }, "images": [{
def build_map(word, compare_word=None, type='scattergeo', scope='country'):
    """Build (plotdata, layout) for a plotly geo map of word usage.

    Parameters
    ----------
    word : str
        Word whose usage frequency is mapped.
    compare_word : str, optional
        Second word; when given (and non-blank) the map shows the
        difference of the two words' log frequencies.
    type : str
        'scattergeo' (sized/colored markers) or 'choropleth'.
        (Parameter name kept for backward compatibility even though it
        shadows the builtin.)
    scope : str
        'country' (world map, ISO-3 codes) or 'state' (US states).

    Returns
    -------
    tuple(list, dict)
        Plotly data list and layout dict.
    """
    import pandas as pd
    import numpy as np

    # NOTE: `maxval` is assigned below, before `transform` is first called.
    transform = lambda x: np.log(1 + x / maxval)

    if scope == 'country':
        data = get_word_by_country(word).copy()
        if compare_word:
            data2 = get_word_by_country(compare_word).copy()
        field = 'publication_country'
        scope = 'world'
        projection = 'Mercator'
        locationmode = 'ISO-3'
    elif scope == 'state':
        data = get_word_by_us_state(word).copy()
        if compare_word:
            data2 = get_word_by_us_state(compare_word).copy()
        field = 'publication_state'
        scope = 'usa'
        projection = 'albers usa'
        locationmode = 'USA-states'
    else:
        # Previously an unsupported scope surfaced as a NameError further
        # down; fail fast with a clear message instead.
        raise ValueError("scope must be 'country' or 'state'")

    if compare_word and (compare_word.strip() != ''):
        # Comparison mode: plot the difference of log frequencies.
        sizemod = 45
        data = pd.merge(data, data2, on=[field, 'code'])
        if type == 'scattergeo':
            data = data[(data['WordsPerMillion_x'] != 0)
                        & (data['WordsPerMillion_y'] != 0)]
        maxval = data[['WordsPerMillion_x', 'WordsPerMillion_y']].max().max()
        logcounts = sizemod * (data['WordsPerMillion_x'].apply(transform) -
                               data['WordsPerMillion_y'].apply(transform))
        text = (data[field] +
                "<br> Words Per Million<br> '{}': ".format(word) +
                data['WordsPerMillion_x'].round(1).astype(str) +
                "<br> '{}': ".format(compare_word) +
                data['WordsPerMillion_y'].round(1).astype(str))
        title = "'%s' vs. '%s' in the HathiTrust" % (word, compare_word)
    else:
        sizemod = 40
        if type == 'scattergeo':
            data = data[(data['WordsPerMillion'] != 0)]
        counts = data['WordsPerMillion'].astype(int)
        maxval = counts.max()
        logcounts = sizemod * counts.apply(transform)
        text = (data[field] + '<br> Words Per Million:' +
                data['WordsPerMillion'].round(2).astype('str'))
        title = "\'%s\' in the HathiTrust" % word
        # Removed: a dead `counts2 = data2['WordsPerMillion'].astype(int)`
        # assignment that was never read anywhere.

    plotdata = [
        dict(type=type,
             hoverinfo="location+text",
             locationmode=locationmode,
             locations=data['code'],
             text=text,
             marker=dict(line=dict(width=0.5, color='rgb(40,40,40)')))
    ]
    if type == 'choropleth':
        plotdata[0]['z'] = logcounts
        plotdata[0]['autocolorscale'] = False
        plotdata[0]['showscale'] = False
        plotdata[0]['zauto'] = False
        # Symmetric color range around zero so the ratio sign is visible.
        plotdata[0]['zmax'] = logcounts.abs().max()
        plotdata[0]['zmin'] = -logcounts.abs().max()
    elif type == 'scattergeo':
        plotdata[0]['marker']['size'] = logcounts.abs()
        plotdata[0]['marker']['color'] = logcounts
        plotdata[0]['marker']['cauto'] = False
        plotdata[0]['marker']['cmax'] = logcounts.abs().max()
        plotdata[0]['marker']['cmin'] = -logcounts.abs().max()

    layout = dict(title=title,
                  margin=go.Margin(l=10, r=10, b=10, t=50, pad=4),
                  geo=dict(scope=scope,
                           projection=dict(type=projection),
                           showframe=False,
                           showcoastlines=True,
                           showland=True,
                           landcolor="rgb(229, 229, 229)",
                           countrycolor="rgb(255, 255, 255)",
                           coastlinecolor="rgb(255, 255, 255)",
                           showlakes=True,
                           lakecolor='rgb(255, 255, 255)'))
    return (plotdata, layout)
def get_context_data(self, **kwargs):
    # Build the template context with plotly div snippets for the event
    # statistics dashboard: registrations over time, gender, membership,
    # countries and institution types, each comparing the current event
    # with the previous year's event of the same type.
    context = super().get_context_data(**kwargs)
    context["event"] = self.event
    context["plots"] = {}

    def get_basic_layout(title="Title"):
        # Shared layout skeleton for every plot on the page.
        # NOTE: `title` is currently unused (the 'title' entry is
        # commented out below).
        return {
            # 'title': title,
            "height": 380,
            "margin": pgo.Margin(l=30, r=10, b=80, t=20, pad=5),
            "legend": {
                "x": 0,
                "y": 1
            },
            "orientation": 0,
        }

    def get_registrations_data(event):
        # Cumulative registration counts keyed by day offset relative to
        # the event start date.  Returns [offsets, {offset: (cumulative,
        # daily_count)}].
        c = 0
        x = []
        y_dict = {}
        for r in (Registration.objects.filter(event_id=event.id).extra({
                "date": "DATE(created_at)"
        }).values("date").order_by("date").annotate(count=Count("id"))):
            c += r["count"]
            delta = (r["date"] - event.start_date).days
            x.append(delta)
            y_dict[delta] = (c, r["count"])
        return [x, y_dict]

    def fill_the_gaps(range_days, data):
        """Returns an array for a range of dates, filling the gaps with existing data."""
        output = []
        c = 0
        last_day = sorted(data.keys())[-1]
        for d in range_days:
            # Carry the last known cumulative count forward over gaps.
            c = data[d][0] if d in data else c
            if d > last_day:
                break
            output.append(c)
        return output

    previous_conf = Event.objects.get(start_date__year=self.event.year - 1,
                                      type=self.event.type)
    registrations = {
        "previous": get_registrations_data(previous_conf),
        "current": get_registrations_data(self.event),
    }

    #
    # Show data with plot.ly
    #
    color_primary = "#1976D2"  # Blue 700
    color_secondary = "#B0BEC5"  # Blue Grey 200

    # -------------
    # REGISTRATIONS
    # -------------
    # Contiguous day-offset range covering both years' registrations.
    sorted_x = sorted(
        list(
            set(registrations["previous"][0] + registrations["current"][0])))
    sorted_x = list(range(sorted_x[0], sorted_x[-1] + 1))
    previous_trace = pgo.Scatter(
        x=sorted_x,
        y=fill_the_gaps(sorted_x, registrations["previous"][1]),
        name=previous_conf.year,
        line=dict(color=color_secondary, width=2, dash="dot"),
    )
    current_trace = pgo.Scatter(
        x=sorted_x,
        y=fill_the_gaps(sorted_x, registrations["current"][1]),
        name=self.event.year,
        line=dict(color=color_primary, width=4),
    )
    layout = get_basic_layout("Registrations")
    # Mark January 1st of each year on the timeline.
    layout.update({
        "annotations": [
            dict(
                x=-(previous_conf.start_date.day - 1),
                y=150,
                xref="x",
                yref="y",
                text="Jan 1 %s" % previous_conf.year,
                showarrow=True,
                arrowhead=6,
                arrowcolor=color_secondary,
                ax=1,
                ay=-40,
            ),
            dict(
                x=-(self.event.start_date.day - 1),
                y=0,
                xref="x",
                yref="y",
                text="Jan 1 %s" % self.event.year,
                showarrow=True,
                arrowhead=6,
                arrowcolor=color_primary,
                ax=1,
                ay=-40,
            ),
        ]
    })
    figure = pgo.Figure(data=[current_trace, previous_trace], layout=layout)
    context["plots"]["registrations"] = poff.plot(figure,
                                                  auto_open=False,
                                                  output_type="div")

    # ------
    # GENDER
    # ------
    def get_gender_data(event):
        # Registration counts per gender: [female, male, not set].
        # Gender codes: 1 = female, 2 = male, None = not set — inferred
        # from the `genders` labels below; confirm against the profile
        # model.
        y_dict = {1: 0, 2: 0, None: 0}
        for r in (Registration.objects.select_related("user").filter(
                event_id=event.id).values("user__profile__gender").
                  order_by("user__profile__gender").annotate(
                      count=Count("id"))):
            y_dict[r["user__profile__gender"]] = r["count"]
        return [y_dict[1], y_dict[2], y_dict[None]]

    genders = ["Female", "Male", "(not set)"]
    previous_trace = pgo.Bar(
        x=genders,
        y=get_gender_data(previous_conf),
        name=previous_conf.year,
        marker=dict(color=color_secondary),
        opacity=0.6,
    )
    current_trace = pgo.Bar(x=genders,
                            y=get_gender_data(self.event),
                            name=self.event.year,
                            marker=dict(color=color_primary))
    layout = get_basic_layout("Gender distribution")
    layout.update({"barmode": "group", "xaxis": dict(tickangle=30)})
    figure = pgo.Figure(data=[current_trace, previous_trace], layout=layout)
    context["plots"]["gender"] = poff.plot(figure,
                                           auto_open=False,
                                           output_type="div")

    # ----------
    # MEMBERSHIP
    # ----------
    def get_membership_data(event):
        # Bucket non-revoked registrations by the membership tags on the
        # user profile; order matches `membership_types` below.
        y_dict = {
            "MEMB": 0,
            "ASSO": 0,
            "AFFI": 0,
            "APHD": 0,
            "STAF": 0,
            "None": 0,
        }
        for r in (Registration.objects.select_related(
                "user__profile").filter(
                    event_id=event.id,
                    user__profile__membership_revocation_date__isnull=True
                ).values("user__profile__membership_tags")):
            if not r["user__profile__membership_tags"]:
                y_dict["None"] += 1
                continue
            if "member" in r["user__profile__membership_tags"]:
                # "non-eu" members count as associate members.
                if "non-eu" in r["user__profile__membership_tags"]:
                    y_dict["ASSO"] += 1
                    continue
                y_dict["MEMB"] += 1
                continue
            if "affiliated" in r["user__profile__membership_tags"]:
                if "phd" in r["user__profile__membership_tags"]:
                    y_dict["APHD"] += 1
                    continue
                if "staff" in r["user__profile__membership_tags"]:
                    y_dict["STAF"] += 1
                    continue
                y_dict["AFFI"] += 1
                continue
        return list(y_dict.values())

    membership_types = [
        "Member", "Assoc. member", "Affil. member", "Affil. PhD", "Staff",
        "(none)"
    ]
    previous_trace = pgo.Bar(
        x=membership_types,
        y=get_membership_data(previous_conf),
        name=previous_conf.year,
        marker=dict(color=color_secondary),
        opacity=0.6,
    )
    current_trace = pgo.Bar(
        x=membership_types,
        y=get_membership_data(self.event),
        name=self.event.year,
        marker=dict(color=color_primary),
    )
    layout = get_basic_layout("Membership")
    layout.update({"barmode": "group", "xaxis": dict(tickangle=30)})
    figure = pgo.Figure(data=[current_trace, previous_trace], layout=layout)
    context["plots"]["membership"] = poff.plot(figure,
                                               auto_open=False,
                                               output_type="div")

    # ---------
    # COUNTRIES
    # ---------
    country_names = dict(countries)
    country_names["GB"] = "UK"
    country_names["US"] = "USA"

    def get_countries_data(event):
        # Registration counts per institution country, most frequent first.
        y_odict = []
        for r in (Registration.objects.select_related(
                "user__profile__institution").filter(event_id=event.id).
                  values("user__profile__institution__country").order_by(
                      "-count",
                      "user__profile__institution__country").annotate(
                          count=Count("id"))):
            try:
                y_odict.append((country_names[
                    r["user__profile__institution__country"]], r["count"]))
            except Exception:
                # Unknown/missing country code.
                y_odict.append(("(not set)", r["count"]))
        return OrderedDict(y_odict)

    # NOTE(review): only the *current* event is plotted here (no
    # previous-year trace), although the variables are named
    # `previous_data`/`previous_trace` — presumably intentional; confirm.
    previous_data = get_countries_data(self.event)
    previous_trace = pgo.Bar(
        x=list(previous_data.keys()),
        y=list(previous_data.values()),
        name=self.event.year,
        marker=dict(color=color_primary),
    )
    layout = get_basic_layout("Countries (institutions)")
    layout.update({
        "margin": pgo.Margin(l=20, r=0, b=100, t=20, pad=5),
        "barmode": "group",
        "xaxis": dict(tickangle=-90)
    })
    figure = pgo.Figure(data=[previous_trace], layout=layout)
    context["plots"]["countries"] = poff.plot(figure,
                                              auto_open=False,
                                              output_type="div")

    # -----------------
    # INSTITUTION TYPES
    # -----------------
    def get_institution_types_data(event):
        # Registration counts per institution type, in fixed display order.
        y_dict = OrderedDict([
            (Institution.SME, 0),
            (Institution.INDUSTRY, 0),
            (Institution.UNIVERSITY, 0),
            (Institution.LAB, 0),
            (Institution.INNOVATION, 0),
            (Institution.OTHER, 0),
        ])
        for r in (Registration.objects.select_related(
                "user__profile__institution").filter(event_id=event.id).
                  values("user__profile__institution__type").order_by(
                      "user__profile__institution__type").annotate(
                          count=Count("id"))):
            y_dict[r["user__profile__institution__type"]] = r["count"]
        return y_dict

    keys = [
        "SMEs", "Industry", "Universities", "Labs", "Innovation", "Other"
    ]
    previous_trace = pgo.Bar(
        x=keys,
        y=list(get_institution_types_data(previous_conf).values()),
        name=previous_conf.year,
        marker=dict(color=color_secondary),
        opacity=0.6,
    )
    current_trace = pgo.Bar(
        x=keys,
        y=list(get_institution_types_data(self.event).values()),
        name=self.event.year,
        marker=dict(color=color_primary),
    )
    layout = get_basic_layout("Institution types")
    layout.update({"barmode": "group", "xaxis": dict(tickangle=30)})
    figure = pgo.Figure(data=[current_trace, previous_trace], layout=layout)
    context["plots"]["institution_types"] = poff.plot(figure,
                                                      auto_open=False,
                                                      output_type="div")

    return context
def plot_distribution_age_distance(data,
                                   runnings=None,
                                   title='Distribution of runners by age',
                                   age_column_name='age',
                                   sex_column_name='sex'):
    '''
    This function plots, for each running, the distribution of ages of runners based on the genders of participants.

    Parameters
        - data: DataFrame to use during generation of the distribution
        - runnings: Dict containing name of column containing runnings (key: column_name) and set of runnings (key: values, value: dict() with following keys: name, color)
                    By default, None. If None, default values will be set by function.
        - title: Title of the graph (by default, 'Distribution of runners by age categories')
        - age_column_name: Name of the column containing age of participants('age' or 'age category', by default, 'age')
        - sex_column_name: Name of the column containing sex of participants (by default, 'sex')

    Return
        - figure: Plotly figure
    '''

    if not runnings:
        # Default runnings: 10 km / semi-marathon / marathon, one subplot
        # row each ('position' is the 1-based subplot row index).
        runnings = {
            'column_name': 'distance (km)',
            'values':
            OrderedDict([(10, {
                'name': '10 km',
                'color': KM_10_COLOR,
                'position': 1
            }), (21, {
                'name': 'Semi-marathon',
                'color': KM_21_COLOR,
                'position': 2
            }), (42, {
                'name': 'Marathon',
                'color': KM_42_COLOR,
                'position': 3
            })])
        }
    colors = {
        'female': FEMALE_COLOR,
        'male': MALE_COLOR,
        'all': ALL_GENDERS_COLOR
    }
    statistics = {}
    # make_subplots prints its grid description to stdout; silence it.
    with study_utils.ignore_stdout():
        figure = tools.make_subplots(
            rows=3,
            cols=1,
            subplot_titles=([
                attributes['name']
                for km, attributes in runnings['values'].items()
            ]))
    for key, attributes in runnings['values'].items():
        filtered_df = data[data[runnings['column_name']] == key]
        # Mean/SD of ages, displayed next to each subplot title below.
        statistics[attributes['name']] = 'Mean age: ' + str(
            round(np.mean(filtered_df[age_column_name]),
                  2)) + ' years (SD: ' + str(
                      round(np.std(filtered_df[age_column_name]), 2)) + ')'
        # One histogram per sex, plus one for all participants combined;
        # legend entries are shown only for the first subplot row.
        for sex in np.concatenate(
            (filtered_df[sex_column_name].unique(), ['all']), axis=0):
            if sex == 'all':
                x = filtered_df[age_column_name]
            else:
                x = filtered_df[filtered_df[sex_column_name] ==
                                sex][age_column_name]
            # One bin per year when plotting raw ages; otherwise one bin
            # per category value.
            nbinsx = ((np.max(x) - np.min(x)) +
                      1) if (age_column_name == 'age') else len(x)
            figure.append_trace(
                go.Histogram(nbinsx=nbinsx,
                             x=x,
                             name=sex.capitalize() + ' runners',
                             legendgroup=sex,
                             showlegend=(attributes['position'] == 1),
                             marker={'color': colors[sex]},
                             opacity=0.75), attributes['position'], 1)

    # Format of axes and layout
    if age_column_name == 'age category':
        # Force a fixed category order on every x axis (note: `attributes`
        # from the loop above is shadowed here).
        for axis, attributes in {
                k: v
                for k, v in figure['layout'].items() if 'xaxis' in k
        }.items():
            figure['layout'][axis].update(categoryorder='array',
                                          categoryarray=YEAR_CATEGORIES)
    figure.layout.xaxis3.update(title='Age of participants')
    figure.layout.yaxis2.update(title='Number of participants')
    figure.layout.update(title=title,
                         barmode='stack',
                         bargroupgap=0.1,
                         bargap=0,
                         margin=go.Margin(t=100, b=50, l=50, r=50))

    # Add of statistics
    # Trick: We use position of subtitles annotations to create the ones related to statistics
    annotations_statistics = []
    for annotation in figure['layout']['annotations']:
        annotations_statistics.append(
            Annotation(y=annotation['y'],
                       x=1,
                       text=statistics[annotation['text']],
                       xref='paper',
                       yref='paper',
                       yanchor='bottom',
                       showarrow=False))
    figure['layout']['annotations'].extend(annotations_statistics)
    plotly.offline.iplot(figure)
    return figure
def _update_grapgcl(n_clicks, catalogue, nbre_clst, points, initialisation):
    # Dash callback (Python 2 code — uses print statements): cluster the
    # events of the selected earthquake catalogue with KMeans and return a
    # Scattermapbox figure, one trace per cluster.
    # NOTE(review): `catalogues`, `regions`, `region`, `mapbox_access_token`,
    # `ClusterIndicesNumpy` and `genererregles` are resolved from module
    # scope — confirm they are defined before this callback runs.
    # NOTE(review): when initialisation == 'custom' but no points were
    # selected, the function implicitly returns None.
    radius_multiplier = {'inner': 1.5, 'outer': 3}
    df = pd.read_csv(catalogues[catalogue]['clink'])
    # Positional indices of the date/lat/lon/depth/mag/id columns, as
    # configured for this catalogue.
    colum = [
        catalogues[catalogue]['cdate'], catalogues[catalogue]['clat'],
        catalogues[catalogue]['clong'], catalogues[catalogue]['cdepth'],
        catalogues[catalogue]['cmag'], catalogues[catalogue]['cid']
    ]
    df1 = df.iloc[:, colum]
    df1 = df1.dropna(axis=0, how='any')
    df1.columns = ['Date', 'lat', 'lon', 'depth', 'mag', 'id']
    if initialisation == 'k++':
        # ---- KMeans with k-means++ initialisation ----
        colonnes = ['lat', 'long', 'depth', 'mag']
        df1.columns = ['Date', 'lat', 'lon', 'depth', 'mag', 'id']
        dff = df1
        df1 = df1.iloc[:, [1, 2, 3, 4]]  # keep lat/lon/depth/mag only
        dd = df1.values.tolist()
        datax = np.array(dd)
        k = catalogues[catalogue]['k']
        k = 3  # NOTE(review): hard-coded override of the configured k.
        kmeans = KMeans(n_clusters=k, init='k-means++',
                        max_iter=1000).fit(datax)
        # Print human-readable rules describing each cluster.
        regles = genererregles(kmeans, k, datax)
        for l in range(0, k):
            print "Cluster " + str(l + 1)
            print regles[l]
        col = [
            'blue', 'orange', 'green', 'purple', 'red', 'white', 'yellow',
            'cyan', 'gray', 'rosybrown', 'peru', 'darkorchid', 'plum',
            'gainsboro'
        ]
        data_clst = []
        layout = go.Layout(
            title=regions[region],
            autosize=True,
            hovermode='closest',
            height=750,
            margin=go.Margin(l=0, r=0, t=45, b=10),
            mapbox=dict(
                accesstoken=mapbox_access_token,
                bearing=0,
                center=dict(
                    lat=regions[region]['lat'],
                    lon=regions[region]['lon'],
                ),
                pitch=0,
                zoom=regions[region]['zoom'],
            ),
        )
        # One Scattermapbox trace per cluster.
        for ind in range(0, k):
            #datac=[]
            c = datax[ClusterIndicesNumpy(ind, kmeans.labels_)]
            x = c[:, 0]
            y = c[:, 1]
            df3 = pd.DataFrame(c, columns=colonnes)
            # outer circles represent magnitude
            datac = go.Scattermapbox(
                lat=df3['lat'],
                lon=df3['long'],
                mode='markers',
                marker=go.Marker(
                    size=df3['mag'] * radius_multiplier['outer'],
                    color=col[ind],
                    opacity=0.8,
                ),
                # hoverinfo='text',
                showlegend=False,
            )
            data_clst.append(datac)
        figure = go.Figure(data=data_clst, layout=layout)
        return figure
    if initialisation == 'custom':
        # ---- custom initialisation: the centroids are the points the user
        # clicked on the map, passed in as a JSON string ----
        if points:
            if n_clicks > 0:
                result = json.loads(points)
                if result is not None:
                    datafr = pd.DataFrame(result)
                    # Drop plotly click-event bookkeeping columns.
                    col = [
                        'curveNumber', 'marker.size', 'pointNumber',
                        'pointIndex', 'text', 'marker.color'
                    ]
                    datafr = datafr.drop(col, axis=1)
                    datafr = datafr.rename(index=str,
                                           columns={"hoverinfo": "id"})
                    # Match the clicked points back to catalogue rows to
                    # recover their depth and magnitude.
                    centroid = pd.merge(datafr,
                                        df1,
                                        on=['lat', 'lon', 'id'],
                                        how='inner')
                    centroid = centroid.iloc[:, [1, 2, 4, 5]]
                    centroid = centroid[['lat', 'lon', 'depth', 'mag']]
                    centroid = centroid.values
                    print centroid
                    if nbre_clst == len(centroid):
                        # Use the selected points as the initial centroids.
                        radius_multiplier = {'inner': 1.5, 'outer': 3}
                        dff = df1
                        df1 = df1.iloc[:, [1, 2, 3, 4]]
                        dd = df1.values.tolist()
                        colonnes = ['lat', 'long', 'depth', 'mag']
                        datax = np.array(dd)
                        k = nbre_clst
                        kmeans = KMeans(n_clusters=nbre_clst,
                                        n_init=1,
                                        init=centroid,
                                        max_iter=1000).fit(datax)
                        print "kkk"
                        col = [
                            '#990000', '#E7C843', '#6B8E23', 'blue',
                            'olive', 'orange', 'cyan', 'purple'
                        ]
                        data_clst = []
                        layout = go.Layout(
                            title=regions[region],
                            autosize=True,
                            hovermode='closest',
                            height=750,
                            margin=go.Margin(l=0, r=0, t=45, b=10),
                            mapbox=dict(
                                accesstoken=mapbox_access_token,
                                bearing=0,
                                center=dict(
                                    lat=regions[region]['lat'],
                                    lon=regions[region]['lon'],
                                ),
                                pitch=0,
                                zoom=regions[region]['zoom'],
                            ),
                        )
                        for ind in range(0, k):
                            #datac=[]
                            c = datax[ClusterIndicesNumpy(
                                ind, kmeans.labels_)]
                            x = c[:, 0]
                            y = c[:, 1]
                            df3 = pd.DataFrame(c, columns=colonnes)
                            # outer circles represent magnitude
                            datac = go.Scattermapbox(
                                lat=df3['lat'],
                                lon=df3['long'],
                                mode='markers',
                                marker=go.Marker(
                                    size=df3['mag'] *
                                    radius_multiplier['outer'],
                                    color=col[ind],
                                    opacity=0.8,
                                ),
                                # hoverinfo='text',
                                showlegend=False,
                            )
                            data_clst.append(datac)
                        figure = go.Figure(data=data_clst, layout=layout)
                        centroid = []
                        return figure
                    else:
                        # Fallback: the number of selected points does not
                        # match nbre_clst — cluster with k-means++ and the
                        # catalogue's configured k instead.
                        colonnes = ['lat', 'long', 'depth', 'mag']
                        df1.columns = [
                            'Date', 'lat', 'lon', 'depth', 'mag', 'id'
                        ]
                        dff = df1
                        df1 = df1.iloc[:, [1, 2, 3, 4]]
                        dd = df1.values.tolist()
                        datax = np.array(dd)
                        k = catalogues[catalogue]['k']
                        kmeans = KMeans(n_clusters=k,
                                        init='k-means++',
                                        max_iter=1000).fit(datax)
                        col = [
                            '#990000', '#E7C843', '#6B8E23', 'blue',
                            'olive', 'orange', 'cyan', 'purple'
                        ]
                        data_clst = []
                        layout = go.Layout(
                            title=regions[region],
                            autosize=True,
                            hovermode='closest',
                            height=750,
                            margin=go.Margin(l=0, r=0, t=45, b=10),
                            mapbox=dict(
                                accesstoken=mapbox_access_token,
                                bearing=0,
                                center=dict(
                                    lat=regions[region]['lat'],
                                    lon=regions[region]['lon'],
                                ),
                                pitch=0,
                                zoom=regions[region]['zoom'],
                            ),
                        )
                        for ind in range(0, k):
                            #datac=[]
                            c = datax[ClusterIndicesNumpy(
                                ind, kmeans.labels_)]
                            x = c[:, 0]
                            y = c[:, 1]
                            df3 = pd.DataFrame(c, columns=colonnes)
                            # outer circles represent magnitude
                            datac = go.Scattermapbox(
                                lat=df3['lat'],
                                lon=df3['long'],
                                mode='markers',
                                marker=go.Marker(
                                    size=df3['mag'] *
                                    radius_multiplier['outer'],
                                    color=col[ind],
                                    opacity=0.8,
                                ),
                                # hoverinfo='text',
                                showlegend=False,
                            )
                            data_clst.append(datac)
                        figure = go.Figure(data=data_clst, layout=layout)
                        return figure
def table_view_plot(
    experiment: Experiment,
    data: Data,
    use_empirical_bayes: bool = True,
    only_data_frame: bool = False,
    arm_noun: str = "arm",
):
    """Table of means and confidence intervals.

    Table is of the form:

    +-------+------------+-----------+
    | arm   | metric_1   | metric_2  |
    +=======+============+===========+
    | 0_0   | mean +- CI | ...       |
    +-------+------------+-----------+
    | 0_1   | ...        | ...       |
    +-------+------------+-----------+

    """
    model_factory = (
        get_empirical_bayes_thompson if use_empirical_bayes else get_thompson
    )
    model = model_factory(experiment=experiment, data=data)

    # Metrics that belong to a collection are "exploded" into their members
    # in model.metric_names; keep only names the experiment itself declares,
    # otherwise the chart becomes too big to read easily.
    metric_names = [m for m in model.metric_names if m in experiment.metrics]

    metric_name_to_lower_is_better = {
        m: experiment.metrics[m].lower_is_better for m in metric_names
    }

    plot_data, _, _ = get_plot_data(
        model=model, generator_runs_dict={}, metric_names=metric_names
    )

    # Relativize against the status quo arm when one is present.
    if plot_data.status_quo_name:
        status_quo_arm = plot_data.in_sample.get(plot_data.status_quo_name)
        rel = True
    else:
        status_quo_arm = None
        rel = False

    results = {}
    records_with_mean = []
    records_with_ci = []
    for metric_name in metric_names:
        arms, _, ys, ys_se = _error_scatter_data(
            arms=list(plot_data.in_sample.values()),
            y_axis_var=PlotMetric(metric_name, True),
            x_axis_var=None,
            rel=rel,
            status_quo_arm=status_quo_arm,
        )
        # One (arm, mean, sem) tuple per arm for this metric.
        arm_stats = list(zip(arms, ys, ys_se))
        results[metric_name] = arm_stats
        # Only used when only_data_frame == True.
        records_with_mean.append({arm: y for (arm, y, _) in arm_stats})
        records_with_ci.append({arm: y_se for (arm, _, y_se) in arm_stats})

    if only_data_frame:
        return tuple(
            pd.DataFrame.from_records(records, index=metric_names).transpose()
            for records in [records_with_mean, records_with_ci]
        )

    # `cells` and `colors` are lists of lists; each top-level list is one
    # table column, the first column holding the arm names.
    cells = [[f"<b>{x}</b>" for x in arms]]
    colors = [["#ffffff"] * len(arms)]
    metric_names = []
    for metric_name, arm_stats in sorted(results.items()):
        cells.append(
            [
                "{:.3f} ± {:.3f}".format(y, Z * y_se)
                for (_, y, y_se) in arm_stats
            ]
        )
        metric_names.append(metric_name.replace(":", " : "))
        colors.append(
            [
                get_color(
                    x=y,
                    ci=Z * y_se,
                    rel=rel,
                    reverse=metric_name_to_lower_is_better[metric_name],
                )
                for (_, y, y_se) in arm_stats
            ]
        )

    header = [f"<b>{x}</b>" for x in [f"{arm_noun}s"] + metric_names]
    trace = go.Table(
        header={"values": header, "align": ["left"]},
        cells={"values": cells, "align": ["left"], "fill": {"color": colors}},
    )
    layout = go.Layout(
        height=min([400, len(arms) * 20 + 200]),
        width=175 * len(header),
        margin=go.Margin(l=0, r=20, b=20, t=20, pad=4),  # noqa E741
    )
    fig = go.Figure(data=[trace], layout=layout)
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
# In[6]:

# Sort neighborhoods by % white population, ascending.
# FIX: Series.sort(inplace=False) was deprecated and then removed from
# pandas; sort_values() is the supported equivalent and likewise returns a
# new sorted Series.
df_white_sorted = df['pwhite10'].sort_values()

# In[7]:

# Create a horizontal bar chart with plotly.
data = pgo.Data(
    [pgo.Bar(y=df_white_sorted.index, x=df_white_sorted, orientation='h')])

# In[8]:

layout = pgo.Layout(
    title='% White',
    margin=pgo.Margin(l=300)  # add left margin because y-labels are long
)

# In[9]:

fig = pgo.Figure(data=data, layout=layout)

# In[10]:

# Address InsecurePlatformWarning from running Python 2.7.6
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()

# In[11]:
def plot(self):
    """Render a stacked bar chart of median protein expression per tissue.

    Returns
    -------
    str
        A plotly HTML div (without plotly.js), or a placeholder div when
        the database query yields no usable data.
    """
    x_list, y_list, array_list, id_list = self.get_express_value_from_db()
    if len(x_list) == len(y_list) and len(x_list) != 0:
        trace = []
        for x_item, y_item, array_item, id_item in zip(
                x_list, y_list, array_list, id_list):
            # Capitalize tissue labels; bold the brain-related ones.
            for i, item in enumerate(x_item):
                if item in [
                        "brain", 'arachnoid cyst', 'cerebral cortex',
                        'cerebrospinal fluid', 'prefrontal cortex',
                        'spinal cord'
                ]:
                    x_item[i] = "<b>" + x_item[i].capitalize() + "</b>"
                else:
                    x_item[i] = x_item[i].capitalize()
            trace.append(
                go.Bar(x=x_item,
                       y=y_item,
                       name=id_item,
                       hoverinfo='all',
                       error_y=dict(type='data',
                                    array=array_item,
                                    visible=True)))
        # BUG FIX: `plot_bgcolor` was passed twice to go.Layout (once as
        # 'rgb(249, 249, 249)' and once as '#EFECEA'); a repeated keyword
        # argument is a SyntaxError. The value matching paper_bgcolor is
        # kept.
        layout = go.Layout(
            paper_bgcolor='rgb(249, 249, 249)',
            plot_bgcolor='rgb(249, 249, 249)',
            barmode='stack',
            height=400,
            width=1300,
            title='<br>Median protein expression</br>',
            hovermode='closest',
            # Distance between the axis labels and the canvas edges.
            margin=go.Margin(l=50, r=20, b=150, t=40, pad=0),
            xaxis=dict(titlefont=dict(size=8),
                       showgrid=True,
                       zeroline=True,
                       showline=True,
                       showticklabels=True,
                       tickangle=50),
            yaxis=dict(title='log <sub>10</sub> normalized iBAQ intensity',
                       showgrid=True,
                       autorange=True,
                       showticklabels=True,
                       exponentformat='e',
                       showexponent='All'))
        fig = go.Figure(data=trace, layout=layout)
        return plotly.offline.plot(fig,
                                   show_link=False,
                                   output_type="div",
                                   include_plotlyjs=False)
    else:
        return ('<div> There is no corresponding data published yet, '
                'we will update it when such data available. </div>')
def InteractiveImagePIL(image_id,
                        image,
                        enc_format="png",
                        dragmode="select",
                        verbose=False,
                        **kwargs):
    """Wrap a PIL image in a dcc.Graph so it can be panned/selected in Dash.

    The image is base64-encoded and embedded as a layout background image;
    the figure itself carries no data traces.
    """
    # JPEG cannot store an alpha channel, so flatten RGBA first.
    if enc_format == "jpeg":
        if image.mode == "RGBA":
            image = image.convert("RGB")
        encoded_image = pil_to_b64(image,
                                   enc_format=enc_format,
                                   verbose=verbose,
                                   quality=80)
    else:
        encoded_image = pil_to_b64(image,
                                   enc_format=enc_format,
                                   verbose=verbose)

    width, height = image.size

    def axis_style(span, anchored=False):
        # Common dark-theme axis styling over the pixel range [0, span].
        style = {
            "range": (0, span),
            "color": "white",
            "gridcolor": "#43454a",
            "tickwidth": 1,
        }
        if anchored:
            # Lock the aspect ratio so image pixels stay square.
            style["scaleanchor"] = "y"
            style["scaleratio"] = 1
        return style

    background_image = {
        "xref": "x",
        "yref": "y",
        "x": 0,
        "y": 0,
        "yanchor": "bottom",
        "sizing": "stretch",
        "sizex": width,
        "sizey": height,
        "layer": "below",
        "source": HTML_IMG_SRC_PARAMETERS + encoded_image,
    }
    figure = {
        "data": [],
        "layout": {
            "autosize": True,
            "paper_bgcolor": "#272a31",
            "plot_bgcolor": "#272a31",
            "margin": go.Margin(l=40, b=40, t=26, r=10),
            "xaxis": axis_style(width, anchored=True),
            "yaxis": axis_style(height),
            "images": [background_image],
            "dragmode": dragmode,
        },
    }
    removed_buttons = [
        "sendDataToCloud",
        "autoScale2d",
        "toggleSpikelines",
        "hoverClosestCartesian",
        "hoverCompareCartesian",
        "zoom2d",
    ]
    return dcc.Graph(id=image_id,
                     figure=figure,
                     config={"modeBarButtonsToRemove": removed_buttons},
                     **_omit(["style"], kwargs))
def main():
    """Plot how well reference transcripts are covered by a transcript assembly.

    Each input cov file is tab-delimited: ref_id, percent coverage and, when
    --stacked is used, the covering query transcript id. The chart is either
    posted to the plotly site or written as a PNG with --output_image.
    """
    parser = argparse.ArgumentParser(
        description=
        'Generates a graphic showing how well reference transcripts are covered by a transcript assembly'
    )

    ## output file to be written
    parser.add_argument('-i', '--input_files', type=str, required=True,
                        help='Comma-separated list of cov files to be plotted')
    parser.add_argument('-l', '--labels', type=str, required=True,
                        help='Labels for each cov file passed')
    parser.add_argument('-t', '--title', type=str, required=False,
                        default='Transcript coverage', help='Title for the plot')
    parser.add_argument('-s', '--stacked', dest='stacked', action='store_true')
    parser.set_defaults(stacked=False)
    parser.add_argument('-rf', '--ref_fasta', required=False,
                        help='Only needed if passing --stacked')
    parser.add_argument('-qf', '--qry_fasta', required=False,
                        help='Only needed if passing --stacked')
    parser.add_argument(
        '-mb', '--margin_bottom', type=int, required=False, default=120,
        help='Size of the bottom margin, in case X labels are being cut off')
    parser.add_argument(
        '-o', '--output_image', type=str, required=False,
        help='Name for PNG file to be created. If not passed, will post to plotly site')
    args = parser.parse_args()

    cov_files = args.input_files.split(",")
    labels = args.labels.split(",")

    colors = [
        'rgb(49,130,189)',  #blue
        'rgb(204,204,204)',  #light grey
        'rgb(50, 171, 96)',  #green
        'rgb(222,45,38)',  #red
        'rgb(142, 124, 195)',  #purple
        'rgb(100,100,100)',  #darker grey
        'rgb(255,255,61)',  #yellow
        'rgb(255,169,58)'  #orange
    ]

    if len(labels) > len(colors):
        raise Exception(
            "Sorry, this many datasets is not yet supported (only because not enough colors were defined in code.)"
        )

    # This stores the positions of the labels
    label_position = dict()

    if len(cov_files) > 1 and args.stacked:
        raise Exception(
            "Use of the --stacked option requires a single input file")

    # Only used if doing a single-file stacked bar chart
    if args.stacked:
        ref_sizes = biocode.utils.fasta_sizes_from_file(args.ref_fasta)
        qry_sizes = biocode.utils.fasta_sizes_from_file(args.qry_fasta)

    traces = []

    for file_idx, cov_file in enumerate(cov_files):
        xvals = []
        yvals = []
        stacked_yvals = []

        # BUG FIX: the original iterated over a bare open() call, leaking
        # the file handle; a context manager guarantees it is closed.
        with open(cov_file) as fh:
            for line in fh:
                cols = line.rstrip().split("\t")
                ref_id = cols[0]
                xvals.append(ref_id)
                yvals.append(float(cols[1]))

                if args.stacked:
                    qry_id = cols[2]
                    if qry_sizes[qry_id] > ref_sizes[ref_id]:
                        # what percentage larger than the reference is the query?
                        rel_perc = (qry_sizes[qry_id] / ref_sizes[ref_id]) * 100
                        # for the stacked bars we have to subtract the current
                        # yval cov, since this adds to it
                        stacked_yvals.append(rel_perc - float(cols[1]))
                    else:
                        stacked_yvals.append(0)

        traces.append(
            go.Bar(x=xvals, y=yvals, name=labels[file_idx],
                   marker=dict(color=colors[file_idx])))

        if args.stacked:
            # Grey overflow segment stacked on top of the coverage bar.
            traces.append(
                go.Bar(x=xvals, y=stacked_yvals, name=labels[file_idx],
                       marker=dict(color='rgb(200,200,200)')))

    barmode = 'stack' if args.stacked else 'group'

    layout = go.Layout(
        title=args.title,
        xaxis=dict(
            # set x-axis' labels direction at 45 degree angle
            tickangle=-65),
        yaxis=dict(title='Percent coverage',
                   titlefont=dict(size=16, color='rgb(107, 107, 107)'),
                   tickfont=dict(size=16, color='rgb(107, 107, 107)')),
        legend=dict(bgcolor='rgba(255, 255, 255, 0)',
                    bordercolor='rgba(255, 255, 255, 0)',
                    font=dict(size=20, color='#000')),
        barmode=barmode,
        bargap=0.15,
        bargroupgap=0.1,
        width=1500,
        height=800,
        margin=go.Margin(b=args.margin_bottom, pad=5))

    fig = go.FigureWidget(data=traces, layout=layout)

    if args.output_image is None:
        plot_url = py.plot(fig, filename='angled-text-bar')
        print("Navigate to {0} for your image".format(plot_url))
    else:
        fig.write_image(args.output_image)
        print("Output written to file: {0}".format(args.output_image))
def table_view_plot(
    experiment: Experiment, data: Data, use_empirical_bayes: bool = True
):
    """Table of means and confidence intervals.

    Table is of the form:

    +-------+------------+-----------+
    | arm   |  metric_1  | metric_2  |
    +=======+============+===========+
    | 0_0   | mean +- CI |    ...    |
    +-------+------------+-----------+
    | 0_1   |    ...     |    ...    |
    +-------+------------+-----------+

    One row per arm, one column per metric; each cell is colored by the
    significance of the (possibly relativized) effect.
    """
    # Fit the requested Thompson-sampling model to the experiment data.
    if use_empirical_bayes:
        model = get_empirical_bayes_thompson(experiment=experiment, data=data)
    else:
        model = get_thompson(experiment=experiment, data=data)

    plot_data, _, _ = get_plot_data(
        model=model, generator_runs_dict={}, metric_names=model.metric_names
    )

    # Relativize against the status quo arm when one exists.
    status_quo_arm = None
    rel = False
    if plot_data.status_quo_name:
        status_quo_arm = plot_data.in_sample.get(plot_data.status_quo_name)
        rel = True

    results = {}
    for metric_name in model.metric_names:
        arms, _, ys, ys_se = _error_scatter_data(
            arms=list(plot_data.in_sample.values()),
            y_axis_var=PlotMetric(metric_name, True),
            x_axis_var=None,
            rel=rel,
            status_quo_arm=status_quo_arm,
        )
        # add spaces to metric name so it wraps in the header cell;
        # results[metric] holds one (arm, y, y_se) tuple per arm
        results[metric_name.replace(":", " : ")] = list(zip(arms, ys, ys_se))

    # cells and colors are both lists of lists; each top-level list is a
    # column, the first being the arm names.
    cells = [[f"<b>{x}</b>" for x in arms]]
    colors = [["#ffffff"] * len(arms)]
    metric_names = []
    for metric_name in sorted(results):
        rows = results[metric_name]
        metric_names.append(metric_name)
        cells.append(
            ["{:.3f} ± {:.3f}".format(y, Z * y_se) for (_, y, y_se) in rows]
        )
        colors.append([get_color(y, Z * y_se, rel) for (_, y, y_se) in rows])

    header = [f"<b>{x}</b>" for x in ["arms"] + metric_names]

    trace = go.Table(
        header={"values": header, "align": ["left"]},
        cells={"values": cells, "align": ["left"], "fill": {"color": colors}},
    )
    layout = go.Layout(
        height=min([400, len(arms) * 20 + 200]),
        width=175 * len(header),
        margin=go.Margin(l=0, r=20, b=20, t=20, pad=4),  # noqa E741
    )
    fig = go.Figure(data=[trace], layout=layout)
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def makeDEbox_pval(input_file, de_file=None):
    """Build grouped box plots (RPM+1 on a log axis) of DE miRNAs.

    Parameters:
        input_file: tab-separated table; first column is "miRNA#condition",
            remaining columns are per-sample expression values.
        de_file: optional DE result table whose first column is the miRNA id
            and whose second-to-last column is the p-value.

    Returns:
        str: the plot as an HTML <div> string.
    """
    input_file = input_file.replace("\\", "/")

    pval_dict = dict()
    label_dict = dict()
    if de_file:
        # First line is a header; remaining rows map miRNA id -> p-value.
        with open(de_file, "r") as de_hand:
            lines = de_hand.readlines()
        lines.pop(0)
        for line in lines:
            row = line.split("\t")
            pval_dict[row[0]] = str(round(float(row[-2]), 8))
        print(pval_dict)

    color_list = ["red", "green", "blue", "yellow", "purple", "orange"] * 10

    with open(input_file, "r") as ifile:
        lines = ifile.readlines()

    x_dict = dict()
    y_dict = dict()
    x_list = []
    size_dict = dict()
    for i, line in enumerate(lines):
        row = line.split("\t")
        x, cond = row[0].split("#")
        x_list.append(x)
        # Cap the number of distinct miRNAs plotted at 20.
        if len(set(x_list)) >= 21:
            continue
        # BUG FIX: pval_dict.get(x) is None when de_file is absent (or the
        # id is missing), which crashed the string concatenations below;
        # fall back to "NA".
        pval = pval_dict.get(x, "NA")
        n_vals = len(row) - 1  # number of sample values on this row
        if x_dict.get(cond):
            x_dict[cond].extend(
                ["<b>" + x + "</b>" + " [pval=" + pval + "]"] * n_vals)
            y_dict[cond].extend(row[1:])
            label_dict[cond].extend([pval] * n_vals)
        else:
            x_dict[cond] = [x + " [pval=" + pval + "]"] * n_vals
            y_dict[cond] = row[1:]
            label_dict[cond] = [pval] * n_vals
        # BUG FIX: size_dict was only filled on a condition's *second*
        # occurrence, raising KeyError below for single-entry groups.
        if not size_dict.get(cond):
            size_dict[cond] = n_vals

    data = []
    # BUG FIX: the hard-coded "Group1" debug prints raised KeyError when
    # that group was absent; guard them.
    if "Group1" in x_dict:
        print(len(label_dict["Group1"]))
        print(len(x_dict["Group1"]))

    for i, key in enumerate(x_dict.keys()):
        to_y = numpy.array(list(map(float, y_dict[key])))
        to_y = to_y + 1  # +1 so zero counts survive the log-scaled axis
        trace = go.Box(x=x_dict[key],
                       y=to_y,
                       marker=dict(color=color_list[i]),
                       name=key + " n=" + str(size_dict[key]))
        data.append(trace)

    layout = go.Layout(
        hovermode="x",
        boxmode='group',
        autosize=True,
        margin=go.Margin(l=50, r=50, b=150, t=100, pad=4),
        title="Differentially Expressed miRNAs",
        xaxis=dict(
            tick0=0,
            dtick=1,
        ),
        yaxis=dict(type='log', title='RPM+1'))
    fig = go.Figure(data=data, layout=layout)
    div_obj = plot(fig, show_link=False, auto_open=False, output_type='div')
    return div_obj
def plot_norm_data_vertical_lines(df_orders, portvals, portvals_bm, vert_lines=False):
    """Plots portvals and portvals_bm, showing vertical lines for buy and sell orders

    Parameters:
    df_orders: A dataframe that contains portfolio orders
    portvals: A dataframe with one column containing daily portfolio value
    portvals_bm: A dataframe with one column containing daily benchmark value
    vert_lines: Whether to draw dashed vertical lines at order dates

    Returns:
    Plot a chart of the portfolio and benchmark performances
    """
    # Normalize both series and join into one frame with two columns.
    portvals = normalize_data(portvals)
    portvals_bm = normalize_data(portvals_bm)
    df = portvals_bm.join(portvals)

    bench = df.loc[:, "Benchmark"]
    port = df.loc[:, "Portfolio"]

    # y-axis bounds spanning both series.
    min_range = bench.min() if bench.min() < port.min() else port.min()
    max_range = bench.max() if bench.max() > port.max() else port.max()

    # Normalized benchmark and portfolio traces.
    data = [
        go.Scatter(x=df.index, y=bench, name="Benchmark",
                   line=dict(color='#17BECF'), opacity=0.8),
        go.Scatter(x=df.index, y=port, name="Portfolio",
                   line=dict(color='#000000'), opacity=0.8),
    ]

    # Vertical markers for buy (green) and sell (red) signals.
    shapes = list()
    if vert_lines:
        buy_dates = [d for d in df_orders.index
                     if df_orders.loc[d, "Order"] == "BUY"]
        sell_dates = [d for d in df_orders.index
                      if df_orders.loc[d, "Order"] != "BUY"]
        line_top = max_range + (max_range * 10 / 100)

        def _vline(x, color):
            # Dashed vertical line spanning the plot height at date x.
            return {
                'type': 'line',
                'xref': 'x',
                'yref': 'y',
                'x0': x,
                'y0': 0,
                'x1': x,
                'y1': line_top,
                'line': {'color': color, 'width': 1, 'dash': 'dash'},
            }

        shapes.extend(_vline(d, 'rgb(0, 102, 34)') for d in buy_dates)
        shapes.extend(_vline(d, 'rgb(255, 0, 0)') for d in sell_dates)

    layout = dict(
        autosize=True,
        shapes=shapes,
        margin=go.Margin(l=50, r=50, b=100, t=100, pad=4),
        title="Portfolio vs Benchmark",
        xaxis=dict(
            title='Dates',
            rangeselector=dict(buttons=list([
                dict(count=1, label='1m', step='month', stepmode='backward'),
                dict(count=6, label='6m', step='month', stepmode='backward'),
                dict(step='all')
            ])),
            range=[portvals.index[0], portvals.index[-1]]),
        yaxis=dict(title='Normalized Prices',
                   range=[
                       min_range - (min_range * 10 / 100),
                       max_range + (max_range * 10 / 100)
                   ]),
    )
    fig = dict(data=data, layout=layout)
    iplot(fig)
paper_bgcolor='#fff', plot_bgcolor='#fff', showlegend=False, yaxis = dict( range=[0,9000000], tickfont=dict( family='Arial, sans-serif', size=11, color='grey' ), ), xaxis = dict( tickfont = dict( family = 'Arial, sans-serif', size = 11, color = 'grey' ), ), margin=go.Margin( l=40, r=40, b=40, t=40, pad=.1 ), bargap = 0.1, ) fig = go.Figure(data=data, layout=layout) iplot(fig)
def compute_waterfall_contribution(contribution, bias, n_round=3, delta_y_display=0.05, hover_value='x'):
    """Function used to compute plotly traces for a waterfall display.

    Builds a stacked-bar waterfall: an invisible "base" bar lifts each
    floating segment to its start height, blue bars are positive
    contributions, orange bars are negative ones, and a final green bar
    shows the total prediction.

    Parameters
    ----------
    contribution : mapping with 'contribution' (signed per-variable values)
        and 'variable' (their names) — appears to be a DataFrame or
        dict-of-columns; confirm against callers.
    bias : float
        Starting value of the waterfall (base rate).
    n_round : int
        Number of decimals shown in the bar labels.
    delta_y_display : float
        Vertical offset of each text annotation above its bar.
    hover_value : str
        Forwarded to plotly's ``hoverinfo`` for every trace.

    Returns
    -------
    (data, layout) : four go.Bar traces and a go.Layout with the per-bar
        text annotations attached.
    """
    ## Compute the 4 traces list
    # Parallel lists, one entry per column: base (invisible spacer),
    # positive delta, negative delta, final total, label text and its y.
    base = [0]
    positive = [bias]
    negative = [0]
    total = [0]
    text = [str(round(bias, n_round))]
    y_text = [bias + delta_y_display]
    for contrib in contribution['contribution']:
        # Each new bar starts where the previous visible bar ended.
        base.append(base[-1] + positive[-1])
        total.append(0)
        if contrib>=0:
            negative.append(0)
            positive.append(contrib)
            text.append('+' + str(round(contrib, n_round)))
        else:
            positive.append(0)
            negative.append(-contrib)
            # Negative bars hang downward: lower the spacer by the drop.
            base[-1] = base[-1] + contrib
            text.append(str(round(contrib, n_round)))
        # Annotation sits just above the top of the visible segment.
        y_text.append(base[-1] + negative[-1] + positive[-1] + delta_y_display)
    # Closing 'Prediction' column: a single green total bar from zero.
    total.append(base[-1] + positive[-1])
    base.append(0)
    positive.append(0)
    negative.append(0)
    text.append(str(round(total[-1] + negative[-1], n_round)))
    y_text.append(total[-1] + delta_y_display)

    ## Create the 4 traces
    x_data = ['_BASE RATE_'] + list(contribution['variable']) + ['Prediction']
    trace_base = go.Bar(x=x_data, y=base,
                        marker=dict(
                            color='rgba(1,1,1, 0.0)',  # fully transparent spacer
                        ),
                        hoverinfo=hover_value)
    trace_positive = go.Bar(x=x_data, y=positive,
                            marker=dict(
                                color='rgba(55, 128, 191, 0.7)',
                                line=dict(
                                    color='rgba(55, 128, 191, 1.0)',
                                    width=2,
                                )
                            ),
                            hoverinfo=hover_value)
    trace_negative = go.Bar(x=x_data, y=negative,
                            marker=dict(
                                color='rgba(255, 128, 0, 0.7)',
                                line=dict(
                                    color='rgba(255, 128, 0, 1.0)',
                                    width=2,
                                )
                            ),
                            hoverinfo=hover_value)
    trace_total = go.Bar(x=x_data, y=total,
                         marker=dict(
                             color='rgba(50, 171, 96, 0.7)',
                             line=dict(
                                 color='rgba(50, 171, 96, 1.0)',
                                 width=2,
                             )
                         ),
                         hoverinfo=hover_value)
    data = [trace_base, trace_positive, trace_negative, trace_total]

    # One text annotation per column at its precomputed height.
    annotations = []
    for i in range(len(text)):
        annotations.append(dict(x=x_data[i],
                                y=y_text[i],
                                text=text[i],
                                font=dict(family='Arial', size=14,
                                          color='rgba(0, 0, 0, 1)'),
                                showarrow=False,))
    layout = go.Layout(
        barmode='stack',
        xaxis={'title': ''},
        yaxis={'title': 'Prediction score'
               #, 'range': [0.0, 1.1]
               },
        title='Score breakdown by variable contribution',
        margin=go.Margin(
            l=200,
            r=20,
            b=100,
            t=50,
            pad=4
        ),
        #paper_bgcolor='rgba(245, 246, 249, 1)',
        #plot_bgcolor='rgba(245, 246, 249, 1)',
        showlegend=False
    )
    layout['annotations'] = annotations
    return data, layout
def update_graph_scatter():
    """Live-dashboard callback: refresh the two queue graphs.

    Reads and appends to module-level state: ``X`` (timestamps) and
    ``Y1``/``Y2`` (queue depths) — presumably deques; confirm at module
    level. ``scrape()`` returns the current (inbound, french) queue sizes.
    Returns the rebuilt two-row figure.
    """
    # Append the newest sample. NOTE(review): scrape() is called again for
    # the subplot titles below — the values may differ between calls;
    # consider caching a single scrape() result.
    Y1.append(scrape()[0])
    Y2.append(scrape()[1])
    X.append(datetime.datetime.now())

    # Percent change across the recorded window ("-.--%" when the first
    # sample is zero, to avoid division by zero).
    if (int(list(Y1)[0]) == 0):
        diff = "-.--%"
    else:
        diff = str(round(((int(list(Y1)[-1]) - int(list(Y1)[0])) / int(list(Y1)[0])) * 100, 2)) + "%"
    if (int(list(Y2)[0]) == 0):
        diff2 = "-.--%"
    else:
        diff2 = str(round(((int(list(Y2)[-1]) - int(list(Y2)[0])) / int(list(Y2)[0])) * 100, 2)) + "%"

    # ============================================
    # graph elements for the regular inbound queue
    # ============================================
    data1 = plotly.graph_objs.Scatter(
        x=list(X),
        y=list(Y1),
        name='Inbound Queue',
        mode= 'lines',
        fill='tozeroy',
        line = dict(color='rgb(0, 255, 242)')
        )
    # Dotted trend line from the first to the last sample.
    line1 = plotly.graph_objs.Scatter(
        x=[list(X)[0], list(X)[-1]],
        y=[list(Y1)[0], list(Y1)[-1]],
        name = "Inbound 1H Diff",
        line = dict(
            color = '#ff0101',
            width = 2,
            dash = 'dot'
            ),
        yaxis='y3'
        )
    # ============================================
    # graph elements for the regular french queue
    # ============================================
    data2 = plotly.graph_objs.Scatter(
        x=list(X),
        y=list(Y2),
        name='French Queue',
        mode= 'lines',
        fill='tozeroy',
        yaxis='y2',
        line = dict(color='rgb(255, 94, 225)')
        )
    line2 = plotly.graph_objs.Scatter(
        x=[list(X)[0], list(X)[-1]],
        y=[list(Y2)[0], list(Y2)[-1]],
        name = "French 1H Diff",
        line = dict(
            color = '#ffff00',
            width = 2,
            dash = 'dot'
            ),
        yaxis='y4'
        )
    # Large current-value readouts placed at the newest sample of each plot.
    annotations=[
        dict(
            xref='x',
            yref='y',
            x=list(X)[-1],
            y=list(Y1)[-1],
            text=str(list(Y1)[-1]),
            showarrow=False,
            font=dict(
                size=40,
                family='Monaco, monospace',
                color='rgb(0, 255, 242)'
                ),
            ax=-1000,
            ay=-10
            ),
        dict(
            xref='x',
            yref='y2',
            x=list(X)[-1],
            y=list(Y2)[-1],
            text=str(list(Y2)[-1]),
            showarrow=False,
            font=dict(
                size=40,
                family='Monaco, monospace',
                color='rgb(255, 94, 225)'
                ),
            ax=-1000,
            ay=-10
            )
        ]
    fig = tools.make_subplots(
        rows=2, cols=1,
        vertical_spacing=0.07,
        subplot_titles=(
            'INBOUND QUEUE: ' + str(scrape()[0]),
            'FRENCH QUEUE: ' + str(scrape()[1])
            )
        )
    # add the the new plots to the figure
    fig.append_trace(data1, 1, 1)
    fig.append_trace(line1, 1, 1)
    fig.append_trace(data2, 2, 1)
    fig.append_trace(line2, 2, 1)
    # NOTE(review): line3 is not defined in this function — presumably a
    # module-level trace; confirm, otherwise this raises NameError.
    fig.append_trace(line3, 1, 1)
    # design of the live graph
    fig['layout'].update(
        annotations=annotations,
        paper_bgcolor='rgb(61, 61, 61)',
        plot_bgcolor='rgb(61, 61, 61)',
        margin=go.Margin(
            t=20
            ),
        height=850,
        showlegend=False,
        xaxis=dict(
            tickcolor='black',
            gridcolor='rgb(112, 112, 112)',
            range=[min(X), max(X)],
            linecolor='black',
            linewidth=2,
            tickwidth=2,
            showticklabels=False,
            ),
        xaxis2=dict(
            color='lightgrey',
            tickcolor='black',
            gridcolor='rgb(112, 112, 112)',
            ticks='outside',
            title=str(datetime.datetime.now().time()),
            range=[min(X), max(X)],
            linecolor='black',
            linewidth=2,
            tickwidth=2,
            ),
        yaxis=dict(
            color='rgb(0, 226, 215)',
            tickcolor='black',
            gridcolor='rgb(112, 112, 112)',
            ticks='outside',
            title='INBOUND (' + diff + ')',
            titlefont=dict(
                family='Lucida Sans Unicode", "Lucida Grande", sans-serif',
                color='rgb(0, 226, 215)',
                size=20
                ),
            range=[0, 350],
            linecolor='black',
            linewidth=2,
            mirror='ticks',
            tickwidth=2,
            side='left'
            ),
        yaxis2=dict(
            color='rgb(255, 94, 225)',
            tickcolor='black',
            gridcolor='rgb(112, 112, 112)',
            ticks='outside',
            title='FRENCH (' + diff2 + ')',
            titlefont=dict(
                family='Lucida Sans Unicode", "Lucida Grande", sans-serif',
                color='rgb(255, 94, 225)',
                size=20
                ),
            range=[0, 15],
            linecolor='black',
            linewidth=2,
            mirror='ticks',
            tickwidth=2,
            side='left'
            )
        )
    return(fig)
def plot_speed_distribution_by_running(data, runnings=None, title='Speed distribution by running', speed_column_name='speed (m/s)', sex_column_name='sex'):
    '''
    This function plots, for each running, the distribution of speeds of runners based on the genders of participants.

    Parameters
        - data: DataFrame to use during generation of the distribution
        - runnings: Dict containing name of column containing runnings (key: column_name) and set of runnings (key: values, value: dict() with following keys: name, color, position)
                    By default, None. If None, default values will be set by function.
        - title: Title of the graph (by default, 'Speed distribution by running')
        - speed_column_name: Name of the column containing speed of participants (by default, 'speed (m/s)')
        - sex_column_name: Name of the column containing sex of participants (by default, 'sex')

    Return
        - figure: Plotly figure (one stacked-histogram subplot per running, with per-running summary statistics annotated)
    '''
    # Default configuration: three runnings keyed by distance, one subplot
    # row each (position 1..3).
    if not runnings:
        runnings = {
            'column_name': 'distance (km)',
            'values': OrderedDict([(10, {
                'name': '10 km',
                'color': KM_10_COLOR,
                'position': 1
            }), (21, {
                'name': 'Semi-marathon',
                'color': KM_21_COLOR,
                'position': 2
            }), (42, {
                'name': 'Marathon',
                'color': KM_42_COLOR,
                'position': 3
            })])
        }
    # One histogram color per gender, plus one for the combined trace.
    colors = {
        'female': FEMALE_COLOR,
        'male': MALE_COLOR,
        'all': ALL_GENDERS_COLOR
    }
    statistics = {}
    # make_subplots prints its grid description; suppress that output.
    with study_utils.ignore_stdout():
        figure = tools.make_subplots(
            rows=3,
            cols=1,
            shared_xaxes=True,
            subplot_titles=([
                attributes['name']
                for km, attributes in runnings['values'].items()
            ]))
    for key, attributes in runnings['values'].items():
        filtered_df = data[data[runnings['column_name']] == key]
        # HTML summary (count / max / min / median / SD) shown as an
        # annotation next to this running's subplot.
        statistics[attributes['name']] = 'Total: ' + str(
            len(filtered_df)) + ' runners<br>Max: ' + str(
                round(np.max(filtered_df[speed_column_name]),
                      2)) + ' m/s<br>Min: ' + str(
                          round(np.min(filtered_df[speed_column_name]), 2)
                      ) + ' m/s<br>Median: ' + str(
                          round(np.median(filtered_df[speed_column_name]),
                                2)) + ' m/s | SD: ' + str(
                                    round(
                                        np.std(filtered_df[speed_column_name]),
                                        2)) + ' m/s'
        # One histogram per gender plus the 'all' aggregate; legend entries
        # are grouped by gender and shown only on the first subplot. Bins
        # are shared across subplots by deriving them from the full data.
        for sex in np.concatenate(
            (filtered_df[sex_column_name].unique(), ['all']), axis=0):
            if sex == 'all':
                x = filtered_df[speed_column_name]
            else:
                x = filtered_df[filtered_df[sex_column_name] ==
                                sex][speed_column_name]
            figure.append_trace(
                go.Histogram(xbins={
                    'start':
                    math.floor(np.min(data[speed_column_name])),
                    'end':
                    math.ceil(np.max(data[speed_column_name])),
                    'size': 0.1
                },
                             x=x,
                             name=sex.capitalize() + ' runners',
                             legendgroup=sex,
                             showlegend=(attributes['position'] == 1),
                             marker={'color': colors[sex]},
                             opacity=0.75), attributes['position'], 1)
    # Format of axes and layout
    figure.layout.xaxis1.update(title='Speed (m/s)', tickformat='.1f')
    figure.layout.yaxis2.update(title='Number of participants')
    figure.layout.update(title=title,
                         barmode='stack',
                         bargroupgap=0.1,
                         bargap=0,
                         margin=go.Margin(t=100, b=50, l=50, r=50))
    # Add of statistics
    # Trick: We use position of subtitles annotations to create the ones related to statistics
    annotations_statistics = []
    for annotation in figure['layout']['annotations']:
        annotations_statistics.append(
            Annotation(y=annotation['y'] - 0.12,
                       x=1,
                       align='left',
                       text=statistics[annotation['text']],
                       xref='paper',
                       yref='paper',
                       yanchor='bottom',
                       showarrow=False))
    # NOTE(review): extending layout annotations in place relies on an
    # older plotly where this attribute behaves like a list — confirm.
    figure['layout']['annotations'].extend(annotations_statistics)
    plotly.offline.iplot(figure)
    return figure
def sph_plot_diracs_plotly(
    colatitude_ref=None,
    azimuth_ref=None,
    colatitude=None,
    azimuth=None,
    dirty_img=None,
    azimuth_grid=None,
    colatitude_grid=None,
    surface_base=1,
    surface_height=0.0,
):
    """
    Plots a 2D map on a sphere as well as a collection of diracs using the plotly library

    Parameters
    ----------
    colatitude_ref: ndarray, optional
        The colatitudes of a collection of reference points
    azimuth_ref: ndarray, optional
        The azimuths of a collection of reference points for the Diracs
    colatitude: ndarray, optional
        The colatitudes of the collection of points to visualize
    azimuth: ndarray, optional
        The azimuths of the collection of points to visualize
    dirty_img: ndarray
        A 2D map for displaying a pattern on the sphere under the points
    azimuth_grid: ndarray
        The azimuths indexing the dirty_img 2D map
    colatitude_grid: ndarray
        The colatitudes indexing the dirty_img 2D map
    surface_base:
        radius corresponding to lowest height on the map
    surface_height:
        radius difference between the lowest and highest point on the map
    """
    # plotly is an optional dependency: degrade to a warning if missing.
    try:
        from plotly.offline import plot
        import plotly.graph_objs as go
        import plotly
    except ImportError:
        import warnings

        warnings.warn("The plotly package is required to use this function")
        return

    plotly.offline.init_notebook_mode()

    traces = []

    # Optional heat-map surface rendered underneath the points.
    if (dirty_img is not None and azimuth_grid is not None
            and colatitude_grid is not None):
        surfacecolor = np.abs(dirty_img)  # for plotting purposes

        base = surface_base
        surf_diff = surfacecolor.max() - surfacecolor.min()
        if surf_diff > 0:
            height = surface_height / surf_diff
        else:
            height = 0

        # NOTE(review): height is already divided by surf_diff above, and
        # the expression below divides by (max - min) again — looks like a
        # double normalization; confirm the intended radial scaling.
        r_surf = base + height * (surfacecolor - surfacecolor.min()) / (
            surfacecolor.max() - surfacecolor.min())

        # Spherical -> Cartesian conversion of the surface grid.
        x_plt = r_surf * np.sin(colatitude_grid) * np.cos(azimuth_grid)
        y_plt = r_surf * np.sin(colatitude_grid) * np.sin(azimuth_grid)
        z_plt = r_surf * np.cos(colatitude_grid)

        trace1 = go.Surface(
            x=x_plt,
            y=y_plt,
            z=z_plt,
            surfacecolor=surfacecolor,
            opacity=1,
            colorscale="Portland",
            hoverinfo="none",
        )

        trace1["contours"]["x"]["highlightwidth"] = 1
        trace1["contours"]["y"]["highlightwidth"] = 1
        trace1["contours"]["z"]["highlightwidth"] = 1

        traces.append(trace1)

    # Ground-truth Dirac locations (unit sphere).
    if colatitude_ref is not None and azimuth_ref is not None:
        x_ref = np.sin(colatitude_ref) * np.cos(azimuth_ref)
        y_ref = np.sin(colatitude_ref) * np.sin(azimuth_ref)
        z_ref = np.cos(colatitude_ref)

        # Wrap scalars into lists so the labeling loop below works.
        if not hasattr(colatitude_ref, "__iter__"):
            colatitude_ref = [colatitude_ref]
            azimuth_ref = [azimuth_ref]
            x_ref = [x_ref]
            y_ref = [y_ref]
            z_ref = [z_ref]

        text_str2 = []
        for count, colatitude0 in enumerate(colatitude_ref):
            text_str2.append(
                "({0:.2f}\N{DEGREE SIGN}, {1:.2f}\N{DEGREE SIGN})".format(
                    np.degrees(colatitude0), np.degrees(azimuth_ref[count])))

        trace2 = go.Scatter3d(
            mode="markers",
            name="ground truth",
            x=x_ref,
            y=y_ref,
            z=z_ref,
            text=text_str2,
            hoverinfo="name+text",
            marker=dict(
                size=6,
                symbol="circle",
                opacity=0.6,
                line=dict(color="rgb(204, 204, 204)", width=2),
                color="rgb(0, 0.447, 0.741)",
            ),
        )
        traces.append(trace2)

    # Reconstructed Dirac locations.
    if colatitude is not None and azimuth is not None:
        x_recon = np.sin(colatitude) * np.cos(azimuth)
        y_recon = np.sin(colatitude) * np.sin(azimuth)
        z_recon = np.cos(colatitude)

        if not hasattr(colatitude, "__iter__"):
            # BUG FIX: the original assigned the wrapped scalar to
            # ``colatitude_ref`` here (copy-paste from the branch above),
            # so enumerate(colatitude) below failed for scalar input.
            colatitude = [colatitude]
            azimuth = [azimuth]
            x_recon = [x_recon]
            y_recon = [y_recon]
            z_recon = [z_recon]

        text_str3 = []
        for count, colatitude0 in enumerate(colatitude):
            text_str3.append(
                "({0:.2f}\N{DEGREE SIGN}, {1:.2f}\N{DEGREE SIGN})".format(
                    np.degrees(colatitude0), np.degrees(azimuth[count])))

        trace3 = go.Scatter3d(
            mode="markers",
            name="reconstruction",
            x=x_recon,
            y=y_recon,
            z=z_recon,
            text=text_str3,
            hoverinfo="name+text",
            marker=dict(
                size=6,
                symbol="diamond",
                opacity=0.6,
                line=dict(color="rgb(204, 204, 204)", width=2),
                color="rgb(0.850, 0.325, 0.098)",
            ),
        )
        traces.append(trace3)

    data = go.Data(traces)
    layout = go.Layout(
        title="",
        autosize=False,
        width=670,
        height=550,
        showlegend=True,
        margin=go.Margin(l=45, r=45, b=55, t=45),
    )
    layout["legend"]["xanchor"] = "center"
    layout["legend"]["yanchor"] = "top"
    layout["legend"]["x"] = 0.5

    fig = go.Figure(data=data, layout=layout)
    plot(fig)
def main():
    """Link-prediction experiment on a Facebook ego-network.

    Computes centralities for the 3D rendering, builds labeled train/val
    feature matrices from positive/negative edge files, fits several models
    (ridge, linear regression, SVM kernels, KNN) and prints their metrics,
    then renders the network with plotly.
    """
    # All readers take the same six edge files, in this order.
    dataset_files = (
        "../FacebookNetwork/dataset/SplitedData/train.edges",
        "../FacebookNetwork/dataset/SplitedData/val.edges",
        "../FacebookNetwork/dataset/SplitedData/test.edges",
        "../FacebookNetwork/dataset/SplitedData/neg_train.edges",
        "../FacebookNetwork/dataset/SplitedData/neg_val.edges",
        "../FacebookNetwork/dataset/SplitedData/neg_test.edges",
    )

    # Centrality for graph representation.
    centrality = Centrality.Centrality(Graph(), *dataset_files)
    centrality.arg.LoadEdgesData()
    betweenness = centrality.BetweenCentrality()
    closeness = centrality.ClosenessCentrality()
    eigenVec = centrality.EigenVector_Centrality()

    # Prediction models: split positive and negative data.
    pos_train = DataReader.DataReader(Graph(), *dataset_files)
    neg_train = DataReader.DataReader(Graph(), *dataset_files)
    pos_val = DataReader.DataReader(Graph(), *dataset_files)
    neg_val = DataReader.DataReader(Graph(), *dataset_files)

    # Load edges for each graph.
    for reader in (pos_train, neg_train, pos_val, neg_val):
        reader.LoadEdgesData()

    # Split into negative and positive files.
    pos_train.GenerateTrain()     # we will get pos and neg files
    pos_val.GenerateValidation()  # we will get pos and neg files

    # Label data into dictionaries from the generated files.
    pos_train.LoadGeneratedFiles(pos_train.fluxTr)
    neg_train.LoadGeneratedFiles(neg_train.fluxTr_neg)
    pos_val.LoadGeneratedFiles(pos_val.fluxV)
    neg_val.LoadGeneratedFiles(neg_val.fluxVal_neg)
    X_train_pos = pos_train.LabelData()
    X_train_neg = neg_train.LabelData()
    X_val_pos = pos_val.LabelData()
    X_val_neg = neg_val.LabelData()

    print('----------------- Spliting and labeling data X & Y------------------------- \n')
    # 1 = positive (edge exists), 0 = negative (edge absent).
    Y_train_pos = np.full(shape=(X_train_pos.shape[0], 1), fill_value=1)
    Y_train_neg = np.full(shape=(X_train_neg.shape[0], 1), fill_value=0)
    Y_val_pos = np.full(shape=(X_val_pos.shape[0], 1), fill_value=1)
    Y_val_neg = np.full(shape=(X_val_neg.shape[0], 1), fill_value=0)
    X_train = np.append(X_train_pos, X_train_neg, axis=0)
    y_train = np.append(Y_train_pos, Y_train_neg, axis=0)
    X_val = np.append(X_val_pos, X_val_neg, axis=0)
    y_val = np.append(Y_val_pos, Y_val_neg, axis=0)

    # BUG FIX: the original shuffled X and y independently with
    # np.random.shuffle, destroying the feature/label correspondence.
    # Shuffle each split with a single shared permutation instead.
    train_perm = np.random.permutation(X_train.shape[0])
    X_train, y_train = X_train[train_perm], y_train[train_perm]
    val_perm = np.random.permutation(X_val.shape[0])
    X_val, y_val = X_val[val_perm], y_val[val_perm]
    print('----------------- Done ------------------------- \n ')

    print('\n----------------- Linear Model Predictions ------------------------- \n')
    reg = linear_model.Ridge(alpha=.5)
    reg.fit(X=X_train[:-1], y=y_train[:-1])
    reg.predict(X_train[-1:])
    np.mean((reg.predict(X_val) - y_val)**2)  # MSE (not printed)
    print('Log loss ', log_loss(y_val, reg.predict(X_val)))

    print('\n ----------------- Linear LinearRegression ------------------------- \n')
    regressor = LinearRegression()
    regressor.fit(X_train, y_train)
    print('Slope', regressor.intercept_)
    y_pred = regressor.predict(X_val)
    df = pd.DataFrame({'Actual': y_val.flatten(), 'Predicted': y_pred.flatten()})
    #print(df)

    print('\n ----------------- SVM ------------------------- \n')
    clf_svm = svm.SVC()
    clf_svm.fit(X=X_train[:-1], y=y_train[:-1])
    print(log_loss(y_val, clf_svm.predict(X_val)))

    print('\n ------------------------ Implementing Kernel SVM | Polynomial ------------------------ \n')
    svclassifier2 = svm.SVC(kernel='poly', degree=8, C=150)  # degree of the polynomial kernel
    svclassifier2.fit(X_train, y_train)
    y_predp = svclassifier2.predict(X_val)
    print(confusion_matrix(y_val, y_predp))
    print(classification_report(y_val, y_predp))

    print('\n --------------------------- Implementing Kernel SVM | Linear -------------------------- \n')
    svclassifier1 = svm.SVC(kernel='linear')
    svclassifier1.fit(X_train, y_train)
    y_pred = svclassifier1.predict(X_val)
    print(svclassifier1.score(X_val, y_val))
    print(confusion_matrix(y_val, y_pred))
    print(classification_report(y_val, y_pred))

    print('\n ------------------------ Implementing Kernel SVM | Sigmoid ------------------------ \n')
    svclassifier4 = svm.SVC(kernel='sigmoid')
    svclassifier4.fit(X_train, y_train)
    y_preds = svclassifier4.predict(X_val)
    print(confusion_matrix(y_val, y_preds))
    print(classification_report(y_val, y_preds))

    print('\n------------------------ Implementing Kernel SVM | Gaussian ------------------------\n')
    svclassifier3 = svm.SVC(kernel='rbf')
    svclassifier3.fit(X_train, y_train)
    y_predg = svclassifier3.predict(X_val)
    print(confusion_matrix(y_val, y_predg))
    print(classification_report(y_val, y_predg))

    print('\n ------------------------ KNN ------------------------ \n')
    # Standardize features (KNN is distance-based).
    sc = StandardScaler()
    sc.fit(X_train)
    X_train = sc.transform(X_train)
    X_val = sc.transform(X_val)
    print('Value for K Math.sqrt(len of X_train) -------> ', math.sqrt(len(X_train)))
    print("Please wait for graph representation ....")

    accuracy = []   # accuracy per value of k
    f1_scores = []  # (truth, prediction) pairs per k
    index = range(3, 81)
    for i in index:
        classifier = KNeighborsClassifier(n_neighbors=i, metric='euclidean',
                                          weights='uniform', leaf_size=30)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_val)
        # Compare predictions against the validation labels.
        conf_matrix = confusion_matrix(y_val, y_pred)
        # Accuracy = (TP + TN) / total.
        res = (conf_matrix[0, 0] + conf_matrix[1, 1]) / sum(sum(conf_matrix))
        accuracy.append(res)
        f1_scores.append(list(zip(y_val, y_pred)))
    print('In the range of 3 to 39 we have this values of accuracy')
    print(accuracy)

    # Evaluate the last model.
    print('We evaluate the Matrix of Confusion')
    mc = confusion_matrix(y_val, y_pred)
    print(classification_report(y_val, y_pred))
    print(mc)

    # Graph representation of accuracy vs k.
    plt.figure(figsize=(10, 6), num='Knn Algorithm Facebook Network Prediction')
    plt.plot(index, accuracy, color='green', linestyle='dashed', marker='o',
             markerfacecolor='blue', markersize=10)
    plt.title('Accuracy ratio according to K values')
    plt.xlabel('K Values')
    plt.ylabel('Accuracy average')
    plt.show()

    # Node labels and edge list for the 3D network rendering.
    nodesLabels = [eachEgo['name'] for eachEgo in centrality.arg.graph.vs]
    edges = [e.tuple for e in centrality.arg.graph.es]

    layout = centrality.arg.graph.layout('kk', dim=3)

    # Node coordinates from the Kamada-Kawai layout.
    Xn = [layout[n][0] for n in range(len(centrality.arg.graph.vs))]
    Yn = [layout[n][1] for n in range(len(centrality.arg.graph.vs))]
    Zn = [layout[n][2] for n in range(len(centrality.arg.graph.vs))]
    # Edge segment coordinates; None separates individual segments.
    Xe = []
    Ye = []
    Ze = []
    for e in edges:
        Xe += [layout[e[0]][0], layout[e[1]][0], None]
        Ye += [layout[e[0]][1], layout[e[1]][1], None]
        Ze += [layout[e[0]][2], layout[e[1]][2], None]

    trace1 = go.Scatter3d(x=Xe, y=Ye, z=Ze,
                          mode='lines',
                          line=go.Line(color='rgb(125,125,125)', width=1),
                          hoverinfo='none')
    trace2 = go.Scatter3d(x=Xn, y=Yn, z=Zn,
                          mode='markers',
                          name='Alters',
                          marker=go.Marker(symbol='circle',
                                           color=eigenVec,  # eigenvector centrality
                                           size=10,
                                           colorbar=go.ColorBar(title='Node Degree'),
                                           colorscale='Viridis',
                                           line=go.Line(color='rgb(158,18,130)', width=0.5)),
                          text=nodesLabels,
                          hoverinfo='text')
    axis = dict(showbackground=True, showline=True, zeroline=False,
                showgrid=True, showticklabels=True, title='')
    plan = go.Layout(
        title="Facebook Ego-Network",
        width=1000,
        height=1000,
        showlegend=True,
        scene=go.Scene(
            xaxis=go.XAxis(axis),
            yaxis=go.YAxis(axis),
            zaxis=go.ZAxis(axis),
        ),
        margin=go.Margin(t=100),
        hovermode='closest',
        annotations=go.Annotations([
            go.Annotation(
                showarrow=True,
                xref='paper',
                yref='paper',
                x=0,
                y=0.1,
                xanchor='left',
                yanchor='top',
                font=go.Font(size=14))
        ]),
    )
    data = go.Data([trace1, trace2])
    fig = go.Figure(data=data, layout=plan)
    fig.show()
}, "name": "Temp", "hole": .3, "type": "pie", "direction": "clockwise", "rotation": 125, "showlegend": False, "textinfo": "label", "textposition": "inside", "hoverinfo": "none", } layout2 = go.Layout(autosize=False, width=200, height=220, margin=go.Margin(l=60, r=0, b=0, t=0, pad=4), paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)') def generate_temp_fig(temp): layout = generate_temp_layout(temp) # we don't want the boundary now base_chart['marker']['line']['width'] = 0 fig = {"data": [base_chart, meter_chart], "layout": layout} py.image.save_as(fig, 'temp_plot1.jpeg') #init_notebook_mode()
def plotGIW(viewPos_XYZ, cycGIW_XYZ, ballPos_XYZ, headTransform_4x4,
            xRange=[-1, 1], yRange=[-1, 1], zRange=[-1, 1],
            width=800, height=600, inline=False):
    """Build a 3D "gaze in world" figure and return it.

    viewPos_XYZ      -- world position of the cyclopean eye
    cycGIW_XYZ       -- endpoint of the gaze-in-world vector
    ballPos_XYZ      -- world position of the ball
    headTransform_4x4-- head pose, forwarded to createHead()
    xRange/yRange/zRange -- axis limits (read-only; never mutated here)
    width/height     -- figure size in pixels
    inline           -- accepted for backward compatibility; unused here

    Returns a go.Figure containing the gaze ray, the ball ray and the
    head mesh.  Plain dicts replace the go.Scene / go.Margin / go.Data
    wrapper classes removed in plotly 4.
    """

    def _dir_trace(end_XYZ, label, color):
        # One labeled ray from the eye to `end_XYZ`.  Note the y/z swap:
        # the world's Y (up) axis is drawn on plotly's z axis.
        return go.Scatter3d(x=[viewPos_XYZ[0], end_XYZ[0]],
                            y=[viewPos_XYZ[2], end_XYZ[2]],
                            z=[viewPos_XYZ[1], end_XYZ[1]],
                            mode='lines+text',
                            text=['', label],
                            textposition='top right',
                            textfont=dict(family='sans serif',
                                          size=14,
                                          color=color),
                            line=dict(color=color, width=4))

    headShape = createHead(headTransform_4x4)
    giwDir = _dir_trace(cycGIW_XYZ, 'gaze', 'rgb(20, 0, 145)')

    # Unit vector from the eye to the ball, extended to 1.5 units so the
    # ray is clearly visible regardless of the ball's distance.
    offset = np.subtract(ballPos_XYZ, viewPos_XYZ)
    ballDir_XYZ = offset / np.linalg.norm(offset)
    ballEndPoint_XYZ = viewPos_XYZ + ballDir_XYZ * 1.5
    ballDir = _dir_trace(ballEndPoint_XYZ, 'ball', 'rgb(30, 150, 30)')

    layout = go.Layout(title="Gaze in World",
                       width=width,
                       height=height,
                       showlegend=False,
                       scene=dict(aspectmode='manual',
                                  aspectratio=dict(x=1, y=1, z=1),
                                  xaxis=dict(range=xRange, title='x Axis'),
                                  yaxis=dict(range=yRange, title='y Axis'),
                                  zaxis=dict(range=zRange, title='z Axis')),
                       margin=dict(t=100),
                       hovermode='closest')
    return go.Figure(data=[giwDir, ballDir, headShape], layout=layout)
def double_drops_heatmap_v2(signals, num_mocks, num_edmans, num_mocks_omitted,
                            peptide_string, wavelength, zmin, zmax, filepath,
                            plot_multidrops=False, plot_remainders=False):
    """Plot a heatmap of double-drop cycle pairs and write HTML to *filepath*.

    signals -- mapping of (signal, is_zero, starting_intensity) -> count,
        where `signal` is a tuple of (channel, cycle) drop events.
    num_mocks / num_edmans / num_mocks_omitted -- cycle counts defining
        the axis headers (M1.., E1..).
    wavelength -- color channel; must be a key of the module-level `colors`.
    zmin / zmax -- explicit color-scale limits; None autoscales to the data.
    plot_multidrops -- keep two-drop signals that hit the same cycle twice.
    plot_remainders -- add an extra "R" column for non-zero remainders.

    Raises ValueError if `wavelength` is not a known channel.
    """
    num_mocks -= num_mocks_omitted
    total_cycles = num_mocks + num_edmans
    if plot_remainders:
        heatmap_array_size_x = total_cycles
        heatmap_array_size_y = total_cycles + 1
    else:
        heatmap_array_size_y = heatmap_array_size_x = total_cycles
    # Integer zero grid (idiomatic replacement for the nested list comp).
    heatmap_array = np.zeros((heatmap_array_size_x, heatmap_array_size_y),
                             dtype=int)
    # .items() works on Python 2 and 3; the original used the
    # Python-2-only .iteritems().
    for (signal, is_zero, starting_intensity), count in signals.items():
        if starting_intensity > 2:
            continue
        if len(signal) == 1:
            if signal == (('A', 0), ):
                continue
            elif plot_remainders and not is_zero:
                # Non-zero remainder: bucket into the extra "R" column.
                x, y = signal[0][1] - 1, heatmap_array_size_y - 1
            else:
                continue
        elif len(signal) == 2:
            # Drop multidrops (same cycle twice) unless explicitly kept.
            if not plot_multidrops and len(signal) > len(set(signal)):
                continue
            elif is_zero:
                x, y = signal[0][1] - 1, signal[1][1] - 1
            else:
                continue
        elif len(signal) > 2:
            continue
        heatmap_array[x, y] += count
    color_channel = wavelength
    if color_channel not in colors:
        # The original `raise ("Exception: ...")` raised a TypeError,
        # because strings are not exceptions; raise a real one instead.
        raise ValueError("Invalid wavelength.")
    cs = colors[color_channel]  # cs = "color space"
    y_cycles_header = (
        ["M" + str(i + 1 + num_mocks_omitted) for i in range(num_mocks)] +
        ["E" + str(i + 1) for i in range(num_edmans)])
    if plot_remainders:
        x_cycles_header = y_cycles_header + ["R"]
    else:
        x_cycles_header = y_cycles_header
    # Per-cell count labels: white text on dark cells, black elsewhere.
    annotations = []
    text_limit = np.amax(heatmap_array)
    for (y, x), count in np.ndenumerate(heatmap_array):
        annotations.append(
            dict(text=str(count),
                 x=x_cycles_header[x],
                 y=y_cycles_header[y],
                 font=dict(color=(
                     'white' if count > (text_limit * 0.75) else 'black')),
                 showarrow=False))
    layout = graph_objs.Layout(
        title=("Double Drops (" + str(color_channel) + " Channel) Total: " +
               str(np.sum(heatmap_array)) + " - " + peptide_string),
        annotations=annotations,
        titlefont=dict(size=16),
        yaxis=dict(title="First Drop",
                   titlefont=dict(size=16),
                   ticks="",
                   autorange='reversed'),
        xaxis=dict(title="Second Drop",
                   titlefont=dict(size=16),
                   ticks="",
                   side='top'),
        # Plain dict replaces graph_objs.Margin (removed in plotly 4).
        margin=dict(l=50, r=50, b=100, t=150, pad=4),
        width=700,
        height=735,
        autosize=False)
    data = [
        graph_objs.Heatmap(
            z=heatmap_array,
            x=x_cycles_header,
            y=y_cycles_header,
            colorscale=cs,
            reversescale=True,
            zmin=(np.amin(heatmap_array) if zmin is None else zmin),
            zmax=(np.amax(heatmap_array) if zmax is None else zmax))
    ]
    fig = graph_objs.Figure(data=data, layout=layout)
    plotly.offline.plot(fig, filename=filepath, auto_open=False)
                        # Tail of the heart-rate dcc.Graph definition; the
                        # opening go.Scatter for the HR series begins above
                        # this chunk.
                        hoverlabel=dict(bgcolor="#333", font=dict(size=18)),
                        fill="tozeroy",
                        name="HR"
                    ),
                    # Smoothed average overlay drawn on the same axes.
                    go.Scatter(
                        x=avg_x,
                        y=avg_y,
                        hoverinfo='y',
                        hoverlabel=dict(bgcolor='#333', font=dict(size=18)),
                        line=dict(shape="spline", color="#b32945"),
                        name='Average'
                    )
                ],
                layout=dict(
                    autosize=True,
                    margin=go.Margin(l=35, r=20, b=0, t=20, pad=4),
                    # Fixed y window for typical resting HR values.
                    yaxis=dict(range=[30, 60]),
                    xaxis=dict(
                        rangeslider=dict(),
                        type="date",
                        # Spike line that follows the cursor to the axis.
                        showspikes=True,
                        spikemode="toaxis",
                        spikethickness=2,
                        spikedash="solid",
                    ),
                ),
            ),
            style=dict(height="78vh"),
        )
    ],
    className="section",
# Combined trace list for the hashtag bubble figure; `label_trace`,
# `value_trace` and `shapes` are built earlier in this script.
data = [label_trace, value_trace]
layout = go.Layout(title="<b>Top Ten Hashtags Used by Russian Bots</b>",
                   titlefont=dict(size=20, color='rgb(203,203,203)'),
                   shapes=shapes,
                   height=560,
                   width=800,
                   showlegend=False,
                   # Dark slate background behind both paper and plot area.
                   paper_bgcolor='rgba(44,58,71,1)',
                   plot_bgcolor='rgba(44,58,71,1)',
                   # Axes are decorative only -- hide ticks and zero lines.
                   xaxis=dict(
                       showticklabels=False,
                       zeroline=False,
                   ),
                   yaxis=dict(showticklabels=False, zeroline=False),
                   margin=go.Margin(l=50, r=50, b=50, t=100, pad=4))
fig = dict(data=data, layout=layout)

# Horizontal bars for the two most-mentioned accounts.
trace1 = go.Bar(y=['@realdonaldtrump'],
                x=[4325],
                orientation='h',
                marker=dict(color='rgba(32,155,160, 0.6)',
                            line=dict(color='rgba(253,93,124, 1.0)',
                                      width=2)))
trace2 = go.Bar(y=['@midnight'],
                x=[2414],
                orientation='h',
                marker=dict(color='rgba(28,119,139, 0.6)',
                            line=dict(color='rgba(182,231,235, 1.0)',
                                      width=2)))
def plot(pca_results):
    """Build a 3D PCA scatter plot and display it with iplot.

    pca_results -- dict with keys:
        'pca'             fitted PCA object (components_ rows 0-2 are used
                          as the x/y/z coordinates)
        'var_explained'   three axis titles
        'sample_metadata' DataFrame of per-sample annotations
        'nr_genes'        number of variable genes (shown in the title)
        'color_by'        optional metadata column to color by
        'color_type'      'continuous' or 'categorical'
        'colorscale'      colorscale used for continuous coloring
    """
    # Get results
    pca = pca_results['pca']
    var_explained = pca_results['var_explained']
    sample_metadata = pca_results['sample_metadata']
    color_by = pca_results.get('color_by')
    color_type = pca_results.get('color_type')
    color_column = pca_results['sample_metadata'][
        color_by] if color_by else None
    # ColorBrewer Paired-12 palette for categorical coloring.
    colors = [
        '#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c',
        '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'
    ]
    # Hover text: bold sample name plus every metadata column.  Explicit
    # keywords replace the fragile .format(**locals()) of the original.
    sample_titles = [
        '<b>{}</b><br>'.format(index) +
        '<br>'.join('<i>{key}</i>: {value}'.format(key=key, value=value)
                    for key, value in rowData.items())
        for index, rowData in sample_metadata.iterrows()
    ]
    if not color_by:
        # Uniform markers, single trace.
        marker = dict(size=15)
        trace = go.Scatter3d(x=pca.components_[0],
                             y=pca.components_[1],
                             z=pca.components_[2],
                             mode='markers',
                             hoverinfo='text',
                             text=sample_titles,
                             marker=marker)
        data = [trace]
    elif color_by and color_type == 'continuous':
        # Single trace colored by the metadata column, with a colorbar.
        marker = dict(size=15,
                      color=color_column,
                      colorscale=pca_results['colorscale'],
                      showscale=True)
        trace = go.Scatter3d(x=pca.components_[0],
                             y=pca.components_[1],
                             z=pca.components_[2],
                             mode='markers',
                             hoverinfo='text',
                             text=sample_titles,
                             marker=marker)
        data = [trace]
    elif color_by and color_type == 'categorical':
        # One trace per category so each gets a legend entry.
        unique_categories = color_column.unique()
        data = []
        for i, category in enumerate(unique_categories):
            # Wrap around the palette so more than 12 categories cannot
            # raise IndexError (the original indexed colors[i] directly).
            category_color = colors[i % len(colors)]
            # Indices of the samples in this category.  A distinct loop
            # variable avoids clobbering `i` under Python 2, where list
            # comprehensions leak their variable into the enclosing scope.
            category_indices = [
                j for j, sample_category in enumerate(color_column)
                if sample_category == category
            ]
            trace = go.Scatter3d(
                x=pca.components_[0][category_indices],
                y=pca.components_[1][category_indices],
                z=pca.components_[2][category_indices],
                mode='markers',
                hoverinfo='text',
                text=[sample_titles[x] for x in category_indices],
                name=category,
                marker=dict(size=15, color=category_color))
            data.append(trace)
    colored = '' if str(
        color_by) == 'None' else '<i>, colored by {}</i>'.format(color_by)
    layout = go.Layout(
        title=
        '<b>PCA Analysis | Scatter Plot</b><br><i>Top {} variable genes</i>'.
        format(pca_results['nr_genes']) + colored,
        hovermode='closest',
        # Plain dict replaces go.Margin (removed in plotly 4).
        margin=dict(l=0, r=0, b=0, t=50),
        width=900,
        scene=dict(xaxis=dict(title=var_explained[0]),
                   yaxis=dict(title=var_explained[1]),
                   zaxis=dict(title=var_explained[2])))
    fig = go.Figure(data=data, layout=layout)
    return iplot(fig)
                # Bar trace for the second survey respondent: trait names
                # on x, 1-5 ratings on y.  (The enclosing go.Figure/go.Bar
                # call opens above this chunk.)
                x=['Trust', 'Fear of Failure', 'Persistence',
                   'Convince Others', 'Rely on Others', 'Competition',
                   'Strong Network', 'Work Life', 'Nine-Five Job'],
                y=[1, 1, 2, 1, 3, 4, 2, 2, 5],
                name='James Drugeot',
                marker=go.Marker(color='rgb(155,105,84)')
            ),
        ],
        layout=go.Layout(
            title='Personality Traits Based on Survey',
            showlegend=True,
            # Legend anchored to the top-left corner of the plot.
            legend=go.Legend(
                x=0,
                y=1.0
            ),
            margin=go.Margin(l=40, r=0, t=40, b=30)
        )
    ),
    style={'height': 300},
    id='graph-test'
    )
], style={'background': colors['background']})

# ----------------------------------------------------------------------------------
# Additional CSS
app.css.append_css(
    {"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
# ----------------------------------------------------------------------------------
def show_histogram(image):
    """Return a plotly Figure with per-channel histograms of *image*.

    image -- a PIL-style image; its .mode selects the channel layout
    ("RGBA", "RGB", or anything else treated as grayscale) and its
    .histogram() supplies the 256-bin counts per channel.
    """

    def hg_trace(name, color, hg):
        # One channel becomes two traces: a crisp outline plus a filled
        # area whose hover is disabled so only the line responds.
        line = go.Scatter(
            x=list(range(0, 256)),
            y=hg,
            name=name,
            line=dict(color=color),
            mode="lines",
            showlegend=False,
        )
        fill = go.Scatter(
            x=list(range(0, 256)),
            y=hg,
            mode="lines",
            name=name,
            line=dict(color=color),
            fill="tozeroy",
            hoverinfo="none",
        )
        return line, fill

    hg = image.histogram()
    if image.mode == "RGBA":
        # 1024 counts: R, G, B, A in consecutive 256-bin slices.
        rhg = hg[0:256]
        ghg = hg[256:512]
        bhg = hg[512:768]
        ahg = hg[768:]
        data = [
            *hg_trace("Red", "#FF4136", rhg),
            *hg_trace("Green", "#2ECC40", ghg),
            *hg_trace("Blue", "#0074D9", bhg),
            *hg_trace("Alpha", "gray", ahg),
        ]
        title = "RGBA Histogram"
    elif image.mode == "RGB":
        # Returns a 768 member array with counts of R, G, B values
        rhg = hg[0:256]
        ghg = hg[256:512]
        bhg = hg[512:768]
        data = [
            *hg_trace("Red", "#FF4136", rhg),
            *hg_trace("Green", "#2ECC40", ghg),
            *hg_trace("Blue", "#0074D9", bhg),
        ]
        title = "RGB Histogram"
    else:
        data = [*hg_trace("Gray", "gray", hg)]
        title = "Grayscale Histogram"
    layout = go.Layout(
        autosize=True,
        title=title,
        # Plain dict replaces go.Margin (removed in plotly 4).
        margin=dict(l=50, r=30),
        legend=dict(x=0, y=1.15, orientation="h"),
        # Dark theme to match the surrounding app chrome.
        paper_bgcolor="#31343a",
        plot_bgcolor="#272a31",
        font=dict(color="darkgray"),
        xaxis=dict(gridcolor="#43454a"),
        yaxis=dict(gridcolor="#43454a"),
    )
    return go.Figure(data=data, layout=layout)
def _make_plot(self, data, fname, title=None):
    """Render *data* as a bar chart, save it to a temp PNG, and keep the
    NamedTemporaryFile handle on *self* under attribute name *fname*.

    data  -- list of one or two bar traces (each with .x and .y); two
             traces are drawn as a grouped chart with percentage labels.
    fname -- attribute name under which the temp file handle is stored.
    title -- optional chart title.
    """
    layout = None
    annotations = None  # NOTE(review): assigned but never used below
    text_angle = -35
    font_size = 10
    if len(data) > 1:
        # Series totals, used to convert bar heights into percentages.
        manCount = float(sum(data[0].y))
        assCount = float(sum(data[1].y))
        # Build one percentage label per bar for each series, then
        # interleave them (zip pairs, the outer comprehension flattens).
        # The +-0.1/0.3 x offsets line labels up over the grouped bars.
        ann = [
            j for i in zip([
                dict(
                    x=xi - .1,
                    y=yi,
                    text="0%" if manCount == 0 else
                    "{0:.0f}".format(100 * float(yi) / manCount) + "%",
                    xanchor='auto',
                    yanchor='bottom',
                    showarrow=False,
                    textangle=text_angle,
                    font={"size": font_size},
                ) for xi, yi in zip(range(len(data[0].x)), data[0].y)
            ], [
                dict(
                    x=xi + .3,
                    y=yi,
                    text="0%" if assCount == 0 else
                    "{0:.0f}".format(100 * float(yi) / assCount) + "%",
                    xanchor='auto',
                    yanchor='bottom',
                    showarrow=False,
                    textangle=text_angle,
                    font={"size": font_size},
                ) for xi, yi in zip(range(len(data[1].x)), data[1].y)
            ]) for j in i
        ]
        layout = gobj.Layout(barmode='group',
                             bargap=0.15,
                             bargroupgap=0.2,
                             title=title,
                             legend=dict(orientation="h"),
                             margin=gobj.Margin(l=50, r=50, b=10, t=10,
                                                pad=2),
                             annotations=ann,
                             width=300,
                             height=240,
                             # y axis hidden entirely; the annotations
                             # carry the values instead of tick labels.
                             yaxis=dict(autorange=True,
                                        showgrid=False,
                                        zeroline=False,
                                        showline=False,
                                        autotick=True,
                                        ticks='',
                                        showticklabels=False))
    else:
        # Single series: label each bar with its raw count.
        ann = [
            dict(
                x=xi,
                y=yi,
                text=str(yi),
                xanchor='center',
                yanchor='bottom',
                showarrow=False,
                font={"size": font_size},
            ) for xi, yi in zip(data[0].x, data[0].y)
        ]
        layout = gobj.Layout(title=title,
                             bargap=0.30,
                             annotations=ann,
                             legend=dict(orientation="h"),
                             margin=gobj.Margin(l=50, r=50, b=25, t=25,
                                                pad=2),
                             width=300,
                             height=240,
                             yaxis=dict(autorange=True,
                                        showgrid=False,
                                        zeroline=False,
                                        showline=False,
                                        autotick=True,
                                        ticks='',
                                        showticklabels=False))
    fig = gobj.Figure(data=data, layout=layout)
    # Keep the temp-file handle on self so callers can read the PNG later.
    setattr(self, fname,
            tempfile.NamedTemporaryFile(mode='wb', suffix='.png'))
    # Close file if on windows otherwise access denied error
    if __name__ == "__main__":
        getattr(self, fname).close()
    plty.image.save_as(fig, filename=getattr(self, fname).name)