def paintCovidCasesInZBS(stat):
    df = pd.read_csv(os.path.join(
        getRootPath(), "data/COVID/covid19_tia_zonas_basicas_salud_s.csv"),
        sep=';')
    # Keep only the rows for the report date found in the first row
    # (assumed to be the most recent report). Copy to avoid SettingWithCopyWarning.
    df_lastweek = df.loc[df['fecha_informe'] == df['fecha_informe'][0]].copy()
    df_lastweek['codigo_geometria'] = df_lastweek['codigo_geometria'].str.rstrip()
    zbs = loadZBSJson(
        os.path.join(getRootPath(),
                     "data/COVID/zonas_basicas_salud/zonas_basicas_salud.json"))
    fig = px.choropleth_mapbox(
        df_lastweek,
        geojson=zbs,
        featureidkey='properties.codigo_geo',
        locations='codigo_geometria',
        color=stat,
        color_continuous_scale="OrRd",
        mapbox_style="carto-positron",
        hover_name="zona_basica_salud",
        hover_data=[stat],
        center={"lat": 40.417008, "lon": -3.703795}
        #labels={'casos_confirmados_ultimos_14dias': 'Casos últimos 14 días'}
    )
    fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
    return fig
def paintMetroUsageInZBS(stat):
    if not os.path.exists(
            os.path.join(getRootPath(), 'data/metro/zbsMap_metro.csv')):
        preprocessMetroUsageInZBS()
    grouped_byzbs_sum = pd.read_csv(
        os.path.join(getRootPath(), 'data/metro/zbsMap_metro.csv'),
        dtype={'index': object})
    zbs_json = loadZBSJson(
        os.path.join(getRootPath(),
                     "data/COVID/zonas_basicas_salud/zonas_basicas_salud.json"))
    station_location = pd.read_csv(
        os.path.join(getRootPath(), 'data/metro/raw_data/mapas_metro.csv'))
    # Station markers, overlaid on the choropleth below.
    fig_loc = px.scatter_mapbox(station_location,
                                lat="lat",
                                lon="long",
                                hover_name="name_Est",
                                mapbox_style="carto-positron")
    fig = px.choropleth_mapbox(
        grouped_byzbs_sum,
        geojson=zbs_json,
        featureidkey='properties.codigo_geo',
        locations='index',
        color=stat,
        color_continuous_scale="OrRd",
        mapbox_style="carto-positron",
        hover_data=[stat],
        center={"lat": 40.417008, "lon": -3.703795})
    fig.add_trace(fig_loc.data[0])
    fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
    return fig
def paintCarTrafficInZBS(stat):
    if not os.path.exists(
            os.path.join(getRootPath(), 'data/trafico/07-2020_zbsMap.csv')):
        preprocessCarTrafficInZBS()
    grouped_byzbs_mean = pd.read_csv(
        os.path.join(getRootPath(), 'data/trafico/07-2020_zbsMap.csv'))
    zbs_json = loadZBSJson(
        os.path.join(getRootPath(),
                     "data/COVID/zonas_basicas_salud/zonas_basicas_salud.json"))
    fig = px.choropleth_mapbox(
        grouped_byzbs_mean,
        geojson=zbs_json,
        featureidkey='properties.codigo_geo',
        locations='index',
        color=stat,
        color_continuous_scale="OrRd",
        mapbox_style="carto-positron",
        hover_data=[stat],
        center={"lat": 40.417008, "lon": -3.703795})
    fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0})
    return fig
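# The three painters above share one calling convention: `stat` names a numeric
# column of the underlying CSV and drives the choropleth color. A minimal usage
# sketch; the column name is taken from the commented-out labels hint in
# paintCovidCasesInZBS, so treat it as an assumption:
fig = paintCovidCasesInZBS('casos_confirmados_ultimos_14dias')
fig.show()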
def preprocessCarTrafficInZBS():
    zbs_shp = loadZBSShapeFile(
        os.path.join(getRootPath(),
                     "data/COVID/zonas_basicas_salud/zonas_basicas_salud.shp"))
    sensors_location = pd.read_csv(
        os.path.join(getRootPath(),
                     'data/trafico/raw_data/pmed_ubicacion_07-2020.csv'),
        sep=';')

    def groupby_zbs(id_point):
        # Map a sensor id to the codigo_geo of the health zone containing it.
        select_location = sensors_location.loc[sensors_location['id'] == id_point]
        if len(select_location) != 1:
            return 'error'
        point = Point(select_location['longitud'].iloc[0],
                      select_location['latitud'].iloc[0])
        for i in range(0, len(zbs_shp['geometry'])):
            if point.within(zbs_shp['geometry'][i]):
                return str(zbs_shp['codigo_geo'][i])

    data = pd.read_csv(os.path.join(getRootPath(),
                                    'data/trafico/raw_data/07-2020.csv'),
                       sep=';')
    grouped_byid = data.groupby('id').mean()
    # groupby with a callable maps it over the index (here, sensor ids).
    grouped_byzbs = grouped_byid.groupby(groupby_zbs)
    grouped_byzbs.mean().reset_index().to_csv(
        os.path.join(getRootPath(), 'data/trafico/07-2020_zbsMap.csv'))
def preprocessBikeTravelsSankeyZip(year, month):
    zip_shp = gpd.read_file(
        os.path.join(getRootPath(), "data/distritos/MADRID.json"))
    locations = pd.read_json(
        os.path.join(getRootPath(),
                     "data/bike/raw_data/stations_location.json"))
    filepath = os.path.join(
        getRootPath(), 'data/bike/raw_data/' + year + month + '_movements.json')
    df = pd.read_json(filepath, lines=True)

    def groupby_zipCode(id_point):
        # Map one movement (by row index) to an "origin_zip,dest_zip" key.
        idunplug_station = df.iloc[id_point]['idunplug_station']
        idplug_station = df.iloc[id_point]['idplug_station']
        lon_unplug = locations[locations['id'] == idunplug_station]['longitude'].iloc[0]
        lat_unplug = locations[locations['id'] == idunplug_station]['latitude'].iloc[0]
        lon_plug = locations[locations['id'] == idplug_station]['longitude'].iloc[0]
        lat_plug = locations[locations['id'] == idplug_station]['latitude'].iloc[0]
        unplug_station = Point(lon_unplug, lat_unplug)
        plug_station = Point(lon_plug, lat_plug)
        origin_zip = dest_zip = None
        for i in range(0, len(zip_shp['geometry'])):
            if unplug_station.within(zip_shp['geometry'][i]):
                origin_zip = zip_shp['COD_POSTAL'][i]
            if plug_station.within(zip_shp['geometry'][i]):
                dest_zip = zip_shp['COD_POSTAL'][i]
        if origin_zip is None or dest_zip is None:
            # Guard against stations that fall outside every polygon.
            return 'error'
        return origin_zip + ',' + dest_zip

    grouped_byzip = df.groupby(groupby_zipCode)
    grouped_byzip_count = grouped_byzip.count().reset_index()
    new = grouped_byzip_count["index"].str.split(",", n=1, expand=True)
    grouped_byzip_count["origin_zip"] = new[0]
    grouped_byzip_count["dest_zip"] = new[1]
    grouped_byzip_count = grouped_byzip_count[['origin_zip', 'dest_zip', '_id']]
    grouped_byzip_count = grouped_byzip_count.rename(columns={"_id": "travels"})
    grouped_byzip_count.to_csv(
        os.path.join(getRootPath(),
                     'data/bike/' + year + month + '_movements_inZip.csv'))
def preprocessBikeTravelsEveryHour(year, month):
    filepath = os.path.join(
        getRootPath(), 'data/bike/raw_data/' + year + month + '_movements.json')
    df = pd.read_json(filepath, lines=True)
    # Strip the date prefix and the trailing character, keeping the time
    # portion of the ISO timestamp.
    df['unplug_hourTime'] = df['unplug_hourTime'].str[11:-1]
    hour = df.groupby('unplug_hourTime').count().reset_index()
    hour = hour[['unplug_hourTime', '_id']]
    hour = hour.rename(columns={"_id": "travels"})
    hour.to_csv(
        os.path.join(getRootPath(),
                     'data/bike/' + year + month + '_movements_hour.csv'))
def paintBikeTravelsEveryHour(year, month):
    if not os.path.exists(
            os.path.join(getRootPath(),
                         'data/bike/' + year + month + '_movements_hour.csv')):
        preprocessBikeTravelsEveryHour(year, month)
    groupedby_hour = pd.read_csv(
        os.path.join(getRootPath(),
                     'data/bike/' + year + month + '_movements_hour.csv'))
    fig = px.bar(x=groupedby_hour.index, y=groupedby_hour['travels'])
    return fig
def preprocessStationsLocation():
    locations = pd.read_json(
        os.path.join(getRootPath(),
                     "data/bike/raw_data/stations_location.json"))
    locations = locations[['name', 'address', 'longitude', 'latitude', 'id']]
    # Annotate every station with its neighbourhood, district and zip code.
    locations['barrio'] = locations.apply(
        lambda x: get_barrio(x.longitude, x.latitude), axis=1)
    locations['distrito'] = locations.apply(
        lambda x: get_distrito(x.longitude, x.latitude), axis=1)
    locations['cod_postal'] = locations.apply(
        lambda x: get_zipcode(x.longitude, x.latitude), axis=1)
    locations.to_csv(
        os.path.join(getRootPath(),
                     'data/bike/stations_locations_PROCESSED.csv'))
    return locations
def preprocessMetroUsageInZBS():
    zbs_shp = loadZBSShapeFile(
        os.path.join(getRootPath(),
                     "data/COVID/zonas_basicas_salud/zonas_basicas_salud.shp"))
    station_location = pd.read_csv(
        os.path.join(getRootPath(), 'data/metro/raw_data/mapas_metro.csv'))

    def groupby_zbs(id_point):
        # Map a station (by row index) to the codigo_geo of the zone containing it.
        select_location = station_location.iloc[[id_point]]
        if len(select_location) != 1:
            return 'error'
        point = Point(select_location['longitud'].iloc[0],
                      select_location['latitud'].iloc[0])
        for i in range(0, len(zbs_shp['geometry'])):
            if point.within(zbs_shp['geometry'][i]):
                return str(zbs_shp['codigo_geo'][i])

    data = pd.read_csv(
        os.path.join(getRootPath(), 'data/metro/raw_data/mapas_metro.csv'))
    grouped_byzbs = data.groupby(groupby_zbs)
    grouped_byzbs_sum = grouped_byzbs.sum().reset_index()
    grouped_byzbs_sum.to_csv(
        os.path.join(getRootPath(), 'data/metro/zbsMap_metro.csv'))
def prepare_descs():
    ### LOAD DATA
    ND_desc_BOG = pd.read_csv(
        os.path.join(getRootPath(), 'data/ND_descriptors_bogota.csv'))
    ND_desc_MED = pd.read_csv(
        os.path.join(getRootPath(), 'data/ND_descriptors_medellin.csv'))
    antenas_loc = pd.read_csv(
        os.path.join(getRootPath(), 'data/sites_random2.csv'))
    ND_desc_BOG['city'] = 'BOG'
    ND_desc_MED['city'] = 'MED'
    # DataFrame.append is removed in recent pandas; pd.concat is equivalent here.
    NDdescs_COL = pd.concat([ND_desc_BOG, ND_desc_MED], ignore_index=True)
    df_antennasJoin = NDdescs_COL.merge(antenas_loc,
                                        left_on='antenna',
                                        right_on='antenna_id',
                                        how='inner')
    return NDdescs_COL, df_antennasJoin
def get_zipcode(long_st, lat_st):
    zip_shp_MADRID = gpd.read_file(
        os.path.join(getRootPath(), "data/distritos/MADRID.json"))
    station_Point = Point(long_st, lat_st)
    zipcode = 'Not Found'
    for i in range(0, len(zip_shp_MADRID['geometry'])):
        if station_Point.within(zip_shp_MADRID['geometry'][i]):
            zipcode = zip_shp_MADRID['COD_POSTAL'][i]
    return zipcode
def convert_all(data_path):
    """Preprocess all files."""
    root_path = utils.getRootPath()
    files = [f for f in iter_files(data_path)]
    for f in tqdm(files):
        file_name = f[0]  # wiki_00
        input_file_path = os.path.join(root_path, f[1])  # D:\workspace\pycharm\myQA\data1\AA\wiki_01
        output_file_path = input_file_path.replace('data1', 'data2')  # D:\workspace\pycharm\myQA\data2\AA\wiki_01
        path = os.path.split(output_file_path)[0]  # D:\workspace\pycharm\myQA\data2\AA
        if not os.path.exists(path):
            os.makedirs(path)
        # Create/truncate the output file before preprocess writes to it.
        with open(output_file_path, 'w'):
            preprocess(input_file_path, output_file_path)
def convert_all(data_path):
    """Convert all files from traditional Chinese to simplified Chinese."""
    root_path = utils.getRootPath()
    files = [f for f in iter_files(data_path)]
    for f in tqdm(files):
        file_name = f[0]  # wiki_00
        input_file_path = os.path.join(root_path, f[1])  # D:\workspace\pycharm\myQA\data\AA\wiki_01
        output_file_path = input_file_path.replace('data', 'data1')  # D:\workspace\pycharm\myQA\data1\AA\wiki_01
        path = os.path.split(output_file_path)[0]  # D:\workspace\pycharm\myQA\data1\AA
        if not os.path.exists(path):
            os.makedirs(path)
        # Create/truncate the output file before t2s writes to it.
        with open(output_file_path, 'w'):
            t2s(input_file_path, output_file_path)
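# iter_files is not defined in this section. Judging from how convert_all
# unpacks f[0] (file name) and f[1] (path joined onto the project root), a
# minimal os.walk-based sketch could look like the following; this is an
# assumption, not the project's actual helper:
def iter_files(data_path):
    # Yield (file_name, root-relative path) pairs for every file under
    # data_path; data_path is assumed to be relative to the project root.
    for dirpath, _dirnames, filenames in os.walk(data_path):
        for file_name in filenames:
            yield file_name, os.path.join(dirpath, file_name)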
def get_barrio(long_st, lat_st):
    zip_shp_BARRIOS = gpd.read_file(
        os.path.join(getRootPath(), "data/distritos/barrios.geojson"))
    station_Point = Point(long_st, lat_st)
    barrio = 'Not Found'
    for i in range(0, len(zip_shp_BARRIOS['geometry'])):
        if station_Point.within(zip_shp_BARRIOS['geometry'][i]):
            barrio = zip_shp_BARRIOS['name'][i]
    return barrio
def paintTraficSensorLocations():
    token = 'pk.eyJ1IjoiY3RhcmF6b25hIiwiYSI6ImNrZDkxcW1sYjBwOWkycnM4NDRpbXViYnYifQ.jK8gChNK_dzVpKlrKKfJgA'
    df = pd.read_csv(
        os.path.join(getRootPath(),
                     'data/trafico/raw_data/pmed_ubicacion_07-2020.csv'),
        sep=';')
    px.set_mapbox_access_token(token)
    fig = px.scatter_mapbox(df,
                            lat="latitud",
                            lon="longitud",
                            hover_name="nombre",
                            color_discrete_sequence=["blue"],
                            zoom=10,
                            height=500)
    return fig
def get_distrito(long_st, lat_st):
    zip_shp_DISTRICTS = gpd.read_file(
        os.path.join(getRootPath(),
                     "data/distritos/distrito_geojson.geojson"))
    station_Point = Point(long_st, lat_st)
    distrito = 'Not Found'
    for i in range(0, len(zip_shp_DISTRICTS['geometry'])):
        if station_Point.within(zip_shp_DISTRICTS['geometry'][i]):
            distrito = zip_shp_DISTRICTS['label'][i]
    return distrito
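# The three lookup helpers above re-read their GeoJSON files on every call and
# test polygons one by one. For many points, a single vectorized spatial join
# is much faster. A sketch, assuming geopandas >= 0.10 for the `predicate`
# keyword; annotate_stations is a hypothetical name, not part of this project:
def annotate_stations(locations):
    # One spatial join instead of one polygon scan per station.
    zips = gpd.read_file(
        os.path.join(getRootPath(), "data/distritos/MADRID.json"))
    stations = gpd.GeoDataFrame(
        locations,
        geometry=gpd.points_from_xy(locations['longitude'],
                                    locations['latitude']),
        crs=zips.crs)
    joined = gpd.sjoin(stations, zips[['COD_POSTAL', 'geometry']],
                       how='left', predicate='within')
    return joined.rename(columns={'COD_POSTAL': 'cod_postal'})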
def writeLogsInFile(self, level, log_content):
    # Write one log entry per call into a per-day file; entries are separated
    # by newlines. Fall back to <root>\logs when the configured log directory
    # does not exist.
    if os.path.exists(self.logfilePath):
        log_dir = self.logfilePath
    else:
        log_dir = getRootPath() + '\\logs'
    log_file = log_dir + '\\' + getDate.getDate() + '.log'
    entry = (getDate.getDate() + ' ' + getDate.getTime() + ' WELOG ' +
             level.upper() + ' ' + log_content)
    if os.path.exists(log_file):
        with open(log_file, 'r', encoding='utf-8') as file:
            line_content = file.readline()
        with open(log_file, 'a+', encoding='utf-8') as fq:
            # Only prepend a newline when the file already has content.
            if line_content.strip() != '':
                fq.write('\n')
            fq.write(entry)
    else:
        # Create the file, then write the first entry.
        open(log_file, 'w', encoding='utf-8').close()
        with open(log_file, 'r+', encoding='utf-8') as fq:
            fq.write(entry)

# if __name__ == '__main__':
#     logHandler.logging("good")
#     logHandler.logging("test")
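# The branching above reimplements per-day log files by hand; the standard
# library's logging.handlers.TimedRotatingFileHandler covers the same ground.
# A sketch of an equivalent setup (the format string only approximates the
# WELOG line layout):
import logging
from logging.handlers import TimedRotatingFileHandler

def make_welog_logger(log_dir):
    # Rotate at midnight so each day gets its own file, as writeLogsInFile does.
    handler = TimedRotatingFileHandler(os.path.join(log_dir, 'welog.log'),
                                       when='midnight', encoding='utf-8')
    handler.setFormatter(
        logging.Formatter('%(asctime)s WELOG %(levelname)s %(message)s'))
    logger = logging.getLogger('welog')
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger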
def preprocessBikeTravelsSankeyDistritos(year, month):
    if not os.path.exists(
            os.path.join(getRootPath(),
                         'data/bike/stations_locations_PROCESSED.csv')):
        preprocessStationsLocation()
    locations = pd.read_csv(
        os.path.join(getRootPath(),
                     'data/bike/stations_locations_PROCESSED.csv'))
    filepath = os.path.join(
        getRootPath(),
        'data/bike/raw_data/' + year + str(month) + '_movements.json')
    df = pd.read_json(filepath, lines=True)
    df_movs = df[['_id', 'user_day_code', 'idunplug_station', 'idplug_station']]
    df_movsJoin1 = df_movs.merge(locations,
                                 left_on='idunplug_station',
                                 right_on='id',
                                 how='inner')
    df_movsJoin1.rename(columns={
        'name': 'unplug_stationName',
        'address': 'unplug_stationAddress',
        'longitude': 'unplug_stationLong',
        'latitude': 'unplug_stationLat',
        'barrio': 'unplug_stationBarrio',
        'distrito': 'unplug_stationDistrito',
        'cod_postal': 'unplug_stationZipCode'
    }, inplace=True)
    df_movsJoin1 = df_movsJoin1[[
        '_id', 'user_day_code', 'idunplug_station', 'idplug_station',
        'unplug_stationName', 'unplug_stationAddress', 'unplug_stationLong',
        'unplug_stationLat', 'unplug_stationBarrio', 'unplug_stationDistrito',
        'unplug_stationZipCode'
    ]]
    df_movsJoin2 = df_movsJoin1.merge(locations,
                                      left_on='idplug_station',
                                      right_on='id',
                                      how='inner')
    df_movsJoin2.rename(columns={
        'name': 'plug_stationName',
        'address': 'plug_stationAddress',
        'longitude': 'plug_stationLong',
        'latitude': 'plug_stationLat',
        'barrio': 'plug_stationBarrio',
        'distrito': 'plug_stationDistrito',
        'cod_postal': 'plug_stationZipCode'
    }, inplace=True)
    df_movs_loc = df_movsJoin2[[
        '_id', 'user_day_code', 'idunplug_station', 'idplug_station',
        'unplug_stationName', 'unplug_stationAddress', 'unplug_stationLong',
        'unplug_stationLat', 'unplug_stationBarrio', 'unplug_stationDistrito',
        'unplug_stationZipCode', 'plug_stationName', 'plug_stationAddress',
        'plug_stationLong', 'plug_stationLat', 'plug_stationBarrio',
        'plug_stationDistrito', 'plug_stationZipCode'
    ]]
    grouped_byName = df_movs_loc.groupby(
        ['unplug_stationDistrito', 'plug_stationDistrito'])
    grouped_byName_count = grouped_byName.count().reset_index()
    grouped_byName_count = grouped_byName_count[[
        'unplug_stationDistrito', 'plug_stationDistrito', '_id'
    ]]
    grouped_byName_count = grouped_byName_count.rename(
        columns={"_id": "travels"})
    grouped_byName_count.to_csv(
        os.path.join(
            getRootPath(),
            'data/bike/' + year + str(month) + '_movements_Distritos.csv'))
import pandas as pd
import numpy as np
import networkx as nx
import os
from utils import getRootPath
import pickle
from sklearn import preprocessing

# Load undirected graphs
with open(os.path.join(getRootPath(), 'data/Netu2refc_bogota_th1.cnf'), 'rb') as f:
    Net_undBog = pickle.load(f)
with open(os.path.join(getRootPath(), 'data/Netu2refc_medellin_th1.cnf'), 'rb') as f:
    Net_undMed = pickle.load(f)

# Merging the BOG and MED descriptors and joining them with antenna
# latitude/longitude is done by prepare_descs, defined above.
def GetRely(cls, requestData, relyData):
    """
    params: requestData (str, dict) request parameters
    params: relyData (str, dict) dependency mapping, in the form
            {request-or-response: {dependent param key: interfaceName->caseId}}
    result: requestData (dict) request parameters with dependencies resolved
    """
    if not requestData:
        # No request parameters, so there are no dependencies to resolve.
        return
    elif requestData and not relyData:
        # No dependencies: return the request parameters as-is.
        if isinstance(requestData, str):
            return eval(requestData)
        elif isinstance(requestData, dict):
            return requestData
    else:
        # Dependencies exist.
        if isinstance(requestData, str):
            requestData = eval(requestData)
        if isinstance(relyData, str):
            relyData = eval(relyData)
        # e.g. {"request": {"username->username1": "register->1",
        #                   "password->password1": "register->1"}}
        for key, value in relyData.items():
            if key == "request":
                excutelog("info", "----------- resolving request-parameter dependencies ----------")
                for k, v in value.items():
                    relyKey, requestParamsKey = k.split("->")
                    interfaceName, caseId = v.split("->")
                    excutelog("info", "key in the previous request: -------%s" % relyKey)
                    excutelog("info", "key to fill in the current request: -------%s" % requestParamsKey)
                    excutelog("info", "depended-on interface name: -------%s" % interfaceName)
                    excutelog("info", "depended-on case id: -------%s" % caseId)
                    # Walk the interface-name column of the API sheet to find
                    # the case sheet that belongs to this interface.
                    for idx, vle in enumerate(
                            handleExcel.getColumnsObject(
                                getRootPath() + "\\data\\case.xlsx", "API",
                                API_apiName)[1:], 2):
                        if vle.value == interfaceName:
                            apiCaseSheet = handleExcel.getValueOfCell(
                                getRootPath() + "\\data\\case.xlsx", "API",
                                columnNo=ord(API_apiTestCaseFileName) - 64,
                                rowNo=idx)  # sheet holding the depended-on cases
                            excutelog("info", "sheet of the depended-on case: -------%s" % apiCaseSheet)
                            val = eval(
                                handleExcel.getValueOfCell(
                                    getRootPath() + "\\data\\case.xlsx",
                                    apiCaseSheet,
                                    columnNo=ord(CASE_requestData) - 64,
                                    rowNo=int(caseId) + 1))[relyKey]
                            requestData[requestParamsKey] = val
                excutelog("info", "request parameters after resolving dependencies: -------%s" % requestData)
                return requestData
            elif key == "response":
                excutelog("info", "----------- resolving response-body dependencies ----------")
                for k, v in value.items():
                    interfaceName, caseId = v.split("->")
                    relyKey, requestParamsKey = k.split("->")
                    excutelog("info", "key in the previous response: -------%s" % relyKey)
                    excutelog("info", "key to fill in the current request: -------%s" % requestParamsKey)
                    excutelog("info", "depended-on interface name: -------%s" % interfaceName)
                    excutelog("info", "depended-on case id: -------%s" % caseId)
                    for idx, vle in enumerate(
                            handleExcel.getColumnsObject(
                                getRootPath() + "\\data\\case.xlsx", "API",
                                API_apiName)[1:], 2):
                        if vle.value == interfaceName:
                            apiCaseSheet = handleExcel.getValueOfCell(
                                getRootPath() + "\\data\\case.xlsx", "API",
                                columnNo=ord(API_apiTestCaseFileName) - 64,
                                rowNo=idx)
                            excutelog("info", "sheet of the depended-on case: -------%s" % apiCaseSheet)
                            val = eval(
                                handleExcel.getValueOfCell(
                                    apiCaseSheet,
                                    coordinate=None,
                                    columnNo=ord(CASE_responseData) - 64,
                                    rowNo=int(caseId) + 1))[relyKey]
                            requestData[requestParamsKey] = val
                excutelog("info", "request parameters after resolving dependencies: -------%s" % requestData)
                return requestData
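# For reference, the shapes GetRely expects, reconstructed from its docstring
# and the inline example; the concrete keys and values are illustrative only:
example_requestData = {"username1": "", "password1": ""}
example_relyData = {
    "request": {
        # Take "username" from case 1 of the "register" interface and
        # inject it into the current request as "username1".
        "username->username1": "register->1",
        "password->password1": "register->1",
    }
}
# resolved = GetRely(cls, str(example_requestData), str(example_relyData))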
def paintBikeTravelsSankey(year, month):
    if not os.path.exists(
            os.path.join(
                getRootPath(),
                'data/bike/' + year + str(month) + '_movements_Distritos.csv')):
        preprocessBikeTravelsSankeyDistritos(year, month)
    grouped_byDistrito_count = pd.read_csv(
        os.path.join(
            getRootPath(),
            'data/bike/' + year + str(month) + '_movements_Distritos.csv'))
    barrios = list(
        set(list(grouped_byDistrito_count['unplug_stationDistrito'])))
    s = [
        barrios.index(b)
        for b in list(grouped_byDistrito_count['unplug_stationDistrito'])
    ]
    t = [
        barrios.index(b)
        for b in list(grouped_byDistrito_count['plug_stationDistrito'])
    ]
    v = list(grouped_byDistrito_count['travels'])
    # Build the color dictionaries.
    dic_colors_barrios = {}
    colors = [
        'rgba(31, 119, 180, 0.8)', 'rgba(255, 127, 14, 0.8)',
        'rgba(44, 160, 44, 0.8)', 'rgba(214, 39, 40, 0.8)',
        'rgba(148, 103, 189, 0.8)', 'rgba(140, 86, 75, 0.8)',
        'rgba(227, 119, 194, 0.8)', 'rgba(127, 127, 127, 0.8)',
        'rgba(188, 189, 34, 0.8)', 'rgba(23, 190, 207, 0.8)'
    ]
    for barrio in barrios:
        index = barrios.index(barrio)
        # Cycle through the palette when there are more districts than colors.
        dic_colors_barrios[index] = colors[index % len(colors)]
    dic_colors_links = [dic_colors_barrios[element] for element in s]
    fig = go.Figure(data=[
        go.Sankey(
            node=dict(
                pad=15,
                thickness=20,
                line=dict(color="black", width=0.5),
                label=barrios,
                customdata=barrios,
                color=[dic_colors_barrios[i] for i in range(len(barrios))],
                hovertemplate=
                'El barrio %{customdata} ha tenido un total de viajes de: %{value}<extra></extra>',
            ),
            link=dict(
                source=s,
                target=t,
                value=v,
                color=dic_colors_links,
                customdata=barrios,
                hovertemplate='Desde %{source.customdata}<br />' +
                'hasta %{target.customdata}<br />ha habido %{value} viajes<extra></extra>'
            ))
    ])
    fig.update_layout(title_text="Viajes entre los barrios de Madrid",
                      font_size=10)
    return fig
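# Minimal usage sketch for the Sankey builder; the year/month strings are
# illustrative and must match an existing raw movements file, e.g.
# data/bike/raw_data/202007_movements.json:
fig = paintBikeTravelsSankey('2020', '07')
fig.show()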