def Test_Histogram(Graph_MD):
    """ Returns histogram for multi-directional network graph **Graph_MD**.

    \n.. comments:
    Input:
        Graph_MD         Instance of an NX multi-directional graph
    Return:
        HistSegmKnoten   Vector with histogram values
    """
    Edges = NX.edges(Graph_MD)
    KnotenNamen = NX.nodes(Graph_MD)

    KnotenNamenListe = M_Helfer.unique_String(KnotenNamen)
    NumKnotenListe = len(KnotenNamenListe)

    # Per-node edge counter, initialized with zeros
    KnotenLeitung = arr.array('i', [0] * NumKnotenListe)

    # Count for each node how many edges start or end at it
    for ii in list(range(NumKnotenListe)):
        KnotenName = KnotenNamenListe[ii]
        for edge in Edges:
            if edge[0] == KnotenName:
                KnotenLeitung[ii] = KnotenLeitung[ii] + 1
            if edge[1] == KnotenName:
                KnotenLeitung[ii] = KnotenLeitung[ii] + 1

    # Histogram: entry ii counts the nodes that touch exactly ii edges
    MaxKnotenLeitung = max(KnotenLeitung)
    HistSegmKnoten = M_MatLab.zeros('i', MaxKnotenLeitung + 1)
    for ii in list(range(0, MaxKnotenLeitung + 1)):
        HistSegmKnoten[ii] = len(
            M_FindPos.find_pos_ValInVector(ii, KnotenLeitung, '=='))

    return HistSegmKnoten
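# Illustrative usage sketch (not part of the original module; assumes this
# module's NX = networkx alias and helper imports are available):
def _demo_Test_Histogram():
    """ The histogram entry at index k counts the nodes touching exactly
    k edges. """
    G = NX.MultiDiGraph()
    G.add_edges_from([('A', 'B'), ('B', 'C'), ('B', 'C')])
    hist = Test_Histogram(G)
    # 'A' touches 1 edge, 'C' touches 2, 'B' touches 3
    # -> hist[0] == 0, hist[1] == 1, hist[2] == 1, hist[3] == 1
    return hist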
def testData(timeSeries, TypeName, PercVal, TypeVal):
    """ Function to determine if time series **timeSeries** should not be used
    (set to zero length). Two different tests can be carried out, selected
    through **TypeName**. Additional values are required for **TypeName** =
    'PercentAbsDiff', where **PercVal** is the percentile at which the sorted
    absolute difference is required to have the value **TypeVal**.

    \n.. comments:
    Input:
        timeSeries         list of values, time series
        TypeName           string containing the type of test:
                           'MedianAbsDiff'
                           'PercentAbsDiff'
        PercVal            percent value [%] at which the test is applied
        TypeVal            value the sorted absolute difference must take
    Output:
        returnTimeSeries   time series of flow data; same as input if the test
                           is passed, otherwise of length 0.
    """
    returnTimeSeries = []
    timeSeriesWork = timeSeries

    if ('MedianAbsDiff' in TypeName) and (len(timeSeriesWork) > 3):
        returnTimeSeries = testData(timeSeries, 'PercentAbsDiff', 0.5, TypeVal)

    elif ('PercentAbsDiff' in TypeName) and (len(timeSeriesWork) > 3):
        # forming diff between adjacent values
        timeSeriesWork = M_MatLab.grad_Vector(timeSeriesWork)
        # making diff absolute
        timeSeriesWork = [abs(x) for x in timeSeriesWork]
        # sorting data set (NaN removed) to then select and test the
        # percentile value
        timeSeriesWork = sorted(
            [x for x in timeSeriesWork if not math.isnan(x)], reverse=True)
        PercVal = int(len(timeSeries) * PercVal / 100)
        if timeSeriesWork[PercVal] == TypeVal:
            returnTimeSeries = timeSeries

    elif len(timeSeriesWork) < 4:
        pass

    else:
        print('M_Helfer.testData: TypeName: ' + TypeName + ' not coded.')

    return returnTimeSeries
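# Minimal standalone sketch of the 'PercentAbsDiff' test above (illustrative;
# assumes M_MatLab.grad_Vector returns differences between adjacent values):
# a series passes if the sorted absolute adjacent difference at the requested
# percentile equals the expected value.
def _demo_percentAbsDiff():
    series = [5.0] * 10                       # constant series
    diffs = [series[i + 1] - series[i] for i in range(len(series) - 1)]
    diffs = sorted((abs(x) for x in diffs if not math.isnan(x)), reverse=True)
    idx = int(len(series) * 4 / 100)          # PercVal = 4 [%]
    return diffs[idx] == 0                    # TypeVal = 0 -> True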
def find_MatchNetzPoint(Netz_1, CompName_1, Netz_2, CompName_2,
                        multDist=1, testRun=False, powerVal=1):
    """ Finds a vector containing the positions of which component of
    **Netz_1** should be linked with which component of **Netz_2**, based on
    the attributes name, lat, and long. Returns the position lists for both
    Netz instances and the goodness values (ranging 0..1).

    \n.. comments:
    Input:
        Netz_1:      Netz class instance
        CompName_1:  string, component name of Netz_1 to be used
        Netz_2:      Netz class instance
        CompName_2:  string, component name of Netz_2 to be used
        multDist:    (Optional = 1) multiplier applied to the distance matrix
        powerVal:    (Optional = 1) exponent applied to the distance matrix
        testRun:     (Optional = False)
                     True  = will NOT carry out the long while loop
                     False = will carry out the long while loop
    Return:
        pos_1:       list of ints, positions from Netz_1
        pos_2:       list of ints, positions from Netz_2
        GoodnessVal: list of floats, goodness values
    """
    # Selecting the data based on component from Netz_1
    Comp_1 = Netz_1.__dict__[CompName_1]
    # Selecting the data based on component from Netz_2
    Comp_2 = Netz_2.__dict__[CompName_2]

    # Initialization of variables
    pos_1 = []
    pos_2 = []
    GoodnessVal = []
    posLeft_1 = [s for s in range(len(Comp_1))]
    posLeft_2 = [s for s in range(len(Comp_2))]
    Run_1 = True
    Run_2 = True

    # So that test runs with shorter run time can be executed
    if testRun:
        Run_1 = False

    logFileName = '../Ausgabe/log_' + str(multDist) + '.csv'

    Name_Orig_1 = M_Helfer.get_NotPos(Comp_1, pos_1, 'name')
    Name_Orig_2 = M_Helfer.get_NotPos(Comp_2, pos_2, 'name')

    # Running through the data set for the first time, to catch all locations
    # where the name is exactly the same
    [Name_1, lat_1, long_1] = M_Helfer.get_NotPos3(Comp_1, pos_1)
    [Name_2, lat_2, long_2] = M_Helfer.get_NotPos3(Comp_2, pos_2)

    # Getting matching location names
    [New_pos_1, New_pos_2] = M_Helfer.get_NameMatch(Name_1, Name_2)

    # Getting distances
    Distances = M_Projection.LatLong2DistanceMatrix(lat_1, long_1,
                                                    lat_2, long_2)
    InvDistReal2 = M_MatLab.pow_Matrix(Distances, powerVal)
    InvDistReal = M_MatLab.multi_MatrixConst(InvDistReal2, multDist)

    # Writing results to a CSV file
    if os.path.isfile(logFileName):
        os.remove(logFileName)
    M_Helfer.txt2File(
        logFileName,
        'EntsoG_Name;Netz_Name;NameSim;Distance;Goodness;EntsoG_pos;Netz_pos')

    for ii in range(len(New_pos_1)):
        # adding new positions to vector of positions found
        pos_1.append(New_pos_1[ii])
        pos_2.append(New_pos_2[ii])
        GoodnessVal.append(100 - InvDistReal[New_pos_1[ii]][New_pos_2[ii]])

        # removing found positions from vectors of positions still to be found
        try:
            posLeft_1.remove(New_pos_1[ii])
        except ValueError:
            pass
        try:
            posLeft_2.remove(New_pos_2[ii])
        except ValueError:
            pass

        # writing to log file
        strstr = Name_Orig_1[New_pos_1[ii]] + ';' + \
            Name_Orig_2[New_pos_2[ii]] + ';' + \
            '100;' + \
            str(Distances[New_pos_1[ii]][New_pos_2[ii]]) + ';' + \
            str(100 - InvDistReal[New_pos_1[ii]][New_pos_2[ii]]) + ';' + \
            str(New_pos_1[ii]) + ';' + str(New_pos_2[ii])
        M_Helfer.txt2File(logFileName, strstr)

    # Generating un-shrunk data for later
    [Orig_Name_1, Orig_lat_1, Orig_long_1] = M_Helfer.get_NotPos3(Comp_1, [])
    [Orig_Name_2, Orig_lat_2, Orig_long_2] = M_Helfer.get_NotPos3(Comp_2, [])

    # Forming matrixes
    Name_Matrix_Orig = M_Helfer.get_NameMatrix_Fuzzy(Orig_Name_1, Orig_Name_2)
    Dist_Matrix_Orig = M_Projection.LatLong2DistanceMatrix(
        Orig_lat_1, Orig_long_1, Orig_lat_2, Orig_long_2)
    Dist_Matrix_Orig2 = M_MatLab.pow_Matrix(Dist_Matrix_Orig, powerVal)
    Dist_Matrix_Orig3 = M_MatLab.multi_MatrixConst(Dist_Matrix_Orig2, multDist)

    # Combining matrixes
    GoodnessMatrix_Orig = M_MatLab.sub_2Matrix(Name_Matrix_Orig,
                                               Dist_Matrix_Orig3)

    # Now going through the rest of the data set
    while Run_2 and Run_1:
        GoodnessMatrix_Shrunk = M_MatLab.shrink_Matrix(GoodnessMatrix_Orig,
                                                       posLeft_1, posLeft_2)
        Name_Matrix_Shrunk = M_MatLab.shrink_Matrix(Name_Matrix_Orig,
                                                    posLeft_1, posLeft_2)
        Dist_Matrix_Shrunk = M_MatLab.shrink_Matrix(Dist_Matrix_Orig,
                                                    posLeft_1, posLeft_2)

        # determine positions in shrunk data sets
        [pos_Shrunk_1, pos_Shrunk_2] = M_FindPos.find_pos_ConditionInMatrix(
            GoodnessMatrix_Shrunk, 'max')
        nam = Name_Matrix_Shrunk[pos_Shrunk_1][pos_Shrunk_2]
        dis = Dist_Matrix_Shrunk[pos_Shrunk_1][pos_Shrunk_2]
        GoodnessVal.append(GoodnessMatrix_Shrunk[pos_Shrunk_1][pos_Shrunk_2])

        # determine position in original data sets
        pos_Orig_1 = posLeft_1[pos_Shrunk_1]
        pos_Orig_2 = posLeft_2[pos_Shrunk_2]
        pos_1.append(pos_Orig_1)
        pos_2.append(pos_Orig_2)
        posLeft_1.remove(pos_Orig_1)
        posLeft_2.remove(pos_Orig_2)

        # For log file
        strstr = Name_Orig_1[pos_Orig_1] + ';' + Name_Orig_2[pos_Orig_2] + \
            ';' + str(nam) + ';' + str(dis) + ';' + \
            str(GoodnessMatrix_Shrunk[pos_Shrunk_1][pos_Shrunk_2]) + ';' + \
            str(pos_Orig_1) + ';' + str(pos_Orig_2)
        M_Helfer.txt2File(logFileName, strstr)

        # Check if loop needs to stop
        if len(pos_1) == len(Comp_1):
            Run_1 = False
        if len(pos_2) == len(Comp_2):
            Run_2 = False

    return pos_1, pos_2, GoodnessVal
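# Minimal NumPy sketch of the greedy pairing idea used in the while loop
# above (illustrative, not the project's M_MatLab-based implementation):
# repeatedly pick the globally best remaining pair, then exclude its row
# and column.
def _demo_greedy_goodness_match():
    import numpy as np
    goodness = np.array([[90.0, 10.0], [20.0, 80.0]])
    pairs = []
    while not np.all(np.isneginf(goodness)):
        i, j = np.unravel_index(np.argmax(goodness), goodness.shape)
        pairs.append((i, j, goodness[i, j]))
        goodness[i, :] = -np.inf              # row i is matched
        goodness[:, j] = -np.inf              # column j is matched
    return pairs                              # [(0, 0, 90.0), (1, 1, 80.0)]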
def find_Match_Attrib(Netz_1, CompName_1, AttribName_1,
                      Netz_2, CompName_2, AttribName_2,
                      SearchOption='single', CountryOn=False,
                      AddInWord=0, String2Lower=False):
    """ Finds a vector containing the positions of which component of
    **Netz_1** should be linked with which component of **Netz_2**, based on
    the fuzzy similarity of the attributes **AttribName_1** and
    **AttribName_2**. Returns the position lists for both Netz instances and
    the goodness values.

    \n.. comments:
    Input:
        Netz_1:        Netz class instance
        CompName_1:    string, component name of Netz_1 to be used
        AttribName_1:  string, attribute name of the Netz_1 component
        Netz_2:        Netz class instance
        CompName_2:    string, component name of Netz_2 to be used
        AttribName_2:  string, attribute name of the Netz_2 component
        SearchOption:  string indicating if entries from Netz_2 are allowed
                       to be used multiple times: 'multi' or 'single'
        CountryOn:     (Optional = False), compares only locations in the
                       same country
        AddInWord:     (Optional = 0), value added if the name of one set is
                       contained in the name of the other set
        String2Lower:  (Optional = False), if strings shall be converted to
                       lower case
    Return:
        pos_1:         list of ints, positions from Netz_1
        pos_2:         list of ints, positions from Netz_2
        GoodnessVal:   list of floats, goodness values
    """
    # Selecting the data based on component from Netz_1
    Comp_1 = Netz_1.__dict__[CompName_1]
    # Selecting the data based on component from Netz_2
    Comp_2 = Netz_2.__dict__[CompName_2]

    # Initialization of variables
    pos_1 = []
    pos_2 = []
    GoodnessVal = []
    posLeft_1 = [s for s in range(len(Comp_1))]
    posLeft_2 = [s for s in range(len(Comp_2))]
    Run_1 = True
    Run_2 = True

    # Dealing with country subset: build a 0/1 mask that only allows matches
    # within the same country (unknown country codes are always allowed)
    if CountryOn:
        Country_1 = M_Helfer.get_NotPos(Comp_1, pos_1, 'country_code')
        Country_2 = M_Helfer.get_NotPos(Comp_2, pos_2, 'country_code')
        Country_Matrix_Orig = M_Helfer.get_NameMatrix_Fuzzy(Country_1,
                                                            Country_2)
        for xx in range(len(Country_Matrix_Orig)):
            for yy in range(len(Country_Matrix_Orig[0])):
                if Country_Matrix_Orig[xx][yy] >= 100:
                    Country_Matrix_Orig[xx][yy] = 1
                elif Country_1[xx] is None or Country_2[yy] is None:
                    Country_Matrix_Orig[xx][yy] = 1
                else:
                    Country_Matrix_Orig[xx][yy] = 0
    else:
        # No country restriction: mask of all ones
        Country_1 = M_Helfer.get_NotPos(Comp_1, pos_1, 'country_code')
        Country_2 = M_Helfer.get_NotPos(Comp_2, pos_2, 'country_code')
        Country_Matrix_Orig = M_Helfer.get_NameMatrix_Fuzzy(Country_1,
                                                            Country_2)
        for xx in range(len(Country_Matrix_Orig)):
            for yy in range(len(Country_Matrix_Orig[0])):
                Country_Matrix_Orig[xx][yy] = 1

    if String2Lower:
        print('change code')  # TODO: lower-case conversion not implemented

    # Running through the data set for the first time, to catch all locations
    # where the name is exactly the same
    Name_1 = M_Helfer.get_NotPos(Comp_1, pos_1, AttribName_1)
    Name_2 = M_Helfer.get_NotPos(Comp_2, pos_2, AttribName_2)

    # Getting matching location names
    [New_pos_1, New_pos_2] = M_Helfer.get_NameMatch(Name_1, Name_2)

    # Generating un-shrunk data for later
    Orig_Name_1 = M_Helfer.get_NotPos(Comp_1, [], AttribName_1)
    Orig_Name_2 = M_Helfer.get_NotPos(Comp_2, [], AttribName_2)
    Name_Matrix_Orig = M_Helfer.get_NameMatrix_Fuzzy(Orig_Name_1, Orig_Name_2,
                                                     AddInWord)
    Name_Matrix_Orig = M_MatLab.multi_2Matrix(Name_Matrix_Orig,
                                              Country_Matrix_Orig)

    # Combining matrixes
    GoodnessMatrix_Orig = Name_Matrix_Orig

    # Now going through the rest of the data set
    while Run_2 and Run_1:
        GoodnessMatrix_Shrunk = M_MatLab.shrink_Matrix(GoodnessMatrix_Orig,
                                                       posLeft_1, posLeft_2)

        # determine positions in shrunk data sets
        [pos_Shrunk_1, pos_Shrunk_2] = M_FindPos.find_pos_ConditionInMatrix(
            GoodnessMatrix_Shrunk, 'max')
        GoodnessVal.append(GoodnessMatrix_Shrunk[pos_Shrunk_1][pos_Shrunk_2])

        # determine position in original data sets
        pos_Orig_1 = posLeft_1[pos_Shrunk_1]
        pos_Orig_2 = posLeft_2[pos_Shrunk_2]
        pos_1.append(pos_Orig_1)
        posLeft_1.remove(pos_Orig_1)
        pos_2.append(pos_Orig_2)
        if 'single' in SearchOption:
            posLeft_2.remove(pos_Orig_2)

        # Check if loop needs to stop
        if len(pos_1) == len(Comp_1):
            Run_1 = False
        if len(pos_2) == len(Comp_2):
            Run_2 = False

    return [pos_1, pos_2, GoodnessVal]
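# Minimal sketch of the 0/1 country mask used above (illustrative, not the
# M_Helfer implementation): cross-country pairs are zeroed out unless a
# country code is missing, then the mask is applied element-wise to the
# fuzzy name-similarity matrix.
def _demo_country_mask():
    sim = [[95, 80], [60, 99]]                # fuzzy name similarities
    countries_1 = ['DE', 'NL']
    countries_2 = ['DE', 'NL']
    mask = [[1 if (c1 is None or c2 is None or c1 == c2) else 0
             for c2 in countries_2] for c1 in countries_1]
    masked = [[s * m for s, m in zip(srow, mrow)]
              for srow, mrow in zip(sim, mask)]
    return masked                             # [[95, 0], [0, 99]]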
def findEdges_All(Netz_Set_1, Netz_Set_2, NodeDistDiff, LengthPercDiff):
    ###########################################################################
    # Forming networkx networks of data
    ###########################################################################
    [_, G_Set_1] = M_Graph.build_nx(InfoDict='', Daten=Netz_Set_1,
                                    Method='latLongNodes',
                                    removeExtraNodes=True)
    [_, G_Set_2] = M_Graph.build_nx(InfoDict='', Daten=Netz_Set_2,
                                    Method='latLongNodes',
                                    removeExtraNodes=True)
    Netz_Set_2 = M_Graph.Graph2Netz(G_Set_2)
    Netz_Set_1 = M_Graph.Graph2Netz(G_Set_1)

    ###########################################################################
    # Finding matching friends of Nodes
    ###########################################################################
    [pos_match_Netz_1, pos_add_Netz_1,
     pos_match_Netz_2, pos_add_Netz_2] = JoinNetz.match(
        Netz_Set_1, Netz_Set_2,
        compName='Nodes',
        threshold=15,
        multiSelect=True,
        numFuncs=1,
        funcs=(lambda comp_0, comp_1: M_Matching.getMatch_LatLong_CountryCode(
            comp_0, comp_1, method='distanceThreshold',
            thresholdVal=NodeDistDiff)))

    ###########################################################################
    # Lists of Nodes for each network
    ###########################################################################
    # lists of Nodes for each network with friends
    nodeID_Set_1_Friends = M_MatLab.select_Vector(
        Netz_Set_1.get_Attrib('Nodes', 'name'), pos_match_Netz_1)
    nodeID_Set_2_Friends = M_MatLab.select_Vector(
        Netz_Set_2.get_Attrib('Nodes', 'name'), pos_match_Netz_2)

    ###########################################################################
    # Working out which node has which friend in the other data set
    ###########################################################################
    # Set 1
    # Creation of matrix with distance values between points
    coords_Set_1 = get_coordlist(G_Set_1.nodes(),
                                 nodeIds=nodeID_Set_1_Friends)
    dist_matrix_Set_1 = CalcDistanceMatrix(coords_Set_1)
    # replacing all zeros with Inf (including diagonal and multiple entries),
    # as elements on both axes are not unique
    dist_matrix_Set_1[dist_matrix_Set_1 == 0] = np.inf

    # Set 2
    coords_Set_2 = get_coordlist(G_Set_2.nodes(),
                                 nodeIds=nodeID_Set_2_Friends)
    dist_matrix_Set_2 = CalcDistanceMatrix(coords_Set_2)
    # replacing all zeros with Inf (including diagonal and multiple entries),
    # as elements on both axes are not unique
    dist_matrix_Set_2[dist_matrix_Set_2 == 0] = np.inf

    # Forming the difference in path lengths between both data sets:
    # relative difference matrix, values in [0..inf]
    dist_matrix_Diff = M_MatLab.relDiff_2Matrix(dist_matrix_Set_1,
                                                dist_matrix_Set_2)
    dist_matrix_Diff = M_MatLab.abs_Matrix(dist_matrix_Diff)
    # replacing all NaN with Inf (including diagonal and multiple entries),
    # as np.inf values were "converted" to np.nan while creating this matrix
    dist_matrix_Diff[np.isnan(dist_matrix_Diff)] = np.inf

    ###########################################################################
    # Getting the minimum relative distance value and the corresponding index
    ###########################################################################
    # Getting the node indices of the pipeSegments that are the same in both
    # data sets, through the distance matrix
    Ret_DiffIndex = []
    Ret_MinVal = []
    Final_Ret_DiffIndex = []
    Final_Ret_MinVal = []
    minimum_Diff_Index = np.unravel_index(
        np.argmin(dist_matrix_Diff, axis=None), dist_matrix_Diff.shape)
    minVal = dist_matrix_Diff.min()

    while minVal < LengthPercDiff:
        Ret_DiffIndex.append(minimum_Diff_Index)
        Ret_MinVal.append(minVal)
        dist_matrix_Diff[minimum_Diff_Index[0]][minimum_Diff_Index[1]] = np.inf
        dist_matrix_Diff[minimum_Diff_Index[1]][minimum_Diff_Index[0]] = np.inf
        minimum_Diff_Index = np.unravel_index(
            np.argmin(dist_matrix_Diff, axis=None), dist_matrix_Diff.shape)
        minVal = dist_matrix_Diff.min()

    for idx in range(len(Ret_MinVal)):
        minimum_Diff_Index = Ret_DiffIndex[idx]
        source = nodeID_Set_1_Friends[minimum_Diff_Index[0]]
        target = nodeID_Set_1_Friends[minimum_Diff_Index[1]]
        dist1, numberNodes = getLength(G_Set_1, source=source, target=target,
                                       weight='weight')
        dist2, __ = getLength(
            G_Set_2,
            source=nodeID_Set_2_Friends[minimum_Diff_Index[0]],
            target=nodeID_Set_2_Friends[minimum_Diff_Index[1]],
            weight='weight')
        minVal = abs(dist1 - dist2) / dist1
        if minVal < LengthPercDiff and numberNodes == 2:
            Final_Ret_DiffIndex.append(minimum_Diff_Index)
            Final_Ret_MinVal.append(minVal)

    return (Final_Ret_DiffIndex, Final_Ret_MinVal, G_Set_1, G_Set_2,
            nodeID_Set_1_Friends, nodeID_Set_2_Friends)
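# Standalone sketch of the selection loop above (illustrative): repeatedly
# pull the smallest relative difference out of the matrix until it reaches
# the threshold, masking each picked symmetric pair with Inf.
def _demo_pick_pairs_below():
    import numpy as np
    diff = np.array([[np.inf, 0.02], [0.02, np.inf]])
    pairs = []
    while diff.min() < 0.05:                  # LengthPercDiff = 0.05
        i, j = np.unravel_index(np.argmin(diff), diff.shape)
        pairs.append((i, j, diff[i, j]))
        diff[i, j] = np.inf                   # mask the picked entry ...
        diff[j, i] = np.inf                   # ... and its mirror
    return pairs                              # [(0, 1, 0.02)]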
def read_component(DataType='', NumDataSets=1e+100, RelDirName=None):
    """ Method of reading in Norway non-infield pipelines from shape files.
    **RelDirName** supplies the relative location of the shape files, whereas
    **DataType** specifies which component is to be read in, with options
    'PipeLines' and 'Nodes'.

    \n.. comments:
    Input:
        DataType:     string, specifying the component to be read in
                      (default = '')
        NumDataSets:  number, indicating the maximum number of elements to be
                      read in (default = 1e+100)
        RelDirName:   string, containing the relative path name of where data
                      will be loaded from (default = None)
    Return:
        []
    """
    # init variable to return and counter
    ReturnComponent = []
    count = 0
    # source and target coordinate systems
    inCoord = 'epsg:4230'
    outCoord = 'epsg:4326'

    # Path to shapefile
    FileName_Map = os.path.join(RelDirName, 'pipLine.shp')

    # Read in shapefile
    Shapes = shapefile.Reader(FileName_Map)

    if DataType in 'PipeLines':
        # go through every pipeline stored in shapefile
        for shape in Shapes.shapeRecords():
            # only read out gas pipelines
            if 'Gas' == shape.record[11]:
                # Getting the coordinates of the PipeSegment
                parts = sorted(shape.shape.parts)

                # Joining X and Y coordinates from shape.shape.points
                vec = shape.shape.points
                polyLine = K_Component.PolyLine(lat=[], long=[])
                for x, y in vec:
                    polyLine.long.append(x)
                    polyLine.lat.append(y)

                # check if coordinates exist
                if len(polyLine.long) and len(polyLine.lat):
                    # Converting to LatLong
                    polyLine = M_Projection.XY2LatLong(polyLine, inCoord,
                                                       outCoord)

                    # Generation of PipeLine
                    PipeLine = M_Shape.PolyLine2PipeLines(
                        polyLine, parts, source=C_Code, country_code=C_Code)

                    for ii in range(len(PipeLine)):
                        PipeLine[ii].id = 'N_' + str(count)
                        PipeLine[ii].source_id = [C_Code + '_' + str(count)]
                        PipeLine[ii].name = shape.record[1]
                        PipeLine[ii].node_id = ['N_' + str(count * 2),
                                                'N_' + str(count * 2 + 1)]
                        PipeLine[ii].param.update({
                            'lat_mean':
                            M_MatLab.get_mean(PipeLine[ii].lat)[0]})
                        PipeLine[ii].param.update({
                            'long_mean':
                            M_MatLab.get_mean(PipeLine[ii].long)[0]})
                        # convert diameter from inches to mm
                        PipeLine[ii].param.update({
                            'diameter_mm': convInchToMm(shape.record[13])})
                        count = count + 1

                    ReturnComponent.extend(PipeLine)

                    if count > NumDataSets:
                        return ReturnComponent

    elif DataType in 'Nodes':
        # go through every pipeline stored in shapefile
        for shape in Shapes.shapeRecords():
            # only read out nodes of gas pipelines
            if 'Gas' == shape.record[11]:
                # Getting the coordinates of the PipeSegment
                parts = sorted(shape.shape.parts)

                # Joining X and Y coordinates from shape.shape.points
                vec = shape.shape.points
                polyLine = K_Component.PolyLine(lat=[], long=[])
                for x, y in vec:
                    polyLine.long.append(x)
                    polyLine.lat.append(y)

                # check if coordinates exist
                if len(polyLine.long) and len(polyLine.lat):
                    # Converting to LatLong
                    polyLine = M_Projection.XY2LatLong(polyLine, inCoord,
                                                       outCoord)

                    # Generation of PipeSegments
                    Segments = M_Shape.PolyLine2PipeSegment(
                        polyLine, parts, source=C_Code, country_code=C_Code)

                    # Generation of the Nodes from PipeSegments,
                    # two Nodes per PipeSegment
                    for seg in Segments:
                        # start node of the segment
                        id = 'N_' + str(len(ReturnComponent))
                        name = 'N_' + str(len(ReturnComponent))
                        node_id = [id]
                        source_id = [C_Code + '_' + str(len(ReturnComponent))]
                        country_code = C_Code
                        lat = seg.lat[0]
                        long = seg.long[0]
                        ReturnComponent.append(
                            K_Component.Nodes(id=id, node_id=node_id,
                                              name=name, lat=lat, long=long,
                                              source_id=source_id,
                                              country_code=country_code,
                                              param={}))
                        # end node of the segment
                        id = 'N_' + str(len(ReturnComponent))
                        name = 'N_' + str(len(ReturnComponent))
                        node_id = [id]
                        source_id = [C_Code + '_' + str(len(ReturnComponent))]
                        country_code = C_Code
                        lat = seg.lat[1]
                        long = seg.long[1]
                        ReturnComponent.append(
                            K_Component.Nodes(id=id, node_id=node_id,
                                              name=name, lat=lat, long=long,
                                              country_code=country_code,
                                              source_id=source_id,
                                              param={}))
                        count = count + 1

                        # Terminate if exceeding user requests
                        if count > NumDataSets:
                            return ReturnComponent

    return ReturnComponent
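# Illustrative note: `DataType in 'PipeLines'` above is a substring test,
# not an equality test, so 'Lines' and even the default '' also select the
# pipeline branch.
def _demo_DataType_matching():
    return ('PipeLines' in 'PipeLines',   # True
            'Lines' in 'PipeLines',       # True
            '' in 'PipeLines',            # True  (default DataType)
            'Nodes' in 'PipeLines')       # False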
def read_component(DataType='LNGs', NumDataSets=100000, requeYear=[2000],
                   DirName=None):
    """ Reading in GIE LNG and storage data sets from the API, with
    **NumDataSets** the maximum number of records to read, and **requeYear**
    the years for which to get data. **DirName** is the path of the directory
    containing the GIE meta data.

    \n.. comments:
    Input:
        DataType:     string, containing the data type to read, options are
                      'LNGs' or 'Storages'
        NumDataSets:  (Optional = 100000) maximum number of data sets
        requeYear:    (Optional = [2000]) list of numbers containing the years
                      [####] for which data is to be retrieved
        DirName:      path, containing the dir name where the GIE meta data
                      is located (e.g. 'Eingabe/GIE/'), default = None
    Return:
        ReturnComponent  Instance of component (list of single type elements)
    """
    # dealing with private key
    ReturnComponent = []
    pathPrivKey = os.path.join(os.getcwd(), 'Eingabe/GIE/GIE_PrivateKey.txt')
    if os.path.isfile(pathPrivKey) is False:
        print('ERROR: M_GIE.read_component: you will need to get a private '
              'key from the GIE API.')
        print('Please see documentation for help.')
        print('No data will be loaded')
        return ReturnComponent
    PrivKey = M_Helfer.getLineFromFile(pathPrivKey)

    if 'LNGs' in DataType:
        # Initialization
        webCall_1 = 'https://alsi.gie.eu/api/data/'
        eic_code = ''
        count = 0
        filename = str(DirName / 'GIE_LNG.csv')
        print(' LNGs progress:')

        # Reading meta data from CSV file
        # connecting to CSV file
        fid = open(filename, "r", encoding='iso-8859-15', errors='ignore')

        # Reading header line
        fid.readline()

        # Reading next line
        temp = M_Helfer.strip_accents(fid.readline()[:-1])
        while (len(temp) > 0) and (count < NumDataSets):
            typeval = temp.split(';')[1]
            if 'LSO' not in typeval:
                country_code = temp.split(';')[0]
                id = temp.split(';')[2]
                node_id = [id]
                source_id = [ID_Add + str(id)]
                facility_code = temp.split(';')[2]
                name = temp.split(';')[4]
                name_short = temp.split(';')[5]
                name_short = replaceString(name_short)

                ReturnComponent.append(
                    K_Component.LNGs(name=name, id=id, node_id=node_id,
                                     source_id=source_id,
                                     country_code=country_code,
                                     lat=None, long=None,
                                     param={'facility_code': facility_code,
                                            'name_short': name_short,
                                            'eic_code': eic_code}))
                count = count + 1
            else:
                eic_code = temp.split(';')[2]

            # Reading next line
            temp = M_Helfer.strip_accents(fid.readline()[:-1])

        # Creation of a pool manager
        http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                   ca_certs=certifi.where())

        # Reading for all created LNG terminals the data off the web page
        maxSets = len(ReturnComponent)
        count = 0
        for ii in range(maxSets):
            # Initialization
            workingLNGVolume = []
            Store2PipeCap = []

            # information from CSV file
            this_facility_code = ReturnComponent[ii].param['facility_code']
            this_country_code = ReturnComponent[ii].country_code
            this_eic_code = ReturnComponent[ii].param['eic_code']
            thisURL = webCall_1 + this_facility_code + '/' + \
                this_country_code + '/' + this_eic_code

            # Get the data
            URLData = http.request('GET', thisURL,
                                   headers={'x-key': PrivKey})

            # Convert the data into dict
            tables = []
            try:
                tables = json.loads(URLData.data.decode('UTF-8'))
            except Exception:
                print('ERROR: M_GIE.read_component(LNGs): reading URL failed')
                return []

            # checking that results coming back are ok
            if tables.__contains__('error'):
                print('M_GIE.read_component(LNGs): something wrong while '
                      'getting LNG data from GIE')
                print(tables)
            # Data allowed to be parsed
            else:
                for tt in tables:
                    # Dissecting the input
                    for year in requeYear:
                        if (tt['dtmi'] != '-') and \
                                (str(year) in tt['gasDayStartedOn']):
                            # declared total maximum inventory [1000 m^3 LNG]
                            workingLNGVolume.append(float(tt['dtmi']) * 1000)
                            # declared total reference send-out [GWh/d]
                            # (send-out capacity)
                            Store2PipeCap.append(float(tt['dtrs']))

                # Remove wrong data points
                workingLNGVolume = M_Helfer.testData(workingLNGVolume,
                                                     'PercentAbsDiff', 4, 0)
                Store2PipeCap = M_Helfer.testData(Store2PipeCap,
                                                  'PercentAbsDiff', 4, 0)

                # Update screen with dot
                print('.', end='')

                # Deriving required values from time series
                ReturnComponent[ii].param.update({
                    'max_workingLNG_M_m3':
                    M_MatLab.get_median(workingLNGVolume)[0] / 1000000})
                ReturnComponent[ii].param.update({
                    'median_cap_store2pipe_GWh_per_d':
                    M_MatLab.get_median(Store2PipeCap)[0]})
                ReturnComponent[ii].param.update({
                    'max_cap_store2pipe_GWh_per_d':
                    M_MatLab.get_max(Store2PipeCap)[0]})
                count = count + 1
                if count > NumDataSets:
                    print(' ')
                    return ReturnComponent

    elif 'Storages' in DataType:
        # Initialization
        webCall_1 = 'https://agsi.gie.eu/api/data/'
        eic_code = ''
        count = 0
        print(' STORAGES progress:')
        filename = str(DirName / 'GIE_Storages.csv')

        # Reading meta data from CSV file
        # connecting to CSV file
        fid = open(filename, "r", encoding="iso-8859-15",
                   errors="surrogateescape")

        # Reading header line
        fid.readline()

        # Reading next line
        temp = M_Helfer.strip_accents(fid.readline()[:-1])
        while (len(temp) > 0) and (count < NumDataSets):
            typeval = temp.split(';')[1]
            if 'Storage Facility' in typeval:
                country_code = temp.split(';')[0]
                id = temp.split(';')[2]
                node_id = [id]
                source_id = [ID_Add + str(id)]
                facility_code = temp.split(';')[2]
                name = temp.split(';')[4]
                name_short = temp.split(';')[5]
                name_short = replaceString(name_short)
                name_short = name_short.replace(' ', '')
                name_short = name_short.strip()
                if 'OudeStatenzijl' in name_short:
                    country_code = 'NL'
                elif 'KinsaleSouthwest' in name_short:
                    country_code = 'IRL'

                ReturnComponent.append(
                    K_Component.Storages(name=name, id=id, node_id=node_id,
                                         lat=None, long=None,
                                         source_id=source_id,
                                         country_code=country_code,
                                         param={'facility_code':
                                                facility_code,
                                                'eic_code': eic_code,
                                                'name_short': name_short}))
                count = count + 1
            else:
                eic_code = temp.split(';')[2]

            # Reading next line
            temp = M_Helfer.strip_accents(fid.readline()[:-1])

        # Creation of a pool manager
        http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                   ca_certs=certifi.where())

        # Reading for all created storages the data off the web page
        maxSets = min([len(ReturnComponent), NumDataSets])
        count = 0
        keepPos = []
        for ii in range(maxSets):
            # Initialization
            max_workingGas_M_m3 = []
            Store2PipeCap = []
            Pipe2StoreCap1 = []

            # information from CSV file
            this_facility_code = ReturnComponent[ii].param['facility_code']
            this_country_code = ReturnComponent[ii].country_code
            this_eic_code = ReturnComponent[ii].param['eic_code']
            thisURL = webCall_1 + this_facility_code + '/' + \
                this_country_code + '/' + this_eic_code

            # Get the data
            URLData = http.request('GET', thisURL,
                                   headers={'x-key': PrivKey})

            # Convert the data into dict
            tables = []
            try:
                tables = json.loads(URLData.data.decode('UTF-8'))
                # checking that results coming back are ok
                if tables.__contains__('error'):
                    print('M_GIE.read_component(Storages): something wrong '
                          'while getting Storage data from GIE')
                # Data allowed to be parsed
                else:
                    for tt in tables:
                        # Dissecting the input
                        for year in requeYear:
                            if (tt['gasInStorage'] != '-') and \
                                    (str(year) in tt['gasDayStartedOn']):
                                max_workingGas_M_m3.append(
                                    float(tt['workingGasVolume']))
                                Store2PipeCap.append(
                                    float(tt['injectionCapacity']))
                                Pipe2StoreCap1.append(
                                    float(tt['withdrawalCapacity']))

                    # Remove wrong data sets
                    max_workingGas_M_m3 = M_Helfer.testData(
                        max_workingGas_M_m3, 'PercentAbsDiff', 4, 0)
                    Store2PipeCap = M_Helfer.testData(Store2PipeCap,
                                                      'PercentAbsDiff', 4, 0)
                    Pipe2StoreCap = M_Helfer.testData(Pipe2StoreCap1,
                                                      'PercentAbsDiff', 4, 0)

                    # Deriving required values from time series
                    ReturnComponent[ii].param.update({
                        'max_workingGas_M_m3':
                        M_MatLab.get_max(max_workingGas_M_m3)[0]})
                    ReturnComponent[ii].param.update({
                        'max_cap_store2pipe_GWh_per_d':
                        M_MatLab.get_max(Store2PipeCap)[0]})
                    ReturnComponent[ii].param.update({
                        'max_cap_pipe2store_GWh_per_d':
                        M_MatLab.get_max(Pipe2StoreCap)[0]})

                    if math.isnan(ReturnComponent[ii].
                                  param['max_cap_pipe2store_GWh_per_d']):
                        ReturnComponent[ii].param[
                            'max_cap_pipe2store_GWh_per_d'] = None
                    if math.isnan(ReturnComponent[ii].
                                  param['max_cap_store2pipe_GWh_per_d']):
                        ReturnComponent[ii].param[
                            'max_cap_store2pipe_GWh_per_d'] = None
                    if math.isnan(
                            ReturnComponent[ii].param['max_workingGas_M_m3']):
                        ReturnComponent[ii].param['max_workingGas_M_m3'] = None

                    # Update screen with dot
                    print('.', end='')
                    keepPos.append(ii)
                    count = count + 1
                    if count > NumDataSets:
                        # Dealing with bad elements that did not return any
                        # URL results
                        tempNetz = K_Netze.NetComp()
                        tempNetz.Storages = ReturnComponent
                        tempNetz.select_byPos('Storages', keepPos)
                        ReturnComponent = tempNetz.Storages
                        print(' ')
                        return ReturnComponent
            except Exception:
                print('Warning: M_GIE.read_component(Storages): '
                      'reading URL failed')
                print('         for ', thisURL)

        # Dealing with bad elements that did not return any URL results
        tempNetz = K_Netze.NetComp()
        tempNetz.Storages = ReturnComponent
        tempNetz.select_byPos('Storages', keepPos)
        ReturnComponent = tempNetz.Storages
        print(' ')

    # fall-through return (also covers the LNGs branch completing normally)
    return ReturnComponent
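# Hypothetical call sketch (illustrative; paths are assumptions): requires
# the GIE meta-data CSVs under 'Eingabe/GIE/' and a valid API key stored in
# 'Eingabe/GIE/GIE_PrivateKey.txt'. DirName must support the '/' operator,
# i.e. be a pathlib.Path.
def _demo_read_GIE_Storages():
    from pathlib import Path
    Storages = read_component(DataType='Storages',
                              NumDataSets=500,
                              requeYear=[2019],
                              DirName=Path('Eingabe/GIE'))
    for store in Storages[:3]:
        print(store.name, store.param['max_workingGas_M_m3'])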