def co_sum(tarray):
    """Sum of the values in tarray, skipping NaN entries."""
    tarray = utils.listify(tarray)
    somme = 0
    for val in tarray:
        if utils.isnan(val):
            continue
        somme += val
    return somme

def co_count(tarray):
    """Number of non-NaN values in tarray."""
    tarray = utils.listify(tarray)
    count = 0
    for val in tarray:
        if utils.isnan(val):
            continue
        count += 1
    return count

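# A minimal usage sketch for the two cell operators above (hedged: assumes
# `utils.NAN` is a float NaN and `utils.isnan` detects it):
#
#   >>> co_sum([3.0, utils.NAN, 5.0, 1.0])    # NaN entries are skipped
#   9.0
#   >>> co_count([3.0, utils.NAN, 5.0, 1.0])  # counts non-NaN values only
#   3
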
def __getitem__(self, n):
    """Return the n-th track(s) of the collection. If n is a tuple of
    (uid, tid) patterns, return the sub-collection of matching tracks."""
    if isinstance(n, tuple):
        tracks = TrackCollection()
        for track in self:
            if Utils.compLike(track.uid, n[0]) and Utils.compLike(track.tid, n[1]):
                tracks.addTrack(track)
        return tracks
    return TrackCollection.__collectionnify(self.__TRACES[n])

def co_max(tarray):
    """Maximum of the values in tarray, skipping NaN entries."""
    tarray = utils.listify(tarray)
    if len(tarray) <= 0:
        return utils.NAN
    # Initialize to NaN so a NaN in first position cannot poison the result
    # (a NaN seed would make every `val > maxval` comparison False)
    maxval = utils.NAN
    for val in tarray:
        if utils.isnan(val):
            continue
        if utils.isnan(maxval) or val > maxval:
            maxval = val
    return maxval

def contains(self, timestamp):
    """Check whether a timestamp falls within [minTimestamp, maxTimestamp]
    and matches the optional pattern."""
    output = (self.minTimestamp <= timestamp) and (timestamp <= self.maxTimestamp)
    if self.pattern is not None:
        output = output and utils.compLike(str(timestamp), self.pattern)
    return output

def co_median(tarray):
    """Median of the values in tarray."""
    tarray = utils.listify(tarray)
    if len(tarray) <= 0:
        return utils.NAN
    n = len(tarray)
    # Sort the array (selection sort) to find the middle value
    tab_sort = []
    for i in range(n):
        valmin = tarray[0]
        # Find the minimum of the remaining values
        for val in tarray:
            if val <= valmin:
                valmin = val
        tarray.remove(valmin)
        tab_sort.append(valmin)
    # Odd/even length (integer division: float indices raise TypeError in Python 3)
    if n % 2 == 1:
        mediane = tab_sort[(n - 1) // 2]
    else:
        mediane = 0.5 * (tab_sort[n // 2] + tab_sort[n // 2 - 1])
    return mediane

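# A hedged sketch of the median behaviour (note: the input list is consumed
# by the selection sort above, so pass a copy if you need it afterwards):
#
#   >>> co_median([5, 1, 3])      # odd length: middle value
#   3
#   >>> co_median([5, 1, 3, 7])   # even length: mean of the two middle values
#   4.0
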
def co_avg(tarray):
    """Mean of the values in tarray, skipping NaN entries."""
    tarray = utils.listify(tarray)
    if len(tarray) <= 0:
        return utils.NAN
    mean = 0
    count = 0
    for val in tarray:
        if utils.isnan(val):
            continue
        count += 1
        mean += val
    if count == 0:
        return utils.NAN
    return mean / count

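# Hedged sketch for co_max / co_avg (same NaN-skipping convention as above):
#
#   >>> co_max([utils.NAN, 2.0, 8.0])   # NaN-safe even in first position
#   8.0
#   >>> co_avg([2.0, utils.NAN, 4.0])   # mean over the 2 defined values
#   3.0
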
def __init__(self, amps=None, kernels=None, distribution=DISTRIBUTION_NORMAL):
    """Noise process defined by amplitude(s), kernel(s) and a noise distribution."""
    if amps is None:
        self.amplitudes = [1]
        self.kernels = [DiracKernel()]
    else:
        self.amplitudes = utils.listify(amps)
        self.kernels = utils.listify(kernels)
    self.distribution = distribution
    if len(self.amplitudes) != len(self.kernels):
        print("Error: amplitude and kernel lists must have same size in NoiseProcess")
        exit()

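# Hedged sketch: a NoiseProcess pairing one amplitude with one kernel
# (DiracKernel is the default kernel used above):
#
#   >>> proc = NoiseProcess(amps=[2.0], kernels=[DiracKernel()])
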
def plot(self, symbols=None, markersize=[4], margin=0.05, append=False):
    """Plot all tracks of the collection, cycling through symbols and marker sizes."""
    if symbols is None:
        symbols = ["r-", "g-", "b-", "c-", "m-", "y-", "k-"]
    if len(self) == 0:
        return
    symbols = Utils.listify(symbols)
    markersize = Utils.listify(markersize)
    if not append:
        (xmin, xmax, ymin, ymax) = self.bbox().asTuple()
        dx = margin * (xmax - xmin)
        dy = margin * (ymax - ymin)
        plt.xlim([xmin - dx, xmax + dx])
        plt.ylim([ymin - dy, ymax + dy])
    Ns = len(symbols)
    Ms = len(markersize)
    for i in range(len(self.__TRACES)):
        trace = self.__TRACES[i]
        X = trace.getX()
        Y = trace.getY()
        plt.plot(X, Y, symbols[i % Ns], markersize=markersize[i % Ms])

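# Hedged usage sketch (assumes `collection` is a TrackCollection and
# matplotlib.pyplot is imported as plt, as in the function above):
#
#   >>> collection.plot(symbols=["k-"], markersize=[2])
#   >>> plt.show()
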
def __writeCollectionToKml(tracks, path, c1=[1, 1, 1, 1]):
    """Write every track of a collection as a KML LineString to path, with line color c1."""
    clampToGround = True
    f = open(path, "w")
    default_color = c1
    f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    f.write('<kml xmlns="http://earth.google.com/kml/2.1">\n')
    f.write("  <Document>\n")
    print("KML writing...")
    for j in progressbar.progressbar(range(tracks.size())):
        track = tracks[j]
        f.write("    <Placemark>\n")
        f.write("      <name>" + str(track.tid) + "</name>\n")
        f.write("      <Style>\n")
        f.write("        <LineStyle>\n")
        f.write("          <color>" + utils.rgbToHex(default_color)[2:] + "</color>\n")
        f.write("        </LineStyle>\n")
        f.write("      </Style>\n")
        f.write("      <LineString>\n")
        f.write("        <coordinates>\n")
        for i in range(track.size()):
            f.write("          ")
            f.write("{:15.12f}".format(track.getObs(i).position.getX()) + ",")
            f.write("{:15.12f}".format(track.getObs(i).position.getY()))
            if not clampToGround:
                f.write("," + "{:15.12f}".format(track.getObs(i).position.getZ()))
            f.write("\n")
        f.write("        </coordinates>\n")
        f.write("      </LineString>\n")
        f.write("    </Placemark>\n")
    f.write("  </Document>\n")
    f.write("</kml>\n")
    f.close()
    print("KML written in file [" + path + "]")

def plot(self, af_algo, aggregate, valmax=None, startpixel=0):
    """Display the raster band built for (af_algo, aggregate) as a color-mapped image."""
    if af_algo != "uid":
        cle = af_algo.__name__ + "#" + aggregate.__name__
    else:
        cle = "uid" + "#" + aggregate.__name__
    tab = np.array(self.bands[cle])
    cmap = utils.getOffsetColorMap(self.color1, self.color2, 0)
    plt.imshow(tab, cmap=cmap)
    plt.title(cle)
    plt.colorbar()
    plt.show()

def __getObs(self, track, obs, k, mode):
    """Internal function to get all observations at epoch k in a track, from a
    list of analytical feature names (obs), and a mode specifying whether the
    retrieved values must be converted to positions."""
    y = track.getObsAnalyticalFeatures(obs, k)
    if mode in [MODE_OBS_AS_2D_POSITIONS, MODE_OBS_AND_STATES_AS_2D_POSITIONS]:
        if len(y) < 2:
            print("Error: wrong number of observations in HMM to form 2D position")
            exit()
        ytemp = [utils.makeCoords(y[0], y[1], 0.0, track.getSRID())]
        for remain in range(2, len(y)):
            ytemp.append(y[remain])
        y = ytemp
    if mode in [MODE_OBS_AS_3D_POSITIONS, MODE_OBS_AND_STATES_AS_3D_POSITIONS]:
        if len(y) < 3:
            print("Error: wrong number of observations in HMM to form 3D position")
            exit()
        ytemp = [utils.makeCoords(y[0], y[1], y[2], track.getSRID())]
        for remain in range(3, len(y)):
            ytemp.append(y[remain])
        y = ytemp
    return utils.unlistify(y)

def readFromGpx(path, srid="GEO"):
    """Reads (multiple) tracks in .gpx file"""
    tracks = TrackCollection()
    format_old = GPSTime.getReadFormat()
    GPSTime.setReadFormat("4Y-2M-2D 2h:2m:2s")
    doc = minidom.parse(path)
    trks = doc.getElementsByTagName("trk")
    for trk in trks:
        trace = t.Track()
        trkpts = trk.getElementsByTagName("trkpt")
        for trkpt in trkpts:
            lon = float(trkpt.attributes["lon"].value)
            lat = float(trkpt.attributes["lat"].value)
            hgt = utils.NAN
            eles = trkpt.getElementsByTagName("ele")
            if eles.length > 0:
                hgt = float(eles[0].firstChild.data)
            times = trkpt.getElementsByTagName("time")
            if times.length > 0:
                time = GPSTime(times[0].firstChild.data)
            else:
                time = GPSTime()
            point = Obs(utils.makeCoords(lon, lat, hgt, srid), time)
            trace.addObs(point)
        tracks.addTrack(trace)
    # Restore the read format to what it was before reading
    GPSTime.setReadFormat(format_old)
    collection = TrackCollection(tracks)
    return collection

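# Hedged usage sketch (the file path is illustrative, not part of the library):
#
#   >>> collection = readFromGpx("data/trace.gpx", srid="GEO")
#   >>> collection.size()   # number of <trk> elements found in the file
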
def segmentation(track, afs_input, af_output, thresholds_max,
                 mode_comparaison=MODE_COMPARAISON_AND):
    """Divides a track into multiple segments according to analytical feature values.
    Creates an AF set to 1 at each observation where the track is split, 0 otherwise."""
    # Handle the single-AF case
    if not isinstance(afs_input, list):
        afs_input = [afs_input]
    if not isinstance(thresholds_max, list):
        thresholds_max = [thresholds_max]
    track.createAnalyticalFeature(af_output)
    for i in range(track.size()):
        # Accumulate the comparisons over every input AF
        comp = True
        for index, af_input in enumerate(afs_input):
            current_value = track.getObsAnalyticalFeature(af_input, i)
            # Compare only when the value is defined
            if not utils.isnan(current_value):
                seuil_max = sys.float_info.max
                if thresholds_max is not None and len(thresholds_max) > index:
                    seuil_max = thresholds_max[index]
                if mode_comparaison == MODE_COMPARAISON_AND:
                    comp = comp and (current_value <= seuil_max)
                else:
                    comp = comp or (current_value <= seuil_max)
        # Close the interval: mark the split point with 1
        if not comp:
            track.setObsAnalyticalFeature(af_output, i, 1)
        else:
            track.setObsAnalyticalFeature(af_output, i, 0)

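# Hedged usage sketch: split where speed exceeds 5 units (assumes a "speed"
# AF already exists on the track; the output AF name "split" is illustrative):
#
#   >>> segmentation(track, "speed", "split", 5)
#   >>> track.getObsAnalyticalFeature("split", 10)   # 1 at a split point, else 0
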
def co_dominant(tarray):
    """Most frequent value in tarray (the mode)."""
    tarray = utils.listify(tarray)
    if len(tarray) <= 0:
        return utils.NAN
    # Dictionary: value -> occurrence count
    cles_count_dictionnary = {}
    # Fill the dictionary
    for val in tarray:
        if val not in cles_count_dictionnary:
            cles_count_dictionnary[val] = 1
        else:
            cles_count_dictionnary[val] += 1
    # Find the most frequent value, i.e. the one with the highest count
    nbocc = 0
    dominant_value = ""
    for val in cles_count_dictionnary:
        if cles_count_dictionnary[val] > nbocc:
            nbocc = cles_count_dictionnary[val]
            dominant_value = val
    return dominant_value

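# Hedged sketch: co_dominant returns the most frequent value; on ties, the
# first value to reach the winning count keeps the lead (strict > above):
#
#   >>> co_dominant(["a", "b", "a", "c"])
#   'a'
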
def __getObs(self, track, obs, k, mode):
    """Get the observation values at epoch k in a track, from a list of
    analytical feature names (obs)."""
    return utils.unlistify(track.getObsAnalyticalFeatures(obs, k))

def writeToKml(track, path, type: Literal["LINE", "POINT"] = "LINE", af=None,
               c1=[0, 0, 1, 1], c2=[1, 0, 0, 1], name=False):
    """Transforms track/track collection/network into KML string

    :param path: file to write kml (kml returned in standard output if empty)
    :param type: "POINT" or "LINE"
    :param name: True -> label with point number (in GPS sequence)
        Str -> label with AF name (no name if AF value is empty or ".")
    :param af: AF used for coloring in POINT mode
    :param c1: color for min value (default blue) in POINT mode or color in "LINE" mode
    :param c2: color for max value (default red) in POINT mode
    """
    # Track collection case
    if isinstance(track, TrackCollection):
        return KmlWriter.__writeCollectionToKml(track, path, c1)
    # Network case
    if isinstance(track, Network):
        return KmlWriter.__writeCollectionToKml(track.getAllEdgeGeoms(), path, c1)

    f = open(path, "w")

    # Clamp to ground unless at least one observation has a non-zero altitude
    clampToGround = True
    for obs in track:
        if obs.position.getZ() != 0:
            clampToGround = False
            break

    if af is not None:
        vmin = track.operate(Operator.Operator.MIN, af)
        vmax = track.operate(Operator.Operator.MAX, af)

    default_color = c1

    if type not in ["LINE", "POINT"]:
        print("Error in KmlWriter: type '" + type + "' unknown")
        exit()

    if type == "LINE":
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write('<kml xmlns="http://earth.google.com/kml/2.1">\n')
        f.write("  <Document>\n")
        f.write("    <Placemark>\n")
        f.write("      <name>Rover Track</name>\n")
        f.write("      <Style>\n")
        f.write("        <LineStyle>\n")
        f.write("          <color>" + utils.rgbToHex(default_color)[2:] + "</color>\n")
        f.write("        </LineStyle>\n")
        f.write("      </Style>\n")
        f.write("      <LineString>\n")
        f.write("        <coordinates>\n")
        for i in range(track.size()):
            f.write("          ")
            f.write("{:15.12f}".format(track.getObs(i).position.getX()) + ",")
            f.write("{:15.12f}".format(track.getObs(i).position.getY()))
            if not clampToGround:
                f.write("," + "{:15.12f}".format(track.getObs(i).position.getZ()))
            f.write("\n")
        f.write("        </coordinates>\n")
        f.write("      </LineString>\n")
        f.write("    </Placemark>\n")
        f.write("  </Document>\n")
        f.write("</kml>\n")

    if type == "POINT":
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write('<kml xmlns="http://earth.google.com/kml/2.1">\n')
        f.write("  <Document>\n")
        for i in range(track.size()):
            f.write("    <Placemark>")
            if name:
                if isinstance(name, str):
                    naf = str(track.getObsAnalyticalFeature(name, i)).strip()
                    if naf not in ["", "."]:
                        f.write("      <name>" + naf + "</name>")
                else:
                    f.write("      <name>" + str(i) + "</name>")
            f.write("      <Style>")
            f.write("        <IconStyle>")
            if af is not None:
                v = track.getObsAnalyticalFeature(af, i)
                default_color = utils.interpColors(v, vmin, vmax, c1, c2)
            f.write("          <color>" + utils.rgbToHex(default_color)[2:] + "</color>")
            f.write("          <scale>0.3</scale>")
            f.write("          <Icon><href>http://maps.google.com/mapfiles/kml/pal2/icon18.png</href></Icon>")
            f.write("        </IconStyle>")
            f.write("      </Style>")
            f.write("      <Point>")
            f.write("        <coordinates>")
            f.write("          ")
            f.write("{:15.12f}".format(track.getObs(i).position.getX()) + ",")
            f.write("{:15.12f}".format(track.getObs(i).position.getY()) + ",")
            f.write("{:15.12f}".format(track.getObs(i).position.getZ()))
            f.write("        </coordinates>")
            f.write("      </Point>")
            f.write("    </Placemark>\n")
        f.write("  </Document>\n")
        f.write("</kml>\n")

    f.close()
    print("KML written in file [" + path + "]")

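# Hedged usage sketch (file names are illustrative; "speed" assumes such an
# AF exists on the track):
#
#   >>> writeToKml(track, "out_line.kml")                        # plain line
#   >>> writeToKml(track, "out_pts.kml", type="POINT",
#   ...            af="speed", name=True)                        # points colored by AF
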
def __init__(self, constraints, combination=COMBINATION_AND):
    """Combination of constraints, evaluated with AND/OR logic."""
    self.constraints = utils.listify(constraints)
    self.combination = combination

def plot(self, sym=["r-", "g-", "b-"]):
    """Plot every constraint, cycling through the symbol list."""
    sym = utils.listify(sym)
    for i in range(len(self)):
        self.constraints[i].plot(sym[i % len(sym)])

def __init__(self, grid, af_algos, aggregates, verbose=True):
    """Summarize analytical features of a track collection into raster bands.

    Example:
        af_algos = [algo.speed, algo.speed]
        aggregates = [celloperator.co_avg, celloperator.co_max]
    """
    if not isinstance(af_algos, list):
        af_algos = [af_algos]
    if not isinstance(aggregates, list):
        aggregates = [aggregates]

    if len(af_algos) == 0:
        print("Error: af_algos is empty")
        return
    if len(af_algos) != len(aggregates):
        print("Error: af_names and aggregates must have the same number of elements")
        return

    self.color1 = (0, 0, 0)
    self.color2 = (255, 255, 255)

    # ---------------------------------------------------------------------
    # Name tables
    self.af_names = []         # Used to store each AF name only once
    self.summarizeFields = {}  # Key: algo_name#aggregate_name
    for idx, af_algo in enumerate(af_algos):
        aggregate = aggregates[idx]
        if isinstance(af_algo, str):
            name = af_algo
        else:
            name = af_algo.__name__
        cle = name + "#" + aggregate.__name__
        if name not in self.af_names:
            self.af_names.append(name)
        if cle not in self.summarizeFields.keys():
            self.summarizeFields[cle] = aggregate

    # ---------------------------------------------------------------------
    # Build empty cells
    for i in range(grid.ncol):
        for j in range(grid.nrow):
            grid.grid[i][j] = {}
            for name in self.af_names:
                grid.grid[i][j][name] = []

    self.bands = {}
    for cle in self.summarizeFields:
        self.bands[cle] = []
        for i in range(grid.nrow):
            self.bands[cle].append([])
            for j in range(grid.ncol):
                self.bands[cle][i].append(Raster.NO_DATA_VALUE)

    # ---------------------------------------------------------------------
    # Dispatch the AF values into the cells.
    # Each AF is computed first if it does not exist yet.
    for af_algo in af_algos:
        if not isinstance(af_algo, str):
            af_name = af_algo.__name__
        else:
            af_name = af_algo
        for trace in grid.collection.getTracks():
            if not isinstance(af_algo, str):
                # Compute the AF if it has not been computed yet
                trace.addAnalyticalFeature(af_algo)
            # Scatter the values into the cells
            for i in range(trace.size()):
                obs = trace.getObs(i)
                (idx, idy) = grid.getCell(obs.position)
                column = math.floor(idx)
                line = math.floor(idy)
                if 0 <= column < grid.ncol and 0 <= line < grid.nrow:
                    if not isinstance(af_algo, str):
                        val = trace.getObsAnalyticalFeature(af_name, i)
                    elif af_algo != "uid":
                        val = trace.getObsAnalyticalFeature(af_algo, i)
                    else:
                        val = trace.uid
                    grid.grid[column][line][af_name].append(val)

    # Compute the aggregates (look up the aggregate registered for each key,
    # rather than reusing the loop variable left over from the loop above)
    for cle in self.summarizeFields.keys():
        aggregate = self.summarizeFields[cle]
        for i in range(grid.nrow):
            for j in range(grid.ncol):
                ii = grid.nrow - 1 - i
                tabnames = cle.split("#")
                tarray = grid.grid[j][i][tabnames[0]]
                sumval = aggregate(tarray)
                if utils.isnan(sumval):
                    self.bands[cle][ii][j] = 0
                else:
                    self.bands[cle][ii][j] = sumval

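# Hedged usage sketch following the docstring example (the class name Raster
# is inferred from the Raster.NO_DATA_VALUE reference above; module aliases
# `algo` and `celloperator` come from the docstring and are assumptions):
#
#   >>> raster = Raster(grid, [algo.speed, algo.speed],
#   ...                 [celloperator.co_avg, celloperator.co_max])
#   >>> raster.plot(algo.speed, celloperator.co_avg)   # show the avg-speed band
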
def intersection(track1, track2, withTime=-1):
    """Intersection points of two tracks, with timestamps interpolated on both tracks."""
    if not (track1.getSRID() == track2.getSRID()):
        print("Error: tracks must have same SRID to compute intersections")
        exit()
    I = Track()
    TMP_I = []
    TMP_J = []
    TMP_TPS2 = []
    for i in range(len(track1) - 1):
        x11 = track1[i].position.getX()
        y11 = track1[i].position.getY()
        x12 = track1[i + 1].position.getX()
        y12 = track1[i + 1].position.getY()
        seg1 = [x11, y11, x12, y12]
        for j in range(len(track2) - 1):
            x21 = track2[j].position.getX()
            y21 = track2[j].position.getY()
            x22 = track2[j + 1].position.getX()
            y22 = track2[j + 1].position.getY()
            seg2 = [x21, y21, x22, y22]
            if isSegmentIntersects(seg1, seg2):
                # Solve the 2x2 system formed by the two cartesian line equations
                P1 = cartesienne(seg1)
                P2 = cartesienne(seg2)
                A = np.zeros((2, 2))
                B = np.zeros((2, 1))
                A[0, 0] = P1[0]
                A[0, 1] = P1[1]
                B[0, 0] = -P1[2]
                A[1, 0] = P2[0]
                A[1, 1] = P2[1]
                B[1, 0] = -P2[2]
                X = np.linalg.solve(A, B)
                x = X[0, 0]
                y = X[1, 0]
                p = Utils.makeCoords(x, y, 0, track1.getSRID())

                # Linear interpolation on track 1 (t2 must come from vertex i+1)
                w1 = p.distance2DTo(track1[i].position)
                w2 = p.distance2DTo(track1[i + 1].position)
                p.setZ((w1 * track1[i + 1].position.getZ() + w2 * track1[i].position.getZ()) / (w1 + w2))
                t1 = track1[i].timestamp.toAbsTime()
                t2 = track1[i + 1].timestamp.toAbsTime()
                ta = (w1 * t2 + w2 * t1) / (w1 + w2)

                # Linear interpolation on track 2 (indexed by j, not i)
                w1 = p.distance2DTo(track2[j].position)
                w2 = p.distance2DTo(track2[j + 1].position)
                t1 = track2[j].timestamp.toAbsTime()
                t2 = track2[j + 1].timestamp.toAbsTime()
                tb = (w1 * t2 + w2 * t1) / (w1 + w2)

                # Add intersection
                if (withTime == -1) or (abs(tb - ta) < withTime):
                    I.addObs(Obs(p, GPSTime.readUnixTime(ta)))
                    TMP_TPS2.append(GPSTime.readUnixTime(tb))
                    TMP_I.append(i)
                    TMP_J.append(j)
    if I.size() > 0:
        I.createAnalyticalFeature("timestamp2", TMP_TPS2)
        I.createAnalyticalFeature("id1", TMP_I)
        I.createAnalyticalFeature("id2", TMP_J)
    return I

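# Hedged sketch: intersections of two tracks, optionally restricted to
# near-simultaneous crossings (withTime is in the units of toAbsTime(),
# presumably seconds):
#
#   >>> I = intersection(track1, track2)               # purely geometric
#   >>> I = intersection(track1, track2, withTime=60)  # crossings within 60 s
#   >>> I.getObsAnalyticalFeature("id1", 0)            # segment index on track1
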
def noise(track, sigma=[1], kernel=[Kernel.DiracKernel()],
          distribution=DISTRIBUTION_NORMAL, mode='linear', force=False):
    """Track noising with Cholesky factorization of the gaussian process covariance matrix:

    .. math:: h(x_2-x_1) = \\exp\\left(-\\left(\\frac{x_2-x_1}{scope}\\right)^2\\right)

    If :math:`X` is a gaussian white noise, :math:`Cov(LX) = LL^T`, so if :math:`L`
    is a Cholesky factorization of a semi-positive-definite matrix :math:`S`, then
    :math:`Cov(LX) = LL^T = S` and :math:`Y = LX` has :math:`S` as covariance matrix.

    :param track: the track to be noised (input track is not modified)
    :param sigma: noise amplitude(s) (in observation coordinate units)
    :param kernel: noise autocovariance function(s)
    :param mode: 'linear' (default), 'circular' or 'euclidian'
    :param force: force definite-positive matrix with removal of negative eigen values
    """
    sigma = utils.listify(sigma)
    kernel = utils.listify(kernel)
    if len(sigma) != len(kernel):
        sys.exit("Error: amplitude and kernel arrays must have same size in 'noise' function")

    N = track.size()
    track.compute_abscurv()
    noised_track = track.copy()

    for n in range(len(sigma)):
        # Covariance matrix of the gaussian process, regularized and scaled to sigma^2
        SIGMA_S = utils.makeCovarianceMatrixFromKernel(kernel[n], track, force=force, mode=mode)
        SIGMA_S += np.identity(N) * 1e-12
        SIGMA_S *= sigma[n] ** 2 / SIGMA_S[0, 0]

        # Cholesky decomposition
        L = np.linalg.cholesky(SIGMA_S)

        # Noise simulation
        if distribution == DISTRIBUTION_NORMAL:
            Xx = np.random.normal(0.0, 1.0, N)
            Xy = np.random.normal(0.0, 1.0, N)
            Xz = np.random.normal(0.0, 1.0, N)
        if distribution == DISTRIBUTION_UNIFORM:
            Xx = np.random.uniform(-1.73205, 1.73205, N)
            Xy = np.random.uniform(-1.73205, 1.73205, N)
            Xz = np.random.uniform(-1.73205, 1.73205, N)
        if distribution == DISTRIBUTION_LAPLACE:
            Xx = np.random.laplace(0.0, 0.5, N)
            Xy = np.random.laplace(0.0, 0.5, N)
            Xz = np.random.laplace(0.0, 0.5, N)
        Yx = np.matmul(L, Xx)
        Yy = np.matmul(L, Xy)
        Yz = np.matmul(L, Xz)

        # Building noised track
        for i in range(N):
            pt = noised_track.getObs(i).position
            pt.setX(pt.getX() + Yx[i])
            pt.setY(pt.getY() + Yy[i])
            pt.setZ(pt.getZ() + Yz[i])

    if mode == 'circular':
        noised_track.loop()
    return noised_track

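# Hedged usage sketch (DiracKernel is the only kernel named in this module;
# sigma is in coordinate units):
#
#   >>> noisy = noise(track, sigma=[2.0], kernel=[Kernel.DiracKernel()])
#   >>> noisy = noise(track, sigma=[1.0], distribution=DISTRIBUTION_LAPLACE)
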
def __init__(self, selectors, combination=COMBINATION_AND):
    """Combination of selectors, evaluated with AND/OR logic."""
    self.selectors = utils.listify(selectors)
    self.combination = combination