Example #1
File: cpo.py  Project: TDRCH/plumetracker
    def update_centroid_speed(self):
        """
        Updates the centroid speed of the plume and its track. This must be
        run after updating the duration.
        :return:
        """

        # Only get a centroid speed if we have a previous centroid and we
        # haven't merged recently
        if self.duration < datetime.timedelta(hours=0.25):
            self.speed_centroid = np.nan
            self.track_speed_centroid.append(self.speed_centroid)
        #elif self.merged == True:
        #    if (self.dates_observed[-1] - self.merge_date) < \
        #           datetime.timedelta(hours=0.25):
        #        self.speed_centroid = np.nan
        #        self.track_speed_centroid.append(self.speed_centroid)

        else:
            centroid_distance = utilities.haversine(
                self.centroid_lon, self.centroid_lat,
                self.track_centroid_lon[-2], self.track_centroid_lat[-2])
            # use total_seconds(), not .seconds, which drops whole days
            secs_since_previous = (self.dates_observed[-1] -
                                   self.dates_observed[-2]).total_seconds()

            if secs_since_previous == 0:
                print('WARNING: Found a timestep value of zero seconds at',
                      self.dates_observed[-1])
                self.track_speed_centroid.append(np.nan)
            else:
                self.speed_centroid = (centroid_distance * 1000 /
                                       secs_since_previous)
                self.track_speed_centroid.append(self.speed_centroid)
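Most of these snippets lean on a project-local utilities.haversine (or ut.haversine) that is never shown, and the call sites disagree on argument order: plumetracker passes (lon, lat, lon, lat), the TDT4305 mappers pass (lat, lon, lat, lon), and the transit examples pass coordinate tuples, so each project presumably defines its own. A minimal sketch of the lon/lat-first scalar variant, assumed to return kilometres (Example #1 multiplies by 1000 to get m/s); this is a stand-in, not the project's actual code:

from math import asin, cos, radians, sin, sqrt

def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in km between two points given in degrees.
    Hypothetical stand-in for utilities.haversine."""
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    a = (sin((lat2 - lat1) / 2) ** 2
         + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2)
    return 2 * 6371 * asin(sqrt(a))  # mean Earth radius ~6371 km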
Example #2
File: cpo.py  Project: TDRCH/plumetracker
    def check_conv_distance(self, lats, lons, clouds):
        """
        Checks how far the plume is from a deep convection cloud.
        This should be done at the beginning of the plume life cycle to
        assess its association with deep convection.
        :param lats: 2D array of latitudes for the cloud (BT) field
        :param lons: 2D array of longitudes for the cloud (BT) field
        :param clouds: brightness-temperature field used to find cloud edges
        :return:
        """

        # Get clouds from the BT field rather than the cloud mask - then we
        # can also use the BT field to get the 'pinkness' value of the plume

        edge_cloud_bool = feature.canny(clouds, sigma=0.2)
        cloud_lats = lats[edge_cloud_bool == 1]
        cloud_lons = lons[edge_cloud_bool == 1]
        centroid_lat = self.centroid_lat
        centroid_lon = self.centroid_lon
        cloud_latlons_array = np.dstack((cloud_lats, cloud_lons))[0]

        distance, index = spatial.cKDTree(cloud_latlons_array).query(
            [centroid_lat, centroid_lon])

        nearest_coord = cloud_latlons_array[index]
        self.conv_distance = utilities.haversine(centroid_lon, centroid_lat,
                                                 nearest_coord[1],
                                                 nearest_coord[0])
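A caveat worth noting (mine, not the source's): the cKDTree above is built on raw (lat, lon) pairs, so the nearest neighbour is chosen by Euclidean distance in degree space, which distorts at high latitudes and across the dateline; the final haversine call corrects the reported distance but not the choice of nearest point. A sketch of querying in 3D unit-sphere coordinates instead, with hypothetical edge-pixel arrays:

import numpy as np
from scipy import spatial

def to_unit_xyz(lats_deg, lons_deg):
    """Convert degree lat/lon arrays to 3D points on the unit sphere."""
    lats, lons = np.radians(lats_deg), np.radians(lons_deg)
    return np.column_stack((np.cos(lats) * np.cos(lons),
                            np.cos(lats) * np.sin(lons),
                            np.sin(lats)))

cloud_lats = np.array([10.0, 12.5, 15.0])   # hypothetical cloud-edge pixels
cloud_lons = np.array([-5.0, -4.0, -2.5])

tree = spatial.cKDTree(to_unit_xyz(cloud_lats, cloud_lons))
chord, index = tree.query(to_unit_xyz(np.array([11.0]), np.array([-4.5]))[0])

# chord length on the unit sphere -> great-circle distance in km
arc_km = 2 * 6371 * np.arcsin(chord / 2)
print(arc_km, (cloud_lats[index], cloud_lons[index]))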
Example #3
 def closest_stops(self, max_dist=0.2):
     """
     Make a new dictionary stop_neighbors.
     key = stop_id, 
     val = pandas Series of stop distances for stops closer than max_dist
             (indexed by stopid)
     
     Takes a while to run...
     """
     self.stop_neighbors = {}
           
     for stop_id, stop_info in self.stops.iterrows():
         # calculate distance between this stop and every other stop
         dists = self.stops.apply(
             lambda x: ut.haversine((x['stop_lon'], x['stop_lat']),
                                    (stop_info['stop_lon'],
                                     stop_info['stop_lat'])),
             axis=1)
         # only keep the stops within max_dist
         dists = dists[dists <= max_dist]

         # don't connect the stop to itself
         dists = dists.drop(stop_id)

         self.stop_neighbors[stop_id] = dists
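The docstring's warning is apt: this is O(n^2) haversine calls through DataFrame.apply. If scikit-learn is available, a BallTree with the haversine metric performs the same radius search in one pass. A sketch with hypothetical stops, assuming ut.haversine works in kilometres:

import numpy as np
import pandas as pd
from sklearn.neighbors import BallTree

EARTH_RADIUS_KM = 6371.0

stops = pd.DataFrame({'stop_lat': [47.60, 47.61, 47.70],
                      'stop_lon': [-122.33, -122.33, -122.30]},
                     index=['s1', 's2', 's3'])   # hypothetical stops

coords = np.radians(stops[['stop_lat', 'stop_lon']].to_numpy())
tree = BallTree(coords, metric='haversine')      # wants (lat, lon) in radians

max_dist_km = 1.5                                # hypothetical cutoff
ind, dist = tree.query_radius(coords, r=max_dist_km / EARTH_RADIUS_KM,
                              return_distance=True)

stop_neighbors = {}
for i, stop_id in enumerate(stops.index):
    neighbors = pd.Series(dist[i] * EARTH_RADIUS_KM,
                          index=stops.index[ind[i]])
    stop_neighbors[stop_id] = neighbors.drop(stop_id)   # no self-matches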
Example #4
File: cpo.py  Project: TDRCH/plumetracker
    def update_GPE_speed(self):
        """
        Updates the greatest plume extent speed of the plume at the end of
        its lifecycle
        :return:
        """

        # Distance of each pixel in the maximum extent from the source region
        plume_distances = [
            utilities.haversine(self.maximum_extent_lons[j],
                                self.maximum_extent_lats[j],
                                self.track_centroid_lon[0],
                                self.track_centroid_lat[0])
            for j in np.arange(0, len(self.maximum_extent_lons))
        ]

        greatest_plume_distance = np.max(plume_distances)

        date_of_max_extent = self.maximum_extent_time
        # total_seconds(): a plume lifetime can exceed a day, which the
        # .seconds attribute would silently drop
        secs_to_max_extent = (date_of_max_extent -
                              self.dates_observed[0]).total_seconds()
        if secs_to_max_extent == 0:
            self.speed_gpe = np.nan
        else:
            self.speed_gpe = (greatest_plume_distance *
                              1000) / secs_to_max_extent
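These duration calculations use timedelta.total_seconds() rather than .seconds, which is only the seconds component of the interval and silently drops whole days. A two-line illustration of the difference:

from datetime import timedelta

dt = timedelta(days=1, seconds=30)
print(dt.seconds)          # 30      -- just the seconds component
print(dt.total_seconds())  # 86430.0 -- the full duration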
Example #5
 def get_closest_stop(self, lat, lon):
     """
     INPUT: latitude, longitude
     OUTPUT: stop_id of closest stop to lat&lon
     """
     dist = self.stops.apply(
         lambda x: ut.haversine((x['stop_lon'], x['stop_lat']), (lon, lat)),
         axis=1)
     # idxmin returns the index label (the stop_id), not the integer position
     return dist.idxmin()
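Series.argmin once returned the index label, but in modern pandas it returns the integer position; idxmin is what returns the stop_id the docstring promises. A quick illustration with a hypothetical distance Series:

import pandas as pd

dist = pd.Series([2.5, 0.4, 1.1], index=['s1', 's2', 's3'])
print(dist.idxmin())   # 's2' -- the index label (a stop_id here)
print(dist.argmin())   # 1    -- the integer position in modern pandas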
Example #6
File: mappers.py  Project: essoen/TDT4305
def calculate_distance(o):
    checkins = sorted(o[1], key=lambda x: datetime.strptime(str(x['local_time']), "%Y-%m-%d %H:%M:%S"))
    distance = 0.0
    for i in range(len(checkins) - 1):
        c_1 = checkins[i]
        c_2 = checkins[i + 1]
        distance += haversine(float(c_1['lat']), float(c_1['lon']),
                              float(c_2['lat']), float(c_2['lon']))

    return (o[0], o[1], distance)
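calculate_distance looks like a map function over (key, checkins) pairs. A usage sketch with hypothetical check-in data and a stand-in haversine (assumed lat/lon argument order, kilometres), run in the same module as the mapper above:

from datetime import datetime   # calculate_distance calls datetime.strptime
from math import asin, cos, radians, sin, sqrt

def haversine(lat1, lon1, lat2, lon2):
    """Stand-in for the module's haversine (assumed lat/lon order, km)."""
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    a = (sin((lat2 - lat1) / 2) ** 2
         + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2)
    return 2 * 6371 * asin(sqrt(a))

checkins = [   # hypothetical check-ins, deliberately out of time order
    {'local_time': '2012-04-03 18:00:00', 'lat': '59.91', 'lon': '10.75'},
    {'local_time': '2012-04-03 12:00:00', 'lat': '63.43', 'lon': '10.40'},
]
key, _, total_km = calculate_distance(('user_1', checkins))
print(key, round(total_km))   # roughly 390 km (Trondheim -> Oslo)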
Example #7
File: cpo.py  Project: TDRCH/plumetracker
    def update_most_likely_source(self):
        """
        Identifies the source to which the plume was nearest at the point of
        emission. Uses sources defined by AW13. Only assigns sources to
        plumes within the CWS region. Plumes whose detected origin is more
        than 50 km from every known source are not assigned a most likely
        source. Note: a better approach would be to assign box regions for
        each source.
        :return:
        """

        # Hard coded emission sources taken from AW13
        source_dict = {
            'A': (26, -6.5),
            'B': (30, -6),
            'C': (23, -5),
            'D': (24.5, -5),
            'E': (21, 1.75),
            'F': (34, 0),
            'G': (20, 0.5),
            'H': (21.5, 1),
            'I': (26.5, 1.5),
            'J': (22.5, 2),
            'K': (24, 3),
            'L': (19.5, 3),
            'M': (21, 3),
            'N': (20, 5),
            'O': (20, 7.5)
        }

        CWS_min_lat = 15
        CWS_max_lat = 35
        CWS_min_lon = -17
        CWS_max_lon = 15

        #if self.merged == True:
        #    source_lat = self.pre_merge_track_centroid_lat[0]
        #    source_lon = self.pre_merge_track_centroid_lon[0]
        #else:
        source_lat = self.track_centroid_lat[0]
        source_lon = self.track_centroid_lon[0]

        # Find the distance of the detected source to known source regions
        source_distances = np.asarray([
            utilities.haversine(source_lon, source_lat, source_dict[j][1],
                                source_dict[j][0]) for j in source_dict
        ])

        # keys in the same iteration order as the comprehension above
        dict_indices = np.asarray(list(source_dict))

        if np.min(source_distances) > 50:
            self.most_likely_source = None
        else:
            smallest_distance = source_distances == np.min(source_distances)
            self.most_likely_source = dict_indices[smallest_distance][0]
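The mask-based lookup works, but the same nearest-source logic can be written more directly with min() and a key function. A self-contained sketch with a stand-in haversine and a subset of the AW13 sources:

from math import asin, cos, radians, sin, sqrt

def haversine(lon1, lat1, lon2, lat2):
    """Stand-in helper (assumed lon/lat order, km); see Example #1."""
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    a = (sin((lat2 - lat1) / 2) ** 2
         + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2)
    return 2 * 6371 * asin(sqrt(a))

source_dict = {'A': (26, -6.5), 'B': (30, -6), 'C': (23, -5)}  # (lat, lon) subset
source_lat, source_lon = 26.2, -6.4   # hypothetical detected origin

# nearest known source by great-circle distance
name = min(source_dict,
           key=lambda k: haversine(source_lon, source_lat,
                                   source_dict[k][1], source_dict[k][0]))
dist = haversine(source_lon, source_lat,
                 source_dict[name][1], source_dict[name][0])
most_likely_source = name if dist <= 50 else None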
Example #8
File: cpo.py  Project: TDRCH/plumetracker
    def check_conv_distance_2(self, sdf_plumes, lats, lons, clouds):
        """
        Checks how far the plume is from a deep convection cloud.
        This should be done at the beginning of the plume life cycle to
        assess its association with deep convection.
        Attempt #2: I draw a buffer around the plume, iteratively increasing it
        :param self:
        :param sdf_plumes:
        :param lats:
        :param lons:
        :param clouds:
        :return:
        """

        found_convection = False
        buffered_plume = sdf_plumes == self.plume_id

        while not found_convection:
            # Grow a buffer around the plume with a 10x10 convolution
            # kernel (note: this loops indefinitely if no cloud edge
            # exists anywhere in the scene)
            print('here')
            convolution = scipy.signal.convolve2d(buffered_plume,
                                                  np.ones((10, 10)),
                                                  mode='same')
            check_grid = convolution > 0
            print('here 1')
            # Get only the edges of the clouds
            edge_cloud_bool = feature.canny(clouds, sigma=0.2)
            cloud_lats = lats[edge_cloud_bool == 1]
            cloud_lons = lons[edge_cloud_bool == 1]
            cloud_latlons_array = np.dstack((cloud_lats, cloud_lons))[0]
            print('here 2')
            # Check if any cloud lats/lons are within the buffer
            check_lats = lats[check_grid]
            check_lons = lons[check_grid]
            check_edge_cloud_bool = edge_cloud_bool[check_grid]
            check_edge_cloud_lats = check_lats[check_edge_cloud_bool]
            check_edge_cloud_lons = check_lons[check_edge_cloud_bool]
            print('here 3')
            # If cloud has been found, stop the search
            if len(check_edge_cloud_lats) > 0:
                found_convection = True
            print('here 4')
            # Otherwise we build a larger buffer
            buffered_plume = convolution

        smallest_distance = \
            np.min([utilities.haversine(self.centroid_lon,
                                        self.centroid_lat,
                                        check_edge_cloud_lons[j],
                                        check_edge_cloud_lats[j])
                    for j in np.arange(0, len(check_edge_cloud_lons))])

        print(smallest_distance)
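An alternative to the grow-and-check loop (my suggestion, not from the repo): a Euclidean distance transform yields, for every grid cell, the nearest cloud-edge pixel in a single pass; the matched pixel's lat/lon can then go through haversine for the distance in km. A sketch in pixel space:

import numpy as np
from scipy import ndimage

# hypothetical boolean cloud-edge mask on the image grid
edge_cloud_bool = np.zeros((5, 5), dtype=bool)
edge_cloud_bool[4, 4] = True

# for every cell: distance (pixels) to the nearest edge pixel, and that
# pixel's indices
dist_px, inds = ndimage.distance_transform_edt(~edge_cloud_bool,
                                               return_indices=True)
ii, jj = inds

centroid_row, centroid_col = 1, 1        # hypothetical plume centroid cell
nearest = (ii[centroid_row, centroid_col], jj[centroid_row, centroid_col])
print(dist_px[centroid_row, centroid_col], nearest)   # 4.24..., (4, 4)
# nearest indexes the lats/lons grids for the final haversine distance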
Example #9
File: mappers.py  Project: essoen/TDT4305
def find_nearest_city_and_country(o, countries):
    lat1, lon1 = float(o['lat']), float(o['lon'])

    best_dist = float('inf')
    country, city = '', ''
    for c in countries:
        dist = haversine(lat1, lon1, float(c[1]), float(c[2]))
        if dist < best_dist:
            country = c[4]
            city = c[0]
            best_dist = dist

    o['city'] = city
    o['country'] = country
    return o
Example #10
 def calc_distance(self, a, b):
     # each point is reordered to (p[1], p[0]) before the haversine call
     return haversine((float(a[1]), float(a[0])),
                      (float(b[1]), float(b[0])))
Example #11
File: cpo.py  Project: TDRCH/plumetracker
    def update_leading_edge_4(self, sdf_plumes, lons, lats):
        """
        All those previous versions were joke versions - this is the true
        version, the only true version.
        :param sdf_plumes:
        :return:
        """

        plume_bool = sdf_plumes == self.plume_id
        edge_plume_bool = feature.canny(plume_bool, sigma=0.2)

        #print np.unique(plume_bool)
        #print np.unique(edge_plume_bool)

        edge_lons = lons[edge_plume_bool]
        edge_lats = lats[edge_plume_bool]

        # No leading edge if the duration of the plume is too low
        if self.duration < datetime.timedelta(hours=0.75):
            self.leading_edge_lon = None
            self.leading_edge_lat = None
            self.track_edges_lon.append(edge_lons)
            self.track_edges_lat.append(edge_lats)

        # Same applies for duration since a merge
        #if self.merged == True:
        #    duration_since_merge = self.dates_observed[-1] - self.merge_date
        #    if duration_since_merge < datetime.timedelta(hours=0.75):
        #        self.leading_edge_lon = None
        #        self.leading_edge_lat = None
        #        self.track_edges_lon.append(edge_lons)
        #        self.track_edges_lat.append(edge_lats)

        else:
            previous_edge_lons = self.track_edges_lon[-1]
            previous_edge_lats = self.track_edges_lat[-1]

            # We now need the edge with the greatest distance from its
            # nearest neighbour

            # So for each edge pixel, calculate the distance to all previous
            # edge pixels
            edge_distances = []

            for i in np.arange(0, len(edge_lats)):
                distances = []
                for j in np.arange(0, len(previous_edge_lats)):
                    distances.append(
                        utilities.haversine(edge_lons[i], edge_lats[i],
                                            previous_edge_lons[j],
                                            previous_edge_lats[j]))
                edge_distances.append(np.min(distances))

            # Take the 5 largest distances as the leading edge
            # (np.asarray already copies the list, so no deepcopy is needed)
            sorted_distances = np.asarray(edge_distances)
            sorted_distance_indices = sorted_distances.argsort()[-5:][::-1]

            self.leading_edge_lon = [
                edge_lons[j] for j in sorted_distance_indices
            ]

            self.leading_edge_lat = [
                edge_lats[j] for j in sorted_distance_indices
            ]

            self.track_edges_lon.append(edge_lons)
            self.track_edges_lat.append(edge_lats)

            # Remove edges from the track which are older than 2 timesteps
            if len(self.track_edges_lon) > 3:
                del self.track_edges_lon[0]

            if len(self.track_edges_lat) > 3:
                del self.track_edges_lat[0]
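The nested loop above issues one haversine call per (current edge, previous edge) pair. NumPy broadcasting computes the whole N x M distance matrix at once; a sketch (mine, not the repo's):

import numpy as np

def min_dist_to_previous(edge_lons, edge_lats, prev_lons, prev_lats):
    """For each current edge pixel, the haversine distance (km) to its
    nearest previous-edge pixel, via broadcasting."""
    lon1 = np.radians(np.asarray(edge_lons))[:, None]   # shape (N, 1)
    lat1 = np.radians(np.asarray(edge_lats))[:, None]
    lon2 = np.radians(np.asarray(prev_lons))[None, :]   # shape (1, M)
    lat2 = np.radians(np.asarray(prev_lats))[None, :]
    a = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return (2 * 6371 * np.arcsin(np.sqrt(a))).min(axis=1)  # shape (N,)

edge_distances = min_dist_to_previous([0.0, 0.5], [20.0, 20.5],
                                      [0.0, 0.2], [20.0, 20.1])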
Example #12
    ]))
iexp = mat[:, 8]
lat = mat[:, 1]
lon = mat[:, 2]
depth = mat[:, 3]

MomentTensors = [
    utilities.MomentTensor(mts_6[i, :], iexp[i], lon[i], lat[i], depth[i])
    for i in range(len(mts_6))
]

max_distance = 200.
initial_point = (-150.96, 55, 10)

closest_tensor = MomentTensors[np.argmin(
    [utilities.haversine(initial_point, MT.pos) for MT in MomentTensors])]
print(closest_tensor.pos)

accepted_tensors = [closest_tensor]
rejected_tensors = []

# while len(accepted_tensors) + len(rejected_tensors) < len(MomentTensors):
for _ in range(10):
    testing_tensors = []
    for MT in MomentTensors:
        if MT in rejected_tensors or MT in accepted_tensors:
            continue
        for MT_accepted in accepted_tensors:
            if (utilities.haversine(MT_accepted.pos, MT.pos) < max_distance
                    and MT.depth < 25):
                testing_tensors.append(MT)
                break  # avoid appending MT once per accepted neighbour
Example #13
    target = watches.ix[i, "TO"]  # note: .ix is removed in modern pandas (use .loc/.iloc)

    #if the airports are in the smallest enclosing circle
    #if this route exists as an empty leg append that routes demand
    if source in supplyGraph and target in supplyGraph:

        long1 = airportCoords[source]['long']
        lat1 = airportCoords[source]['lat']
        long2 = airportCoords[target]['long']
        lat2 = airportCoords[target]['lat']

        midpointLong = ((long1 + long2) / 2.0)
        midpointLat = ((lat1 + lat2) / 2.0)

        #get the Haversine distance to the centroid of the empty leg points:
        d = util.haversine(midpointLong, midpointLat, meanLongSupply,
                           meanLatSupply)
        distances.append((source, target, {'dist': d}))

distances.sort(key=lambda tup: tup[2]['dist'])  # sorts in place; dicts themselves aren't orderable

demandGraph = nx.MultiDiGraph()
demandGraph.add_edges_from(distances)

completedTrips = 0
totCost = 0
completed = []

#---------------------------------------------------------------
#go through the edges in a sorted order and take care of the demand that matches empty legs
edges = sorted(demandGraph.edges(data=True),
               key=lambda edge: edge[2]['dist'],
               reverse=False)
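One more caveat: the midpoint above is the arithmetic mean of longitudes and latitudes, which breaks for routes crossing the antimeridian (and degrades for long routes). A sketch of a midpoint via unit-vector averaging, for comparison:

import numpy as np

def geographic_midpoint(long1, lat1, long2, lat2):
    """Midpoint via unit-vector averaging; robust across the antimeridian."""
    lons = np.radians([long1, long2])
    lats = np.radians([lat1, lat2])
    x = (np.cos(lats) * np.cos(lons)).mean()
    y = (np.cos(lats) * np.sin(lons)).mean()
    z = np.sin(lats).mean()
    return (np.degrees(np.arctan2(y, x)),               # midpoint longitude
            np.degrees(np.arctan2(z, np.hypot(x, y))))  # midpoint latitude

print(geographic_midpoint(179.5, 10.0, -179.5, 12.0))  # near lon 180, not lon 0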