Example #1
    def is_between_candidate(
            anc_a_points: tuple, anc_b_points: tuple, target_points: tuple,
            occ_thresh: float, min_forbidden_occ_ratio: float,
            target_anchor_intersect_ratio_thresh: float) -> (bool, bool):
        """
        Check whether a target object lies in the convex hull of the two anchors.
        @param anc_a_points: The vertices of the first anchor's 2d top face.
        @param anc_b_points: The vertices of the second anchor's 2d top face.
        @param target_points: The vertices of the target's 2d top face.
        @param occ_thresh: Minimum ratio of the target's area that must lie inside the convex hull of the
        two anchors (intersection area divided by the target's area) for the target to count as being
        between the two anchors.
        @param min_forbidden_occ_ratio: Lower bound of a "forbidden" range of occupancy ratios
        [min_forbidden_occ_ratio, occ_thresh). Any target occupying the convex hull with a ratio in this
        range makes the case ambiguous, so no between-references are generated for that combination of
        target and anchors.
        @param target_anchor_intersect_ratio_thresh: Maximum allowed target-to-anchor intersection ratio.
        If the target intersects either anchor with a ratio above this threshold, no between-references
        are generated for that combination.

        @return: (bool, bool) --> (target_lies_in_convex_hull_satisfying_constraints, bad_target_anchor_combination)
        """
        bad_comb = False
        forbidden_occ_range = [min_forbidden_occ_ratio, occ_thresh - 0.001]
        intersect_ratio_thresh = target_anchor_intersect_ratio_thresh

        # Get the convex hull of all points of the two anchors
        convex_hull = MultiPoint(anc_a_points + anc_b_points).convex_hull

        # Get anchor a, b polygons
        polygon_a = MultiPoint(anc_a_points).convex_hull
        polygon_b = MultiPoint(anc_b_points).convex_hull
        polygon_t = MultiPoint(target_points).convex_hull

        # Candidate should fall completely/with a certain ratio in the convex_hull polygon
        occ_ratio = convex_hull.intersection(polygon_t).area / polygon_t.area
        if occ_ratio < occ_thresh:  # The object is not in the convex-hull enough to be considered between
            if forbidden_occ_range[0] < occ_ratio < forbidden_occ_range[1]:
                # but also should not be causing any ambiguities for other candidate targets
                bad_comb = True
            return False, bad_comb

        # Candidate target should never be intersecting any of the anchors
        if polygon_t.intersection(
                polygon_a).area / polygon_t.area > intersect_ratio_thresh:
            bad_comb = True
            return False, bad_comb

        if polygon_t.intersection(
                polygon_b).area / polygon_t.area > intersect_ratio_thresh:
            bad_comb = True
            return False, bad_comb

        return True, bad_comb
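
A minimal usage sketch (hypothetical coordinates; assumes the function above and shapely's MultiPoint are in scope):

    # Two unit-square anchors with a small target square centered in the gap
    # between them; all vertex tuples are (x, y) top-face coordinates.
    anc_a = ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0))
    anc_b = ((3.0, 0.0), (4.0, 0.0), (4.0, 1.0), (3.0, 1.0))
    target = ((1.8, 0.3), (2.2, 0.3), (2.2, 0.7), (1.8, 0.7))

    is_between, bad_comb = is_between_candidate(
        anc_a, anc_b, target,
        occ_thresh=0.9, min_forbidden_occ_ratio=0.1,
        target_anchor_intersect_ratio_thresh=0.1)
    # The target lies fully inside the anchors' convex hull and touches neither
    # anchor, so this should yield (True, False).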
Example #2
    def _calculate_shading(weight: float,
                           shadows: list,
                           site_points: MultiPoint,
                           heat_map: np.ndarray,
                           gridcell_width: float,
                           gridcell_height: float,
                           normalize_by_area=False) -> None:
        """
        Update the heat_map with shading losses in POA irradiance
        :param weight: loss to apply to shaded cells
        :param shadows: list of shadow (Multi)Polygons for each blade angle
        :param site_points: points of solar panels
        :param heat_map: array with shading losses
        :param gridcell_width: width of cells in the heat map
        :param gridcell_height: height of cells in the heat map
        :param normalize_by_area: if True, normalize weight per cell by how much area is shaded
        """
        if not shadows:
            return

        module_width_half = gridcell_width / 2
        module_height_half = gridcell_height / 2
        for shadow in shadows:
            if normalize_by_area:
                intersecting_points = site_points.intersection(
                    shadow.buffer(
                        np.linalg.norm([gridcell_height, gridcell_width])))
            else:
                intersecting_points = site_points.intersection(shadow)
            if intersecting_points:
                if isinstance(intersecting_points, Point):
                    intersecting_points = (intersecting_points, )
                # break up into separate instructions for minor speed up by vectorization
                xs = np.array([pt.x for pt in intersecting_points])
                ys = np.array([pt.y for pt in intersecting_points])
                x_ind = (xs - site_points.bounds[0]) / gridcell_width
                y_ind = (ys - site_points.bounds[1]) / gridcell_height
                x_ind = np.round(x_ind).astype(int)
                y_ind = np.round(y_ind).astype(int)
                for n in range(len(intersecting_points)):
                    x = x_ind[n]
                    y = y_ind[n]
                    pt = intersecting_points[n]
                    if normalize_by_area:
                        cell = box(pt.x - module_width_half,
                                   pt.y - module_height_half,
                                   pt.x + module_width_half,
                                   pt.y + module_height_half)
                        intersection = cell.intersection(shadow)
                        area_weight = intersection.area / cell.area
                        heat_map[y, x] += weight * area_weight
                    else:
                        heat_map[y, x] += weight
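
A minimal call sketch (hypothetical 3x3 grid; treats the method above as a free function and assumes Shapely 1.x-style MultiPoint iteration, as the code above does):

    import numpy as np
    from shapely.geometry import MultiPoint, box

    # 3x3 grid of panel points; one shadow covers the leftmost column.
    site_points = MultiPoint([(x, y) for x in range(3) for y in range(3)])
    heat_map = np.zeros((3, 3))
    shadow = box(-0.5, -0.5, 0.4, 2.5)
    _calculate_shading(0.1, [shadow], site_points, heat_map,
                       gridcell_width=1.0, gridcell_height=1.0)
    # heat_map[:, 0] is now 0.1 (the shaded column); all other cells stay 0.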
Example #3
def intersect(poly_1,poly_2):
  poly_1 = np.array(poly_1)
  poly_1 = MultiPoint(poly_1).convex_hull
  poly_2 = np.array(poly_2)
  poly_2 = MultiPoint(poly_2).convex_hull
  intersect_area = poly_2.intersection(poly_1).area
  return intersect_area
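
Combined with the hulls' areas, this gives an IoU; a sketch with hypothetical boxes:

    from shapely.geometry import MultiPoint

    box_a = [(0, 0), (2, 0), (2, 2), (0, 2)]
    box_b = [(1, 1), (3, 1), (3, 3), (1, 3)]
    inter = intersect(box_a, box_b)  # 1.0, the overlapping unit square
    union = (MultiPoint(box_a).convex_hull.area
             + MultiPoint(box_b).convex_hull.area - inter)
    iou = inter / union  # 1 / 7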
Example #4
def project_camera( corners, cam_model ):
    """ -------------------------------------------------------------------------------------------------------------
    Project a 3D bounding box into camera (image) space (the origin of the camera space is top left corner).

    This function is taken from post_process_coords() in
    nuscenes-devkit/python-sdk/nuscenes/scripts/export_2d_annotations_as_json.py
    
    corners:        [np.array] the 8 corners of the 3D box
    cam_model:      [np.array] 3x3 camera intrinsic matrix, passed to view_points()

    return:         [tuple of float]
                    coordinates of the 2D box (min_x, min_y, max_x, max_y), None if the object is outside the camera frame
    ------------------------------------------------------------------------------------------------------------- """
    front       = np.argwhere( corners[ 2, :] > 0 )         # check which corners are in front of the camera
    corners     = corners[ :, front.flatten() ]             # and take those only

    corners_p   = view_points( corners, cam_model, True )   # project the 3D corners in camera space
    corners_p   = corners_p.T[ :, : 2 ]                     # take only X and Y in camera space

    poly        = MultiPoint( corners_p.tolist() ).convex_hull
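    # 'img_size' is assumed to be a module-level (width, height) tuple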
    img         = box( 0, 0, img_size[ 0 ], img_size[ 1 ] )
    if not poly.intersects( img ):
        return None                                         # return None if the projection is out of the camera frame

    inters  = poly.intersection( img )
    coords  = np.array( [ c for c in inters.exterior.coords ] )
    min_x   = min( coords[ :, 0 ] )
    min_y   = min( coords[ :, 1 ] )
    max_x   = max( coords[ :, 0 ] )
    max_y   = max( coords[ :, 1 ] )

    return min_x, min_y, max_x, max_y
Example #5
 def _calculate_shading(weight: float, shadows: list,
                        site_points: MultiPoint,
                        heat_map: np.ndarray) -> None:
     """
     Update the heat_map with shading losses in POA irradiance
     :param weight: loss to apply to shaded cells
     :param shadows: list of shadow (Multi)Polygons for each blade angle
     :param site_points: points of solar panels
     :param heat_map: array with shading losses
     """
     if not shadows:
         return
     for shadow in shadows:
         intersecting_points = site_points.intersection(shadow)
         if intersecting_points:
             if isinstance(intersecting_points, Point):
                 intersecting_points = (intersecting_points, )
             # break up into separate instructions for minor speed up by vectorization
             xs = np.array([pt.x for pt in intersecting_points])
             ys = np.array([pt.y for pt in intersecting_points])
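             # module_width / module_height are assumed module-level cell dimensions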
             x_ind = (xs - site_points.bounds[0]) / module_width
             y_ind = (ys - site_points.bounds[1]) / module_height
             x_ind = np.round(x_ind).astype(int)
             y_ind = np.round(y_ind).astype(int)
             for x, y in zip(x_ind, y_ind):
                 heat_map[y, x] += weight
Example #6
def sample_geoseries(geoseries: gpd.GeoSeries,
                     count: int,
                     overestimate: float = 2) -> gpd.GeoSeries:
    """Creates random geographic point samples inside a polygon/multipolygon

    Args:
        geoseries: geometry dataset (e.g. `gdf['geometry']`) with polygons/multipolygons
        count: number of samples to generate
        overestimate: scalar for generating extra samples, compensating
            for points that fall outside the polygon but inside its bounds

    Returns:
        points: Point geometry geoseries
    """
    if type(geoseries) is gpd.GeoDataFrame:
        geoseries = geoseries.geometry

    polygon = geoseries.unary_union
    xmin, ymin, xmax, ymax = polygon.bounds
    ratio = polygon.area / polygon.envelope.area

    samples = np.random.uniform((xmin, ymin), (xmax, ymax),
                                (int(count / ratio * overestimate), 2))
    multipoint = MultiPoint(samples)
    multipoint = multipoint.intersection(polygon)
    sample_array = np.zeros((len(multipoint.geoms), 2))
    for idx, point in enumerate(multipoint.geoms):
        sample_array[idx] = (point.x, point.y)

    xy = sample_array[np.random.choice(len(sample_array), count)]
    points = xy_to_geoseries(xy[:, 0], xy[:, 1], crs=geoseries.crs)

    return points
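
A minimal usage sketch (hypothetical geometry; assumes geopandas and the xy_to_geoseries helper used above are available):

    import geopandas as gpd
    from shapely.geometry import Polygon

    # Triangle covering half of the unit square: roughly half of the uniform
    # samples over its bounds land inside, which `overestimate` compensates for.
    tri = gpd.GeoSeries([Polygon([(0, 0), (1, 0), (1, 1)])], crs="epsg:4326")
    points = sample_geoseries(tri, count=100, overestimate=2)
    assert len(points) == 100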
Example #7
def post_process_coords(
    corner_coords: List, imsize: Tuple[int, int] = (1600, 900)
) -> Union[Tuple[float, float, float, float], None]:
    """
    Get the intersection of the convex hull of the reprojected bbox corners and the image canvas, return None if no
    intersection.
    :param corner_coords: Corner coordinates of reprojected bounding box.
    :param imsize: Size of the image canvas.
    :return: Intersection of the convex hull of the 2D box corners and the image canvas.
    """
    polygon_from_2d_box = MultiPoint(corner_coords).convex_hull
    img_canvas = box(0, 0, imsize[0], imsize[1])

    if polygon_from_2d_box.intersects(img_canvas):
        img_intersection = polygon_from_2d_box.intersection(img_canvas)
        intersection_coords = np.array(
            [coord for coord in img_intersection.exterior.coords])

        min_x = min(intersection_coords[:, 0])
        min_y = min(intersection_coords[:, 1])
        max_x = max(intersection_coords[:, 0])
        max_y = max(intersection_coords[:, 1])

        return min_x, min_y, max_x, max_y
    else:
        return None
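
For example (hypothetical corner coordinates, with part of the hull off the left image edge):

    corner_coords = [(-50.0, 100.0), (200.0, 80.0), (210.0, 400.0), (-40.0, 420.0)]
    bbox = post_process_coords(corner_coords)
    # The convex hull is clipped to the canvas, so min_x comes back as 0.0.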
Example #8
def CalculateSafeArea(points, f):
    k = len(points) - f
    area = MultiPoint(points).convex_hull
    for subset in combinations(points, k):
        #print subset
        subarea = MultiPoint(subset).convex_hull
        area = area.intersection(subarea)
    return area
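
The result is the intersection of the convex hulls of all (n - f)-point subsets, i.e. a region guaranteed to lie inside the convex hull of the correct points even if up to f of the inputs are faulty. A usage sketch with hypothetical positions:

    import math
    from itertools import combinations
    from shapely.geometry import MultiPoint

    # Five sensors on a unit circle (regular pentagon), tolerating f = 1 fault.
    points = [(math.cos(2 * math.pi * k / 5), math.sin(2 * math.pi * k / 5))
              for k in range(5)]
    safe = CalculateSafeArea(points, f=1)
    # 'safe' is the inner pentagon cut out by the pentagram chords; safe.area > 0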
Example #9
def create_map(n=5e-6):
    ocean = gpd.read_file(
        'C://Users//potis//Desktop//b//ne_10m_ocean_scale_rank.shp')
    ocean = ocean.to_crs('ESRI:54030')
    result = []
    for i in range(len(ocean)):
        xmin, ymin, xmax, ymax = ocean.bounds.loc[i].T.values
        x = np.arange(np.floor(xmin * n) / n, np.ceil(xmax * n) / n, 1 / n)
        y = np.arange(np.floor(ymin * n) / n, np.ceil(ymax * n) / n, 1 / n)

        grid = np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))])
        points = MultiPoint(grid)

        if ocean.geometry[i].is_valid:
            result.append(points.intersection(ocean.geometry.loc[i]))
        else:
            result.append(points.intersection(ocean.geometry.loc[i].buffer(0)))
    # flatten the per-polygon intersections into a single list of points
    results = [pt for geom in result for pt in geom]
    return gpd.GeoDataFrame(results, columns=['geometry'], crs=ocean.crs)
Example #10
    def _anno_to_2d_bbox(self, anno, pc_file, cam_front, lidar_top,
                         ego_pose_cam, ego_pose_lidar, cam_intrinsic):
        # Make pixel indexes 0-based
        dists = []
        nusc_box = self.nusc.get_box(anno['token'])

        # Move them to the ego-pose frame.
        nusc_box.translate(-np.array(ego_pose_cam['translation']))
        nusc_box.rotate(Quaternion(ego_pose_cam['rotation']).inverse)

        # Move them to the calibrated sensor frame.
        nusc_box.translate(-np.array(cam_front['translation']))
        nusc_box.rotate(Quaternion(cam_front['rotation']).inverse)

        dists.append(np.linalg.norm(nusc_box.center))
        # Filter out the corners that are not in front of the calibrated sensor.
        #Corners is a 3x8 matrix, first four corners are the ones facing forward, last 4 are ones facing backward
        #(0,1) top, forward
        #(2,3) bottom, forward
        #(4,5) top, backward
        #(6,7) bottom, backward
        corners_3d = nusc_box.corners()
        #Getting first 4 values of Z
        dists.append(np.mean(corners_3d[2, :4]))
        # z is height of object for ego pose or lidar
        # y is height of object for camera frame
        #TODO: Discover why this is taking the Z axis
        in_front = np.argwhere(corners_3d[2, :] > 0).flatten()
        corners_3d = corners_3d[:, in_front]
        #print(corners_3d)
        #above    = np.argwhere(corners_3d[2, :] > 0).flatten()
        #corners_3d = corners_3d[:, above]
        # Project 3d box to 2d.
        corner_coords = view_points(corners_3d, cam_intrinsic,
                                    True).T[:, :2].tolist()
        #print(corner_coords)
        # Keep only corners that fall within the image.

        polygon_from_2d_box = MultiPoint(corner_coords).convex_hull
        img_canvas = box(0, 0, self._imwidth - 1, self._imheight - 1)

        if polygon_from_2d_box.intersects(img_canvas):
            img_intersection = polygon_from_2d_box.intersection(img_canvas)
            intersection_coords = np.array(
                [coord for coord in img_intersection.exterior.coords])

            min_x = min(intersection_coords[:, 0])
            min_y = min(intersection_coords[:, 1])
            max_x = max(intersection_coords[:, 0])
            max_y = max(intersection_coords[:, 1])
            #print('contained pts {}'.format(contained_points))
            return [min_x, min_y, max_x, max_y], dists
        else:
            return None, dists
Example #11
def _sample_points_(cnt,
                    n_points=None,
                    spacing=None,
                    mode='uniform_random',
                    random_seed=None):
    '''sample point within an oblong contour with shapely
    [deprecated]
    '''
    if (n_points is None) and (spacing is None):
        raise ValueError('either `spacing` or `n_points` must be specified')

    if isinstance(cnt, Polygon):
        pg = cnt
    else:
        pg = Polygon(cnt)

    center, sz, angle_ = cv2.minAreaRect(np.asarray(pg.boundary, dtype=int))
    w, h = sz

    if n_points is None:
        n_points = h * w / spacing**2

    pg_rot = rotate(pg, -angle_)
    x0, y0, x1, y1 = pg_rot.bounds

    # calculate area ratio and increment number of sampled points accordingly
    area_ratio = pg_rot.area / Polygon([(x0, y0), (x0, y1), (x1, y1),
                                        (x1, y0)]).area
    n_points = int(np.ceil(n_points / area_ratio))

    if mode == 'grid':
        points_rot = sample_grid(w,
                                 h,
                                 n_points=n_points,
                                 spacing=spacing,
                                 angle=angle_)
        points_rot = points_rot + np.r_[x0, y0]
    elif mode == 'rotated_grid':
        points_rot = sample_grid(w, h, n_points=n_points, spacing=spacing)
        points_rot = points_rot + np.r_[x0, y0]
    elif mode == 'uniform_random':
        np.random.seed(random_seed)
        points_rot = np.random.rand(
            n_points,
            2,
        ) * np.r_[w, h] + np.r_[x0, y0]
    else:
        raise ValueError('unknown mode:%s' % mode)

    # asMultiPoint is deprecated (removed in Shapely 2.0); construct directly
    points_rot = MultiPoint(points_rot)
    points_rot = points_rot.intersection(pg_rot)
    points = rotate(points_rot, angle_)
    return points
Example #12
def generate_sample_points(sample_area, spacing):
    sample_bound = sample_area.bounds
    x = np.arange(sample_bound[0] - spacing, sample_bound[2] + spacing,
                  spacing)
    y = np.arange(sample_bound[1] - spacing, sample_bound[3] + spacing,
                  spacing)
    pts = np.dstack(np.meshgrid(x, y)).reshape(-1, 2)
    pts = np.hstack((pts, np.zeros((pts.shape[0], 1))))
    pts = MultiPoint(pts)
    pts = pts.intersection(sample_area)
    pts = np.array(mapping(pts)['coordinates'])
    return pts
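
A usage sketch (hypothetical area; assumes numpy and shapely's box/mapping are imported alongside the function):

    from shapely.geometry import box

    pts = generate_sample_points(box(0.0, 0.0, 10.0, 10.0), spacing=2.5)
    # array of lattice coordinates (with the appended z = 0) inside the square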
Example #13
    def _identical_systemic_reward(self, world):
        ''' reward all agents the same for perimeter forming around reward function
        Notes:
         - computes surface integral over surface defined by convex hull of agents
        '''
        assert self.identical_rewards == True
        assert world.identical_rewards == True

        # reward function is designed for a single landmark case
        assert len(world.landmarks) == 1
        assert len(self.scenario_landmarks) == 1

        # get convex hull created by agents
        landmark_circle = Point(world.landmarks[0].state.p_pos).buffer(
            world.landmarks[0].size)
        agent_positions = [ag.state.p_pos for ag in world.agents]
        agent_hull = MultiPoint(agent_positions).convex_hull
        if agent_hull.geom_type == 'LineString':
            # if hull is degenerate (e.g. all points collinear), then set integral to zero
            return [0.0] * self.num_agents

        assert agent_hull.geom_type == 'Polygon', "Unexpected geometry type for convex hull: {}".format(
            agent_hull.geom_type)
        assert agent_hull.is_valid, "Invalid Polygon for convex hull"
        assert landmark_circle.is_valid, "Invalid Polygon for convex hull"

        # find intersection of convex hull and landmark
        landmark_coverage = agent_hull.intersection(landmark_circle)

        # compute reward
        A = agent_hull.area
        B = landmark_circle.area
        AB = landmark_coverage.area
        w_p = _PRECISION_WEIGHT
        assert w_p >= 0.0
        w_c = _COVERAGE_WEIGHT
        assert w_c >= 0.0
        reward_signal = w_p * AB / A - w_c * (B - AB) / B
        assert (reward_signal >= -w_c and reward_signal <= w_p)

        # normalize by number of agents
        reward_signal /= float(self.num_agents)

        # assign to all agents
        reward_n = [reward_signal] * self.num_agents

        return reward_n
Example #14
def grid_points(shape):
    bds = shape.bounds
    i = 24

    lon_min = (bds[0] * i) // 1 / i
    lon_max = (bds[2] * i + 1) // 1 / i
    lat_min = (bds[1] * i) // 1 / i
    lat_max = (bds[3] * i + 1) // 1 / i

    grid = []

    for lon in np.arange(lon_min, lon_max, 1 / i):
        for lat in np.arange(lat_min, lat_max, 1 / i):
            grid.append(Point(lon, lat))
    grid = MultiPoint(grid)

    point_list = list(grid.intersection(shape))

    return point_list
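
For example (hypothetical bounding shape):

    from shapely.geometry import box

    pts = grid_points(box(10.0, 50.0, 10.5, 50.5))
    # list of Points on a 1/24-degree lattice that fall inside the box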
Example #15
    def intersect_polygon(self):  # Returns coordinates
        """
        Creates grid with dimensions contained in self.gridsize;
        Creates an intersection with the polygon;
        
        Returned value: set of coordinates detailing only the grid
        points which were overlaid on the polygon
        """
        # Calculate bounds of the grid around the polygon
        x_min, y_min, x_max, y_max = self.polygon.bounds
        x_min = round_to_multiple(x_min, self.x_spacing)
        x_max = round_to_multiple(x_max, self.x_spacing)
        y_min = round_to_multiple(y_min, self.y_spacing)
        y_max = round_to_multiple(y_max, self.y_spacing)

        # Initialise large grid of points spaced using x & y spacing
        # This is to "quantise" our grid
        x_grid = np.linspace(x_min, x_max,
                             int((x_max - x_min) / self.x_spacing) + 2)
        y_grid = np.linspace(y_min, y_max,
                             int((y_max - y_min) / self.y_spacing) + 1)
        bounds_grid = np.transpose(
            [np.tile(x_grid, len(y_grid)),
             np.repeat(y_grid, len(x_grid))])
        points = MultiPoint(bounds_grid)

        # Shapely intersect grid with polygon and store lattice coordinates in "lattice"
        # (Shapely isn't well documented enough and there might have been a better way
        # to do this)
        result = points.intersection(self.polygon)
        lattice = np.array(result.__geo_interface__["coordinates"])

        # Normalise values to integer values, e.g. 0.34 -> 1
        lattice[:, 0] /= self.x_spacing
        lattice[:, 1] /= self.y_spacing
        lattice = np.around(lattice).astype(int)

        # Move grid to begin at [0,0,(0)]
        x_min = lattice[:, 0].min()
        y_min = lattice[:, 1].min()
        lattice[:, 0] -= x_min
        lattice[:, 1] -= y_min
        return lattice
Example #16
def compute_intersection_area(
    A_east_min_vec,
    A_east_max_vec,
    A_north_min_vec,
    A_north_max_vec,
    B_east_min,
    B_east_max,
    B_north_min,
    B_north_max,
):

    B_coords = tuple(
        zip(
            [B_north_min, B_north_min, B_north_max, B_north_max],
            [B_east_min, B_east_max, B_east_min, B_east_max],
        ))
    B_polygon_obj = MultiPoint(B_coords).convex_hull

    num_areas = A_east_min_vec.shape[0]
    intersection_areas = np.zeros(num_areas)
    for area_idx in np.arange(num_areas):
        A_coords = tuple(
            zip(
                [
                    A_north_min_vec[area_idx],
                    A_north_min_vec[area_idx],
                    A_north_max_vec[area_idx],
                    A_north_max_vec[area_idx],
                ],
                [
                    A_east_min_vec[area_idx],
                    A_east_max_vec[area_idx],
                    A_east_min_vec[area_idx],
                    A_east_max_vec[area_idx],
                ],
            ))
        A_polygon_obj = MultiPoint(A_coords).convex_hull

        intersection_areas[area_idx] = B_polygon_obj.intersection(
            A_polygon_obj).area

    return intersection_areas
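
A usage sketch with hypothetical rectangles (B overlaps each A rectangle in a 2x2 patch):

    import numpy as np

    areas = compute_intersection_area(
        A_east_min_vec=np.array([0.0, 10.0]),
        A_east_max_vec=np.array([5.0, 20.0]),
        A_north_min_vec=np.array([0.0, 10.0]),
        A_north_max_vec=np.array([5.0, 20.0]),
        B_east_min=3.0, B_east_max=12.0,
        B_north_min=3.0, B_north_max=12.0,
    )
    # areas -> array([4., 4.])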
Example #17
 def getZones(tpolygon,d):
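   # NOTE: udensity, xy, density, DF, minGrowth, minDensity and cArea are
   # assumed module-level names; removeHoles() and getExterior() look like
   # project-specific geometry extensions, not standard Shapely methods.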
   ozones=[]
   zones=[]
   for unique in udensity:
     mps=MultiPoint(xy[density==unique]).buffer(d)
     _d = DF.getD_l(unique,minGrowth,d)
     _d = np.maximum(minDensity,_d) 
     ozone=mps.intersection(tpolygon)
     
     zone=ozone.buffer(-_d*0.2).buffer(_d*0.2).removeHoles(cArea(_d*0.2)).simplify(_d*0.01)
     
     if not zone.is_empty:
       ozones.append(zone.getExterior().union(mps.buffer(-_d*0.2)))
     #  ozones.append(mps)
       zones.append(zone)
   
   ozones=cascaded_union(ozones)
   zones=cascaded_union(zones)
   # zones.plot("o-")
   return zones,ozones
Example #18
 def visible_points(self, points, return_coords=True):
     """
     Determine if points are visible - lying inside polygon
      :param points: input points
      :param return_coords: True to return coordinates of points, False to return indices of visible points in the
      input array
     :return: see above
     """
     if not isinstance(points, MultiPoint):
         points = np.array(points)
         multipoint = MultiPoint(
             points
         )  # Convert all points at once to shapely format for performance
     else:
         multipoint = points
     visible = multipoint.intersection(self.geometry)
     if return_coords:
         return np.array(visible).reshape((-1, 2))
     else:
         return visible
Example #19
        def check_next_to():
            if self.type != 'closest':
                return False

            # if not close to each other, return false
            if self.target.distance_from_other_object(self.anchor,
                                                      optimized=True) > 1.2:
                return False

            # Get the anchor and the target convex hull
            anchor_face = self.anchor.get_bbox().z_faces()[0]  # Top face
            anchor_points = tuple(map(tuple,
                                      anchor_face[:, :2]))  # x, y coordinates

            target_z_face = self.target.get_bbox().z_faces()[0]
            target_points = tuple(map(tuple, target_z_face[:, :2]))

            anchor_target_polygon = MultiPoint(anchor_points +
                                               target_points).convex_hull

            # Loop over the scan object and check no one intersects this convex hull
            for o in self.scan().three_d_objects:
                if o.instance_label in ['wall', 'floor']:
                    continue

                if o.object_id in [
                        self.anchor.object_id, self.target.object_id
                ]:
                    continue

                o_z_face = o.get_bbox().z_faces()[0]
                o_points = tuple(map(tuple, o_z_face[:, :2]))
                o_polygon = MultiPoint(o_points).convex_hull

                # if it is found in the area between the two objects, return false
                if o_polygon.intersection(
                        anchor_target_polygon).area / o_polygon.area > 0.5:
                    return False

            return True
Example #20
    def assign_label(self, tile_starts, tile_ends, regions, region_labels):
        ''' calculates overlap of tile with xml regions and creates dictionary based on unique labels '''

        tile_box = [[tile_starts[0], tile_starts[1]],
                    [tile_starts[0], tile_ends[1]],
                    [tile_ends[0], tile_starts[1]],
                    [tile_ends[0], tile_ends[1]]]
        tile_box = MultiPoint(tile_box).convex_hull

        tile_label = {}
        # create a dictionary of label/value pairs: returns percent of tile containing unique
        for label in set(region_labels):

            # grab regions that correspond to this label
            label_list = [i for i, e in enumerate(region_labels) if e == label]
            labels = tuple(regions[i] for i in label_list)

            # loop over every region associated with a given label, sum the overlap
            box_label = False  # initialize
            ov = 0  # initialize
            for reg in labels:
                poly = Polygon(reg)
                if poly.is_valid == False:
                    poly = poly.buffer(0)
                poly_label = tile_box.intersects(poly)
                if poly_label == True:
                    box_label = True
                    ov_reg = tile_box.intersection(poly)
                    ov += ov_reg.area / tile_box.area

            if box_label == True:
                tile_label[label] = ov

        # p.s. if you are curious, you can plot the polygons by the following
        #   plt.plot(*poly.exterior.xy) and plt.plot(*tile_box.exterior.xy)
        return tile_label
Example #21
def get_cuboid2d_visibility(cuboid2d, img_width, img_height):
    cuboid_poly = MultiPoint(cuboid2d).convex_hull
    img_poly = MultiPoint([(0, 0), (0, img_height), (img_width, img_height),
                           (img_width, 0)]).convex_hull
    return cuboid_poly.intersection(img_poly).area / cuboid_poly.area
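
For example (hypothetical projection, half of it off the right image edge):

    cuboid2d = [(500, 100), (700, 100), (700, 300), (500, 300)]
    vis = get_cuboid2d_visibility(cuboid2d, img_width=600, img_height=480)
    # 0.5: only the left half of the projected hull lies on the canvas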
Example #22
class PolygonAnnotatedCoordinates(GeoDataFrameWrapper):
    """
    Class for retrieving ground truth cluster labels from a set of coordinate points and polygons.
    From the provided 2-dim. coordinates only points within the ground truth region will be considered.
    """
    def __init__(self,
                 coordinates: TCoordinates,
                 groundTruthPolygons: Union[str, Sequence[Polygon],
                                            GeoDataFrame],
                 noiseLabel: Optional[int] = -1):
        """
        :param coordinates: coordinates of points. These points should be spread over an area larger or equal to
            the ground truth area
        :param groundTruthPolygons: sequence of polygons, GeoDataFrame or path to a shapefile containing such a sequence.
            The polygons represent the ground truth for clustering.
            *Important*: the first polygon in the sequence is assumed to be the region within
            which ground truth was provided and has to cover all remaining polygons. This also means that all non-noise
            clusters in that region should be covered by a polygon
        :param noiseLabel: label to associate with noise or None
        """

        # The constructor might seem bloated but it really mostly does input validation for the polygons
        coordinates = extractCoordinatesArray(coordinates)
        if isinstance(groundTruthPolygons, str):
            polygons: Sequence[Polygon] = gp.read_file(
                groundTruthPolygons).geometry.values
        elif isinstance(groundTruthPolygons, GeoDataFrame):
            polygons: Sequence[Polygon] = groundTruthPolygons.geometry.values
        else:
            polygons = groundTruthPolygons
        self.regionPolygon = polygons[0]
        self.noiseLabel = noiseLabel
        self.clusterPolygons = MultiPolygon(polygons[1:])
        self.noisePolygon = self.regionPolygon.difference(self.clusterPolygons)

        self.regionMultipoint = MultiPoint(coordinates).intersection(
            self.regionPolygon)
        if self.regionMultipoint.is_empty:
            raise Exception(
                f"The ground truth region contains no datapoints. "
                f"This can happen if you have provided unsuitable coordinates")
        self.noiseMultipoint = self.regionMultipoint.intersection(
            self.noisePolygon)
        if self.noiseLabel is None and not self.noisePolygon.is_empty:
            raise Exception(
                f"No noise_label was provided but there is noise: {len(self.noiseMultipoint)} datapoints "
                f"in annotated area do not belong to any cluster polygon")
        self.clustersMultipoints = []
        intermediatePolygon = Polygon()
        for i, clusterPolygon in enumerate(self.clusterPolygons, start=1):
            if not intermediatePolygon.intersection(clusterPolygon).is_empty:
                raise Exception(
                    f"The polygons should be non-intersecting: polygon {i} intersects with previous polygons"
                )
            intermediatePolygon = intermediatePolygon.union(clusterPolygon)
            clusterMultipoint = self.regionMultipoint.intersection(
                clusterPolygon)
            if clusterMultipoint.is_empty:
                raise Exception(
                    f"The annotated cluster for polygon {i} is empty - check your data!"
                )
            self.clustersMultipoints.append(clusterMultipoint)

    def toGeoDF(self, crs='epsg:3857', include_noise=True):
        """
        :return: GeoDataFrame with clusters as MultiPoint instance indexed by the clusters' identifiers
        """
        clusters = self.clustersMultipoints
        firstLabel = 0
        if self.noiseLabel is not None and include_noise:
            clusters = [self.noiseMultipoint] + clusters
            firstLabel = self.noiseLabel
        gdf = gp.GeoDataFrame(
            {
                "geometry":
                clusters,
                "identifier":
                list(range(firstLabel, firstLabel + len(clusters), 1))
            },
            crs=crs)
        gdf.set_index("identifier", drop=True, inplace=True)
        return gdf

    def plot(self, includeNoise=True, **kwargs):
        """
        Plots the ground truth clusters

        :param includeNoise:
        :param kwargs:
        :return:
        """
        gdf = self.toGeoDF(include_noise=includeNoise)
        gdf["color"] = np.random.random(len(gdf))
        if includeNoise and self.noiseLabel is not None:
            gdf.loc[self.noiseLabel, "color"] = 0
        gdf.plot(column="color", **kwargs)

    def getCoordinatesLabels(self):
        """
        Extract cluster coordinates and labels as numpy arrays from the provided ground truth region and
        cluster polygons

        :return: tuple of arrays of the type (coordinates, labels)
        """
        coords, labels = [], []
        for row in self.toGeoDF(include_noise=True).itertuples():
            clusterMultipoint, label = row.geometry, row.Index
            coords += [[p.x, p.y] for p in clusterMultipoint]
            labels += [label] * len(clusterMultipoint)
        return np.array(coords), np.array(labels)
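
A construction sketch (hypothetical region and cluster polygons; assumes numpy and shapely are imported and the coordinates cover the region):

    import numpy as np
    from shapely.geometry import Polygon

    region = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
    cluster = Polygon([(2, 2), (5, 2), (5, 5), (2, 5)])
    coords = np.random.uniform(0, 10, size=(500, 2))

    # First polygon is the annotated region, the rest are cluster polygons.
    pac = PolygonAnnotatedCoordinates(coords, [region, cluster])
    xy, labels = pac.getCoordinatesLabels()  # labels: -1 for noise, 0 for the cluster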
Example #23
def readshp(trnfile, inDataset, pos):
    #  projection info from input image
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    gt = list(geotransform)
    imsr = osr.SpatialReference()
    imsr.ImportFromWkt(projection)

    trnDriver = ogr.GetDriverByName('ESRI Shapefile')
    trnDatasource = trnDriver.Open(trnfile, 0)
    trnLayer = trnDatasource.GetLayer()
    trnsr = trnLayer.GetSpatialRef()
    #  coordinate transformation from training to image projection
    ct = osr.CoordinateTransformation(trnsr, imsr)
    #  image bands
    rasterBands = []
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b))
#  number of classes
    K = 1
    feature = trnLayer.GetNextFeature()
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid) > K:
            K = int(classid)
        feature = trnLayer.GetNextFeature()
    trnLayer.ResetReading()
    K += 1
    #  loop through the polygons
    Gs = []  # train observations (data matrix)
    ls = []  # class labels (lists)
    classnames = []
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames.append(classname)
        classids = classids | {classid}  # add the id string itself, not its characters
        #      label for this ROI
        y = int(classid)
        l = [0 for i in range(K)]
        l[y] = 1.0
        polygon = feature.GetGeometryRef()
        #      transform to same projection as image
        polygon.Transform(ct)
        #      convert to a Shapely object
        poly = shapely.wkt.loads(polygon.ExportToWkt())
        #      transform the boundary to pixel coords in numpy
        bdry = np.array(poly.boundary)
        bdry[:, 0] = bdry[:, 0] - gt[0]
        bdry[:, 1] = bdry[:, 1] - gt[3]
        GT = np.mat([[gt[1], gt[2]], [gt[4], gt[5]]])
        bdry = bdry * np.linalg.inv(GT)
        #      polygon in pixel coords
        polygon1 = asPolygon(bdry)
        #      raster over the bounding rectangle
        minx, miny, maxx, maxy = map(int, list(polygon1.bounds))
        pts = []
        for i in range(minx, maxx + 1):
            for j in range(miny, maxy + 1):
                pts.append((i, j))
        multipt = MultiPoint(pts)
        #      intersection as list
        intersection = np.array(multipt.intersection(polygon1),
                                dtype=np.int).tolist()
        #      cut out the bounded image cube
        cube = np.zeros((maxy - miny + 1, maxx - minx + 1, len(rasterBands)))
        k = 0
        for band in rasterBands:
            cube[:, :, k] = band.ReadAsArray(minx, miny, maxx - minx + 1,
                                             maxy - miny + 1)
            k += 1


#      get the training vectors
        for (x, y) in intersection:
            Gs.append(cube[y - miny, x - minx, :])
            ls.append(l)
        polygon = None
        polygon1 = None
        feature.Destroy()
    trnDatasource.Destroy()
    Gs = np.array(Gs)
    ls = np.array(ls)
    return (Gs, ls, K, classnames)
Example #24
def main():      
    gdal.AllRegister()
    path = auxil.select_directory('Input directory')
    if path:
        os.chdir(path)        
#  input image    
    infile = auxil.select_infile(title='Image file') 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)      
    else:
        return  
    pos =  auxil.select_pos(bands)   
    if not pos:
        return
    N = len(pos) 
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b)) 
#  training algorithm
    trainalg = auxil.select_integer(1,msg='1:Maxlike,2:Backprop,3:Congrad,4:SVM') 
    if not trainalg:
        return           
#  training data (shapefile)      
    trnfile = auxil.select_infile(filt='.shp',title='Train shapefile')
    if trnfile:
        trnDriver = ogr.GetDriverByName('ESRI Shapefile')
        trnDatasource = trnDriver.Open(trnfile,0)
        trnLayer = trnDatasource.GetLayer() 
        trnsr = trnLayer.GetSpatialRef()             
    else:
        return     
    tstfile = auxil.select_outfile(filt='.tst', title='Test results file') 
    if not tstfile:
        print 'No test output'      
#  outfile
    outfile, outfmt = auxil.select_outfilefmt(title='Classification file')   
    if not outfile:
        return                   
    if trainalg in (2,3,4):
#      class probabilities file, hidden neurons
        probfile, probfmt = auxil.select_outfilefmt(title='Probabilities file')
    else:
        probfile = None     
    if trainalg in (2,3):    
        L = auxil.select_integer(8,'Number of hidden neurons')    
        if not L:
            return                  
#  coordinate transformation from training to image projection   
    ct= osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid)>K:
            K = int(classid)
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K += 1       
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()    
    print 'image:    '+infile
    print 'training: '+trnfile  
    if trainalg == 1:
        print 'Maximum Likelihood'
    elif trainalg == 2:
        print 'Neural Net (Backprop)'
    elif trainalg ==3:
        print 'Neural Net (Congrad)'
    else:
        print 'Support Vector Machine'               
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname  = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   '+ classname
        classids = classids | {classid}  # add the id string itself, not its characters
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=np.int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    classnames += '}'
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1] for ffn
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]     
#  setup output datasets 
    driver = gdal.GetDriverByName(outfmt)    
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
    if probfile:
        driver = gdal.GetDriverByName(probfmt)    
        probDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)  
        probBands = [] 
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k+1))         
    if tstfile:
#  train on 2/3 training examples         
        Gstrn = Gs[0:2*m//3,:]
        lstrn = ls[0:2*m//3,:] 
        Gstst = Gs[2*m//3:,:]  
        lstst = ls[2*m//3:,:]    
    else:
        Gstrn = Gs
        lstrn = ls         
    if   trainalg == 1:
        classifier = sc.Maxlike(Gstrn,lstrn)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gstrn,lstrn,L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gstrn,lstrn,L)
    elif trainalg == 4:
        classifier = sc.Svm(Gstrn,lstrn)         
            
    print 'training on %i pixel vectors...' % np.shape(Gstrn)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' %str(time.time()-start) 
    if result:
        if trainalg in [2,3]:
            cost = np.log10(result)  
            ymax = np.max(cost)
            ymin = np.min(cost) 
            xmax = len(cost)      
            plt.plot(range(xmax),cost,'k')
            plt.axis([0,xmax,ymin-1,ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')              
#      classify the image           
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols,N))    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0               
            cls, Ms = classifier.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
            if probfile:
                Ms = np.byte(Ms*255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k,:],(1,cols)),0,row)
        outBand.FlushCache()
        print 'elapsed time %s' %str(time.time()-start)
        outDataset = None
        inDataset = None      
        if probfile:
            for probBand in probBands:
                probBand.FlushCache() 
            probDataset = None
            print 'class probabilities written to: %s'%probfile   
        K =  lstrn.shape[1]+1                     
        if (outfmt == 'ENVI') and (K<19):
#          try to make an ENVI classification header file            
            hdr = header.Header() 
            headerfile = outfile+'.hdr'
            f = open(headerfile)
            line = f.readline()
            envihdr = ''
            while line:
                envihdr += line
                line = f.readline()
            f.close()         
            hdr.read(envihdr)
            hdr['file type'] ='ENVI Classification'
            hdr['classes'] = str(K)
            classlookup = '{0'
            for i in range(1,3*K):
                classlookup += ', '+str(str(ctable[i]))
            classlookup +='}'    
            hdr['class lookup'] = classlookup
            hdr['class names'] = classnames
            f = open(headerfile,'w')
            f.write(str(hdr))
            f.close()             
        print 'thematic map written to: %s'%outfile
        if trainalg in [2,3]:
            print 'please close the cross entropy plot to continue'
            plt.show()
        if tstfile:
            with open(tstfile,'w') as f:
                print >>f, 'FFN test results for %s'%infile
                print >>f, time.asctime()
                print >>f, 'Classification image: %s'%outfile
                print >>f, 'Class probabilities image: %s'%probfile
                print >>f, lstst.shape[0],lstst.shape[1]
                classes, _ = classifier.classify(Gstst)
                labels = np.argmax(lstst,axis=1)+1
                for i in range(len(classes)):
                    print >>f, classes[i], labels[i]              
                f.close()
                print 'test results written to: %s'%tstfile
        print 'done'
    else:
        print 'an error occurred'
        return 
Example #25
    def draw(self, vsk: vsketch.Vsketch) -> None:
        print(os.getcwd())
        vsk.size("a6", landscape=False, center=False)
        vsk.scale(1)
        vsk.penWidth(self.pen_width)

        glyph_poly = load_glyph(self.font, self.glyph, self.face_index)

        # normalize glyph size
        bounds = glyph_poly.bounds
        scale_factor = min(
            (vsk.width - 2 * self.glyph_margin) / (bounds[2] - bounds[0]),
            (vsk.height - 2 * self.glyph_margin) / (bounds[3] - bounds[1]),
        )
        glyph_poly = scale(glyph_poly, scale_factor, scale_factor)
        bounds = glyph_poly.bounds
        glyph_poly = translate(
            glyph_poly,
            vsk.width / 2 - bounds[0] - (bounds[2] - bounds[0]) / 2,
            vsk.height / 2 - bounds[1] - (bounds[3] - bounds[1]) / 2 +
            self.glyph_voffset,
        )

        if self.draw_glyph:
            vsk.strokeWeight(self.glyph_weight)
            if self.fill_glyph:
                vsk.fill(1)
            vsk.geometry(glyph_poly)

            if self.fill_glyph and self.glyph_chroma:
                angle = self.glyph_chroma_angle / 180.0 * math.pi
                glyph_poly_chroma1 = translate(
                    glyph_poly,
                    -self.glyph_chroma_offset * math.cos(angle),
                    -self.glyph_chroma_offset * math.sin(angle),
                ).difference(glyph_poly)
                glyph_poly_chroma2 = translate(
                    glyph_poly,
                    self.glyph_chroma_offset * math.cos(angle),
                    self.glyph_chroma_offset * math.sin(angle),
                ).difference(glyph_poly)

                vsk.strokeWeight(1)
                vsk.stroke(2)
                vsk.fill(2)
                vsk.geometry(glyph_poly_chroma1)
                vsk.stroke(3)
                vsk.fill(3)
                vsk.geometry(glyph_poly_chroma2)

                glyph_poly = unary_union(
                    [glyph_poly, glyph_poly_chroma1, glyph_poly_chroma2])

            vsk.strokeWeight(1)
            vsk.stroke(1)
            vsk.noFill()

        glyph_shadow = None
        if self.glyph_shadow:
            angle = self.glyph_chroma_angle / 180.0 * math.pi
            glyph_shadow = translate(
                glyph_poly,
                self.glyph_chroma_offset * math.cos(angle),
                self.glyph_chroma_offset * math.sin(angle),
            ).difference(glyph_poly)
            vsk.fill(3)
            vsk.stroke(3)
            vsk.geometry(glyph_shadow)
            vsk.noFill()
            vsk.stroke(1)
            glyph_poly = glyph_poly.union(glyph_shadow)

        if self.glyph_weight == 1:
            glyph_poly_ext = glyph_poly.buffer(
                self.glyph_space,
                join_style=JOIN_STYLE.mitre,
            )
            glyph_poly_int = glyph_poly.buffer(
                -self.glyph_space_inside,
                join_style=JOIN_STYLE.mitre,
            )
        else:
            buf_len = (self.glyph_weight - 1) / 2 * self.pen_width
            glyph_poly_ext = glyph_poly.buffer(
                buf_len * 2 + self.glyph_space,
                join_style=JOIN_STYLE.mitre,
            )
            glyph_poly_int = glyph_poly.buffer(
                -buf_len - self.glyph_space_inside,
                join_style=JOIN_STYLE.mitre,
            )

        if glyph_shadow is not None:
            glyph_poly_int = glyph_poly_int.difference(glyph_shadow)

        # horizontal stripes
        if self.draw_h_stripes:
            count = round(
                (vsk.height - 2 * self.margin) / self.h_stripes_pitch)
            corrected_pitch = (vsk.height - 2 * self.margin) / count
            hstripes = MultiLineString([[
                (self.margin, self.margin + i * corrected_pitch),
                (vsk.width - self.margin, self.margin + i * corrected_pitch),
            ] for i in range(count + 1)])

            vsk.geometry(hstripes.difference(glyph_poly_ext))

            if self.h_stripes_inside:
                inside_stripes = translate(hstripes, 0, corrected_pitch /
                                           2).intersection(glyph_poly_int)
                vsk.geometry(inside_stripes)

                if self.h_stripes_inside_chroma:
                    chroma_offset = math.sqrt(2) * self.pen_width
                    vsk.stroke(2)
                    vsk.geometry(
                        translate(inside_stripes, -chroma_offset,
                                  -chroma_offset))
                    vsk.stroke(3)
                    vsk.geometry(
                        translate(inside_stripes, chroma_offset,
                                  chroma_offset))
                    vsk.stroke(1)

        # concentric
        if self.draw_concentric:
            circle_count = int(
                math.ceil(
                    math.hypot(vsk.width, vsk.height) / 2 /
                    self.concentric_pitch))
            circles = unary_union([
                Point(vsk.width / 2, vsk.height / 2).buffer(
                    (i + 1) * self.concentric_pitch,
                    resolution=int(1 * (i + 1) * self.concentric_pitch),
                ).exterior for i in range(circle_count)
            ])
            vsk.geometry(
                circles.difference(glyph_poly_ext).intersection(
                    box(
                        self.margin,
                        self.margin,
                        vsk.width - self.margin,
                        vsk.height - self.margin,
                    )))

        # dots
        vsk.fill(1)
        if self.draw_dots or self.draw_cut_circles:
            v_pitch = self.pitch * math.tan(math.pi / 3) / 2
            h_count = int((vsk.width - 2 * self.margin) // self.pitch)
            v_count = int((vsk.height - 2 * self.margin) // v_pitch)
            h_offset = (vsk.width - h_count * self.pitch) / 2
            v_offset = (vsk.height - v_count * v_pitch) / 2

            dot_array = []
            for j in range(v_count + 1):
                odd_line = j % 2 == 1
                for i in range(h_count + (0 if odd_line else 1)):
                    dot = Point(
                        h_offset + i * self.pitch +
                        (self.pitch / 2 if odd_line else 0),
                        v_offset + j * v_pitch,
                    ).buffer(self.thickness / 2)

                    if self.draw_dots:
                        if not dot.buffer(
                                self.thickness / 2).intersects(glyph_poly_ext):
                            dot_array.append(dot)
                    else:
                        dot_array.append(dot)

            dots = unary_union(dot_array)

            if self.draw_dots:
                vsk.geometry(dots)

            if self.draw_cut_circles:
                if self.cut_circles_inside:
                    op_func = lambda geom: geom.intersection(glyph_poly_int)
                else:
                    op_func = lambda geom: geom.difference(glyph_poly_ext)

                vsk.geometry(op_func(dots))

                if self.cut_circle_chroma:
                    angle = math.pi / 6
                    dist = self.pitch * 0.1
                    vsk.fill(2)
                    vsk.stroke(2)
                    vsk.geometry(
                        op_func(
                            translate(dots, -dist * math.cos(angle), -dist *
                                      math.sin(angle)).difference(dots)))
                    vsk.fill(3)
                    vsk.stroke(3)
                    vsk.geometry(
                        op_func(
                            translate(dots, dist * math.cos(angle),
                                      dist *
                                      math.sin(angle)).difference(dots)))
                    vsk.fill(1)
                    vsk.stroke(1)

        vsk.stroke(4)  # apply line sort, see finalize()
        if self.draw_dot_matrix:
            h_count = int(
                (vsk.width - 2 * self.margin) // self.dot_matrix_pitch) + 1
            v_count = int(
                (vsk.height - 2 * self.margin) // self.dot_matrix_pitch) + 1
            h_pitch = (vsk.width - 2 * self.margin) / (h_count - 1)
            v_pitch = (vsk.height - 2 * self.margin) / (v_count - 1)

            mp = MultiPoint([
                (self.margin + i * h_pitch, self.margin + j * v_pitch)
                for i, j in itertools.product(range(h_count), range(v_count))
                if vsk.random(1) < self.dot_matrix_density
            ])

            if self.draw_dot_matrix_inside:
                mp = mp.intersection(glyph_poly_int)
            else:
                mp = mp.difference(glyph_poly_ext)

            vsk.geometry(mp)
            vsk.vpype("color -l4 black")

        vsk.vpype("color -l1 black color -l2 cyan color -l3 magenta")
Example #26
def main():
    gdal.AllRegister()
    path = auxil.select_directory('Input directory')
    if path:
        os.chdir(path)
#  input image
    infile = auxil.select_infile(title='Image file')
    if infile:
        inDataset = gdal.Open(infile, GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform)
        else:
            print 'No geotransform available'
            return
        imsr = osr.SpatialReference()
        imsr.ImportFromWkt(projection)
    else:
        return
    pos = auxil.select_pos(bands)
    if not pos:
        return
    N = len(pos)
    rasterBands = []
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b))
#  training algorithm
    trainalg = auxil.select_integer(1,
                                    msg='1:Maxlike,2:Backprop,3:Congrad,4:SVM')
    if not trainalg:
        return
#  training data (shapefile)
    trnfile = auxil.select_infile(filt='.shp', title='Train shapefile')
    if trnfile:
        trnDriver = ogr.GetDriverByName('ESRI Shapefile')
        trnDatasource = trnDriver.Open(trnfile, 0)
        trnLayer = trnDatasource.GetLayer()
        trnsr = trnLayer.GetSpatialRef()
    else:
        return
    tstfile = auxil.select_outfile(filt='.tst', title='Test results file')
    if not tstfile:
        print 'No test output'
#  outfile
    outfile, outfmt = auxil.select_outfilefmt(title='Classification file')
    if not outfile:
        return
    if trainalg in (2, 3, 4):
        #      class probabilities file, hidden neurons
        probfile, probfmt = auxil.select_outfilefmt(title='Probabilities file')
    else:
        probfile = None
    if trainalg in (2, 3):
        L = auxil.select_integer(8, 'Number of hidden neurons')
        if not L:
            return
#  coordinate transformation from training to image projection
    ct = osr.CoordinateTransformation(trnsr, imsr)
    #  number of classes
    K = 1
    feature = trnLayer.GetNextFeature()
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid) > K:
            K = int(classid)
        feature = trnLayer.GetNextFeature()
    trnLayer.ResetReading()
    K += 1
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()
    print 'image:    ' + infile
    print 'training: ' + trnfile
    if trainalg == 1:
        print 'Maximum Likelihood'
    elif trainalg == 2:
        print 'Neural Net (Backprop)'
    elif trainalg == 3:
        print 'Neural Net (Congrad)'
    else:
        print 'Support Vector Machine'
#  loop through the polygons
    Gs = []  # train observations
    ls = []  # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   ' + classname
            classids.add(classid)  # add the id string itself; set(classid) would split it into characters
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
        #      transform to same projection as image
        polygon.Transform(ct)
        #      convert to a Shapely object
        poly = shapely.wkt.loads(polygon.ExportToWkt())
        #      transform the boundary to pixel coords in numpy
        bdry = np.array(poly.boundary)
        bdry[:, 0] = bdry[:, 0] - gt[0]
        bdry[:, 1] = bdry[:, 1] - gt[3]
        GT = np.mat([[gt[1], gt[2]], [gt[4], gt[5]]])
        bdry = bdry * np.linalg.inv(GT)
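        #      GDAL geotransform: X = gt[0] + col*gt[1] + row*gt[2] and
        #      Y = gt[3] + col*gt[4] + row*gt[5]; right-multiplying the shifted
        #      coordinates by inv(GT) undoes the scaling (exact for north-up
        #      rasters, where gt[2] = gt[4] = 0)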
        #      polygon in pixel coords
        polygon1 = asPolygon(bdry)
        #      raster over the bounding rectangle
        minx, miny, maxx, maxy = map(int, list(polygon1.bounds))
        pts = []
        for i in range(minx, maxx + 1):
            for j in range(miny, maxy + 1):
                pts.append((i, j))
        multipt = MultiPoint(pts)
        #      intersection as list
        intersection = np.array(multipt.intersection(polygon1),
                                dtype=np.int).tolist()
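        #      NOTE: np.array() on Shapely geometries and asPolygon() require
        #      pre-2.0 Shapely, and np.int requires pre-1.24 NumPy; under
        #      Shapely >= 2.0 one would instead iterate
        #      multipt.intersection(polygon1).geoms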
        #      cut out the bounded image cube
        cube = np.zeros((maxy - miny + 1, maxx - minx + 1, len(rasterBands)))
        k = 0
        for band in rasterBands:
            cube[:, :, k] = band.ReadAsArray(minx, miny, maxx - minx + 1,
                                             maxy - miny + 1)
            k += 1
#      get the training vectors
        for (x, y) in intersection:
            Gs.append(cube[y - miny, x - minx, :])
            ls.append(l)
        polygon = None
        polygon1 = None
        feature.Destroy()
    trnDatasource.Destroy()
    classnames += '}'
    m = len(ls)
    print str(m) + ' training pixel vectors were read in'
    Gs = np.array(Gs)
    ls = np.array(ls)
    #  stretch the pixel vectors to [-1,1] for ffn
    maxx = np.max(Gs, 0)
    minx = np.min(Gs, 0)
    for j in range(N):
        Gs[:, j] = 2 * (Gs[:, j] - minx[j]) / (maxx[j] - minx[j]) - 1.0
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx, :]
    ls = ls[idx, :]
    #  setup output datasets
    driver = gdal.GetDriverByName(outfmt)
    outDataset = driver.Create(outfile, cols, rows, 1, GDT_Byte)
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection)
    outBand = outDataset.GetRasterBand(1)
    if probfile:
        driver = gdal.GetDriverByName(probfmt)
        probDataset = driver.Create(probfile, cols, rows, K, GDT_Byte)
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)
        probBands = []
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k + 1))
    if tstfile:
        #  train on 2/3 training examples
        Gstrn = Gs[0:2 * m // 3, :]
        lstrn = ls[0:2 * m // 3, :]
        Gstst = Gs[2 * m // 3:, :]
        lstst = ls[2 * m // 3:, :]
    else:
        Gstrn = Gs
        lstrn = ls
    if trainalg == 1:
        classifier = sc.Maxlike(Gstrn, lstrn)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gstrn, lstrn, L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gstrn, lstrn, L)
    elif trainalg == 4:
        classifier = sc.Svm(Gstrn, lstrn)

    print 'training on %i pixel vectors...' % np.shape(Gstrn)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' % str(time.time() - start)
    if result:
        if trainalg in [2, 3]:
            cost = np.log10(result)
            ymax = np.max(cost)
            ymin = np.min(cost)
            xmax = len(cost)
            plt.plot(range(xmax), cost, 'k')
            plt.axis([0, xmax, ymin - 1, ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')


#      classify the image
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols, N))
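        #      classify one image row per iteration to bound memory: tile holds a
        #      single row of N-band pixel vectors, scaled like the training data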
        for row in range(rows):
            for j in range(N):
                tile[:, j] = rasterBands[j].ReadAsArray(0, row, cols, 1)
                tile[:, j] = 2 * (tile[:, j] - minx[j]) / (maxx[j] -
                                                           minx[j]) - 1.0
            cls, Ms = classifier.classify(tile)
            outBand.WriteArray(np.reshape(cls, (1, cols)), 0, row)
            if probfile:
                Ms = np.byte(Ms * 255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k, :], (1, cols)), 0,
                                            row)
        outBand.FlushCache()
        print 'elapsed time %s' % str(time.time() - start)
        outDataset = None
        inDataset = None
        if probfile:
            for probBand in probBands:
                probBand.FlushCache()
            probDataset = None
            print 'class probabilities written to: %s' % probfile
        K = lstrn.shape[1] + 1
        if (outfmt == 'ENVI') and (K < 19):
            #          try to make an ENVI classification header file
            hdr = header.Header()
            headerfile = outfile + '.hdr'
            f = open(headerfile)
            line = f.readline()
            envihdr = ''
            while line:
                envihdr += line
                line = f.readline()
            f.close()
            hdr.read(envihdr)
            hdr['file type'] = 'ENVI Classification'
            hdr['classes'] = str(K)
            classlookup = '{0'
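            # ctable is assumed to be an RGB class-colour lookup table defined at
            # module level (e.g. in auxil); it is not defined in this snippet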
            for i in range(1, 3 * K):
                classlookup += ', ' + str(ctable[i])
            classlookup += '}'
            hdr['class lookup'] = classlookup
            hdr['class names'] = classnames
            f = open(headerfile, 'w')
            f.write(str(hdr))
            f.close()
        print 'thematic map written to: %s' % outfile
        if trainalg in [2, 3]:
            print 'please close the cross entropy plot to continue'
            plt.show()
        if tstfile:
            with open(tstfile, 'w') as f:
                print >> f, 'FFN test results for %s' % infile
                print >> f, time.asctime()
                print >> f, 'Classification image: %s' % outfile
                print >> f, 'Class probabilities image: %s' % probfile
                print >> f, lstst.shape[0], lstst.shape[1]
                classes, _ = classifier.classify(Gstst)
                labels = np.argmax(lstst, axis=1) + 1
                for i in range(len(classes)):
                    print >> f, classes[i], labels[i]
            print 'test results written to: %s' % tstfile
        print 'done'
    else:
        print 'an error occurred'
        return
Example #27
0
def to_stations(da: Var,
                *,
                stations: pd.DataFrame,
                datastore: DataStore = None,
                chunks: dict = None,
                **kwargs) -> xr.DataArray:

    if isinstance(da, camps.Variable):
        da = da(datastore=datastore, chunks=chunks, **kwargs)
    elif isinstance(da, xr.DataArray):
        if chunks:
            da = da.camps.chunk(chunks)

    # Determine x and y
    # Use Projected crs as common crs for grid and station

    try:
        x = da.camps.projx.name
        y = da.camps.projy.name
    except KeyError:
        # projected coordinates do not exist
        if da.camps.grid_mapping:
            # try to make them from grid_mapping and lat/lons
            da = da.metpy.assign_crs(da.camps.grid_mapping)
            da = da.metpy.assign_y_x()
            da = da.drop('metpy_crs')
            x = da.camps.projx.name
            y = da.camps.projy.name
        else:
            # exception for mercator data without grid_mapping
            lat = da.camps.latitude
            if lat.ndim == 1:
                y = lat.name
            lon = da.camps.longitude
            if lon.ndim == 1:
                x = lon.name

            # ensure longitude is expressed as degrees east of the prime meridian in (-180, 180]
            lon_attrs = lon.attrs
            da[lon.name] = xr.where(lon > 180, lon - 360, lon)
            da[lon.name].attrs = lon_attrs

            # Add the lat lon grid mapping since it didn't have one
            # Latitude and longitude on the WGS 1984 datum
            lat_lon_wgs84 = {
                'grid_mapping_name': "latitude_longitude",
                'longitude_of_prime_meridian': 0.0,
                'semi_major_axis': 6378137.0,
                'inverse_flattening': 298.257223563
            }
            gm = xr.DataArray()
            gm.attrs.update(lat_lon_wgs84)
            da = da.assign_coords({'lat_lon_wgs84': gm})
            da.attrs['grid_mapping'] = 'lat_lon_wgs84'

    pyproj_crs = CFProjection(da.camps.grid_mapping).to_pyproj()
    stations['x'], stations['y'] = Proj(pyproj_crs)(stations.lon.values,
                                                    stations.lat.values)
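    # Proj(pyproj_crs) forward-projects (lon, lat) in degrees to x, y in the grid's
    # CRS, so stations and grid points now share a common coordinate system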

    # rechunk so that multiple chunks don't span x and y dims
    if da.chunks is not None:
        da = da.chunk({x: -1, y: -1})
        da = da.unify_chunks()

    # load x,y data
    da[x].load()
    da[y].load()

    # make horizontal space 1-D by stacking x and y
    stacked = da.stack(xy=(x, y))
    gridxy = np.column_stack((stacked[x].data, stacked[y].data))

    stationxy = np.column_stack((stations.x, stations.y))

    tree = cKDTree(gridxy)  # fast nearest-neighbor search structure
    dist_ix = tree.query(
        stationxy)  # find distance to nearest and index of nearest
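    # cKDTree.query returns a (distances, indices) pair; dist_ix[1] holds, for each
    # station, the flat index of its nearest grid point in the stacked xy dimension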

    # make a grid polygon
    from shapely.geometry import Polygon, MultiPoint, Point
    unit_y = xr.DataArray(np.ones(da[y].shape), dims=da[y].dims)
    unit_x = xr.DataArray(np.ones(da[x].shape), dims=da[x].dims)
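    # edge() is assumed to be a helper defined elsewhere in camps that walks the
    # grid border and returns its boundary values in order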
    edge_x = edge(da[x] * unit_y)  # broadcast constant x along the y dim
    edge_y = edge(unit_x * da[y])  # broadcast constant y along the x dim

    xy = zip(edge_x, edge_y)
    grid_polygon = Polygon(xy)

    # make station points
    xy = zip(stations.x.values, stations.y.values)
    station_points = MultiPoint(list(xy))

    # determine which stations lie outside grid domain (polygon)
    stations['point_str'] = pd.Series([str(p) for p in station_points])
    stations['ix'] = stations.index
    points_outside_grid = station_points - station_points.intersection(
        grid_polygon)
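    # shapely's '-' is geometric difference: the points left over are the stations
    # not covered by the grid polygon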
    if not points_outside_grid.is_empty:
        if isinstance(points_outside_grid, Point):
            points_outside_grid = [points_outside_grid]
        ix_stations_outside_grid = stations.set_index('point_str').loc[[
            str(p) for p in points_outside_grid
        ]].ix
    else:
        ix_stations_outside_grid = list()  # let be empty list

    print(len(ix_stations_outside_grid))  # number of stations outside the grid domain

    def nearest_worker(da: xr.DataArray, *, x, y, ix, ix_nan) -> xr.DataArray:
        da = da.stack(station=(x,
                               y))  # squash the horizontal space dims into one

        da = da.isel(station=ix)
        da = da.drop_vars('station')  # remove station coord
        da.loc[{
            'station': ix_nan
        }] = np.nan  # use integer index location to set stations outside grid to missing

        return da

    # make template for expressing the change in shape in map_blocks
    template = da.copy()
    # reshape action
    template = template.stack(
        station=(x, y))  # combine the lat/lon dims into one dim called station
    template = template.isel(
        station=[0] * len(stations)
    )  # take the 0th grid point once per station, so the station dim has the output length
    template = template.drop(
        'station'
    )  # drop the multiindex lat/lon coord associated with 'station' from the 0th grid point
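    # xr.map_blocks requires the template to match the output's dims, shape, and
    # chunking so per-chunk results can be stitched back together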

    mb_kwargs = dict(x=x, y=y, ix=dist_ix[1], ix_nan=ix_stations_outside_grid)
    da = xr.map_blocks(nearest_worker, da, kwargs=mb_kwargs, template=template)

    # remove any metadata that may be leftover from the grid
    da = da.drop_vars([x, y], errors='ignore')

    # configure station metadata
    # prep station coord with numeric index called 'station'
    stations = stations.reset_index()
    stations.index.set_names('station', inplace=True)
    # assign the new coords
    da = da.assign_coords({'platform_id': stations.call})
    da.platform_id.attrs['standard_name'] = 'platform_id'

    # assign the new coords with numeric index
    da = da.assign_coords({'lat': stations.lat})
    da.lat.attrs['standard_name'] = 'latitude'
    da.lat.attrs['units'] = 'degrees_north'
    da = da.assign_coords({'lon': stations.lon})
    da.lon.attrs['standard_name'] = 'longitude'
    da.lon.attrs['units'] = 'degrees_east'
    # drop the numeric index;
    da = da.reset_index('station', drop=True)

    return da
Example #28
0
def main():  
    gdal.AllRegister()
    path = auxil.select_directory('Choose input directory')
    if path:
        os.chdir(path)        
#  input image    
    infile = auxil.select_infile(title='Choose image file') 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)     
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)      
    else:
        return  
    pos =  auxil.select_pos(bands)  
    if not pos:
        return
    N = len(pos) 
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b)) 
#  training data (shapefile)      
    trnfile = auxil.select_infile(filt='.shp',title='Choose train shapefile')
    if trnfile:
        trnDriver = ogr.GetDriverByName('ESRI Shapefile')
        trnDatasource = trnDriver.Open(trnfile,0)
        trnLayer = trnDatasource.GetLayer() 
        trnsr = trnLayer.GetSpatialRef()             
    else:
        return
#  hidden neurons
    L = auxil.select_integer(8,'number of hidden neurons')    
    if not L:
        return
#  outfile
    outfile, fmt = auxil.select_outfilefmt()   
    if not outfile:
        return     
#  coordinate transformation from training to image projection   
    ct= osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature()
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid) > K:
            K = int(classid)  # track the maximum CLASS_ID, not just the last one read
        feature = trnLayer.GetNextFeature()
    trnLayer.ResetReading()
    K += 1
    print '========================='
    print '       ffncg'
    print '========================='
    print time.asctime()    
    print 'image:    '+infile
    print 'training: '+trnfile          
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = feature.GetField('CLASS_ID')
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=np.int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1]
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]     
#  setup output dataset 
    driver = gdal.GetDriverByName(fmt)    
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
#  train on 9/10 training examples         
    Gstrn = Gs[0:9*m//10,:]
    lstrn = ls[0:9*m//10,:]
    affn = Ffncg(Gstrn,lstrn,L)
    print 'training on %i pixel vectors...' % np.shape(Gstrn)[0]
    start = time.time()
    epochs = 1000  # assumed epoch count; `epochs` is not defined in the original snippet
    cost = affn.train(epochs=epochs)
    print 'elapsed time %s' %str(time.time()-start) 
    if cost is not None:
#        cost = np.log10(cost)  
        ymax = np.max(cost)
        ymin = np.min(cost) 
        xmax = len(cost)      
        plt.plot(range(xmax),cost,'k')
        plt.axis([0,xmax,ymin-1,ymax])
        plt.title('Cross entropy')
        plt.xlabel('Epoch')              
#      classify the image           
        print 'classifying...'
        tile = np.zeros((cols,N))    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
            cls, _ = affn.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
        outBand.FlushCache()
        outDataset = None
        inDataset = None  
        print 'thematic map written to: ' + outfile
        print 'please close the cross entropy plot to continue'
        plt.show()
    else:
        print 'an error occurred'
        return 
    
    print 'submitting cross-validation to multyvac'    
    start = time.time()
    jid = mv.submit(traintst,Gs,ls,L,_layer='ms_image_analysis')  # traintst: cross-validation helper assumed defined elsewhere in the module
    print 'submission time: %s' %str(time.time()-start)
    start = time.time()    
    job = mv.get(jid)
    result = job.get_result(job) 
    
    
    print 'execution time: %s' %str(time.time()-start)      
    print 'misclassification rate: %f' %np.mean(result)
    print 'standard deviation:     %f' %np.std(result)         
    print '--------done---------------------'       
Example #29
0
def main():    
    usage = '''
Usage: 
---------------------------------------------------------
python %s [-p bandPositions] [-a algorithm] [-L number of hidden neurons]
[-P generate class probabilities image] filename trainShapefile

bandPositions is a list, e.g., -p [1,2,4]  

algorithm  1=MaxLike
           2=NNet(backprop)
           3=NNet(congrad)
           4=SVM

If the input file is named

         path/filebasename.ext then

the output classification file is named

         path/filebasename_class.ext

the class probabilities output file is named

         path/filebasename_classprobs.ext
         
and the test results file is named

         path/filebasename_<classifier>.tst
--------------------------------------------------------''' %sys.argv[0]
    options, args = getopt.getopt(sys.argv[1:],'hnPp:a:L:')
    pos = None
    probs = False   
    L = 8
    trainalg = 1
    graphics = True
    for option, value in options:
        if option == '-h':
            print usage
            return
        elif option == '-p':
            pos = eval(value)
        elif option == '-n':
            graphics = False            
        elif option == '-a':
            trainalg = eval(value)
        elif option == '-L':
            L = eval(value)    
        elif option == '-P':
            probs = True                              
    if len(args) != 2: 
        print 'Incorrect number of arguments'
        print usage
        sys.exit(1)      
    if trainalg == 1:
        algorithm = 'MaxLike'
    elif trainalg == 2:
        algorithm = 'NNet(Backprop)'
    elif trainalg == 3:
        algorithm =  'NNet(Congrad)'
    else:
        algorithm = 'SVM'              
    infile = args[0]  
    trnfile = args[1]      
    gdal.AllRegister() 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)    
    else:
        return  
    if pos is None: 
        pos = range(1,bands+1)
    N = len(pos)    
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b))     
#  output files
    path = os.path.dirname(infile)
    basename = os.path.basename(infile)
    root, ext = os.path.splitext(basename)
    outfile = '%s/%s_class%s'%(path,root,ext)  
    tstfile = '%s/%s_%s.tst'%(path,root,algorithm)            
    if (trainalg in (2,3,4)) and probs:
#      class probabilities file
        probfile = '%s/%s_classprobs%s'%(path,root,ext) 
    else:
        probfile = None        
#  training data        
    trnDriver = ogr.GetDriverByName('ESRI Shapefile')
    trnDatasource = trnDriver.Open(trnfile,0)
    trnLayer = trnDatasource.GetLayer() 
    trnsr = trnLayer.GetSpatialRef()             
#  coordinate transformation from training to image projection   
    ct = osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid)>K:
            K = int(classid)
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K += 1        
#  here we go
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()    
    print 'image:     '+infile
    print 'training:  '+trnfile  
    print 'algorithm: '+algorithm             
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname  = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   '+ classname
            classids.add(classid)  # add the id string itself; set(classid) would split it into characters
#      label for this ROI           
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=np.int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    classnames += '}'
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1] for ffn
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0 
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]     
#  train on 2/3 training examples         
    Gstrn = Gs[0:2*m//3,:]
    lstrn = ls[0:2*m//3,:] 
    Gstst = Gs[2*m//3:,:]  
    lstst = ls[2*m//3:,:]               
#  setup output datasets 
    driver = inDataset.GetDriver() 
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
    if probfile:   
        probDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)  
        probBands = [] 
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k+1))         
#  initialize classifier  
    if   trainalg == 1:
        classifier = sc.Maxlike(Gstrn,lstrn)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gstrn,lstrn,L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gstrn,lstrn,L)
    elif trainalg == 4:
        classifier = sc.Svm(Gstrn,lstrn)         
#  train it            
    print 'training on %i pixel vectors...' % np.shape(Gstrn)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' %str(time.time()-start) 
    if result:
        if (trainalg in [2,3]) and graphics:
            cost = np.log10(result)  
            ymax = np.max(cost)
            ymin = np.min(cost) 
            xmax = len(cost)      
            plt.plot(range(xmax),cost,'k')
            plt.axis([0,xmax,ymin-1,ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')              
#      classify the image           
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols,N),dtype=np.float32)    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0               
            cls, Ms = classifier.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
            if probfile:
                Ms = np.byte(Ms*255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k,:],(1,cols)),0,row)
        outBand.FlushCache()
        print 'elapsed time %s' %str(time.time()-start)
        outDataset = None
        inDataset = None      
        if probfile:
            for probBand in probBands:
                probBand.FlushCache() 
            probDataset = None
            print 'class probabilities written to: %s'%probfile   
        K =  lstrn.shape[1]+1                     
        print 'thematic map written to: %s'%outfile
        if trainalg in [2,3]:
            plt.show()
        if tstfile:
            with open(tstfile,'w') as f:               
                print >>f, algorithm + ' test results for %s'%infile
                print >>f, time.asctime()
                print >>f, 'Classification image: %s'%outfile
                print >>f, 'Class probabilities image: %s'%probfile
                print >>f, lstst.shape[0],lstst.shape[1]
                classes, _ = classifier.classify(Gstst)
                labels = np.argmax(lstst,axis=1)+1
                for i in range(len(classes)):
                    print >>f, classes[i], labels[i]              
            print 'test results written to: %s'%tstfile
        print 'done'
    else:
        print 'an error occurred'
        return 
Example #30
0
def main():    
    usage = '''
Usage: 
---------------------------------------------------------
python %s [-p bandPositions] [-a algorithm] [-L number of hidden neurons]
[-P generate class probabilities image] filename trainShapefile

bandPositions is a list, e.g., -p [1,2,4]  

algorithm  1=MaxLike
           2=NNet(backprop)
           3=NNet(congrad)
           4=SVM

If the input file is named

         path/filebasename.ext then

the output classification file is named

         path/filebasename_class.ext

the class probabilities output file is named

         path/filebasename_classprobs.ext
         
and the test results file is named

         path/filebasename_<classifier>.tst
--------------------------------------------------------''' %sys.argv[0]
    options, args = getopt.getopt(sys.argv[1:],'hnPp:a:L:')
    pos = None
    probs = False   
    L = 8
    graphics = True
    trainalg = 1
    for option, value in options:
        if option == '-h':
            print usage
            return
        elif option == '-p':
            pos = eval(value)
        elif option == '-n':
            graphics = False            
        elif option == '-a':
            trainalg = eval(value)
        elif option == '-L':
            L = eval(value)    
        elif option == '-P':
            probs = True                              
    if len(args) != 2: 
        print 'Incorrect number of arguments'
        print usage
        sys.exit(1)      
    if trainalg == 1:
        algorithm = 'MaxLike'
    elif trainalg == 2:
        algorithm = 'NNet(Backprop)'
    elif trainalg == 3:
        algorithm =  'NNet(Congrad)'
    elif trainalg == 4:
        algorithm = 'SVM'              
    infile = args[0]  
    trnfile = args[1]      
    gdal.AllRegister() 
    if infile:                   
        inDataset = gdal.Open(infile,GA_ReadOnly)
        cols = inDataset.RasterXSize
        rows = inDataset.RasterYSize    
        bands = inDataset.RasterCount
        projection = inDataset.GetProjection()
        geotransform = inDataset.GetGeoTransform()
        if geotransform is not None:
            gt = list(geotransform) 
        else:
            print 'No geotransform available'
            return       
        imsr = osr.SpatialReference()  
        imsr.ImportFromWkt(projection)    
    else:
        return  
    if pos is None: 
        pos = range(1,bands+1)
    N = len(pos)    
    rasterBands = [] 
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b))     
#  output files
    path = os.path.dirname(infile)
    basename = os.path.basename(infile)
    root, ext = os.path.splitext(basename)
    outfile = '%s/%s_class%s'%(path,root,ext)  
    tstfile = '%s/%s_%s.tst'%(path,root,algorithm)            
    if (trainalg in (2,3,4)) and probs:
#      class probabilities file
        probfile = '%s/%s_classprobs%s'%(path,root,ext) 
    else:
        probfile = None        
#  training data        
    trnDriver = ogr.GetDriverByName('ESRI Shapefile')
    trnDatasource = trnDriver.Open(trnfile,0)
    trnLayer = trnDatasource.GetLayer() 
    trnsr = trnLayer.GetSpatialRef()             
#  coordinate transformation from training to image projection   
    ct = osr.CoordinateTransformation(trnsr,imsr) 
#  number of classes    
    K = 1
    feature = trnLayer.GetNextFeature() 
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid)>K:
            K = int(classid)
        feature = trnLayer.GetNextFeature() 
    trnLayer.ResetReading()    
    K += 1       
#  here we go
    print '========================='
    print 'supervised classification'
    print '========================='
    print time.asctime()    
    print 'image:     '+infile
    print 'training:  '+trnfile  
    print 'algorithm: '+algorithm             
#  loop through the polygons    
    Gs = [] # train observations
    ls = [] # class labels
    classnames = '{unclassified'
    classids = set()
    print 'reading training data...'
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname  = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames += ',   '+ classname
            classids.add(classid)  # add the id string itself; set(classid) would split it into characters
#      label for this ROI           
        l = [0 for i in range(K)]
        l[int(classid)] = 1.0
        polygon = feature.GetGeometryRef()
#      transform to same projection as image        
        polygon.Transform(ct)  
#      convert to a Shapely object            
        poly = shapely.wkt.loads(polygon.ExportToWkt())
#      transform the boundary to pixel coords in numpy        
        bdry = np.array(poly.boundary) 
        bdry[:,0] = bdry[:,0]-gt[0]
        bdry[:,1] = bdry[:,1]-gt[3]
        GT = np.mat([[gt[1],gt[2]],[gt[4],gt[5]]])
        bdry = bdry*np.linalg.inv(GT) 
#      polygon in pixel coords        
        polygon1 = asPolygon(bdry)
#      raster over the bounding rectangle        
        minx,miny,maxx,maxy = map(int,list(polygon1.bounds))  
        pts = [] 
        for i in range(minx,maxx+1):
            for j in range(miny,maxy+1): 
                pts.append((i,j))             
        multipt =  MultiPoint(pts)   
#      intersection as list              
        intersection = np.array(multipt.intersection(polygon1),dtype=np.int).tolist()
#      cut out the bounded image cube               
        cube = np.zeros((maxy-miny+1,maxx-minx+1,len(rasterBands)))
        k=0
        for band in rasterBands:
            cube[:,:,k] = band.ReadAsArray(minx,miny,maxx-minx+1,maxy-miny+1)
            k += 1
#      get the training vectors
        for (x,y) in intersection:         
            Gs.append(cube[y-miny,x-minx,:])
            ls.append(l)   
        polygon = None
        polygon1 = None            
        feature.Destroy()  
    trnDatasource.Destroy() 
    classnames += '}'
    m = len(ls)       
    print str(m) + ' training pixel vectors were read in' 
    Gs = np.array(Gs) 
    ls = np.array(ls)
#  stretch the pixel vectors to [-1,1] (for ffn)
    maxx = np.max(Gs,0)
    minx = np.min(Gs,0)
    for j in range(N):
        Gs[:,j] = 2*(Gs[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0   
#  random permutation of training data
    idx = np.random.permutation(m)
    Gs = Gs[idx,:] 
    ls = ls[idx,:]             
#  setup output datasets 
    driver = inDataset.GetDriver() 
    outDataset = driver.Create(outfile,cols,rows,1,GDT_Byte) 
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    if geotransform is not None:
        outDataset.SetGeoTransform(tuple(gt))
    if projection is not None:
        outDataset.SetProjection(projection) 
    outBand = outDataset.GetRasterBand(1) 
    if probfile:   
        probDataset = driver.Create(probfile,cols,rows,K,GDT_Byte) 
        if geotransform is not None:
            probDataset.SetGeoTransform(tuple(gt))
        if projection is not None:
            probDataset.SetProjection(projection)  
        probBands = [] 
        for k in range(K):
            probBands.append(probDataset.GetRasterBand(k+1))         
#  initialize classifier  
    if   trainalg == 1:
        classifier = sc.Maxlike(Gs,ls)
    elif trainalg == 2:
        classifier = sc.Ffnbp(Gs,ls,L)
    elif trainalg == 3:
        classifier = sc.Ffncg(Gs,ls,L)
    elif trainalg == 4:
        classifier = sc.Svm(Gs,ls)         
#  train it            
    print 'training on %i pixel vectors...' % np.shape(Gs)[0]
    start = time.time()
    result = classifier.train()
    print 'elapsed time %s' %str(time.time()-start) 
    if result:
        if (trainalg in [2,3]) and graphics:
            cost = np.log10(result)  
            ymax = np.max(cost)
            ymin = np.min(cost) 
            xmax = len(cost)      
            plt.plot(range(xmax),cost,'k')
            plt.axis([0,xmax,ymin-1,ymax])
            plt.title('Log(Cross entropy)')
            plt.xlabel('Epoch')   
            plt.show()
#      classify the image           
        print 'classifying...'
        start = time.time()
        tile = np.zeros((cols,N),dtype=np.float32)    
        for row in range(rows):
            for j in range(N):
                tile[:,j] = rasterBands[j].ReadAsArray(0,row,cols,1)
                tile[:,j] = 2*(tile[:,j]-minx[j])/(maxx[j]-minx[j]) - 1.0               
            cls, Ms = classifier.classify(tile)  
            outBand.WriteArray(np.reshape(cls,(1,cols)),0,row)
            if probfile:
                Ms = np.byte(Ms*255)
                for k in range(K):
                    probBands[k].WriteArray(np.reshape(Ms[k,:],(1,cols)),0,row)
        outBand.FlushCache()
        print 'elapsed time %s' %str(time.time()-start)
        outDataset = None
        inDataset = None      
        if probfile:
            for probBand in probBands:
                probBand.FlushCache() 
            probDataset = None
            print 'class probabilities written to: %s'%probfile   
        K =  ls.shape[1]+1                     
        print 'thematic map written to: %s'%outfile
    else:
        print 'an error occurred'
        return 
#  cross-validation
    start = time.time()
    rc = Client()   
    print 'submitting cross-validation to %i IPython engines'%len(rc)  
    m = np.shape(Gs)[0]
    traintest = []
    for i in range(10):
        sl = slice(i*m//10,(i+1)*m//10)
        traintest.append( (np.delete(Gs,sl,0),np.delete(ls,sl,0), \
                                     Gs[sl,:],ls[sl,:],L,trainalg) )
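    # each tuple is one fold: (train data, train labels, held-out data, held-out
    # labels, L, trainalg); np.delete drops the i-th tenth, which is the test slice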
    v = rc[:]   
    v.execute('import auxil.supervisedclass as sc') 
    result = v.map(crossvalidate,traintest).get()   
    print 'parallel execution time: %s' %str(time.time()-start)      
    print 'misclassification rate: %f' %np.mean(result)
    print 'standard deviation:     %f' %np.std(result)         
                points.append(l)
            a = f2.readline().split(sep = '\t')

            points1 = list()
            a[:4] = [int(v) for v in a[:4]]
            points1.append([a[0], a[1]])
            points1.append([a[2], a[1]])
            points1.append([a[2], a[3]])
            points1.append([a[0], a[3]])
            polygon = MultiPoint(points).convex_hull
            polygon1 = MultiPoint(points1).convex_hull
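            # polygon1 is the axis-aligned box (a[0], a[1])-(a[2], a[3]); the ratio
            # below is the fraction of that box covered by the hull of the points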
            intersect = polygon.intersection(polygon1).area
            if polygon1.area>0:
                probability = intersect/polygon1.area
            else:
                probability = 0
            f1.write(str(probability)+'\n')
        else:
            src_pts = np.float32([x.pt for x in matched1]).reshape(-1, 1, 2)
            dst_pts = np.float32([x.pt for x in matched2]).reshape(-1, 1, 2)
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 6.0)
            matchesMask = mask.ravel().tolist()

            pts = np.float32([[167, 290], [167, 1016], [719, 1016], [719, 290]]).reshape(-1, 1, 2)
            img1 = cv.polylines(img1, [np.int32(pts)], True, 255, 3, cv.LINE_AA)
            dst = cv.perspectiveTransform(pts, M)
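            # dst is the reference quadrilateral pts mapped into the second image by
            # the homography M estimated with RANSAC (6.0-pixel reprojection threshold)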