Example #1
from shapely.geometry import Point, box

def solve(f, R, t, r, g):
    # outer disc, approximated by a high-resolution buffer (4096 segments per quarter)
    outerCircle = Point(0, 0).buffer(R, 1 << 12)
    # inner disc; subtracting it below leaves a ring of width t + f
    innerCircle = Point(0, 0).buffer(R - t - f, 1 << 12)
    # vertical and horizontal bars spanning the disc
    bars = []
    leftMin = -r - (2 * r + g) * (int(R / (2 * r + g)) + 1)
    left = leftMin
    while left < R:
        bars.append(box(left, -R, left + 2 * r + 2 * f, R))
        left += 2 * r + g

    bottom = leftMin
    while bottom < R:
        bars.append(box(-R, bottom, R, bottom + 2 * r + 2 * f))
        bottom += 2 * r + g

    # union of the ring and all bars
    union = outerCircle.difference(innerCircle)
    for bar in bars:
        union = union.union(bar)
    # clip the pattern back to the outer circle
    finalPattern = union.intersection(outerCircle)
    # area ratio of the pattern to the full disc
    result = finalPattern.area / outerCircle.area
    return '%.6f' % result
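A quick way to exercise solve() is a direct call; the parameter meanings (R outer radius, t ring thickness, r bar half-width, g gap, f a small line-width allowance) are inferred from the geometry construction above, and the values below are arbitrary:

if __name__ == '__main__':
    # arbitrary positive test values; g > 0 guarantees the bar loops terminate
    print(solve(0.1, 10.0, 1.0, 0.5, 2.0))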
Example #3
def comparer():
    '''
    Create an image and compare the interpolation from different methods
    '''
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    my_sgeom = SGeom(box(-100, -50, 100, 50))
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    x_s, y_s = img.arrf.sampling_vectors()
    r, c = img.xy_to_rc(100, 50)
    interp = interp2d(x_s, y_s, np.flipud(img.arr))
    bivar1 = RectBivariateSpline(x=x_s, y=y_s, z=np.flipud(img.arr))
    bivar2 = RectBivariateSpline(x=y_s, y=x_s, z=np.flipud(img.arr))
    print(interp(100, 50))
    print(img.arr[r, c])
    print(bivar1(50, 100), bivar1(100, 50))
    print(bivar2(50, 100), bivar2(100, 50))
    img.plot()
    plt.scatter(100, 50)
    plt.show()
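The bivar1/bivar2 pair above probes which RectBivariateSpline argument maps to which array axis. A self-contained sketch of the convention, independent of the project-specific image classes:

import numpy as np
from scipy.interpolate import RectBivariateSpline

rows = np.arange(4, dtype=float)   # first array axis
cols = np.arange(5, dtype=float)   # second array axis
arr = rows[:, None] * 10 + cols[None, :]
spline = RectBivariateSpline(rows, cols, arr)
print(spline(2, 3))  # [[23.]] -- the first argument indexes the first array axis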
Example #4
def tile_to_bbox(zoom, x, y):
    """
    Transform a TMS/Slippy Map style tile protocol (z/x/y) to a web mercator
    bounding box.

    Ref:
        https://github.com/IzAndCuddles/gdal2tiles/blob/structure/gdal2tiles.py#L120-L146  # noqa

    Args:
        zoom (int): Zoom level for the tile
        x (int): x coordinate of tile origin
        y (int): y coordinate of tile origin

    Returns:
        A Shapely geometry in EPSG:3857 defining the bounding box of the requested
        tile
    """
    map_size = 20037508.34789244 * 2
    origin_x = -20037508.34789244
    origin_y = 20037508.34789244
    size = map_size / 2**zoom

    min_x = origin_x + x * size
    min_y = origin_y - (y + 1) * size
    max_x = origin_x + (x + 1) * size
    max_y = origin_y - y * size

    return box(min_x, min_y, max_x, max_y, ccw=False)
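At zoom 0 the single tile covers the whole web-mercator square, which gives an easy sanity check (assuming shapely's box is imported, as the function requires):

print(tile_to_bbox(0, 0, 0).bounds)
# (-20037508.34789244, -20037508.34789244, 20037508.34789244, 20037508.34789244)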
Example #5
def subdivide_polygon(geom, factor):
    """
    Divide a geometry such that no piece is greater than the size of
    `factor`, in units of the coordinate system.

    Args:
        geom: GeoJson-like polygon to subdivide
        factor: The number of SRS units to divide the geom bounds by,
            to provide subgeometries who's extent does not exceed that size.
    Returns:
        List of GeoJson-like polygons that `geom` is composed of
    """
    bounds = np.asarray(geom.bounds)
    xmin, ymin, xmax, ymax = np.floor_divide(bounds, factor).astype(int)

    children = []
    for i in range(xmin, xmax + 1):
        for j in range(ymin, ymax + 1):
            sub_poly = box(i * factor, j * factor, (i + 1) * factor,
                           (j + 1) * factor)
            overlap_poly = geom.intersection(sub_poly)

            if not overlap_poly.is_empty:
                children.append(overlap_poly)

    return children
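For example, a 25x25 square with factor=10 spans grid cells 0..2 on each axis and comes back as nine clipped pieces (assuming numpy is imported as np, as the function body requires):

from shapely.geometry import box

pieces = subdivide_polygon(box(0, 0, 25, 25), 10)
print(len(pieces))  # 9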
Example #6
File: test_nc.py  Project: NCPP/ocgis
    def test_system_rotated_pole_spherical_subsetting(self):
        """Test rotated pole coordinates are left alone during a subset (no mask applied)."""

        def _run_mask_test_(target):
            for vn in ['rlat', 'rlon']:
                self.assertIsNone(target[vn].get_mask())

        rd = RequestDataset(metadata=self.fixture_rotated_spherical_metadata)
        field = rd.create_field()
        _run_mask_test_(field)
        for ctr, vn in enumerate(['lat', 'lon', 'rlat', 'rlon']):
            var = field[vn]
            var.get_value()[:] = np.arange(var.size).reshape(var.shape) + (ctr * 10)
        path = self.get_temporary_file_path('foo.nc')
        field.write(path)

        new_field = RequestDataset(path).create_field()
        _run_mask_test_(new_field)
        subset_geom = box(*new_field.grid.extent)
        subset_field = new_field.grid.get_intersects(subset_geom, optimized_bbox_subset=True).parent
        _run_mask_test_(subset_field)

        path2 = self.get_temporary_file_path('foo2.nc')
        subset_field.write(path2)
        in_subset_field = RequestDataset(path2).create_field()
        _run_mask_test_(in_subset_field)
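The box(*new_field.grid.extent) idiom above rebuilds a rectangle from a 4-tuple of bounds; a minimal standalone illustration with plain shapely:

from shapely.geometry import Point, box

geom = Point(2, 3).buffer(1.0)
print(box(*geom.bounds).bounds)  # (1.0, 2.0, 3.0, 4.0), up to floating-point error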
Example #7
 def test_filtered_read_file_with_gdf_boundary(self):
     full_df_shape = self.df.shape
     nybb_filename = geopandas.datasets.get_path('nybb')
     bbox = geopandas.GeoDataFrame(geometry=[
         box(1031051.7879884212, 224272.49231459625, 1047224.3104931959,
             244317.30894023244)
     ],
                                   crs=self.crs)
     filtered_df = read_file(nybb_filename, bbox=bbox)
     filtered_df_shape = filtered_df.shape
     assert full_df_shape != filtered_df_shape
     assert filtered_df_shape == (2, 5)
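For reference, read_file also accepts a plain (minx, miny, maxx, maxy) tuple as the bbox filter, so the same subset can be requested without building a GeoDataFrame; a sketch against the same nybb dataset (bundled with older geopandas releases):

import geopandas
from geopandas import read_file

nybb = geopandas.datasets.get_path('nybb')
df = read_file(nybb, bbox=(1031051.79, 224272.49, 1047224.31, 244317.31))
print(df.shape)  # expected (2, 5), matching the assertion above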
Example #8
def create_rect():
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    my_sgeom = SGeom(box(-100, -50, 100, 50))
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    img.plot()
    plt.show()
Example #9
def create_RGInterp():
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    my_sgeom = SGeom(box(-100, -50, 100, 50))
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    x, y = img.arrf.sampling_vectors()
    img_xy = interpolate.RegularGridInterpolator((x, y), np.flipud(img.arr))
    return img_xy
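RegularGridInterpolator is evaluated at (x, y) points rather than on a grid; a self-contained sketch of the call convention for the object returned above:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
z = np.outer(x, y)                    # z[i, j] = x[i] * y[j]
interp = RegularGridInterpolator((x, y), z)
print(interp([[0.5, 0.5]]))           # [0.25]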
Example #10
def interp_array():
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    my_sgeom = SGeom(box(-100, -50, 100, 50))
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    x_s, y_s = img.arrf.sampling_vectors()
    bivar = RectBivariateSpline(x=x_s, y=y_s, z=np.flipud(img.arr))
    x_n = np.linspace(0, 100, 10)
    y_n = np.linspace(100, 200, 10)
    print(bivar(x_n, y_n, grid=False))
    print(bivar(0, y_n))
Example #11
def regu_interp():
    # Comparison between RegularGridInterpolator and RectBivariateSpline
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    my_sgeom = SGeom(box(-100, -50, 100, 50))
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    x_s, y_s = img.arrf.sampling_vectors()
    reg = RegularGridInterpolator((x_s, y_s), np.flipud(img.arr))
    bivar = RectBivariateSpline(x=x_s, y=y_s, z=np.flipud(img.arr))
    spl = initialize(40)
    pts = spl(np.linspace(0, 1, 10))
    print(pts[0])
    print(bivar(245, 244))
    print(reg([245, 244]))
Example #12
def create_2ob():
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    R = 25
    my_sgeom_1 = circle(R, -100, 0)
    my_sgeom_2 = SGeom(box(0, 0, 100, 50))
    my_geom = MultiPolygon([my_sgeom_1.geom, my_sgeom_2.geom])
    my_sgeom = SGeom(my_geom, label='circle_square')
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    gcoll = GeomCollection()
    gcoll.set_geom(0, my_sgeom)
    imsh = ImShape(image=img, geom_coll=gcoll)
    imsh.plot()
    plt.show()
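The MultiPolygon above merges the circle and the square into one geometry; a minimal standalone equivalent with plain shapely parts (the project-specific circle() helper replaced by a point buffer):

from shapely.geometry import MultiPolygon, Point, box

circle_part = Point(-100, 0).buffer(25)
square_part = box(0, 0, 100, 50)
mp = MultiPolygon([circle_part, square_part])
print(len(mp.geoms), mp.is_valid)  # 2 True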
Example #13
def create_rect():
    '''
    Create an image in which the maximum-value pixels form a rectangle
    '''
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    my_sgeom = SGeom(box(-100, -50, 100, 50))
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    x, y = img.arrf.sampling_vectors()
    img_xy = interpolate.RectBivariateSpline(x, y, np.flipud(img.arr))
    gcoll = GeomCollection()
    gcoll.set_geom(0, my_sgeom)
    imsh = ImShape(image=img, geom_coll=gcoll)

    return img_xy, imsh
Example #14
def create_2ob():
    '''
    Create an image in which the maximum-value pixels form two objects
    '''
    img_shape = (512, 512)
    frame_bounds = [-256, 256, -256, 256]
    R = 25
    my_sgeom_1 = circle(R, -100, 0)
    my_sgeom_2 = SGeom(box(0, 0, 100, 50))
    my_geom = MultiPolygon([my_sgeom_1.geom, my_sgeom_2.geom])
    my_sgeom = SGeom(my_geom, label='circle_square')
    strat = lscad.MountainStrategy()
    param_dict = {'hmax': 100., 'sigma': (0, 0)}
    img = fk_img.img_from_sgeom(img_shape,
                                frame_bounds,
                                my_sgeom,
                                z_map_strategy=strat,
                                param_dict=param_dict)
    x, y = img.arrf.sampling_vectors()
    img_xy = interpolate.RectBivariateSpline(x, y, np.flipud(img.arr))
    gcoll = GeomCollection()
    gcoll.set_geom(0, my_sgeom)
    imsh = ImShape(image=img, geom_coll=gcoll)
    return img_xy, imsh
Example #15
def process(vector_path, height, width, transform, NODATA=None):

    # Spatial index to efficiently find triangles
    p = index.Property()
    idx = index.Index(properties=p, interleaved=True)

    polys = []

    def add_triangle(p1, p2, p3):

        # create plane from points
        # from http://kitchingroup.cheme.cmu.edu/blog/2015/01/18/Equation-of-a-plane-through-three-points/

        # convert to numpy vectors
        p1 = np.array([p1[0], p1[1], p1[2]])
        p2 = np.array([p2[0], p2[1], p2[2]])
        p3 = np.array([p3[0], p3[1], p3[2]])

        v1 = p3 - p1
        v2 = p2 - p1

        cp = np.cross(v1, v2)
        a, b, c = cp
        d = np.dot(cp, p3)

        # create 2d poly
        poly = Polygon([(p1[0], p1[1]), (p2[0], p2[1]), (p3[0], p3[1])])

        # skip degenerate (zero-area) triangles
        if poly.area > 0:
            pid = len(polys)
            polys.append(poly)

            idx.insert(pid, poly.bounds, obj=(poly, (a, b, c, d)))

    # Read vector data only for selected rectangle
    xmin, ymin = (0, 0) * transform
    xmax, ymax = (width, height) * transform

    ds = ogr.Open(vector_path)
    layer = ds.GetLayer(0)
    layer.SetSpatialFilterRect(xmin, ymin, xmax, ymax)

    for feature in layer:
        geom = feature.GetGeometryRef()

        if geom.GetGeometryType() == ogr.wkbTINZ:
            for triangle in geom:
                for ring in triangle:
                    assert ring.GetPointCount() == 4
                    assert ring.GetPoint(0) == ring.GetPoint(3)
                    add_triangle(p1=ring.GetPoint(0),
                                 p2=ring.GetPoint(1),
                                 p3=ring.GetPoint(2))

        elif geom.GetGeometryName() == "MULTIPOLYGON":
            for poly in geom:
                for ring in poly:
                    # Check assumptions: each polygon is a triangle
                    assert ring.GetPointCount() == 4
                    assert ring.GetPoint(0) == ring.GetPoint(3)
                    add_triangle(p1=ring.GetPoint(0),
                                 p2=ring.GetPoint(1),
                                 p3=ring.GetPoint(2))
        else:
            print("?", geom.GetGeometryName(),
                  feature.GetGeometryRef().ExportToWkt())

    # Interpolate

    res = np.zeros((width, height))

    for h in range(height):
        for w in range(width):

            x, y = (h + 0.5, w + 0.5) * transform

            _xmin, _ymin = (h, w) * transform
            _xmax, _ymax = (h + 1, w + 1) * transform
            _geom = box(_xmin, _ymin, _xmax, _ymax).centroid

            hits = idx.intersection(_geom.bounds, objects=True)

            z = float('-inf')

            # We select the highest z value for each triangle x,y intersects
            for hit in hits:
                if hit.object[0].intersects(_geom):

                    a, b, c, d = hit.object[1]
                    if c != 0.0:
                        _z = (d - a * x - b * y) / c
                        z = max(_z, z)
                    else:
                        print(hits)

            if z == float('-inf'):
                z = NODATA
            res[w, h] = z

    return res
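Each pixel's z comes from the plane equation ax + by + cz = d built in add_triangle, solved for z. A small numeric check of that derivation, on a plane where z = x + 2*y:

import numpy as np

p1, p2, p3 = np.array([0., 0., 0.]), np.array([1., 0., 1.]), np.array([0., 1., 2.])
cp = np.cross(p3 - p1, p2 - p1)   # plane normal, as in add_triangle
a, b, c = cp
d = np.dot(cp, p3)
x, y = 0.25, 0.25
print((d - a * x - b * y) / c)    # 0.75 = 0.25 + 2 * 0.25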
Example #16
def predict_patch(input_image,
                  model,
                  output_dir,
                  input_dir,
                  batch_size=2,
                  input_size=299,
                  threshold=0.5,
                  num_workers=1,
                  remove_tiles=False,
                  hist=False):
    """
    Patch prediction function. Outputs shapefiles for counts and locations.

    :param input_image: filename raster image from tile_raster
    :param model: pytorch model
    :param input_dir: directory with input tiles
    :param output_dir: output directory name
    :param batch_size: number of images per mini-batch
    :param input_size: size of input images
    :param threshold: threshold for occupancy
    :param num_workers: number of workers on dataloader
    :param remove_tiles: Remove the tiles folder from the filesystem.
    :param hist: if True, also write per-patch bounding boxes with predicted
        counts to the output shapefile.
    :return:
    """

    # crop and normalize images
    data_transforms = transforms.Compose([
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485], [0.229])
    ])

    # load dataset
    dataset = ImageFolderTest(input_dir, data_transforms)

    # separate into batches with dataloader
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=num_workers)

    # check for GPU support
    use_gpu = torch.cuda.is_available()

    # store predictions and filenames
    predicted_cnts = []
    predicted_locs = []
    fnames = []

    # set training flag to False
    model.train(False)

    # keep track of running time
    since = time.time()
    sigmoid = torch.nn.Sigmoid()

    with torch.no_grad():

        for data in dataloader:

            # get the inputs
            inputs, filenames = data

            # gpu support
            if use_gpu:
                inputs = inputs.cuda()

            # do a forward pass to get predictions
            # detection models

            out_dict = model(inputs)
            # if statement prevents iterations over 0-d tensors
            if out_dict['count'].dim() != 0:
                locs = out_dict['heatmap'].cpu().detach()
                counts = out_dict['count']
                if 'occupancy' in out_dict:
                    # move the occupancy mask to the same device as counts,
                    # so this also works without a GPU
                    counts = counts * torch.Tensor([
                        ele > threshold
                        for ele in sigmoid(out_dict['occupancy'])
                    ]).to(counts.device)
                pred_cnt_batch = [
                    round(float(count.item())) for count in counts
                ]
                # find predicted location
                locs = [
                    get_xy_locs(loc, max(0, int(pred_cnt_batch[idx])))
                    for idx, loc in enumerate(locs.numpy())
                ]
                # save batch predictions
                predicted_cnts.extend(pred_cnt_batch)
                predicted_locs.extend(locs)
                # add filename
                fnames.extend(filenames)

    pred_locations = {'x': [], 'y': [], 'filenames': []}

    for idx, batch in enumerate(predicted_locs):
        for pnt in batch:
            pred_locations['x'].append(pnt[0])
            pred_locations['y'].append(pnt[1])
            pred_locations['filenames'].append(fnames[idx])
    pred_locations = pd.DataFrame(pred_locations)

    pred_counts = pd.DataFrame({
        'predictions': [ele for ele in predicted_cnts],
        'filenames': [ele for ele in fnames]
    })

    time_elapsed = time.time() - since
    print('Testing complete in %dh %dm %ds' %
          (time_elapsed // 3600, (time_elapsed % 3600) // 60,
           time_elapsed % 60))

    # save shapefile for counts / classes
    shapefile_path = '%s/predicted_shapefiles/' % output_dir
    os.makedirs(shapefile_path)

    # load affine matrix
    affine_matrix = rasterio.Affine(*[
        ele for ele in pd.read_csv('%s/affine_matrix.csv' %
                                   (input_dir))['transform']
    ])

    # create geopandas DataFrames to store counts per patch and seal locations
    output_shpfile = gpd.GeoDataFrame()

    # setup projection for output
    output_shpfile.crs = from_epsg(3031)

    if hist:
        # generate empty rows
        for idx, fname in enumerate(fnames):
            up, left, down, right = [
                int(ele) for ele in fname.split('_')[-5:-1]
            ]
            coords = [
                point * affine_matrix
                for point in [[down, left], [down, right], [up, left],
                              [up, right]]
            ]
            output_shpfile = output_shpfile.append(
                pd.Series(
                    {
                        'geometry':
                        box(minx=min([point[0] for point in coords]),
                            miny=min([point[1] for point in coords]),
                            maxx=max([point[0] for point in coords]),
                            maxy=max([point[1] for point in coords])),
                        'count':
                        0
                    },
                    name=fname))

        # add predicted counts
        for row in pred_counts.iterrows():
            fname = row[1]['filenames']
            output_shpfile.loc[fname, 'count'] += row[1]['predictions']
        output_shpfile.to_file(shapefile_path + 'prediction.shp')
    print('Writing Prediction')
    if len(pred_locations) > 0:
        # create geopandas DataFrame to store classes and counts per patch
        output_shpfile_locs = gpd.GeoDataFrame()

        # setup projection for output
        output_shpfile_locs.crs = from_epsg(3031)

        # add locations
        for row in pred_locations.iterrows():
            fname = row[1]['filenames']
            up, left, down, right = [
                int(ele) for ele in fname.split('_')[-5:-1]
            ]
            x, y = [up + row[1]['y'], left + row[1]['x']]
            x_loc, y_loc = [x, y] * affine_matrix
            output_shpfile_locs = output_shpfile_locs.append(pd.Series({
                'geometry':
                Point(x_loc, y_loc),
                'x':
                x,
                'y':
                y
            }),
                                                             ignore_index=True)

        # add scene name
        output_shpfile_locs = output_shpfile_locs.join(
            pd.DataFrame({'scene': [input_image] * len(output_shpfile_locs)}))

        # save shapefile
        output_shpfile_locs.to_file(shapefile_path + 'locations.shp')

        # remove tiles
        if remove_tiles:
            shutil.rmtree('{}/'.format(input_dir))

    print('Total predicted in %s: ' % os.path.basename(input_dir),
          sum(pred_counts['predictions']))
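The point * affine_matrix products above use the reflected form of rasterio's Affine multiplication; the canonical direction is transform * (col, row). A hypothetical transform to illustrate:

import rasterio

t = rasterio.Affine(0.5, 0.0, 100.0, 0.0, -0.5, 200.0)  # hypothetical geotransform
print(t * (10, 20))  # (105.0, 190.0)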
Example #17
def main():
    # unroll arguments
    args = parser.parse_args()
    input_image = args.input_image
    output_folder = args.dest_folder
    scales = [model_archs[args.class_architecture]['input_size']] * 3
    output_folder = './{}/tiles/images/'.format(output_folder)

    # check for pre-existing tiles and sub-tiles
    if os.path.exists('./{}/tiles'.format(args.dest_folder)):
        shutil.rmtree('./{}/tiles'.format(args.dest_folder))

    if os.path.exists('./{}/sub-tiles'.format(args.dest_folder)):
        shutil.rmtree('./{}/sub-tiles'.format(args.dest_folder))

    # tile raster into patches
    tile_raster(input_image, output_folder, scales)

    print('\nPredicting with {}:'.format(os.path.basename(args.dest_folder)))

    # create geopandas DataFrame for storing output
    output_shpfile = gpd.GeoDataFrame()
    # setup projection for output
    output_shpfile.crs = from_epsg(3031)

    # get affine matrix for raster file
    with rasterio.open(input_image) as src:
        affine_matrix = affine.Affine.from_gdal(*src.transform)

    # generate empty rows
    fnames = [
        ele
        for ele in os.listdir('./{}/tiles/images/'.format(args.dest_folder))
    ]
    for fname in fnames:
        up, left, down, right = [int(ele) for ele in fname.split('_')[-5:-1]]
        coords = [
            point * affine_matrix
            for point in [[down, left], [down, right], [up, left], [up, right]]
        ]
        output_shpfile = output_shpfile.append(
            pd.Series(
                {
                    'label':
                    'open-water',
                    'geometry':
                    box(minx=min([point[0] for point in coords]),
                        miny=min([point[1] for point in coords]),
                        maxx=max([point[0] for point in coords]),
                        maxy=max([point[1] for point in coords])),
                    'count':
                    0
                },
                name=fname))

    # find class names
    class_names = sorted([
        subdir for subdir in os.listdir('./training_sets/{}/training'.format(
            args.training_dir))
    ])

    # treat all patches as positive if skipping classification
    if args.skip_to_count == '1':
        pos_patches = fnames

    else:
        # check if patches were already classified with class architecture
        preds_path = './{}/ClassPredictions/{}/{}/class_predictions.csv'.format(
            args.dest_folder.split('/')[0], args.class_architecture,
            os.path.basename(args.input_image))
        if os.path.exists(preds_path):
            predictions = pd.read_csv(preds_path)

        else:
            # find patches with seals with classification CNN
            num_classes = training_sets[args.training_dir]['num_classes']
            model = model_defs['Pipeline1'][args.class_architecture](
                num_classes)

            use_gpu = torch.cuda.is_available()
            if use_gpu:
                model.cuda()
            model.eval()

            # load saved model weights from pt_train.py
            model.load_state_dict(
                torch.load("./saved_models_stable/Pipeline1/{}/{}.tar".format(
                    args.class_architecture, args.class_architecture)))
            # classify patches
            predictions = predict_patch(
                model=model,
                input_size=model_archs[args.class_architecture]['input_size'],
                pipeline='Pipeline1',
                batch_size=hyperparameters[
                    args.hyperparameter_set_class]['batch_size_test'],
                test_dir='./' + args.dest_folder,
                out_file='{}_class'.format(os.path.basename(input_image)[:-4]),
                dest_folder='./' + args.dest_folder,
                num_workers=hyperparameters[
                    args.hyperparameter_set_class]['num_workers_train'],
                class_names=class_names)

            os.makedirs('/'.join(preds_path.split('/')[:-1]))
            predictions.to_csv(preds_path)

        # add entries for predictions in GeoDataFrame
        for row in predictions.iterrows():
            fname = row[1]['filenames']
            output_shpfile.loc[fname, 'label'] = row[1]['predictions']

        # get the subset of positive patches filenames
        positive_classes = args.pos_classes.split('_')
        pos_patches = predictions.loc[
            predictions['predictions'].isin(positive_classes), 'filenames']

    if not os.path.exists('./{}/sub-tiles/images'.format(args.dest_folder)):
        os.makedirs('./{}/sub-tiles/images'.format(args.dest_folder))

    # loop over positive patches creating subpatches
    for fname in pos_patches:
        subpatches = get_subpatches(
            patch=np.array(
                Image.open('./{}/tiles/images/{}'.format(
                    args.dest_folder, fname))),
            count_size=model_archs[args.count_architecture]['input_size'])
        for idx, patch in enumerate(subpatches):
            if np.min(patch) > 200 or np.max(patch) < 55:
                continue
            cv2.imwrite(
                './{}/sub-tiles/images/{}-{}'.format(args.dest_folder, idx,
                                                     fname), patch)

    # remove tiles to count only sub-tiles
    shutil.rmtree('./{}/tiles'.format(args.dest_folder))

    # count inside subpatches with counting CNN
    model = model_defs['Pipeline1.1'][args.count_architecture]

    use_gpu = torch.cuda.is_available()
    if use_gpu:
        model.cuda()
    model.eval()

    # load saved model weights from pt_train.py
    model.load_state_dict(
        torch.load("./saved_models_stable/Pipeline1.1/{}/{}.tar".format(
            args.count_architecture, args.count_architecture)))

    counts = predict_patch(
        model=model,
        input_size=model_archs[args.count_architecture]['input_size'],
        pipeline='Pipeline1.1',
        batch_size=hyperparameters[
            args.hyperparameter_set_count]['batch_size_test'],
        test_dir='./' + args.dest_folder,
        out_file='{}_count'.format(os.path.basename(input_image)[:-4]),
        dest_folder='./' + args.dest_folder,
        num_workers=hyperparameters[
            args.hyperparameter_set_count]['num_workers_train'],
        class_names=class_names)
    print('    Total predicted in {}: '.format(os.path.basename(input_image)),
          sum(counts['predictions']))

    for row in counts.iterrows():
        fname = row[1]['filenames'].split('-')[1]
        output_shpfile.loc[fname, 'count'] += row[1]['predictions']

    # save shapefile
    shapefile_path = './{}/predicted_shapefiles/{}/'.format(
        args.dest_folder,
        os.path.basename(input_image)[:-4])
    os.makedirs(shapefile_path)
    output_shpfile.to_file(
        shapefile_path +
        '{}_prediction.shp'.format(os.path.basename(args.dest_folder)))
Example #18
def main(input_df: gpd.GeoDataFrame, column=None, debug=False):
    if input_df.crs is None:  # assume 4326, WGS
        input_df.crs = crs.WGS

    original_crs = input_df.crs
    input_df = input_df.to_crs(crs.GOOGLE)
    input_df['x'] = input_df.geometry.apply(lambda g: g.x)
    input_df['y'] = input_df.geometry.apply(lambda g: g.y)
    # drop rows where x/y is NaN, which occurs in some input files
    input_df = input_df[input_df.x.notnull()
                        & input_df.y.notnull()].copy().reset_index()

    points = input_df[['x', 'y']].to_numpy()
    group_values = input_df[
        column] if column is not None else input_df.index.tolist()

    vor = scipy.spatial.Voronoi(points)
    b = input_df.total_bounds
    box_width = min(b[2] - b[0], b[3] - b[1])

    polylines = []
    ids = []
    center = points.mean(axis=0)
    for pointidx1, simplex in tqdm(zip(vor.ridge_points, vor.ridge_vertices),
                                   desc='Processing Voronoi diagram'):
        pointidx = pointidx1.tolist()
        categories = [group_values[pointidx[0]], group_values[pointidx[1]]]

        if categories[0] != categories[1]:
            if -1 in simplex:
                smp = np.asarray(simplex)
                i = smp[smp >= 0][0]
                t = points[pointidx[1]] - points[pointidx[0]]  # tangent
                t = t / np.linalg.norm(t)

                n = np.array([-t[1], t[0]])
                start = vor.vertices[i]
                if np.linalg.norm(start - center) > 1000000:
                    continue

                far_point = vor.vertices[i] + np.sign(np.dot(
                    start - center, n)) * n * 100000
                n2 = np.linalg.norm(far_point - start)
                if n2 > 1000000.0:
                    far_point = start + (far_point - start) / n2 * 100000

                vertices = [start, far_point]

            else:
                vertices = [vor.vertices[simplex[0]], vor.vertices[simplex[1]]]

            ids.append(categories)
            polylines.append(LineString(vertices))

    bbox = geo.box(*input_df.total_bounds).buffer(box_width * .1,
                                                  0,
                                                  join_style=2)

    if debug:
        write(gpd.GeoDataFrame({'geometry': polylines}),
              '/tmp/debug-polylines.csv')

    polylines.append(bbox.boundary)
    polygons = list(polygonize(unary_union(polylines)))
    result = gpd.GeoDataFrame({'geometry': polygons}, crs=crs.GOOGLE)

    subset = ['geometry']
    if column is not None:
        subset.append(column)

    joined = gpd.sjoin(result, input_df[subset]).drop('index_right', axis=1)

    if column is not None:
        joined = joined.dissolve(column).reset_index()

    return joined.to_crs(original_crs)
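The polygonize(unary_union(...)) step above turns the noded boundary lines into faces; a minimal sketch with a square split by one interior line:

from shapely.geometry import LineString
from shapely.ops import polygonize, unary_union

lines = [
    LineString([(0, 0), (2, 0)]),
    LineString([(2, 0), (2, 2)]),
    LineString([(2, 2), (0, 2)]),
    LineString([(0, 2), (0, 0)]),
    LineString([(1, 0), (1, 2)]),  # splits the square into two faces
]
print(len(list(polygonize(unary_union(lines)))))  # 2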
Example #19
def get_stops_within_bounds(bounds):
    # Return all public transport stops within the given bounding box
    bbox = geo.box(bounds['minx'], bounds['miny'], bounds['maxx'],
                   bounds['maxy'])
    return db.session.query(PublicTransport).filter(
        func.ST_Within(PublicTransport.geometry, dumps(bbox))).all()
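The dumps(bbox) call serializes the box to WKT for ST_Within; a standalone sketch of that step, assuming dumps here is shapely.wkt.dumps:

from shapely import geometry as geo
from shapely.wkt import dumps

bbox = geo.box(0.0, 0.0, 1.0, 1.0)
print(dumps(bbox))  # POLYGON ((1 0, 1 1, 0 1, 0 0, 1 0)); precision may vary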
Example #20
def box_intersects(one, two):
    """Return true if two boxes (as vectors of xmin, ymin, xmax, ymax) intersect."""
    return geo.box(*one).intersects(geo.box(*two))
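For example (assuming shapely.geometry is imported as geo, as the function requires):

print(box_intersects((0, 0, 2, 2), (1, 1, 3, 3)))  # True
print(box_intersects((0, 0, 1, 1), (2, 2, 3, 3)))  # False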
Example #21
 def get_world_extent_native(self):  # type: (...) -> Polygon
     lon_ul, lat_ul = self._pixel_x_y_alt_to_lon_lat_native(0, 0)
     lon_br, lat_br = self._pixel_x_y_alt_to_lon_lat_native(
         self._npix_x, self._npix_y)
     world_poly = box(lon_ul, lat_br, lon_br, lat_ul)
     return world_poly
Example #22
def create_patch_overlap_coord():
    '''This part of the code creates the patch image with overlapping coordinates'''
    # img = openslide.OpenSlide(arg.svs_file)
    # img_dim = img.level_dimensions[0]
    # patch_size=10000
    # """
    # Determine what the patch size should be, and how many iterations it will take to get through the WSI
    # """
    # #num_x_patches = int(math.floor(img_dim[0]/patch_size))
    # #num_y_patches = int(math.floor(img_dim[1]/patch_size))
    # #print(str(num_x_patches)+" "+str(num_y_patches))
    # print(len(regions))
    # coords1=regions[8]
    # min=np.argmin(coords1, axis = 0)
    # #print(coords1[min[0]][0])
    # #print(coords1[min[1]][1])
    # #coords2=np.sort(coords1, axis = 0)
    # #print(coords2)
    # #sys.exit(1)
    # x1=int(coords1[min[0]][0])-10
    # y1=int(coords1[min[1]][1])-10
    # level=0
    # img_data = img.read_region((x1,y1),level, (patch_size, patch_size))
    # img_data_np = np.array(img_data)
    # img_name="sample.png"
    # im = Image.fromarray(img_data_np)
    # im.save(img_name)

    # # Create figure and axes
    # #fig,ax = plt.subplots(1)
    # fig,ax = plt.subplots()
    # # Display the image
    # ax.imshow(img_data_np)
    # #imgdata = plt.imread("sample.png")
    # #ax.imshow(imgdata)

    # # Add the patch to the Axes
    # #points = [[2, 1], [8, 1], [8, 4],[50,50],[100,100],[200,200]]
    # #plt.Polygon(points)
    # #poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')
    # coords1_list=[]
    # for i in range(0,len(coords1)):
    # x_cor=int(coords1[i][0])-x1
    # y_cor=int(coords1[i][1])-y1
    # if x_cor >=0 and x_cor <= patch_size and y_cor >=0 and y_cor <=patch_size:
    # tmp=(x_cor,y_cor)
    # coords1_list.append(tmp)
    # #coords1_list.append((100,300))
    # #coords1_list.append((200,200))
    # #coords1_list.append((400,200))
    # #coords1_list.append((500,300))
    # #coords1_list.append((400,400))
    # #coords1_list.append((200,400))
    # print(coords1_list)
    # polygon = Polygon(coords1_list)
    # patch = PolygonPatch(polygon, facecolor=[0,0,0], edgecolor=[0,0.5,0], alpha=0.7, zorder=2)
    # ax.add_patch(patch)
    # plt.savefig('sample_overlay.png', alpha=True, dpi=300)
    # plt.show()

    patch_size = 2000
    OSobj = openslide.OpenSlide(arg.svs_file)
    x1 = 40000
    y1 = 37000
    img_patch = OSobj.read_region((x1, y1), 0, (patch_size, patch_size))
    img_data_np = np.array(img_patch)
    #img_name="sample3.png"
    #im = Image.fromarray(img_data_np)
    #im.save(img_name)

    # coords1_list=[]
    # # #for j in range(0,len(regions)):
    # for j in range(0,1):
    # coords1=regions[j]
    # for i in range(0,len(coords1)):
    # x_cor=int(coords1[i][0])-x1
    # y_cor=int(coords1[i][1])-y1
    # # # #print(str(x_cor)+" "+str(y_cor))
    # if x_cor >=0 and x_cor <= patch_size and y_cor >=0 and y_cor <=patch_size:
    # tmp=(x_cor,y_cor)
    # coords1_list.append(tmp)
    # # # print(coords1_list)
    # polygon = Polygon(coords1_list)
    # # print(polygon)
    # # # # Create figure and axes
    # fig,ax = plt.subplots()
    # # # # Display the image
    # ax.imshow(img_data_np)

    # # # # Add the patch to the Axes
    # patch = PolygonPatch(polygon, facecolor=[0,0,0], edgecolor=[0,0.5,0], alpha=0.7, zorder=2)
    # ax.add_patch(patch)
    # plt.savefig('sample_overlay.png', alpha=True, dpi=300)
    # plt.show()

    # #ploting Steve's way
    # #plt.style.use('dark_background')
    # f, ax = plt.subplots(frameon=False)
    # #f.set_facecolor('#eafff5')
    # #ax.set_facecolor('#eafff5')
    # f.tight_layout(pad=0, h_pad=0, w_pad=0)
    # ax.set_xlim(0, patch_size)
    # ax.set_ylim(0, patch_size)
    # #img_data_np[np.where((img_data_np != [0,0,0]).all(axis = 2))] = [0,0,0]
    # #ax.imshow(img_data_np)
    # #mask1 = np.zeros(img_data_np.shape, dtype = "uint8")
    # #mask1.fill(0)
    # mask1 = Image.new('RGBA', (patch_size, patch_size), "black")
    # ax.imshow(mask1)
    # #ax.set_axis_bgcolor("black")
    # #patch = PolygonPatch(polygon, facecolor=[0,0,0], edgecolor=[0,0.5,0], alpha=0.7, zorder=2)
    # #patch = PolygonPatch(polygon, facecolor='#FFFFFF', edgecolor='#FFFFFF', alpha=0.7, zorder=2)
    # patch = PolygonPatch(polygon, facecolor='white')
    # ax.add_patch(patch)
    # ax.set_axis_off()
    # DPI = f.get_dpi()
    # plt.subplots_adjust(left=0, bottom=0, right=1, top=1,wspace=0, hspace=0)
    # f.set_size_inches(patch_size / DPI, patch_size / DPI)
    # f.savefig("Mask_tmp.png", pad_inches='tight')

    #create a binary mask
    #mask = np.zeros(img_data_np, dtype = "uint8")
    #cv2.rectangle(mask, (x1, y1), (x1+patch_size, y1+patch_size), (255, 255, 255), -1)

    #print(img_data_np)
    # flag = np.array([37500,38000, 0,0])
    # #flag = np.array([36000,27000])
    # #img_data_np=img_data_np+flag
    # #mask = np.zeros((700,700))
    # #print(regions[0])
    # a=finalcoords
    # a=np.int64(a)-flag
    # a = a[~np.all(a < 0, axis=1)]
    # a = a[~np.all(a > 10000, axis=1)]
    # #print("sucess")
    # #a2=a1[np.logical_and(a1>=0,a1<700)]
    # print(a)
    # sys.exit(1)
    # poly = Polygon(regions[0])
    # #img = Image.new('L', (700,700), 0)
    # #ImageDraw.Draw(img).polygon(poly, outline=1, fill=1)
    # mask = np.array((700,700))
    # # Create vertex coordinates for each grid cell...
    # # (<0,0> is at the top left of the grid in this system)
    # #x, y = np.meshgrid(np.arange(700), np.arange(700))
    # #x, y = x.flatten(), y.flatten()
    # #points = np.vstack((x,y)).T
    # #grid = points_inside_poly(points, poly)
    # #grid = grid.reshape((ny,nx))
    # print(poly)
    # cv2.fillPoly(mask, poly, 1)
    # mask = mask.astype(bool)
    # plt.imshow(mask)
    #print(Polygon(img_data_np).contains(poly_regions[0]))
    #xmax, ymax = a.max(axis=0)
    #print(img_data_np.shape)
    #a = Polygon(np.array([(0, 0), (1, 1), (1,2), (2,2)]))
    #b = Polygon(np.array([(0, 0), (1, 1), (2,1), (2,2)]))
    # patch_list=[]
    # x_n=300
    # y_n=300
    # size=x_n+y_n+x_n-2+y_n-2
    # list1=[]
    # n=0
    # for x in range(0,x_n):
    # tlist=[0+39000,x+34000]
    # list1.append(tlist)
    # for x in range(0,x_n):
    # tlist=[x_n-1+39000,x+34000]
    # list1.append(tlist)
    # for x in range(1,y_n-1):
    # tlist=[x+39000,0+34000]
    # list1.append(tlist)
    # for x in range(1,y_n-1):
    # tlist=[x+39000,y_n-1+34000]
    # list1.append(tlist)

    path_poly = geo.box(x1, y1, x1 + patch_size, y1 + patch_size, ccw=True)
    # # path_poly=Polygon(np.array(list1))
    # # #path_poly=Polygon(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]))
    # # print(path_poly)
    # # print(path_poly.is_valid)
    # # print(path_poly.length)
    # #print(poly_regions[0].is_valid)
    #print(len(poly_regions))
    poly_all = []
    for j in range(0, len(poly_regions)):
        #print(j)
        poly1 = path_poly.intersection(poly_regions[j])
        poly1_temp = []
        if poly1.length > 0:  # assumes the intersection is a single Polygon
            for x, y in poly1.exterior.coords:
                x = int(x) - x1
                y = int(y) - y1
                tmp = (x, y)
                poly1_temp.append(tmp)
            poly1 = Polygon(poly1_temp)
            poly_all.append(poly1)
    #multi_poly = MultiPolygon(poly_all)
    #print(len(poly_all))
    #sys.exit(1)
    #ploting Steve's way
    #plt.style.use('dark_background')
    f, ax = plt.subplots(frameon=False)
    #ax.set_facecolor('#eafff5')
    f.tight_layout(pad=0, h_pad=0, w_pad=0)
    ax.set_xlim(0, patch_size)
    ax.set_ylim(0, patch_size)
    mask1 = Image.new('RGBA', (patch_size, patch_size), "black")
    ax.imshow(mask1)
    #patch1 = PolygonPatch(poly1, facecolor="white", edgecolor="white", alpha=0.7, zorder=2)
    for j in range(0, len(poly_all)):
        patch1 = PolygonPatch(poly_all[j], facecolor="white")
        ax.add_patch(patch1)

    ax.set_axis_off()
    DPI = f.get_dpi()
    plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
    f.set_size_inches(patch_size / DPI, patch_size / DPI)
    f.savefig("Mask_tmp1.png", pad_inches='tight')