Example #1
def test_bad_layer(self):
    arcgis.gis.GIS = Mock()
    gis = arcgis.gis.GIS()
    gis.content.get.return_value = None
    assert not to_agol.agol_arg_check('test', 'test', 'test')
Example #2
def main():

    t0 = timeit.default_timer()

    # Determine what, if any, items we are pushing to AGOL
    agol_push = to_agol.agol_arg_check(args.agol_user, args.agol_password,
                                       args.agol_feature_service)

    make_staging_structure(args.staging_directory)
    make_output_structure(args.output_directory)

    logger.info('Retrieving files...')
    pre_files = get_files(args.pre_directory)
    logger.debug(
        f'Retrieved {len(pre_files)} pre files from {args.pre_directory}')
    post_files = get_files(args.post_directory)
    logger.debug(
        f'Retrieved {len(post_files)} post files from {args.post_directory}')

    logger.info('Re-projecting...')

    # Run reprojection in parallel processes
    manager = mp.Manager()
    return_dict = manager.dict()
    jobs = []

    # Tag each file with its phase ('pre'/'post') and CRS so one pool of workers can process both sets
    pre_files = [("pre", args.pre_crs, x) for x in pre_files]
    post_files = [("post", args.post_crs, x) for x in post_files]
    files = pre_files + post_files

    # Launch multiprocessing jobs for reprojection
    for idx, f in enumerate(files):
        p = mp.Process(target=reproject_helper,
                       args=(args, f, idx, return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()

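    # Keep only successful reprojections; each return_dict value is a (phase, path) tuple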
    reproj = [x for x in return_dict.values() if x[1] is not None]
    pre_reproj = [x[1] for x in reproj if x[0] == "pre"]
    post_reproj = [x[1] for x in reproj if x[0] == "post"]

    logger.info("Creating pre mosaic...")
    pre_mosaic = raster_processing.create_mosaic(
        pre_reproj, Path(f"{args.output_directory}/mosaics/pre.tif"))
    logger.info("Creating post mosaic...")
    post_mosaic = raster_processing.create_mosaic(
        post_reproj, Path(f"{args.output_directory}/mosaics/post.tif"))

    extent = raster_processing.get_intersect(pre_mosaic, post_mosaic)

    logger.info('Chipping...')
    # TODO: fix the use of logging with tqdm (see the loguru docs)
    pre_chips = raster_processing.create_chips(
        pre_mosaic,
        args.output_directory.joinpath('chips').joinpath('pre'), extent)
    logger.debug(f'Num pre chips: {len(pre_chips)}')
    post_chips = raster_processing.create_chips(
        post_mosaic,
        args.output_directory.joinpath('chips').joinpath('post'), extent)
    logger.debug(f'Num post chips: {len(post_chips)}')

    if len(pre_chips) != len(post_chips):
        logger.error('Chip numbers mismatch')
        raise AssertionError('Chip numbers mismatch')

    # Define the datasets and dataloaders
    pairs = []
    for pre, post in zip(pre_chips, post_chips):
        if not check_data([pre, post]):
            continue

        pairs.append(
            Files(pre.stem, args.pre_directory, args.post_directory,
                  args.output_directory, pre, post))

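    # The 'loc' and 'cls' datasets feed the localization and damage-classification models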
    eval_loc_dataset = XViewDataset(pairs, 'loc')
    eval_loc_dataloader = DataLoader(eval_loc_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=args.num_workers,
                                     shuffle=False,
                                     pin_memory=True)

    eval_cls_dataset = XViewDataset(pairs, 'cls')
    eval_cls_dataloader = DataLoader(eval_cls_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=args.num_workers,
                                     shuffle=False,
                                     pin_memory=True)

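    # In dp_mode, run loc and cls for every model size sequentially in this
    # process; otherwise fan out across GPUs with multiprocessing below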
    if args.dp_mode:
        results_dict = {}

        for sz in ['34', '50', '92', '154']:
            logger.info(f'Running models of size {sz}...')
            return_dict = {}
            loc_wrapper = XViewFirstPlaceLocModel(sz, dp_mode=args.dp_mode)

            run_inference(eval_loc_dataloader, loc_wrapper,
                          args.save_intermediates, 'loc', return_dict)

            del loc_wrapper

            cls_wrapper = XViewFirstPlaceClsModel(sz, dp_mode=args.dp_mode)

            run_inference(eval_cls_dataloader, cls_wrapper,
                          args.save_intermediates, 'cls', return_dict)

            del cls_wrapper

            results_dict.update(return_dict)

    elif torch.cuda.device_count() == 2:
        # For 2-GPU machines [TESTED]

        # Loading model
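        # Three device ids per size, presumably one per model checkpoint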
        loc_gpus = {
            '34': [0, 0, 0],
            '50': [1, 1, 1],
            '92': [0, 0, 0],
            '154': [1, 1, 1]
        }

        cls_gpus = {
            '34': [1, 1, 1],
            '50': [0, 0, 0],
            '92': [1, 1, 1],
            '154': [0, 0, 0]
        }

        results_dict = {}

        # Running inference
        logger.info('Running inference...')

        for sz in loc_gpus.keys():
            logger.info(f'Running models of size {sz}...')
            loc_wrapper = XViewFirstPlaceLocModel(sz, devices=loc_gpus[sz])
            cls_wrapper = XViewFirstPlaceClsModel(sz, devices=cls_gpus[sz])

            # Run loc and cls inference for this size in parallel processes
            manager = mp.Manager()
            return_dict = manager.dict()
            jobs = []

            # Launch loc and cls inference in separate processes
            p1 = mp.Process(target=run_inference,
                            args=(eval_cls_dataloader, cls_wrapper,
                                  args.save_intermediates, 'cls', return_dict))
            p2 = mp.Process(target=run_inference,
                            args=(eval_loc_dataloader, loc_wrapper,
                                  args.save_intermediates, 'loc', return_dict))
            p1.start()
            p2.start()
            jobs.append(p1)
            jobs.append(p2)
            for proc in jobs:
                proc.join()

            results_dict.update(return_dict)

    elif torch.cuda.device_count() == 8:
        # For 8-GPU machines
        # TODO: Test!

        # Loading model
        loc_gpus = {
            '34': [0, 0, 0],
            '50': [1, 1, 1],
            '92': [2, 2, 2],
            '154': [3, 3, 3]
        }

        cls_gpus = {
            '34': [4, 4, 4],
            '50': [5, 5, 5],
            '92': [6, 6, 6],
            '154': [7, 7, 7]
        }

        results_dict = {}
        # Run inference in parallel processes
        manager = mp.Manager()
        return_dict = manager.dict()
        jobs = []

        for sz in loc_gpus.keys():
            logger.info(f'Adding jobs for size {sz}...')
            loc_wrapper = XViewFirstPlaceLocModel(sz, devices=loc_gpus[sz])
            cls_wrapper = XViewFirstPlaceClsModel(sz, devices=cls_gpus[sz])

            # DEBUG: uncomment to run localization inference in-process
            # run_inference(eval_loc_dataloader, loc_wrapper,
            #               args.save_intermediates, 'loc', return_dict)

            # Launch multiprocessing jobs for the different PyTorch jobs
            jobs.append(
                mp.Process(target=run_inference,
                           args=(eval_cls_dataloader, cls_wrapper,
                                 args.save_intermediates, 'cls', return_dict)))
            jobs.append(
                mp.Process(target=run_inference,
                           args=(eval_loc_dataloader, loc_wrapper,
                                 args.save_intermediates, 'loc', return_dict)))

        logger.info('Running inference...')

        for proc in jobs:
            proc.start()
        for proc in jobs:
            proc.join()

        results_dict.update(return_dict)

    else:
        raise ValueError('Must use dp_mode or a machine with either 2 or 8 GPUs')

    # Quick check to make sure the samples in cls and loc are in the same order
    # assert results_dict['34loc'][4]['in_pre_path'] == results_dict['34cls'][4]['in_pre_path']

    # Re-key per sample: one dict per chip, mapping e.g. '34loc' to that chip's result
    results_list = [{k: v[i]
                     for k, v in results_dict.items()}
                    for i in range(len(results_dict['34cls']))]

    # Run postprocessing in a worker pool
    with mp.Pool(args.n_procs) as pool:
        pool.map(postprocess_and_write, results_list)

    logger.info("Creating overlay mosaic")
    p = Path(args.output_directory) / "over"
    overlay_files = get_files(p)
    overlay_files = [x for x in overlay_files]
    overlay_mosaic = raster_processing.create_mosaic(
        overlay_files, Path(f"{args.output_directory}/mosaics/overlay.tif"))

    # Get files for creating shapefile and/or pushing to AGOL
    dmg_files = get_files(Path(args.output_directory) / 'dmg')
    polygons = features.create_polys(dmg_files)
    logger.debug(f'Polygons created: {len(polygons)}')

    # Create shapefile
    logger.info('Creating shapefile')
    to_shapefile.create_shapefile(
        polygons,
        Path(args.output_directory) / 'shapes' / 'damage.shp',
        args.destination_crs)

    if agol_push:
        to_agol.agol_helper(args, polygons)

    # Complete
    elapsed = timeit.default_timer() - t0
    logger.success(f'Run complete in {elapsed / 60:.3f} min')
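
main() reads module-level args and logger objects that this example does not show. Below is a minimal sketch of the setup it presumably relies on; the flag names and defaults are hypothetical, only the attribute names are taken from the usage above, and logger.success suggests loguru:

# Hypothetical module-level setup assumed by main(); flag names are guesses,
# but the attribute names mirror main()'s usage (args.pre_directory, etc.).
import argparse
import multiprocessing as mp
import timeit
from pathlib import Path

import torch
from loguru import logger  # logger.success() above implies loguru


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--pre_directory', type=Path, required=True)
    parser.add_argument('--post_directory', type=Path, required=True)
    parser.add_argument('--staging_directory', type=Path, required=True)
    parser.add_argument('--output_directory', type=Path, required=True)
    parser.add_argument('--pre_crs')
    parser.add_argument('--post_crs')
    parser.add_argument('--destination_crs')
    parser.add_argument('--agol_user')
    parser.add_argument('--agol_password')
    parser.add_argument('--agol_feature_service')
    parser.add_argument('--batch_size', type=int, default=2)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--n_procs', type=int, default=4)
    parser.add_argument('--dp_mode', action='store_true')
    parser.add_argument('--save_intermediates', action='store_true')
    return parser.parse_args()


args = parse_args()  # module-level, since main() reads args directly

if __name__ == '__main__':
    main()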
Example #3
def test_bad_creds(self):
    arcgis.gis.GIS = Mock(side_effect=Exception)
    # The call raises inside the context manager, so asserting on its
    # return value afterwards would never execute
    with pytest.raises(Exception):
        to_agol.agol_arg_check('test', 'test', 'test')
Example #4
def test_good_gis(self):
    # Mock a GIS object...this will also mock the gis.content.get method
    arcgis.gis.GIS = Mock()
    assert to_agol.agol_arg_check('test', 'test', 'test')
Example #5
def test_no_fs(self):
    assert not to_agol.agol_arg_check('test', 'test', None)
Example #6
def test_no_user(self):
    assert not to_agol.agol_arg_check(None, 'test', 'test')
Example #7
def test_no_params(self):
    assert not to_agol.agol_arg_check(None, None, None)
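
Taken together, these tests pin down the contract of to_agol.agol_arg_check: every argument must be present, a failed ArcGIS login may raise, and a feature service that cannot be fetched disables the push. Below is a minimal sketch consistent with that behavior, inferred from the tests rather than copied from the project's actual implementation:

# Inferred from the tests above; not the real to_agol implementation.
import arcgis


def agol_arg_check(user, password, feature_service):
    # Examples #5-#7: any missing argument disables the AGOL push
    if not all((user, password, feature_service)):
        return False
    # Example #3: a login failure inside arcgis.gis.GIS is allowed to propagate
    gis = arcgis.gis.GIS(username=user, password=password)
    # Example #1: a feature service that cannot be fetched means nothing to push to
    if gis.content.get(feature_service) is None:
        return False
    # Example #4: valid credentials and a retrievable service enable the push
    return True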