def main():
    config = utils.get_config_params("config.ini")
    version = config.get("es", "version")

    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_sauid_hexgrid_unclipped_{}".format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
                    FROM boundaries."SAUID_HexGrid_unclipped" \
                    ORDER BY "SAUID_HexGrid_unclipped"."sauid" \
                    LIMIT {limit} \
                    OFFSET {offset}',
    )

    table.postgis2es()

    return
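Every loader on this page follows the same recipe: page through a PostGIS view with LIMIT/OFFSET, parse the ST_AsGeoJSON column into a GeoJSON geometry, and bulk-index the resulting features into Elasticsearch. The utils module itself is not shown here; the following is a minimal sketch of how such a postgis2es loop could look, assuming psycopg2 and the elasticsearch 7.x client — the names mirror the examples but are reconstructions, not the actual OpenDRR code.

import json

import psycopg2
from elasticsearch import Elasticsearch, helpers


def postgis2es(pg_dsn, es_url, view, sqlquerystring, settings, batch_size=10000):
    """Hypothetical paging/bulk-indexing loop; not the actual utils code."""
    es = Elasticsearch(es_url)

    # recreate the target index with the supplied settings/mappings
    if es.indices.exists(index=view):
        es.indices.delete(index=view)
    es.indices.create(index=view, body=settings)

    with psycopg2.connect(pg_dsn) as conn, conn.cursor() as cur:
        offset = 0
        while True:
            cur.execute(sqlquerystring.format(limit=batch_size, offset=offset))
            rows = cur.fetchall()
            if not rows:
                break
            columns = [d[0] for d in cur.description]
            actions = []
            for row in rows:
                props = dict(zip(columns, row))
                # ST_AsGeoJSON(...) is selected last, so it is the final column
                geometry = json.loads(props.pop(columns[-1]))
                props.pop("geom", None)  # drop the raw PostGIS geometry
                actions.append({
                    "_index": view,
                    "_source": {
                        "type": "Feature",
                        "geometry": geometry,
                        "properties": props,
                    },
                })
            helpers.bulk(es, actions)
            offset += batch_size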
Example #2
def main():
    config = utils.get_config_params("config.ini")
    version = config.get("es", "version")

    # args = parse_args()
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "coordinates": {
                            "type": "geo_point"
                        },
                        "geometry": {
                            "type": "geo_shape"
                        },
                    }
                },
            }),
        view="opendrr_psra_uhs_{}".format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
                FROM results_psra_national.psra_uhs \
                ORDER BY psra_uhs."geom" \
                LIMIT {limit} \
                OFFSET {offset}',
    )

    table.postgis2es()

    return
Example #3
def main():

    config = utils.get_config_params("config.ini")
    version = config.get("es", "version")

    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_shakemap_scenario_extents_{}".format(version),
        sqlquerystring="SELECT *, ST_AsGeoJSON(geom) \
                    FROM gmf.shakemap_scenario_extents \
                    LIMIT {limit} \
                    OFFSET {offset}",
    )
    dsraTable.postgis2es()
    return
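Mapping "geometry" as geo_shape is what makes the loaded index spatially queryable. As an illustration (not part of the original script), a bounding-box filter against the scenario-extents index might look like this, assuming a local Elasticsearch 7.x endpoint:

from elasticsearch import Elasticsearch

import utils

config = utils.get_config_params("config.ini")
version = config.get("es", "version")
es = Elasticsearch("http://localhost:9200")  # assumed endpoint

# envelope coordinates are [top-left, bottom-right] in [lon, lat] order
hits = es.search(
    index="opendrr_shakemap_scenario_extents_{}".format(version),
    body={
        "query": {
            "geo_shape": {
                "geometry": {
                    "shape": {
                        "type": "envelope",
                        "coordinates": [[-130.0, 55.0], [-120.0, 48.0]],
                    },
                    "relation": "intersects",
                }
            }
        }
    },
)
print(hits["hits"]["total"]["value"])  # ES 7 reports total as an object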
Example #4
def main():

    config = utils.get_config_params("config.ini")
    version = config.get("es", "version")

    # sauid level aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_s_{}".format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom_poly) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_s \
            ORDER BY nhsl_physical_exposure_indicators_s."Sauid" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # building level aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "coordinates": {
                            "type": "geo_point"
                        },
                        "geometry": {
                            "type": "geo_shape"
                        },
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_b_{}".format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom_point) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_b \
            ORDER BY nhsl_physical_exposure_indicators_b."BldgID" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 1km aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_1km_{}".format(
            version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_1km \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_1km."gridid_1" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 1km unclipped aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_1km_uc_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_1km_uc \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_1km_uc."gridid_1" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 5km aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_5km_{}".format(
            version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_5km \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_5km."gridid_5" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 5km unclipped aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_5km_uc_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_5km_uc \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_5km_uc."gridid_5" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 10km aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_10km_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_10km \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_10km."gridid_10" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 10km unclipped aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_10km_uc_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_10km_uc \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_10km_uc."gridid_10" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 25km aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_25km_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_25km \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_25km."gridid_25" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 25km unclipped aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_25km_uc_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_25km_uc \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_25km_uc."gridid_25" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 50km unclipped aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_50km_uc_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_50km_uc \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_50km_uc."gridid_50" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid 100km unclipped aggregation
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_nhsl_physical_exposure_indicators_hexgrid_100km_uc_{}".
        format(version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM \
            results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_100km_uc \
            ORDER BY nhsl_physical_exposure_indicators_hexgrid_100km_uc."gridid_100" \
            LIMIT {limit} \
            OFFSET {offset}',
    )
    table.postgis2es()

    # hexgrid global fabric
    # table = utils.PostGISdataset(
    #     utils.PostGISConnection(),
    #     utils.ESConnection(settings={
    #         'settings': {
    #             'number_of_shards': 1,
    #             'number_of_replicas': 0
    #         },
    #         'mappings': {
    #             'properties': {
    #                 'geometry': {
    #                     'type': 'geo_shape'
    #                 }
    #             }
    #         }
    #     }),
    #     view="opendrr_nhsl_physical_exposure_indicators_hexgrid_global_fabric",
    #     sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
    #         FROM \
    #         results_nhsl_physical_exposure.nhsl_physical_exposure_indicators_hexgrid_global_fabric \
    #         ORDER BY nhsl_physical_exposure_indicators_hexgrid_global_fabric."gridid" \
    #         LIMIT {limit} \
    #         OFFSET {offset}'
    # )
    # table.postgis2es()

    return
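The hexgrid blocks above differ only in the view suffix, source table, and sort column, so the body of main() could express them as a single loop. A sketch of that refactor, using the same utils API and variable names (the unclipped _uc variants would be added to the list the same way):

HEXGRID_LAYERS = [
    ("hexgrid_1km", "nhsl_physical_exposure_indicators_hexgrid_1km", "gridid_1"),
    ("hexgrid_5km", "nhsl_physical_exposure_indicators_hexgrid_5km", "gridid_5"),
    ("hexgrid_10km", "nhsl_physical_exposure_indicators_hexgrid_10km", "gridid_10"),
    ("hexgrid_25km", "nhsl_physical_exposure_indicators_hexgrid_25km", "gridid_25"),
]

for suffix, source_table, sort_column in HEXGRID_LAYERS:
    table = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(settings={
            "settings": {"number_of_shards": 1, "number_of_replicas": 0},
            "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
        }),
        view="opendrr_nhsl_physical_exposure_indicators_{}_{}".format(suffix, version),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_nhsl_physical_exposure.{table} \
            ORDER BY {table}."{sort}" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(table=source_table, sort=sort_column),
    )
    table.postgis2es()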
Example #5
def main():
    args = parse_args()

    config = utils.get_config_params("config.ini")
    version = config.get("es", "version")

    if args.aggregation.lower() == "sauid":
        aggregation = args.aggregation[0].lower()
    else:
        aggregation = args.aggregation

    # index settings
    if args.geometry in ("geom_poly", "geom"):
        table = utils.PostGISdataset(
            utils.PostGISConnection(),
            utils.ESConnection(
                settings={
                    "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                    "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
                }
            ),
            view="opendrr_nhsl_social_fabric_indicators_{agg}_{version}".format(
                **{"agg": aggregation, "version": version}
            ),
            sqlquerystring='SELECT *, ST_AsGeoJSON({geom}) \
                FROM \
                results_nhsl_social_fabric.nhsl_social_fabric_indicators_{agg} \
                ORDER BY "{sort_field}" \
                LIMIT {{limit}} \
                OFFSET {{offset}}'.format(
                **{
                    "geom": args.geometry,
                    "agg": aggregation,
                    "sort_field": args.sortfield,
                }
            ),
        )

    elif args.geometry == "geom_point":
        table = utils.PostGISdataset(
            utils.PostGISConnection(),
            utils.ESConnection(
                settings={
                    "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                    "mappings": {
                        "properties": {
                            "coordinates": {"type": "geo_point"},
                            "geometry": {"type": "geo_shape"},
                        }
                    },
                }
            ),
            view="opendrr_nhsl_social_fabric_indicators_{agg}_{version}".format(
                **{"agg": args.aggregation[0].lower(), "version": version}
            ),
            sqlquerystring='SELECT *, ST_AsGeoJSON(geom_point) \
                FROM \
                results_nhsl_social_fabric.nhsl_social_fabric_indicators_{agg} \
                ORDER BY "{sort_field}" \
                LIMIT {{limit}} \
                OFFSET {{offset}}'.format(
                **{"agg": aggregation, "sort_field": args.sortfield}
            ),
        )

    table.postgis2es()

    return
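parse_args() is called but not shown in these loaders. A plausible minimal version matching the flags Example #5 reads (args.aggregation, args.geometry, args.sortfield) — treat the flag names and defaults as assumptions:

import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="Load a PostGIS view into Elasticsearch")
    parser.add_argument("--aggregation", required=True, help='aggregation level, e.g. "sauid"')
    parser.add_argument("--geometry", required=True, choices=["geom", "geom_poly", "geom_point"])
    parser.add_argument("--sortfield", required=True, help="column for stable LIMIT/OFFSET paging")
    return parser.parse_args()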
Example #6
            raise Exception

    elif cfg.get_service_date == "on_demand":
        is_changes_applied = utils.validate_graph_changes_applied(coverage_name)
        if not is_changes_applied:
            raise Exception

    # If it's up - delete the old gtfs and osm files - only from AWS machines
    if is_changes_applied and utils.is_aws_machine():
        utils.delete_file_from_host(Path(os.getcwd()).parent / osm_file_path / osm_file_name)
        utils.delete_file_from_host(Path(os.getcwd()).parent / gtfs_file_path / gtfs_file_name)


# config variables to be moved to a config file downstream
default_coverage_name, coverage_name, navitia_docker_compose_file_path, navitia_docker_compose_file_name, \
navitia_docker_compose_default_file_name, gtfs_file_path, gtfs_zip_file_name = utils.get_config_params()

try:

    # Get the docker service client
    docker_client = utils.get_docker_service_client()

    containers = docker_client.containers.list(filters={"name": "worker"})
    if len(containers) == 0:
        _log.error("Navitia docker containers are down, bringing them up with default coverage for processing")
        utils.start_navitia_with_single_coverage(navitia_docker_compose_file_path,
                                                 navitia_docker_compose_default_file_name,
                                                 default_coverage_name)
        containers = docker_client.containers.list(filters={"name": "worker"})
        
    # Get the worker container
Example #7
def main():
    args = parse_args()

    config = utils.get_config_params("config.ini")
    version = config.get("es", "version")

    # Create shakemap object and load to ES
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {
                    "properties": {
                        "coordinates": {"type": "geo_point"},
                        "geometry": {"type": "geo_shape"},
                    }
                },
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_shakemap \
            ORDER BY dsra_{eqScenario}_shakemap."SiteID" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 1km shakemap hexgrid
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_1km_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_1 \
            ORDER BY dsra_{eqScenario}_sm_hg_1."gridid_1" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 1km shakemap hexgrid unclipped
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_1km_uc_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_1_uc \
            ORDER BY dsra_{eqScenario}_sm_hg_1_uc."gridid_1" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 5km shakemap hexgrid
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_5km_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_5 \
            ORDER BY dsra_{eqScenario}_sm_hg_5."gridid_5" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 5km shakemap hexgrid unclipped
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_5km_uc_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_5_uc \
            ORDER BY dsra_{eqScenario}_sm_hg_5_uc."gridid_5" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 10km shakemap hexgrid
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_10km_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_10 \
            ORDER BY dsra_{eqScenario}_sm_hg_10."gridid_10" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 10km shakemap hexgrid unclipped
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_10km_uc_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_10_uc \
            ORDER BY dsra_{eqScenario}_sm_hg_10_uc."gridid_10" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 25km shakemap hexgrid
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_25km_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_25 \
            ORDER BY dsra_{eqScenario}_sm_hg_25."gridid_25" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 25km shakemap hexgrid unclipped
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_25km_uc_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_25_uc \
            ORDER BY dsra_{eqScenario}_sm_hg_25_uc."gridid_25" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 50km shakemap hexgrid unclipped
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_50km_uc_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_50_uc \
            ORDER BY dsra_{eqScenario}_sm_hg_50_uc."gridid_50" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    # Create and load 100km shakemap hexgrid unclipped
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {"properties": {"geometry": {"type": "geo_shape"}}},
            }
        ),
        view="opendrr_dsra_{eqScenario}_shakemap_hexgrid_100km_uc_{version}".format(
            **{"eqScenario": args.eqScenario, "version": version}
        ).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_sm_hg_100_uc \
            ORDER BY dsra_{eqScenario}_sm_hg_100_uc."gridid_100" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(
            **{"eqScenario": args.eqScenario}
        ),
    )
    dsraTable.postgis2es()

    return
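The doubled braces in these SQL templates are deliberate: the first .format() fills in the scenario name and collapses {{limit}}/{{offset}} to {limit}/{offset}, which the paging loop fills in later. A quick demonstration, using a made-up scenario name:

template = 'SELECT * FROM dsra_{eqScenario}_shakemap LIMIT {{limit}} OFFSET {{offset}}'
first_pass = template.format(eqScenario='acm7p3')
assert first_pass == 'SELECT * FROM dsra_acm7p3_shakemap LIMIT {limit} OFFSET {offset}'
print(first_pass.format(limit=10000, offset=0))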
Example #8
#!/usr/bin/python
import time
import praw
import traceback
import re
import logger
import utils
import lookup
import profiler
import formatter

config = utils.get_config_params('reddit_bot')

# get the login info
with open(config['login_file']) as f:
    for line in f:
        # key:value pairs; split only on the first colon so values may contain ':'
        setting = ''.join(line.split()).split(':', 1)
        config[setting[0]] = setting[1]

# posts we've searched
#      key: hour the post was posted (0 for 12:00 AM - 12:59, etc)
#      value: list of post_id
#   this structure lets us easily remove lists of posts that are too old
_searched = {}

# submissions we've replied to
#      key: submission_id
#      value: list of dictionaries of hero info we have profiled
#   ensures we don't post the same profile more than once per thread
_replied_to = {}
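Keying _searched by posting hour is what makes age-based cleanup cheap: a whole hour's worth of post ids can be dropped with one dict.pop(). A sketch of such a pruning helper (hypothetical, not part of the bot):

import time


def prune_old_posts(searched, max_age_hours=6):
    """ drop every list of post ids whose posting hour is too old """
    current_hour = time.localtime().tm_hour
    for hour in list(searched):
        if (current_hour - hour) % 24 > max_age_hours:
            searched.pop(hour)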
Example #9
import sqlite3
import utils
import logger

config = utils.get_config_params('formatter')

def format_gear(gear):
    """ given a dictionary of gear { item_slot: { item_info } }, return
        a formatted string to send to a reddit post """

    items = {}
    intro = '\n\n###### \n\n****\n**Equipped Gear:**\n\n'

    try:
        # open a db connection
        db = sqlite3.connect(config['database'])
        db.row_factory = sqlite3.Row
        db_cur = db.cursor()
        table = config['item_table']
        query = 'SELECT * FROM {t} WHERE name=\'{s}\''

        # get the max order here (useful to remove secondary effects, etc)
        max_order = int(config['max_order'])

        for slot in gear:
            name = _create_url(gear[slot]['name'], gear[slot]['url'])
            i_type = gear[slot]['type']
            item_info = u'> **{n} ({t})**'.format(n=name, t=i_type)
            # because blizzard randomly uses a non-standard apostrophe sometimes
            item_info = item_info.replace(u'\u2019', '\'')
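The query string above interpolates the item name into the SQL with manual quote escaping; sqlite3 can bind the value as a parameter instead, which sidesteps quoting and injection issues (the table name still has to come from trusted config). A hedged sketch of that alternative:

import sqlite3


def lookup_item(db_path, table, name):
    """ fetch one item row by name using a bound parameter """
    db = sqlite3.connect(db_path)
    try:
        db.row_factory = sqlite3.Row
        cur = db.cursor()
        # only the value is bound; the table name must be trusted
        cur.execute('SELECT * FROM {t} WHERE name=?'.format(t=table), (name,))
        return cur.fetchone()
    finally:
        db.close()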
Example #10
import time
import sqlite3
import lookup
import utils
import logger

config = utils.get_config_params('profiler')

def get_gear(hero_data, region='us'):
    """ given a dictionary of hero data, get the gear and return it in a
        dictionary of { item_slot: { item_info } } use region to get proper
        urls """

    gear = {}

    try:
        # open a db connection
        db = sqlite3.connect(config['database'])
        db_cur = db.cursor()
        table = config['item_table']

        for slot, item in hero_data['items'].iteritems():
            item_url = item['tooltipParams']
            item_data = lookup.item_lookup(item_url, region=region)

            if not item_data:
                logger.error('Unable to load item {i}'.format(i=item_url))
                utils.close_db(db)
                return None

            gear[slot] = {
Example #11
import time
import traceback
import utils

config = utils.get_config_params('logging')

def get_file_path():
    """ Returns the file path for the log file """

    file_path = (config['log_location'], config['log_file'])
    return '{path}/{file}'.format(path=file_path[0], file=file_path[1])

def debug(entry):
    """ Log entry if log_level is set to debug or lower """

    if _get_log_level() <= 0:
        _log('DEBUG - {entry}'.format(entry=entry))

def info(entry):
    """ Log entry if log_level is set to info or lower """
    
    if _get_log_level() <= 1:
        _log('INFO - {entry}'.format(entry=entry))

def warn(entry):
    """ Log entry if log_level is set to warn or lower """
    
    if _get_log_level() <= 2:
        _log('WARN - {entry}'.format(entry=entry))

def error(entry):
    """ Log entry if log_level is set to error or lower """

    if _get_log_level() <= 3:
        _log('ERROR - {entry}'.format(entry=entry))
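_get_log_level() and _log() fall outside the captured snippet. Plausible minimal versions, assuming config supplies a textual 'log_level' and that entries are appended to the file from get_file_path():

_LEVELS = {'debug': 0, 'info': 1, 'warn': 2, 'error': 3}

def _get_log_level():
    """ map the configured log_level string to its numeric threshold """
    return _LEVELS.get(config.get('log_level', 'info'), 1)

def _log(entry):
    """ append a timestamped entry to the log file """
    with open(get_file_path(), 'a') as log_file:
        log_file.write('{t} {e}\n'.format(t=time.strftime('%Y-%m-%d %H:%M:%S'), e=entry))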
Example #12
def main(args):
	""" Gets all eval.txt files for models in a given directory and compiles them into a single document (as well as a single csv).
	Also prints information on the best-performing model. """
	eval_writer = open(os.path.join(args.models_dir, 'cumulative_evals-'+args.model_iteration+'.txt'), 'w')
	eval_csv_writer = open(os.path.join(args.models_dir, 'cumulative_evals-'+args.model_iteration+'.csv'), 'w')

	csv_header_written = False
	csv_header = 'saved model path, best epoch, validation loss, validation perplexity, '

	sys.stdout = Logger(os.path.join(args.models_dir, 'cumulative_evals-'+args.model_iteration+'.log'))

	best_validation_loss = None
	best_model_path = None
	best_config = None
	best_eval = None

	validation_losses = []
	perplexities = []
	epochs = []

	all_params = {}

	modified_eval_types = ['simple_terms', 'colors', 'spatial_relations', 'dialogue', 'synonym']
	agreement_types = ['next_placements', 'all_placements', 'all_removals']

	eval_statistics = {}
	eval_terms = [x.strip() for x in args.eval_terms.split(',')]

	for config_file in glob(args.models_dir+'/**/config.txt', recursive=True):
		_, config_params = get_config_params(config_file)
		for param in config_params:
			value = config_params[param]
			if config_params[param] is None:
				value = "None"
			all_params[param] = type(value)

	# iterate over model directories that have successfully been trained and evaluated
	for config_file in glob(args.models_dir+'/**/config.txt', recursive=True):
		print('Accumulating evals for model:', '/'.join(config_file.split('/')[:-1]))
		if not os.path.exists(config_file.replace('config.txt','eval-'+args.model_iteration+'.txt')):
			continue

		model_path = ('/'.join(os.path.abspath(config_file).split("/")[:-1]))
		content = '='*89+'\nsaved model path: '+model_path+'\n'
		csv = model_path+','

		# get the configuration of parameters used for this particular model
		config_content, config_params = get_config_params(config_file)

		with open(config_file.replace('config.txt','eval-'+args.model_iteration+'.txt'), 'r') as f:
			eval_content = f.read()

		content += '\n'+config_content+'\n'+eval_content
		eval_writer.write(content+'\n')

		# parse the model's eval.txt file
		eval_values = {}
		for line in eval_content.split('\n'):
			if line.startswith('Best model found at') or line.startswith('Final model found at'):
				epoch = int(line.split()[-1].replace('.',''))
				eval_values["epoch"] = epoch
				epochs.append(epoch)

			elif line.startswith('Loss:'):
				validation_loss = float(line.split()[-1])
				eval_values["validation_loss"] = validation_loss

				if best_validation_loss is None or validation_loss <= best_validation_loss:
					best_validation_loss = validation_loss
					best_model_path = model_path
					best_config = config_params
					best_eval = eval_values

				validation_losses.append(validation_loss)

			elif line.startswith('Perplexity:'):
				perplexity = float(line.split()[-1])
				eval_values["validation_perplexity"] = perplexity
				perplexities.append(perplexity)

		if not eval_values.get('validation_perplexity'):
			eval_values['validation_perplexity'] = -1
			perplexities.append(-1)

		csv += str(eval_values["epoch"])+',' + str(eval_values['validation_loss'])+',' + str(eval_values['validation_perplexity']*args.perplexity_factor)+','

		for split in ['val', 'test']:
			args_sfx = '-'+args.model_iteration+'-'+split+('' if not args.development_mode else '-development_mode')
			args_sfx += '-multinomial' if args.decoding_strategy == 'multinomial' else '-beam_'+str(args.beam_size)+('-gamma_'+str(args.gamma) if args.gamma else '')

			if not csv_header_written:
				csv_header += 'mean utterance length ('+split+'), std dev ('+split+'), bleu-1 ('+split+'), bleu-2 ('+split+'), bleu-3 ('+split+'), bleu-4 ('+split+'), '
				for eval_type in modified_eval_types:
					csv_header += eval_type+'-bleu ('+split+'), '
				for agreement_type in agreement_types:
					csv_header += agreement_type+' agreement ('+split+'), '

			if os.path.exists(config_file.replace('config.txt', 'mul-std-bleu'+args_sfx+'.csv')):
				with open(config_file.replace('config.txt', 'mul-std-bleu'+args_sfx+'.csv')) as f:
					mul_std_bleu = f.readline().strip()
					csv += mul_std_bleu+','
					mul_std_bleu = mul_std_bleu.split(',')

					if 'mean utterance length ('+split+')' in eval_terms:
						eval_values['mean utterance length ('+split+')'] = float(mul_std_bleu[0])

					if 'std dev ('+split+')' in eval_terms:
						eval_values['std dev ('+split+')'] = float(mul_std_bleu[1])

					for i in range(4):
						if 'bleu-'+str(i+1)+' ('+split+')' in eval_terms:
							eval_values['bleu-'+str(i+1)+' ('+split+')'] = float(mul_std_bleu[i+2])

				args_sfx = '-multinomial' if args.decoding_strategy == 'multinomial' else '-beam_'+str(args.beam_size)+('-gamma_'+str(args.gamma) if args.gamma else '')
				generated_sentences_file = config_file.replace('config.txt', 'generated_sentences-best-'+split+args_sfx+'.txt')

				parser = argparse.ArgumentParser()
				parser.add_argument('generated_sentences_file', help='file of sentences generated by a model')
				parser.add_argument('--simple_terms_file', default='../data/lexicons/simple-terms-redux.txt')
				parser.add_argument('--colors_file', default='../data/lexicons/colors.txt')
				parser.add_argument('--spatial_relations_file', default='../data/lexicons/spatial-relations.txt')
				parser.add_argument('--dialogue_file', default='../data/lexicons/dialogue.txt')
				parser.add_argument('--shapes_file', default='../data/lexicons/shapes.txt')
				parser.add_argument('--synonyms_file', default='../data/lexicons/synonym_substitutions.json')
				parser.add_argument('--with_simple_synonyms', default=True, action='store_true')
				parser.add_argument('--with_utterance_synonyms', default=True, action='store_true')
				parser.add_argument('--num_synonym_references', default=4)
				parser.add_argument('--suppress_printing', default=True)
				parser.add_argument('--output_file', default=None)

				modified_bleu_args = parser.parse_args([generated_sentences_file])
				modified_bleu_scores, agreements = evaluation_analysis.main(modified_bleu_args)

				for eval_type in modified_eval_types:
					header_str = eval_type+'-bleu ('+split+')'
					if header_str in eval_terms:
						eval_values[header_str] = modified_bleu_scores[eval_type]
					csv += str(modified_bleu_scores[eval_type])+','

				for agreement_type in agreement_types:
					header_str = agreement_type+' agreement ('+split+')'
					if header_str in eval_terms:
						eval_values[header_str] = agreements[agreement_type]
					csv += str(agreements[agreement_type])+','

			else:
				for i in range(9+len(modified_eval_types)):
					csv += '-1,'

		for param in all_params:
			if config_params.get(param) is None:
				default_value = '-1'
				if all_params[param] == bool:
					default_value = 'False'
				elif all_params[param] == str:
					default_value = 'None'

				config_params[param] = default_value

		pair_values = {'num_encoder_hidden_layers': None, 'num_decoder_hidden_layers': None, 'dropout_rnn': None, 'dropout_nae': None, 'dropout_counter': None}

		for param, value in sorted(config_params.items()):
			if param in ('load_dataset', 'ignore_diff', 'data_dir', 'gold_configs_dir', 'vocab_dir', 'date_dir', 'teacher_forcing_ratio', 'seed', 'num_workers', 'strict', 'suppress_logs', 'visualize'):
				continue

			if args.model_type == 'cnn_3d':
				if 'pretrained' in param or 'world_state' in param or 'block_embedding' in param or param in ('add_builder_utterances', 'augment_dataset', 'augmentation_factor', 'pretrained_and_augmented', 'exactly_k', 'strict', 'ignore_diff'):
					continue

			if isinstance(value, str) and ',' in value:
				value = '"'+value+'"'

			csv += str(value)+','
			if not csv_header_written:
				csv_header += param+','

			if param in pair_values and value is not None:
				pair_values[param] = str(value)
			elif param != 'hyperparameter_file':
				append_eval_values(eval_statistics, param, value, eval_values)

		append_eval_values(eval_statistics, 'encoder_decoder_layers', pair_values['num_encoder_hidden_layers']+','+pair_values['num_decoder_hidden_layers'], eval_values)

		if pair_values['dropout_rnn'] is not None and pair_values['dropout_rnn'] != 'None':
			header = 'dropout_rnn_counter'
			pair_value = pair_values['dropout_counter']

			if 'next_actions' in args.model_type:
				header = 'dropout_rnn_nae'
				pair_value = pair_values['dropout_nae']

			append_eval_values(eval_statistics, header, pair_values['dropout_rnn']+','+pair_value, eval_values)

		if not csv_header_written:
			eval_csv_writer.write(csv_header[:-1]+'\n')
			csv_header_written = True

		eval_csv_writer.write(csv[:-1]+'\n')

	if not best_model_path:
		print("Error: no best model was found -- check that the model directories include an eval.txt file.")
		sys.exit(1)

	eval_writer.write('='*89)
	eval_writer.close()
	eval_csv_writer.close()

	# print details of best overall model found
	print("Best model found:", model_path)
	for param in best_config:
		print(param.ljust(25), best_config[param])
	for value in best_eval:
		print(value.ljust(25), best_eval[value])

	# print statistics of min/max/std over all models
	print('\nEpochs at which best models were found:')
	print('\tmin:', np.min(epochs))
	print('\tmax:', np.max(epochs))
	print('\tstd:', np.std(epochs))

	print('\nValidation losses:')
	print('\tmin:', np.min(validation_losses))
	print('\tmax:', np.max(validation_losses))
	print('\tstd:', np.std(validation_losses))

	print('\nPerplexities:')
	print('\tmin:', np.min(perplexities))
	print('\tmax:', np.max(perplexities))
	print('\tstd:', np.std(perplexities))

	print("\nWrote cumulative evaluation log to", os.path.join(args.models_dir, 'cumulative_evals-'+args.model_iteration+'.txt'))
	print("Wrote cumulative evaluation csv to", os.path.join(args.models_dir, 'cumulative_evals-'+args.model_iteration+'.csv'), '\n')

	for param in list(eval_statistics.keys()):
		for value in list(eval_statistics[param].keys()):
			if len(eval_statistics[param][value]) < len(eval_terms):
				eval_statistics[param].pop(value)

		if len(eval_statistics[param]) < 2:
			eval_statistics.pop(param)

	for param in eval_statistics:
		print('Parameter:', param)
		for value in eval_statistics[param]:
			print('\tValue:', value)
			for eval_term in eval_terms:
				eval_values = eval_statistics[param][value][eval_term]
				if len(eval_values) < 1:
					continue

				max_value = "{0:.3f}".format(max(eval_values))
				mean = "{0:.3f}".format(np.mean(eval_values))
				std = "{0:.3f}".format(np.std(eval_values))
				print('\t\t', eval_term.ljust(30), 'max: '+max_value+'  mean: '+mean+'  std: '+std)

		print()

	sys.stdout = sys.__stdout__
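append_eval_values() is used throughout but not shown. A plausible implementation consistent with how eval_statistics is read back above (param -> value -> eval term -> list of metrics); treat it as a reconstruction:

def append_eval_values(eval_statistics, param, value, eval_values):
	""" group each model's eval metrics under (parameter, value) for the summary stats """
	by_value = eval_statistics.setdefault(param, {})
	by_term = by_value.setdefault(str(value), {})
	for term, metric in eval_values.items():
		by_term.setdefault(term, []).append(metric)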
Example #13
import os
from uuid import uuid4
from utils import get_config_params, _run, clean_environ, init_logger
import logging

logger = logging.getLogger(__name__)
init_logger(logger)

VYOS_SHELL_API = get_config_params('bin', 'shell_api_path')
VYOS_SBIN_DIR = get_config_params('bin', 'vyos_sbin_dir')
VYOS_SAVE_SCRIPT = 'vyatta-save-config.pl'

# Create/Get the logger object
#logger = init_logger()

class SessionAlreadyExists(Exception): pass
class SetupSessionFailed(Exception): pass
class OperationFailed(Exception): pass
class SessionNotExists(Exception): pass

class Session(object):
    """
    Return the session instance if it exists; otherwise create a new one.
    SessionAlreadyExists is raised on a second instantiation.
    """
    _ref = None
    def __new__(cls, *args, **kw):
        if cls._ref is not None:
            raise SessionAlreadyExists('A session already exists!')
        cls._ref = super(Session, cls).__new__(cls)
        return cls._ref
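Illustrative use of the singleton guard above — a second instantiation raises rather than silently returning the existing session:

session = Session()
try:
    another = Session()
except SessionAlreadyExists as exc:
    logger.error(exc)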
Example #14
#!/usr/bin/python3
# =================================================================
# SPDX-License-Identifier: MIT
#
# Copyright (C) 2020-2021 Government of Canada
#
# Main Authors: Drew Rotheram <*****@*****.**>
#               Joost van Ulden <*****@*****.**>
# =================================================================

import argparse

import utils

config = utils.get_config_params("config.ini")
version = config.get("es", "version")


def main():
    # args = parse_args()
    table = utils.PostGISTable(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "coordinates": {
Example #15
import os
from uuid import uuid4
from utils import get_config_params, _run, clean_environ, init_logger
import logging

logger = logging.getLogger(__name__)
init_logger(logger)

VYOS_SHELL_API = get_config_params('bin', 'shell_api_path')
VYOS_SBIN_DIR = get_config_params('bin', 'vyos_sbin_dir')
VYOS_SAVE_SCRIPT = 'vyatta-save-config.pl'

# Create/Get the logger object
#logger = init_logger()


class SessionAlreadyExists(Exception):
    pass


class SetupSessionFailed(Exception):
    pass


class OperationFailed(Exception):
    pass


class SessionNotExists(Exception):
    pass
Example #16
def main():
    args = parse_args()

    config = utils.get_config_params("config.ini")
    version = config.get("es", "version")

    # Create building level aggregation object and load to ES
    dsraTable = utils.PostGISPointDataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "coordinates": {
                            "type": "geo_point"
                        },
                        "geometry": {
                            "type": "geo_shape"
                        },
                    }
                },
            }),
        view="opendrr_dsra_{eqScenario}_indicators_b_{version}".format(
            **{
                "eqScenario": args.eqScenario,
                "version": version
            }).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom_point) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_indicators_b \
            ORDER BY dsra_{eqScenario}_indicators_b."AssetID" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(**{"eqScenario": args.eqScenario}),
    )
    dsraTable.postgis2es()

    # Create Sauid level aggregation object and load to ES
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_dsra_{eqScenario}_indicators_s_{version}".format(
            **{
                "eqScenario": args.eqScenario,
                "version": version
            }).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom_poly) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_indicators_s \
            ORDER BY dsra_{eqScenario}_indicators_s."Sauid" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(**{"eqScenario": args.eqScenario}),
    )
    dsraTable.postgis2es()

    # Create CSD level aggregation object and load to ES
    dsraTable = utils.PostGISdataset(
        utils.PostGISConnection(),
        utils.ESConnection(
            settings={
                "settings": {
                    "number_of_shards": 1,
                    "number_of_replicas": 0
                },
                "mappings": {
                    "properties": {
                        "geometry": {
                            "type": "geo_shape"
                        }
                    }
                },
            }),
        view="opendrr_dsra_{eqScenario}_indicators_csd_{version}".format(
            **{
                "eqScenario": args.eqScenario,
                "version": version
            }).lower(),
        sqlquerystring='SELECT *, ST_AsGeoJSON(geom) \
            FROM results_dsra_{eqScenario}.dsra_{eqScenario}_indicators_csd \
            ORDER BY dsra_{eqScenario}_indicators_csd."csduid" \
            LIMIT {{limit}} \
            OFFSET {{offset}}'.format(**{"eqScenario": args.eqScenario}),
    )
    dsraTable.postgis2es()

    return