Example #1
    def getDevices(self):
        """
        Calls Knack to retrieve Unit Data.
        """
        device_filters = {'match': 'and',
                          'rules': [{'field': 'field_2384',
                                     'operator': 'is',
                                     'value': 1}]}

        knackApp = knackpy.App(app_id=self.appID,
                               api_key=self.apiKey)
        device_locs = knackApp.get('object_98',
                                   filters=device_filters,
                                   generate=True)
        device_locs = [loc.format() for loc in device_locs]

        devices_data = pd.DataFrame(device_locs)
        devices_data['SENSOR_TYPE'] = 'GRIDSMART'
        devices_data = (pd.merge(devices_data, self._getLocations(),
                                 on='SIGNAL_ID', how='left')
                        .drop(labels='SIGNAL_ID', axis='columns')
                        .rename(columns=GS_RENAME))
        devices_data = devices_data[['device_type', 'atd_device_id',
                                     'device_ip', 'device_status',
                                     'ip_comm_status', 'atd_location_id',
                                     'coa_intersection_id',
                                     'lat', 'lon', 'primary_st',
                                     'primary_st_segment_id',
                                     'cross_st', 'cross_st_segment_id']]

        return devices_data
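
The device_filters dict above is Knack's native filter format, which knackpy forwards to the API. For reference, a minimal standalone query with the same match/rules shape looks like this (the app credentials, object key, and field key here are placeholders, not values from the example):

import knackpy

filters = {
    "match": "and",
    "rules": [{"field": "field_1", "operator": "is", "value": 1}],
}

app = knackpy.App(app_id="<app id>", api_key="<api key>")
records = app.get("object_1", filters=filters)
rows = [record.format() for record in records]  # humanized dicts, one per record
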
Example #2
def main():
    logging.info("Starting...")
    KNACK_API_KEY = os.environ["KNACK_API_KEY"]
    KNACK_APP_ID = os.environ["KNACK_APP_ID"]
    GITHUB_ACCESS_TOKEN = os.environ["GITHUB_ACCESS_TOKEN"]
    REPO = "cityofaustin/atd-data-tech"
    KNACK_OBJ = "object_30"
    KNACK_TITLE_FIELD = "field_538"
    KNACK_ISSUE_NUMBER_FIELD = "field_492"

    app = knackpy.App(app_id=KNACK_APP_ID, api_key=KNACK_API_KEY)
    project_records = app.get(KNACK_OBJ)

    g = Github(GITHUB_ACCESS_TOKEN)
    repo = g.get_repo(REPO)

    project_issues_paginator = repo.get_issues(state="all", labels=["Project Index"])
    project_issues = [issue for issue in project_issues_paginator]

    knack_payload = build_payload(
        project_records, project_issues, KNACK_TITLE_FIELD, KNACK_ISSUE_NUMBER_FIELD
    )

    logging.info(f"Creating/updating {len(knack_payload)} issues")

    for record in knack_payload:
        method = "update" if record.get("id") else "create"
        app.record(data=record, method=method, obj=KNACK_OBJ)

    logging.info(f"{len(knack_payload)} records processed.")
Example #3
def main(env):
    if env != "local":
        proceed = input(
            f"About to fetch projects in Moped {env} environment. Type 'yes' to proceed or any key to quit > "
        )
        if proceed != "yes":
            sys.exit()

    logger.info("Initializing Knack app...")
    app = knackpy.App(app_id=KNACK_AUTH["app_id"],
                      api_key=KNACK_AUTH["api_key"])

    logger.info("Fetching Moped projects with Knack ID")
    moped_projects_with_knack_ids = make_hasura_request(
        query=MOPED_KNACK_PROJECTS_QUERY,
        variables=None,
        key="moped_project",
        env=env)

    logger.info(
        f"{len(moped_projects_with_knack_ids)} projects to update in Knack...")
    for i, project in enumerate(moped_projects_with_knack_ids):
        logger.info(
            f"Updating {i + 1} of {len(moped_projects_with_knack_ids)}")
        record = {
            "id": project["knack_project_id"],
            KNACK_MOPED_PROJECT_ID_FIELD: project["project_id"],
        }
        app.record(obj=KNACK_PROJECTS_OBJECT, data=record, method="update")
        logger.info(
            f"Updated Knack record {record['id']} with Moped project ID {record[KNACK_MOPED_PROJECT_ID_FIELD]}"
        )
    logger.info("Done :)")
Example #4
def app():
    with open("tests/_metadata.json", "r") as fin:
        metadata = json.loads(fin.read())

    with open("tests/_all_fields.json", "r") as fin:
        data = json.loads(fin.read())
        data = data["records"]

    app = knackpy.App(app_id=metadata["application"]["id"], metadata=metadata)
    app.data = {OBJ_KEY: data}
    return app
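
Because the records are side-loaded into app.data rather than fetched, tests built on this (presumably pytest) fixture exercise knackpy's formatting without any network calls. A sketch of what a test might do with it:

def test_format_all_fields(app):
    records = app.get(OBJ_KEY)  # served from app.data, no API request
    formatted = [record.format() for record in records]
    assert len(formatted) == len(app.data[OBJ_KEY])
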
Example #5
def main():
    logger.info("Initializing Knack app...")
    app = knackpy.App(app_id=KNACK_AUTH["app_id"],
                      api_key=KNACK_AUTH["api_key"])

    logger.info("Saving file list...")
    records = app.get(KNACK_PROJECT_FILES_OBJECT)
    write_csv(records)

    logger.info("Downloading files. This may take a while...")
    app.download(
        container=KNACK_PROJECT_FILES_OBJECT,
        field=KNACK_ATTACHMENT_FIELD_ID,
        out_dir="_knack_files",
        label_keys=[KNACK_ATTACHMENT_MOPED_PROJ_ID_FIELD],
    )
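
The write_csv helper is not shown in this excerpt; a minimal version consistent with how it is called (an assumption, not the original code) could be:

import csv

def write_csv(records, fname="file_list.csv"):
    # format() returns a plain dict per record, so DictWriter can consume it
    rows = [record.format() for record in records]
    if not rows:
        return
    with open(fname, "w", newline="") as fout:
        writer = csv.DictWriter(fout, fieldnames=rows[0].keys())
        writer.writeheader()
        writer.writerows(rows)
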
Example #6
def main():
    args = cli_args()
    record_type = args.name
    app_name = args.dest

    # get latest finance records from AWS S3
    logging.info(f"Downloading {record_type} records from S3...")

    records_current_unfiltered = download_json(bucket_name=BUCKET,
                                               fname=f"{record_type}.json")
    src_data_filter_func = (FIELD_MAPS[record_type].get("src_data_filter",
                                                        {}).get(app_name))

    records_current = (apply_src_data_filter(records_current_unfiltered,
                                             src_data_filter_func)
                       if src_data_filter_func else records_current_unfiltered)

    # fetch the same type of records from knack
    logging.info(f"Downloading {record_type} records from Knack...")

    app = knackpy.App(app_id=KNACK_APP_ID, api_key=KNACK_API_KEY)
    knack_obj = FIELD_MAPS[record_type]["knack_object"][app_name]
    records_knack = [dict(record) for record in app.get(knack_obj)]

    logging.info(f"Transforming records...")
    field_map = FIELD_MAPS[record_type]["field_map"]

    current_pk, knack_pk = get_pks(field_map, app_name)

    coalesce_fields = FIELD_MAPS[record_type].get("coalesce_fields")

    if coalesce_fields:
        records_current = coalesce_records(records_current, coalesce_fields,
                                           current_pk)

    # identify new/changed records and map to destination Knack app schema
    todos = handle_records(records_current, records_knack, knack_pk, field_map,
                           app_name)

    logging.info(f"{len(todos)} records to process.")

    for record in todos:
        method = "create" if not record.get("id") else "update"
        app.record(data=record, method=method, obj=knack_obj)
Example #7
import os

import psycopg2

# using the extras module to enable dictionary-like behavior on db results
from psycopg2 import extras

# get connected to a postgres database
pg = psycopg2.connect(
    host=os.getenv("MOPED_RR_HOSTNAME"),
    database=os.getenv("MOPED_RR_DATABASE"),
    user=os.getenv("MOPED_RR_USERNAME"),
    password=os.getenv("MOPED_RR_PASSWORD"),
)

# instantiate our knackpy client
knack = knackpy.App(app_id=os.getenv("KNACK_APP_ID"),
                    api_key=os.getenv("KNACK_API_KEY"))

# pull a copy of all the projects in the projects table
records = knack.get(os.getenv("KNACK_PROJECT_VIEW"))

# iterate over all knack projects
for knack_record in records:

    # only operate on knack projects which have an associated moped project
    if knack_record[os.getenv("KNACK_MOPED_ID_FIELD")]:
        # make a little whitespace to show iterations in STDOUT
        print()

        # extract the moped_project.project_id value
        project_id = knack_record[os.getenv("KNACK_MOPED_ID_FIELD")]
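
The excerpt imports psycopg2.extras for dict-style rows but is cut off before the database is queried. A sketch of the kind of lookup the loop appears to be heading toward (the table and column names are assumptions based on the Moped examples above):

# look up the Moped project matching the ID recorded in Knack
cursor = pg.cursor(cursor_factory=extras.RealDictCursor)
cursor.execute(
    "select * from moped_project where project_id = %s", (project_id,)
)
moped_project = cursor.fetchone()  # rows behave like dicts: moped_project["project_id"]
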
Example #8
def main():
    APP_ID = os.getenv("KNACK_APP_ID")
    PGREST_JWT = os.getenv("PGREST_JWT")
    PGREST_ENDPOINT = os.getenv("PGREST_ENDPOINT")

    args = utils.args.cli_args(["app-name", "container", "date"])
    logger.info(args)

    container = args.container
    config = CONFIG.get(args.app_name).get(container)

    if not config:
        raise ValueError(
            f"No config entry found for app: {args.app_name}, container: {container}"
        )

    location_field_id = config.get("location_field_id")
    client_postgrest = utils.postgrest.Postgrest(PGREST_ENDPOINT,
                                                 token=PGREST_JWT)
    metadata_knack = utils.postgrest.get_metadata(client_postgrest, APP_ID)
    app = knackpy.App(app_id=APP_ID, metadata=metadata_knack)
    filter_iso_date_str = format_filter_date(args.date)

    logger.info(
        f"Downloading records from app {APP_ID}, container {container}.")

    data = client_postgrest.select(
        "knack",
        params={
            "select": "record",
            "app_id": f"eq.{APP_ID}",
            "container_id": f"eq.{container}",
            "updated_at": f"gte.{filter_iso_date_str}",
        },
        order_by="record_id",
    )

    logger.info(f"{len(data)} records to process")

    if not data:
        return

    client_socrata = utils.socrata.get_client()
    resource_id = config["socrata_resource_id"]
    metadata_socrata = client_socrata.get_metadata(resource_id)

    if location_field_id:
        patch_formatters(app.field_defs, location_field_id, metadata_socrata)

    # side-load knack data so we can utilize knackpy Record class for formatting
    app.data[container] = [r["record"] for r in data]

    records = app.get(container)

    # apply transforms to meet socrata's expectations
    payload = [record.format() for record in records]
    # format knack field names as lowercase/no spaces
    payload = [utils.shared.format_keys(record) for record in payload]
    # remove unknown fields first to reduce extra processing when doing subsequent transforms
    remove_unknown_fields(payload, metadata_socrata)
    bools_to_strings(payload)
    handle_arrays(payload)
    floating_timestamp_fields = utils.socrata.get_floating_timestamp_fields(
        resource_id, metadata_socrata)
    handle_floating_timestamps(payload, floating_timestamp_fields)

    timestamp_key = config.get("append_timestamps_socrata", {}).get("key")

    if timestamp_key:
        utils.socrata.append_current_timestamp(payload, timestamp_key)

    method = "replace" if not args.date else "upsert"

    if config.get("no_replace_socrata") and method == "replace":
        raise ValueError("""
            Replacement of this Socrata dataset is not allowed. Specify a date range or
            modify the 'no_replace_socrata' setting in this container's config.
            """)

    utils.socrata.publish(method=method,
                          resource_id=resource_id,
                          payload=payload,
                          client=client_socrata)
    logger.info(f"{len(payload)} records processed.")
Example #9
def main():
    KNACK_APP_NAME = os.getenv("KNACK_APP_NAME")
    KNACK_APP_ID = os.getenv("KNACK_APP_ID")
    KNACK_API_KEY = os.getenv("KNACK_API_KEY")

    result = {}

    records_hr_banner = get_employee_data()
    employee_emails = get_emails_data()
    records_hr_emails = update_emails(records_hr_banner, employee_emails)
    records_mapped = map_records(records_hr_emails, FIELD_MAP, KNACK_APP_NAME)

    # use knackpy to get records from knack hr object
    knack_obj = ACCOUNTS_OBJS[KNACK_APP_NAME]
    app = knackpy.App(app_id=KNACK_APP_ID, api_key=KNACK_API_KEY)
    records_knack = app.get(knack_obj)

    pk_field = get_primary_key_field(FIELD_MAP, KNACK_APP_NAME)
    status_field = USER_STATUS_FIELD[KNACK_APP_NAME]
    password_field = PASSWORD_FIELD[KNACK_APP_NAME]
    email_field = EMAIL_FIELD[KNACK_APP_NAME]
    created_date_field = CREATED_DATE_FIELD[KNACK_APP_NAME]
    class_field = CLASS_FIELD[KNACK_APP_NAME]
    separated_field = SEPARATED_FIELD[KNACK_APP_NAME]
    name_field = NAME_FIELD[KNACK_APP_NAME]
    user_role_field = USER_ROLE_FIELD[KNACK_APP_NAME]
    payload = build_payload(
        records_knack,
        records_mapped,
        pk_field,
        status_field,
        password_field,
        created_date_field,
        class_field,
        separated_field,
        user_role_field,
        email_field,
        name_field,
        result,
    )
    cleaned_payload = remove_empty_emails(payload, email_field, name_field)

    logging.info(f"{len(cleaned_payload)} total records to process in Knack.")

    result["errors"] = []
    for record in cleaned_payload:
        method = "update" if record.get("id") else "create"
        try:
            app.record(data=record, method=method, obj=knack_obj)
        except requests.HTTPError as e:
            if e.response.status_code == 400:
                errors_list = e.response.json()["errors"]
                result["errors"].append(format_errors(errors_list, record))
                continue
            else:
                # non-400 errors are raised immediately, which means any
                # previously collected errors will not be logged below
                raise

    logging.info(f"Update complete. {len(result['errors'])} errors.")
    logging.info(result)
    return result
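
The format_errors helper is not shown here; a minimal version consistent with the call site (an assumption about the shape of Knack's 400 error body) could be:

def format_errors(errors_list, record):
    # pair the failed payload with the error messages Knack returned
    return {"record": record, "errors": errors_list}
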
Example #10
        
def get_signal_ids(record):  # signature reconstructed; the excerpt begins mid-docstring
    """
    Returns:
        Set: A set of all internal IDs used by Knack for the signals
    """
    signals = set()
    for feature in record['moped_proj_features']:
        signals.add(feature['location']['properties']['id'])
    return signals


# Get Moped's current state of synchronized projects
moped_data = run_query(get_all_synchronized_projects)
#logger.debug(moped_data)

# Use KnackPy to pull the current state of records in Data Tracker
app = knackpy.App(app_id=KNACK_DATA_TRACKER_APP_ID,
                  api_key=KNACK_DATA_TRACKER_API_KEY)

knack_query_filter = {
    "match": "and",
    "rules": [
        {
            "field": KNACK_OBJECT_PROJECT_ID,
            "operator": "is not blank"
        },
    ],
}

records = app.get("view_" + KNACK_DATA_TRACKER_VIEW,
                  filters=knack_query_filter,
                  generate=1)
knack_records = {}
Example #11
    1. Roll back any previous changes and try again (hoping that the next run will go smoothly).
    2. Re-attempt every failed run until Knack responds with an HTTP 200 OK.

    The first option requires a lot more code that needs testing, and I am not confident
    it's the best way forward. The second option gives us less code to write and the
    freedom to re-attempt inserting a single record as many times as needed.

    This report is important, so it needs to keep retrying the insert until either Knack
    responds to the request or the DAG gives up.
"""

print("Inserting records into knack...")
for record in data:
    print("Processing: ", record)
    done = False
    while not done:
        try:
            app = knackpy.App(app_id=os.getenv("knack_app_id"),
                              api_key=os.getenv("knack_api_key"))
            response = app.record(method="create",
                                  data=record,
                                  obj=os.getenv("knack_object"))
            print("Response: ", response, "\n")
            done = True
        except requests.exceptions.HTTPError as e:
            print("Error: ", str(e), "\n")
            lapse = random.randrange(10, 15)
            print("Trying again in " + str(lapse) + " seconds")
            time.sleep(lapse)
            done = False
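
One side effect of the loop above is that knackpy.App is re-instantiated on every attempt; since knackpy fetches app metadata at construction unless it is supplied (as in the metadata= examples above), each retry costs an extra request. A bounded variant that builds the client once, keeping the same jittered sleep (a sketch; the cap is arbitrary, where the original retries forever):

app = knackpy.App(app_id=os.getenv("knack_app_id"),
                  api_key=os.getenv("knack_api_key"))

MAX_ATTEMPTS = 20

for record in data:
    for attempt in range(1, MAX_ATTEMPTS + 1):
        try:
            app.record(method="create", data=record,
                       obj=os.getenv("knack_object"))
            break
        except requests.exceptions.HTTPError as e:
            if attempt == MAX_ATTEMPTS:
                raise
            lapse = random.randrange(10, 15)
            print(f"Error: {e}. Retrying in {lapse} seconds...")
            time.sleep(lapse)
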
Example #12
def main():
    logging.info("Starting...")
    view = KNACK_APP["api_view"]["view"]
    app = knackpy.App(app_id=KNACK_APP_ID, api_key=KNACK_API_KEY)

    issues = app.get(view)

    if not issues:
        logging.info("No issues to process.")
        return 0

    prepared = []

    for issue in issues:
        # turn knack issues into github issues
        github_issue = map_issue(issue, FIELDS)
        github_issue = format_title(github_issue)
        # all issues are assigned to the service bot. on issue creation an email will
        # be sent to the transportation.data inbox, to be handled by the service desk
        github_issue["assignee"] = ["atdservicebot"]
        prepared.append(github_issue)

    g = Github(GITHUB_ACCESS_TOKEN)
    repo = get_repo(g, REPO)

    token = get_token(
        KNACK_DTS_PORTAL_SERVICE_BOT_USERNAME,
        KNACK_DTS_PORTAL_SERVICE_BOT_PASSWORD,
        KNACK_APP_ID,
    )

    responses = []

    for issue in prepared:
        result = repo.create_issue(
            title=issue["title"],
            labels=issue.get("labels"),
            assignees=issue.get("assignee"),
            body=issue["description"],
        )

        knack_payload = {
            "id": issue["knack_id"],
            "field_394": result.number,  # github issue number
            "field_395": issue["repo"],  # repo
            "field_392": "Sent",  # github transmission status
        }

        # update knack record as "Sent" using the form API, which will
        # trigger an email notification if warranted
        response = form_submit(
            token,
            KNACK_APP_ID,
            KNACK_APP["api_form"]["scene"],
            KNACK_APP["api_form"]["view"],
            knack_payload,
        )

        responses.append(response)

    logging.info(f"{len(responses)} issues processed.")
Example #13
def main():
    args = utils.args.cli_args(["app-name", "container", "date"])
    logger.info(args)
    container = args.container
    config = CONFIG.get(args.app_name).get(container)

    if not config:
        raise ValueError(
            f"No config entry found for app: {args.app_name}, container: {container}"
        )

    location_field_id = config.get("location_field_id")
    service_id = config["service_id"]
    layer_id = config["layer_id"]
    item_type = config["item_type"]

    client_postgrest = utils.postgrest.Postgrest(PGREST_ENDPOINT, token=PGREST_JWT)
    metadata_knack = utils.postgrest.get_metadata(client_postgrest, APP_ID)
    app = knackpy.App(app_id=APP_ID, metadata=metadata_knack)

    logger.info(f"Downloading records from app {APP_ID}, container {container}.")

    filter_iso_date_str = format_filter_date(args.date)

    data = client_postgrest.select(
        "knack",
        params={
            "select": "record",
            "app_id": f"eq.{APP_ID}",
            "container_id": f"eq.{container}",
            "updated_at": f"gte.{filter_iso_date_str}",
        },
        order_by="record_id",
    )

    logger.info(f"{len(data)} to process.")

    if not data:
        return

    app.data[container] = [r["record"] for r in data]
    records = app.get(container)

    fields_names_to_sanitize = [
        f.name
        for f in app.field_defs
        if f.type in ["short_text", "paragraph_text"]
        and (f.obj == container or container in f.views)
    ]

    gis = arcgis.GIS(url=URL, username=USERNAME, password=PASSWORD)
    service = gis.content.get(service_id)

    if item_type == "layer":
        layer = service.layers[layer_id]
    elif item_type == "table":
        layer = service.tables[layer_id]
    else:
        raise ValueError(f"Unknown item_type specified: {item_type}")

    logger.info("Building features...")

    features = [
        utils.agol.build_feature(
            record, SPATIAL_REFERENCE, location_field_id, fields_names_to_sanitize
        )
        for record in records
    ]

    if not args.date:
        """
        Completely replace destination data. arcgis does have layer.manager.truncate()
        method, but this method is not supported on the parent layer of parent-child
        relationships. So we truncate the layer by deleting with a "where 1=1"
        expression. We use the "future" option to avoid request timeouts on large
        datasets.
        """
        logger.info("Deleting all features...")
        res = resilient_layer_request(
            layer.delete_features, {"where": "1=1", "future": True}
        )
        # returns a "<Future>" response class which does not appear to be documented
        while res._state != "FINISHED":
            logger.info(f"Response state: {res._state}. Sleeping for 1 second")
            time.sleep(1)
        utils.agol.handle_response(res._result)

    else:
        """
        Simulate an upsert by deleting features from AGOL if they exist.

        The arcgis package does have a method that supports upserting: append()
        https://developers.arcgis.com/python/api-reference/arcgis.features.toc.html#featurelayer  # noqa E501

        However, this method errored out on multiple datasets, so I gave up.
        layer.append(
            edits=features, upsert=True, upsert_matching_field="id"
        )
        """
        logger.info(f"Deleting {len(features)} features...")
        key = "id"
        keys = [f'\'{f["attributes"][key]}\'' for f in features]
        for key_chunk in chunks(keys, 100):
            key_list_stringified = ",".join(key_chunk)
            res = resilient_layer_request(
                layer.delete_features, {"where": f"{key} in ({key_list_stringified})"}
            )
            utils.agol.handle_response(res)

    logger.info("Uploading features...")

    for features_chunk in chunks(features, 500):
        logger.info("Uploading chunk...")
        res = resilient_layer_request(
            layer.edit_features, {"adds": features_chunk, "rollback_on_failure": False}
        )
        utils.agol.handle_response(res)
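
The chunks helper used for batching is not shown in this excerpt; a typical generator implementation (an assumption, not the original code):

def chunks(seq, size):
    """Yield successive slices of at most `size` items from seq."""
    for i in range(0, len(seq), size):
        yield seq[i : i + size]
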
Example #14
File: MISC.py  Project: Iisting/Miza
    def __init__(self, c_id, c_sec):
        # utc() and create_future_ex() are helpers defined elsewhere in the Miza project
        self.id = c_id
        self.secret = c_sec
        self.time = utc()
        self.knack = knackpy.App(app_id=self.id, api_key=self.secret)
        create_future_ex(self.pull)  # presumably schedules self.pull to run asynchronously
Example #15
"""
Quick start for REPL experimentation. Run as: `python -i knack_exp.py`
"""
import json
import knackpy

#
# Sample usage:
#
# $ python -i knack_exp.py
# >>> app.info()
# {'objects': 7, 'scenes': 9, 'records': 5560, 'size': '1.05gb'}
# >>> last5
# [<Record '02/13/2021'>, <Record '02/13/2021'>, <Record '02/13/2021'>, <Record '02/13/2021'>, <Record '02/13/2021'>]
# >>> r
# <Record '02/13/2021'>
#

APP_ID = "5faae3b10442ac00165da195"
API_KEY = "renderer"

app = knackpy.App(app_id=APP_ID, api_key=API_KEY)

last5 = app.get("Covid Events", record_limit=5)

r = last5[0]
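
From the REPL, the Record instances can be inspected with the same calls used throughout the examples above:

#
# >>> r.format()   # formatted values, keyed by field label
# >>> dict(r)      # raw values, keyed by field key
# >>> app.info()   # object/scene/record counts for the app
#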