def send_slack_message(
    message_type: MessageType,
    project_name: str,
    project_id: Optional[str],
    details: str = "no details provided",
):
    """Send a Slack message about a project event.

    Parameters
    ----------
    message_type: MessageType
        Which event to report; unknown types are silently ignored.
    project_name: str
    project_id: Optional[str]
    details: str
        Extra information, currently only used for FAIL messages.

    Uses the Slack token/channel configured in the environment and does
    nothing (but logs) when Slack is not configured. Progress
    notifications (90%/100%) are posted to the fixed 'mapswipe_managers'
    channel, everything else to SLACK_CHANNEL.
    """
    if SLACK_TOKEN in [None, ""] or SLACK_CHANNEL in [None, ""]:
        logger.info("No configuration for Slack was found. " +
                    f"No '{message_type}' Slack message was sent.")
        return None

    # (channel, message) per message type; replaces six copy-pasted
    # if/elif branches that each built a string and posted it
    messages = {
        MessageType.SUCCESS: (
            SLACK_CHANNEL,
            "### PROJECT CREATION SUCCESSFUL ###\n"
            f"Project Name: {project_name}\n"
            f"Project Id: {project_id}\n\n"
            "Make sure to activate the project using the manager dashboard.\n"
            "Happy Swiping. :)"),
        MessageType.FAIL: (
            SLACK_CHANNEL,
            "### PROJECT CREATION FAILED ###\n"
            f"Project Name: {project_name}\n"
            "Project draft is deleted.\n\n"
            "REASON:\n"
            f"{details}"),
        MessageType.NOTIFICATION_90: (
            "mapswipe_managers",
            "### ALMOST THERE! PROJECT REACHED 90% ###\n"
            f"Project Name: {project_name}\n"
            f"Project Id: {project_id}\n\n"
            "Get your next projects ready."),
        MessageType.NOTIFICATION_100: (
            "mapswipe_managers",
            "### GREAT! PROJECT REACHED 100% ###\n"
            f"Project Name: {project_name}\n"
            f"Project Id: {project_id}\n\n"
            "You can set this project to 'finished' "
            "and activate another one."),
        MessageType.PROJECT_STATUS_FINISHED: (
            SLACK_CHANNEL,
            "### SET PROJECT STATUS TO FINISHED ###\n"
            f"Project Name: {project_name}\n"
            f"Project Id: {project_id}\n\n"
            "The status of the project has been set to 'finished' "
            "by MapSwipe's backend workers."),
        MessageType.PROJECT_STATUS_ACTIVE: (
            SLACK_CHANNEL,
            "### SET PROJECT STATUS TO ACTIVE ###\n"
            f"Project Name: {project_name}\n"
            f"Project Id: {project_id}\n\n"
            "The status of the project has been set to 'active' "
            "by MapSwipe's backend workers."),
    }

    if message_type not in messages:
        # TODO: Raise an Exception for unknown message types
        return None

    channel, message = messages[message_type]
    slack.WebClient(token=SLACK_TOKEN).chat_postMessage(channel=channel,
                                                        text=message)
def get_projects(status: str) -> OrderedDict:
    """Load all projects with the given status from Firebase."""
    fb_db = firebaseDB()
    ref = fb_db.reference("v2/projects/")
    projects = ref.order_by_child("status").equal_to(status).get()
    logger.info(f"got {len(projects)} {status} projects from firebase")
    return projects
def run_create_tutorials() -> None:
    """Create tutorials from submitted tutorial drafts in Firebase.

    For each draft: build the tutorial object, generate its groups and
    tasks and save it. On failure the draft is deleted and a FAIL
    message is sent to Slack.
    """
    fb_db = auth.firebaseDB()
    ref = fb_db.reference("v2/tutorialDrafts/")
    tutorial_drafts = ref.get()

    if tutorial_drafts is None:
        logger.info("There are no tutorial drafts in firebase.")
        return None

    for tutorial_draft_id, tutorial_draft in tutorial_drafts.items():
        tutorial_draft["tutorialDraftId"] = tutorial_draft_id
        project_type = tutorial_draft["projectType"]
        project_name = tutorial_draft["name"]

        try:
            tutorial = ProjectType(project_type).tutorial(tutorial_draft)
            tutorial.create_tutorial_groups()
            tutorial.create_tutorial_tasks()
            tutorial.save_tutorial()
            send_slack_message(MessageType.SUCCESS, project_name,
                               tutorial.projectId)
            logger.info(f"Success: Tutorial Creation ({project_name})")
        except CustomError:
            # delete the draft so it is not retried forever
            ref = fb_db.reference(f"v2/tutorialDrafts/{tutorial_draft_id}")
            ref.set({})

            # 'tutorial' is unbound if the constructor itself raised;
            # same guard as in run_create_projects
            try:
                project_id = tutorial.projectId
            except UnboundLocalError:
                project_id = None

            send_slack_message(MessageType.FAIL, project_name, project_id)
            logger.exception(
                "Failed: Tutorial Creation ({0})".format(project_name))
            sentry.capture_exception()
def get_contributors_by_date(results_df: pd.DataFrame) -> pd.DataFrame:
    """
    Retrospectively derive per-project, per-day contributor statistics
    from the results: number_of_users, number_of_new_users and
    cum_number_of_users.
    """
    # first day each user ever contributed (across all projects)
    first_day_per_user = results_df.groupby(["user_id"]).agg(
        first_day=pd.NamedAgg(column="day", aggfunc="min"))
    logger.info("calculated first day per user")

    # result counts per (project, user, day), annotated with whether
    # that day was the user's very first contribution day
    per_user_day = results_df.groupby(["project_id", "user_id", "day"]).agg(
        number_of_results=pd.NamedAgg(column="user_id", aggfunc="count"))
    per_user_day = per_user_day.reset_index().merge(first_day_per_user,
                                                    left_on="user_id",
                                                    right_on="user_id")
    per_user_day["new_user"] = per_user_day.apply(
        lambda row: is_new_user(row["day"], row["first_day"]), axis=1)

    # aggregate to (project, day) and add the running total of new users
    contributors_by_date_df = per_user_day.reset_index().groupby(
        ["project_id", "day"]).agg(
            number_of_users=pd.NamedAgg(column="user_id",
                                        aggfunc=pd.Series.nunique),
            number_of_new_users=pd.NamedAgg(column="new_user", aggfunc="sum"))
    contributors_by_date_df["cum_number_of_users"] = (
        contributors_by_date_df["number_of_new_users"].cumsum())

    logger.info("calculated contributors by date")
    return contributors_by_date_df
    def save_tutorial(self):
        """Save the tutorial in Firebase.

        Uploads the tutorial's project, group and task data and then
        deletes the originating tutorial draft.

        Raises
        ------
        CustomError
            If the tutorial has no valid project id.
        """
        # validate before touching Firebase or the instance state
        if not self.projectId or self.projectId == "":
            raise CustomError(
                f"""Given argument resulted in invalid Firebase Realtime Database reference.
                    Project Id is invalid: {self.projectId}""")

        # copy vars(self): it returns the live __dict__, so popping keys
        # on it directly would delete the attributes from the instance
        tutorial = dict(vars(self))
        groups = self.groups
        tasks = self.tasks

        # these attributes are stored under their own paths (or are
        # intermediate data) and must not go into v2/projects
        for key in ("groups", "tasks", "raw_tasks", "examplesFile",
                    "tutorial_tasks"):
            tutorial.pop(key, None)

        fb_db = auth.firebaseDB()
        ref = fb_db.reference("")
        ref.update({
            f"v2/projects/{self.projectId}": tutorial,
            f"v2/groups/{self.projectId}": groups,
            f"v2/tasks/{self.projectId}": tasks,
        })

        logger.info(f"uploaded tutorial data to firebase for {self.projectId}")

        # setting the draft to an empty dict removes it
        ref = fb_db.reference(f"v2/tutorialDrafts/{self.tutorialDraftId}")
        ref.set({})
Esempio n. 6
0
def get_recent_projects(hours: int = 3):
    """Get ids for projects when results have been submitted within the last x hours."""
    pg_db = auth.postgresDB()
    query = """
        select project_id
        from results
        -- Using timestamp attribute here which is set for all projects
        -- and also represents the start_time for newer projects.
        -- "Old" projects have no start_time attribute.
        -- There is an index defined on "timestamp".
        where "timestamp" >= %(timestamp)s
        group by project_id
    """
    # cut-off time in milliseconds with a trailing "Z" (UTC)
    cutoff = dt.datetime.utcnow() - dt.timedelta(hours=hours)
    timestamp = cutoff.isoformat()[0:-3] + "Z"
    rows = pg_db.retr_query(query, {"timestamp": timestamp})

    project_ids = [row[0] for row in rows]
    logger.info(
        f"Got {len(project_ids)} projects from postgres with recent results.")

    return project_ids
Esempio n. 7
0
 def is_valid(self):
     """Return True if the group contains at least one task."""
     if self.numberOfTasks <= 0:
         logger.info(f"group is not valid: {self.groupId}")
         return False
     return True
Esempio n. 8
0
def create_user(email, username, password):
    """Create a Firebase auth user and its profile in the Realtime DB.

    Returns the created Firebase user record.

    Raises
    ------
    CustomError
        If the user could not be created.
    """
    fb_db = firebaseDB()
    try:
        user = auth.create_user(email=email,
                                display_name=username,
                                password=password)

        ref = fb_db.reference(f"v2/users/{user.uid}/")
        ref.update({
            "username": username,
            "taskContributionCount": 0,
            "groupContributionCount": 0,
            "projectContributionCount": 0,
            # store current datetime in milliseconds
            "created": datetime.datetime.utcnow().isoformat()[0:-3] + "Z",
        })
        logger.info(f"created new user: {user.uid}")
        return user
    except Exception as e:
        logger.info(f"could not create new user {email}.")
        # chain the original exception so the root cause is preserved
        raise CustomError(e) from e
Esempio n. 9
0
def adjust_overlapping_groups(groups: Dict, zoom: int):
    """Loop through groups dict and merge overlapping groups.

    Consumes *groups* (entries are deleted while it is processed) and
    returns a tuple (groups_without_overlap, overlaps_total).
    """

    groups_without_overlap = {}
    overlaps_total = 0

    # iterate over a snapshot of the keys; the dict shrinks as groups
    # are merged away below
    for group_id in list(groups.keys()):

        # skip if groups has been removed already
        if group_id not in groups.keys():
            continue

        overlap_count = 0
        for group_id_b in list(groups.keys()):
            # skip if it is the same group
            if group_id_b == group_id:
                continue

            if groups_intersect(groups[group_id], groups[group_id_b]):
                overlap_count += 1
                # NOTE(review): if group_id intersects several other
                # groups, each merge overwrites the previous entry for
                # group_id in groups_without_overlap — presumably
                # merge_groups accumulates the merged extent; confirm
                # against merge_groups.
                new_group = merge_groups(groups[group_id], groups[group_id_b],
                                         zoom)
                del groups[group_id_b]
                groups_without_overlap[group_id] = new_group

        if overlap_count == 0:
            groups_without_overlap[group_id] = groups[group_id]

        # remove the processed group so later iterations skip it
        del groups[group_id]
        overlaps_total += overlap_count

    logger.info(f"overlaps_total: {overlaps_total}")
    return groups_without_overlap, overlaps_total
Esempio n. 10
0
def get_all_projects_of_type(project_type: int):
    """Get the project ids for active and inactive projects in Firebase DB.

    Only projects of the given type that carry no 'tutorialId'
    attribute are returned.
    """

    project_id_list = []
    fb_db = firebaseDB()

    # we neglect private projects here
    # since there are no projects set up in production yet
    status_list = ["active", "inactive"]

    for status in status_list:
        logger.info(f"query {status} projects")
        projects = (
            fb_db.reference("v2/projects/")  # plain string, no placeholder
            .order_by_child("status")
            .equal_to(status)
            .get()
        )
        for project_id, data in projects.items():
            # use logical 'and' instead of bitwise '&'; '&' only worked
            # by accident because both operands happen to be bools
            if (data.get("projectType", 1) == project_type
                    and data.get("tutorialId", None) is None):
                project_id_list.append(project_id)

    logger.info(f"got {len(project_id_list)} project from firebase.")
    return project_id_list
def send_progress_notification(project_id: int):
    """Send progress notification to project managers in Slack."""
    fb_db = auth.firebaseDB()

    def _get(attribute):
        """Read one attribute of the project from Firebase."""
        return fb_db.reference(f"v2/projects/{project_id}/{attribute}").get()

    progress = _get("progress")
    if not progress:
        logger.info(
            f"could not get progress from firebase for project {project_id}")
        return

    if progress < 90:
        # nothing to notify about yet
        return

    project_name = _get("name")
    notification_90_sent = _get("notification_90_sent")
    notification_100_sent = _get("notification_100_sent")
    logger.info(
        f"{project_id} - progress: {progress},"
        f"notifications: {notification_90_sent} {notification_100_sent}")

    if not notification_90_sent:
        # send notification and set value in firebase
        send_slack_message(MessageType.NOTIFICATION_90, project_name,
                           project_id)
        fb_db.reference(
            f"v2/projects/{project_id}/notification_90_sent").set(True)

    if progress >= 100 and not notification_100_sent:
        # send notification and set value in firebase
        send_slack_message(MessageType.NOTIFICATION_100, project_name,
                           project_id)
        fb_db.reference(
            f"v2/projects/{project_id}/notification_100_sent").set(True)
Esempio n. 12
0
def get_results(filename: str, project_id: str) -> pd.DataFrame:
    """
    Query results from postgres database for project id.
    Save results to a csv file.
    Load pandas dataframe from this csv file.
    Parse timestamp as datetime object and add attribute "day" for each result.
    Return None if there are no results for this project.
    Otherwise return dataframe.

    Parameters
    ----------
    filename: str
    project_id: str
    """

    sql_query = sql.SQL("""
        COPY (
            SELECT *
            FROM results
            WHERE project_id = {}
        ) TO STDOUT WITH CSV HEADER
        """).format(sql.Literal(project_id))
    write_sql_to_gzipped_csv(filename, sql_query)

    df = load_df_from_csv(filename)

    # guard clause: no results at all for this project
    if df.empty:
        logger.info(f"there are no results for this project {project_id}")
        return None

    df["timestamp"] = pd.to_datetime(df["timestamp"])
    # truncate each timestamp to midnight of its day
    df["day"] = df["timestamp"].apply(
        lambda ts: datetime.datetime(year=ts.year, month=ts.month, day=ts.day))
    logger.info(f"added day attribute for results for {project_id}")
    return df
Esempio n. 13
0
def move_project_data_to_v2(project_id):
    """
    Copy project information from old path to v2/projects in Firebase.
    Add status=archived attribute.
    Use Firebase transaction function for this.
    Returns True on success, False if the transaction failed.
    """
    fb_db = auth.firebaseDB()

    # Firebase transaction function
    def transfer(current_data):
        # attributes expected by version 2 of the data model
        current_data["status"] = "archived"
        current_data["projectType"] = 1
        current_data["projectId"] = str(project_id)
        current_data["progress"] = current_data.get("progress", 0)
        current_data["name"] = current_data.get("name", "unknown")
        fb_db.reference("v2/projects/{0}".format(project_id)).set(current_data)
        # write an empty dict back so the old path is cleared
        return dict()

    projects_ref = fb_db.reference(f"projects/{project_id}")
    try:
        projects_ref.transaction(transfer)
        logger.info(
            f"{project_id}: Transfered project to v2 and delete in old path")
        return True
    except fb_db.TransactionAbortedError:
        logger.exception(f"{project_id}: Firebase transaction"
                         f"for transferring project failed to commit")
        return False
def add_tutorial_id_to_projects(project_id_list, tutorial_id):
    """Attach the given tutorial id to each project in Firebase."""
    fb_db = firebaseDB()
    for project_id in project_id_list:
        ref = fb_db.reference(f"v2/projects/{project_id}/tutorialId")
        ref.set(tutorial_id)
        logger.info(
            f"added tutorial id '{tutorial_id}' to project '{project_id}'")
def update_groups_table(project_id: str):
    """Remove duplicates in 'project_types_specifics' attribute in groups table."""

    logger.info(f"Start process for project: '{project_id}'")
    p_con = auth.postgresDB()

    # strip keys that are duplicated as real columns of the groups table
    query = """
        UPDATE groups
        SET project_type_specifics = project_type_specifics::jsonb
            #- '{projectId}'
            #- '{id}'
            #- '{requiredCount}'
            #- '{finishedCount}'
            #- '{neededCount}'
            #- '{reportCount}'
            #- '{distributedCount}'
        WHERE project_id = %(project_id)s
    """
    try:
        p_con.query(query, {"project_id": project_id})
        # messages previously said 'tasks table' although this function
        # updates the groups table
        logger.info(f"Updated groups table for project '{project_id}'.")
    except Exception as e:
        sentry.capture_exception(e)
        sentry.capture_message(
            f"Could NOT update groups table for project '{project_id}'.")
        logger.exception(e)
        logger.warning(
            f"Could NOT update groups table for project '{project_id}'.")
Esempio n. 16
0
def get_groups(filename: str, project_id: str) -> pd.DataFrame:
    """
    Check if groups have been downloaded already.
    If not: Query groups from postgres database for project id and
    save groups to a csv file.
    Then load pandas dataframe from this csv file.
    Return dataframe.

    Parameters
    ----------
    filename: str
    project_id: str
    """

    if os.path.isfile(filename):
        # log message restored to include the actual filename
        logger.info(
            f"file {filename} already exists for {project_id}. skip download.")
    else:
        # TODO: check how we use number_of_users_required
        #   it can get you a wrong number, if more users finished than required
        sql_query = sql.SQL(
            """
            COPY (
                SELECT *, (required_count+finished_count) as number_of_users_required
                FROM groups
                WHERE project_id = {}
            ) TO STDOUT WITH CSV HEADER
            """
        ).format(sql.Literal(project_id))
        write_sql_to_csv(filename, sql_query)

    df = load_df_from_csv(filename)
    return df
def load_project_info_dynamic(filename: str) -> pd.DataFrame:
    """
    Load dynamic project info from a csv file into a pandas dataframe.
    If no file exists, an empty dataframe is initialized.

    Parameters
    ----------
    filename: str
    """

    if os.path.isfile(filename):
        # log message restored to include the actual filename
        logger.info(f"file {filename} exists. Init from this file.")
        df = pd.read_csv(filename, index_col="idx")
    else:
        columns = [
            "project_id",
            "progress",
            "number_of_users",
            "number_of_results",
            "number_of_results_progress",
            "day",
        ]
        df = pd.DataFrame(index=[], columns=columns)
        # astype returns a new series, so assign the result back
        # (the original discarded it, leaving the dtype unchanged)
        df["project_id"] = df["project_id"].astype("str")

    return df
def save_projects(filename: str, df: pd.DataFrame,
                  df_dynamic: pd.DataFrame) -> pd.DataFrame:
    """
    The function merges the dataframes for static and dynamic project information
    and then save the result as csv file.
    Additionally, two geojson files are generated using
    (a) the geometry of the projects and
    (b) the centroid of the projects.

    Parameters
    ----------
    filename: str
    df: pd.DataFrame
    df_dynamic: pd.DataFrame
    """

    # left join keeps all projects even without dynamic info yet
    projects_df = df.merge(df_dynamic,
                           left_on="project_id",
                           right_on="project_id",
                           how="left")
    projects_df.to_csv(filename, index_label="idx", line_terminator="\n")
    # log message restored to include the actual filename
    logger.info(f"saved projects: {filename}")
    geojson_functions.csv_to_geojson(filename, "geom")
    geojson_functions.csv_to_geojson(filename, "centroid")

    return projects_df
def get_overall_stats(projects_df: pd.DataFrame,
                      filename: str) -> pd.DataFrame:
    """
    The function aggregates the statistics per project using the status attribute.
    We derive aggregated statistics for active, inactive and finished projects.
    The number of users should not be summed up here, since this would generate wrong
    results.
    A single user can contribute to multiple projects, we need to consider this.

    Parameters
    ----------
    projects_df: pd.DataFrame
    filename: str
    """
    # assign instead of fillna(inplace=True) on a column slice, which
    # may silently operate on a copy (chained-assignment pitfall)
    projects_df["number_of_users"] = projects_df["number_of_users"].fillna(0)
    overall_stats_df = projects_df.groupby(["status"]).agg(
        count_projects=pd.NamedAgg(column="project_id", aggfunc="count"),
        area_sqkm=pd.NamedAgg(column="area_sqkm", aggfunc="sum"),
        number_of_results=pd.NamedAgg(column="number_of_results",
                                      aggfunc="sum"),
        number_of_results_progress=pd.NamedAgg(
            column="number_of_results_progress", aggfunc="sum"),
        # mean (not sum), since a user can contribute to many projects
        average_number_of_users_per_project=pd.NamedAgg(
            column="number_of_users", aggfunc="mean"),
    )

    overall_stats_df.to_csv(filename, index_label="status")
    # log message restored to include the actual filename
    logger.info(f"saved overall stats to {filename}")

    return overall_stats_df
Esempio n. 20
0
def delete_team(team_id):
    """Delete team in Firebase."""
    # TODO: What is the consequence of this on projects and users
    #   do we expect that the teamId is removed there as well?
    #   teamId is removed for users, but not for projects at the moment
    fb_db = firebaseDB()  # noqa E841
    try:
        team_ref = fb_db.reference(f"v2/teams/{team_id}")

        # check if team exist in firebase
        if not team_ref.get():
            raise CustomError(f"can't find team in firebase: {team_id}")

        # remove all team members
        remove_all_team_members(team_id)

        # get team name from firebase
        team_name = fb_db.reference(f"v2/teams/{team_id}/teamName").get()

        # check if reference path is valid, e.g. if team_id is None
        if not re.match(r"/v2/\w+/[-a-zA-Z0-9]+", team_ref.path):
            raise CustomError(
                f"""Given argument resulted in invalid Firebase Realtime Database reference.
                        {team_ref.path}""")

        # delete team in firebase
        team_ref.delete()
        logger.info(f"deleted team: {team_id} - '{team_name}'")

    except Exception as e:
        logger.info(f"could not delete team: {team_id}")
        raise CustomError(e)
Esempio n. 21
0
def renew_team_token(team_id):
    """Generate and store a new token for an existing team in Firebase.

    Returns the new token.

    Raises
    ------
    CustomError
        If the team does not exist or the token could not be renewed.
    """
    fb_db = firebaseDB()  # noqa E841
    try:
        # check if team exist in firebase
        if not fb_db.reference(f"v2/teams/{team_id}").get():
            raise CustomError(f"can't find team in firebase: {team_id}")

        # get team name from firebase
        team_name = fb_db.reference(f"v2/teams/{team_id}/teamName").get()

        # check if reference path is valid
        ref = fb_db.reference(f"v2/teams/{team_id}")
        if not re.match(r"/v2/\w+/[-a-zA-Z0-9]+", ref.path):
            raise CustomError(
                f"""Given argument resulted in invalid Firebase Realtime Database reference.
                        {ref.path}""")

        # generate new uuid4 token
        new_team_token = str(uuid.uuid4())

        # set team token in firebase
        ref.update({"teamToken": new_team_token})
        logger.info(
            f"renewed team token: {team_id} - '{team_name}' - {new_team_token}"
        )
        return new_team_token

    except Exception as e:
        # message previously said "could not delete team" — copy-paste
        # error from delete_team
        logger.info(f"could not renew team token: {team_id}")
        raise CustomError(e)
def csv_to_geojson(filename: str, geometry_field: str = "geom"):
    """
    Use ogr2ogr to convert csv file to GeoJSON.

    Parameters
    ----------
    filename: str
        Path to the input csv file; output is written next to it with
        the geometry field appended to the name.
    geometry_field: str
        Name of the csv column holding the WKT geometry.
    """

    outfile = filename.replace(".csv", f"_{geometry_field}.geojson")
    # need to remove file here because ogr2ogr can't overwrite when choosing GeoJSON
    if os.path.isfile(outfile):
        os.remove(outfile)
    filename_without_path = filename.split("/")[-1].replace(".csv", "")
    # TODO: remove geom column from normal attributes in sql query
    subprocess.run(
        [
            "ogr2ogr",
            "-f",
            "GeoJSON",
            outfile,
            filename,
            "-sql",
            f'SELECT *, CAST({geometry_field} as geometry) FROM "{filename_without_path}"',  # noqa E501
        ],
        check=True,
    )
    # log message restored to include the actual filename
    logger.info(f"converted {filename} to {outfile}.")

    cast_datatypes_for_geojson(outfile)
Esempio n. 23
0
def get_tasks(filename: str, project_id: str) -> pd.DataFrame:
    """
    Check if tasks have been downloaded already.
    If not: Query tasks from postgres database for project id and
    save tasks to a csv file.
    Then load pandas dataframe from this csv file.
    Return dataframe.

    Parameters
    ----------
    filename: str
    project_id: str
    """

    if os.path.isfile(filename):
        # log message restored to include the actual filename
        logger.info(
            f"file {filename} already exists for {project_id}. skip download.")
    else:
        sql_query = sql.SQL(
            """
            COPY (
                SELECT project_id, group_id, task_id, ST_AsText(geom) as geom
                FROM tasks
                WHERE project_id = {}
            ) TO STDOUT WITH CSV HEADER
            """
        ).format(sql.Literal(project_id))
        write_sql_to_csv(filename, sql_query)

    df = load_df_from_csv(filename)
    return df
Esempio n. 24
0
    def create_tutorial_groups(self):
        """Create group for the tutorial based on provided examples in geojson file."""
        number_of_screens = len(self.screens)

        # the single group dict to be uploaded to Firebase
        group = {
            # depends on the number of screens/tasks to show
            "xMax": 100 + (2 * number_of_screens) - 1,
            "xMin": 100,  # this will be always set to 100
            "yMax": 131074,  # this is set to be at the equator
            "yMin": 131072,  # this is set to be at the equator
            "requiredCount": 5,  # this is not needed from back end perspective
            "finishedCount": 0,  # this is not needed from back end perspective
            "groupId": 101,  # a tutorial has only one group
            "projectId": self.projectId,
            # depends on the number of screens/tasks to show
            "numberOfTasks": len(self.tutorial_tasks),
            "progress": 0,  # this is not needed from back end perspective
        }

        if self.projectType in [ProjectType.CHANGE_DETECTION.value]:
            # need to adjust xMax and yMax for Change Detection projects
            # since they use a different view with only one tile per screen
            group["xMax"] = str(100 + (number_of_screens - 1))
            group["yMax"] = str(group["yMin"])

        self.groups[101] = group

        logger.info(
            f"{self.projectId}"
            f" - create_tutorial_groups - "
            f"created groups dictionary"
        )
Esempio n. 25
0
def get_project_attribute_from_firebase(project_ids: List[str],
                                        attribute: str):
    """Use threading to query a project attribute in firebase.

    Follows a workflow described in this blogpost:
    https://www.digitalocean.com/community/tutorials/how-to-use-threadpoolexecutor-in-python-3
    """
    fb_db = auth.firebaseDB()

    def fetch(_project_id, _attribute):
        """Read one project's attribute; return [project_id, value]."""
        ref = fb_db.reference(f"v2/projects/{_project_id}/{_attribute}")
        return [_project_id, ref.get()]

    project_attribute_dict = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
        futures = [
            executor.submit(fetch, _project_id=pid, _attribute=attribute)
            for pid in project_ids
        ]
        # collect in completion order; the result dict is keyed by
        # project id, so ordering does not matter
        for future in concurrent.futures.as_completed(futures):
            pid, value = future.result()
            project_attribute_dict[pid] = value

    logger.info(
        f"Got attribute '{attribute}' from firebase for {len(project_ids)} projects."
    )
    return project_attribute_dict
Esempio n. 26
0
 def set_firebase_db(self, path, data, token=None):
     """Upload results to Firebase using REST api."""
     url = f"{path}.json?auth={token}"
     payload = json.dumps(data).encode("utf-8")
     self.client.put(url,
                     headers={"content-type":
                              "application/json; charset=UTF-8"},
                     data=payload)
     logger.info(f"set data in firebase for {path}.json")
def transfer_results(project_id_list=None):
    """
    Download results from firebase,
    saves them to postgres and then deletes the results in firebase.
    This is implemented as a transactional operation as described in
    the Firebase docs to avoid missing new generated results in
    Firebase during execution of this function.

    Returns the list of project ids for which a transfer was attempted.
    """

    # Firebase transaction function
    # NOTE: this closure reads `project_id` from the enclosing loop
    # below; it must only be invoked via results_ref.transaction()
    # inside that loop.
    def transfer(current_results):
        if current_results is None:
            logger.info(f"{project_id}: No results in Firebase")
            return dict()
        else:
            results_user_id_list = get_user_ids_from_results(current_results)
            update_data.update_user_data(results_user_id_list)
            results_file = results_to_file(current_results, project_id)
            save_results_to_postgres(results_file)
            # returning an empty dict clears the results in Firebase
            return dict()

    fb_db = auth.firebaseDB()

    if not project_id_list:
        # get project_ids from existing results if no project ids specified
        project_id_list = fb_db.reference("v2/results/").get(shallow=True)
        if not project_id_list:
            project_id_list = []
            logger.info(f"There are no results to transfer.")

    # get all project ids from postgres,
    # we will only transfer results for projects we have there
    postgres_project_ids = get_projects_from_postgres()

    for project_id in project_id_list:
        if project_id not in postgres_project_ids:
            logger.info(f"{project_id}: This project is not in postgres. "
                        f"We will not transfer results")
            continue
        elif "tutorial" in project_id:
            logger.info(f"{project_id}: these are results for a tutorial. "
                        f"We will not transfer these")
            continue

        logger.info(f"{project_id}: Start transfering results")

        results_ref = fb_db.reference(f"v2/results/{project_id}")
        # clear the staging table before each project's transfer
        truncate_temp_results()

        try:
            results_ref.transaction(transfer)
            logger.info(f"{project_id}: Transfered results to postgres")
        except fb_db.TransactionAbortedError:
            logger.exception(f"{project_id}: Firebase transaction for "
                             f"transfering results failed to commit")

    del fb_db
    return project_id_list
def run_create_projects():
    """
    Create projects from submitted project drafts.

    Get project drafts from Firebase.
    Create projects with groups and tasks.
    Save created projects, groups and tasks to Firebase and Postgres.
    On failure the draft is deleted and a FAIL message is sent to Slack.
    """

    fb_db = auth.firebaseDB()
    ref = fb_db.reference("v2/projectDrafts/")
    project_drafts = ref.get()

    if project_drafts is None:
        logger.info("There are no project drafts in firebase.")
        return None

    for project_draft_id, project_draft in project_drafts.items():
        project_draft["projectDraftId"] = project_draft_id
        project_type = project_draft["projectType"]
        project_name = project_draft["name"]
        try:
            # Create a project object using appropriate class (project type).
            project = ProjectType(project_type).constructor(project_draft)
            # TODO: here the project.geometry attribute is overwritten
            #  this is super confusing since it's not a geojson anymore
            #  but this is what we set initially,
            #  e.g. in tile_map_service_grid/project.py
            #  project.geometry is set to a list of wkt geometries now
            #  this can't be handled in postgres,
            #  postgres expects just a string not an array
            #  validated_geometries should be called during init already
            #  for the respective project types

            project.geometry = project.validate_geometries()
            project.create_groups()
            project.calc_required_results()
            # Save project and its groups and tasks to Firebase and Postgres.
            project.save_project()
            send_slack_message(MessageType.SUCCESS, project_name,
                               project.projectId)
            logger.info("Success: Project Creation ({0})".format(project_name))
        except CustomError as e:
            # delete the draft so it is not retried forever
            ref = fb_db.reference(f"v2/projectDrafts/{project_draft_id}")
            ref.set({})

            # check if project could be initialized
            # ('project' is unbound if the constructor itself raised)
            try:
                project_id = project.projectId
            except UnboundLocalError:
                project_id = None

            send_slack_message(MessageType.FAIL, project_name, project_id,
                               str(e))
            logger.exception(
                "Failed: Project Creation ({0}))".format(project_name))
            sentry.capture_exception()
        continue
Esempio n. 29
0
def write_sql_to_csv(filename: str, sql_query: sql.SQL):
    """
    Use the copy statement to write data from postgres to a csv file.

    Parameters
    ----------
    filename: str
        Path of the csv file to write.
    sql_query: sql.SQL
        A COPY ... TO STDOUT query.
    """

    pg_db = auth.postgresDB()
    with open(filename, "w") as f:
        pg_db.copy_expert(sql_query, f)
    # log message restored to include the actual filename
    logger.info(f"wrote csv file from sql: {filename}")
Esempio n. 30
0
def add_metadata_to_csv(filename: str):
    """
    Append a metadata line to the csv file about intended data usage.

    Parameters
    ----------
    filename: str
        Path of the csv file to append to.
    """

    with open(filename, "a") as fd:
        fd.write("# This data can only be used for editing in OpenStreetMap.")

    # log message restored to include the actual filename
    logger.info(f"added metadata to {filename}.")