Example #1
def task_finish_post_to_outbox() -> _Response:
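    """Apply the outbox side effects and deliver the activity to all its recipients."""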
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload
    try:
        activity = ap.fetch_remote_activity(iri)
        app.logger.info(f"activity={activity!r}")

        recipients = activity.recipients()

        process_outbox(activity, {})

        app.logger.info(f"recipients={recipients}")
        activity = ap.clean_activity(activity.to_dict())

        payload = json.dumps(activity)
        for recp in recipients:
            app.logger.debug(f"posting to {recp}")
            Tasks.post_to_remote_inbox(payload, recp)
    except (ActivityGoneError, ActivityNotFoundError):
        app.logger.exception(f"no retry")
    except Exception as err:
        app.logger.exception(f"failed to post to remote inbox for {iri}")
        raise TaskError() from err

    return ""
Example #2
def task_process_new_activity() -> _Response:
    """Process an activity received in the inbox"""
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload
    try:
        activity = ap.fetch_remote_activity(iri)
        app.logger.info(f"activity={activity!r}")

        flags = {}

        if not activity.published:
            flags[_meta(MetaKey.PUBLISHED)] = now()
        else:
            flags[_meta(MetaKey.PUBLISHED)] = activity.published

        set_inbox_flags(activity, flags)
        app.logger.info(f"a={activity}, flags={flags!r}")
        if flags:
            DB.activities.update_one({"remote_id": activity.id}, {"$set": flags})

        app.logger.info(f"new activity {iri} processed")
        if not activity.has_type(ap.ActivityType.DELETE):
            Tasks.cache_actor(iri)
    except (ActivityGoneError, ActivityNotFoundError):
        app.logger.exception(f"dropping activity {iri}, skip processing")
        return ""
    except Exception as err:
        app.logger.exception(f"failed to process new activity {iri}")
        raise TaskError() from err

    return ""
Example #3
def _announce_process_inbox(announce: ap.Announce, new_meta: _NewMeta) -> None:
    _logger.info(f"process_inbox activity={announce!r}")
    # TODO(tsileo): actually drop it without storing it and better logging, also move the check somewhere else
    # or remove it?
    try:
        obj = announce.get_object()
    except NotAnActivityError:
        _logger.exception(
            f'received an Announce referencing an OStatus notice ({announce._data["object"]}), dropping the message'
        )
        return

    if obj.has_type(ap.ActivityType.QUESTION):
        Tasks.fetch_remote_question(obj)

    update_one_activity(
        by_remote_id(announce.id),
        upsert({
            MetaKey.OBJECT: obj.to_dict(embed=True),
            MetaKey.OBJECT_ACTOR: obj.get_actor().to_dict(embed=True),
        }),
    )
    update_one_activity(
        {
            **by_type(ap.ActivityType.CREATE),
            **by_object_id(obj.id)
        },
        inc(MetaKey.COUNT_BOOST, 1),
    )
Example #4
def api_new_question() -> _Response:
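    """Build a Question (poll) activity from the API arguments and post it to the outbox."""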
    source = _user_api_arg("content")
    if not source:
        raise ValueError("missing content")

    content, tags = parse_markdown(source)
    cc = [ID + "/followers"]

    for tag in tags:
        if tag["type"] == "Mention":
            cc.append(tag["href"])

    answers = []
    for i in range(4):
        a = _user_api_arg(f"answer{i}", default=None)
        if not a:
            break
        answers.append({
            "type": ap.ActivityType.NOTE.value,
            "name": a,
            "replies": {
                "type": ap.ActivityType.COLLECTION.value,
                "totalItems": 0
            },
        })

    open_for = int(_user_api_arg("open_for"))
    choices = {
        "endTime":
        ap.format_datetime(
            datetime.now(timezone.utc) + timedelta(minutes=open_for))
    }
    of = _user_api_arg("of")
    if of == "anyOf":
        choices["anyOf"] = answers
    else:
        choices["oneOf"] = answers

    raw_question = dict(
        attributedTo=MY_PERSON.id,
        cc=list(set(cc)),
        to=[ap.AS_PUBLIC],
        content=content,
        tag=tags,
        source={
            "mediaType": "text/markdown",
            "content": source
        },
        inReplyTo=None,
        **choices,
    )

    question = ap.Question(**raw_question)
    create = question.build_create()
    create_id = post_to_outbox(create)

    Tasks.update_question_outbox(create_id, open_for)

    return _user_api_response(activity=create_id)
Example #5
def _create_process_inbox(create: ap.Create, new_meta: _NewMeta) -> None:
    _logger.info(f"process_inbox activity={create!r}")
    # If it's a `Question`, trigger an async task for updating it later (by fetching the remote and updating the
    # local copy)
    question = create.get_object()
    if question.has_type(ap.ActivityType.QUESTION):
        Tasks.fetch_remote_question(question)

    back._handle_replies(MY_PERSON, create)
Example #6
def _announce_process_outbox(announce: ap.Announce, new_meta: _NewMeta) -> None:
    _logger.info(f"process_outbox activity={announce!r}")

    obj = announce.get_object()
    if obj.has_type(ap.ActivityType.QUESTION):
        Tasks.fetch_remote_question(obj)

    Tasks.cache_object(announce.id)

    update_one_activity(
        {**by_object_id(obj.id), **by_type(ap.ActivityType.CREATE)},
        upsert({MetaKey.BOOSTED: announce.id}),
    )
Example #7
def _like_set_inbox_flags(activity: ap.Like, new_meta: _NewMeta) -> None:
    _logger.info(f"set_inbox_flags activity={activity!r}")
    # Is it a Like of a local activity (i.e. from the outbox)?
    if is_from_outbox(activity.get_object()):
        # Flag it as a notification
        _flag_as_notification(activity, new_meta)

        # Cache the object (for display on the notification page)
        Tasks.cache_object(activity.id)

        # Also set the "keep mark" for the GC (as we want to keep it forever)
        _set_flag(new_meta, MetaKey.GC_KEEP)

    return None
Example #8
def _like_process_outbox(like: ap.Like, new_meta: _NewMeta) -> None:
    _logger.info(f"process_outbox activity={like!r}")

    obj = like.get_object()
    if obj.has_type(ap.ActivityType.QUESTION):
        Tasks.fetch_remote_question(obj)

    # Cache the object for display on the "Liked" public page
    Tasks.cache_object(like.id)

    update_one_activity(
        {**by_object_id(obj.id), **by_type(ap.ActivityType.CREATE)},
        {**inc(MetaKey.COUNT_LIKE, 1), **upsert({MetaKey.LIKED: like.id})},
    )
Example #9
def task_cache_actor() -> _Response:
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload["iri"]
    try:
        activity = ap.fetch_remote_activity(iri)
        app.logger.info(f"activity={activity!r}")

        # Fetch the Open Graph metadata if it's a `Create`
        if activity.has_type(ap.ActivityType.CREATE):
            Tasks.fetch_og_meta(iri)

        actor = activity.get_actor()
        if actor.icon:
            if isinstance(actor.icon, dict) and "url" in actor.icon:
                config.MEDIA_CACHE.cache_actor_icon(actor.icon["url"])
            else:
                app.logger.warning(f"failed to parse icon {actor.icon} for {iri}")

        if activity.has_type(ap.ActivityType.FOLLOW):
            if actor.id == config.ID:
                # It's a new following, cache the "object" (which is the actor we follow)
                DB.activities.update_one(
                    {"remote_id": iri},
                    {
                        "$set": {
                            "meta.object": activity.get_object().to_dict(embed=True)
                        }
                    },
                )

        # Cache the actor info
        DB.activities.update_one(
            {"remote_id": iri}, {"$set": {"meta.actor": actor.to_dict(embed=True)}}
        )

        app.logger.info(f"actor cached for {iri}")
        if activity.has_type([ap.ActivityType.CREATE, ap.ActivityType.ANNOUNCE]):
            Tasks.cache_attachments(iri)

    except (ActivityGoneError, ActivityNotFoundError):
        DB.activities.update_one({"remote_id": iri}, {"$set": {"meta.deleted": True}})
        app.logger.exception(f"flagging activity {iri} as deleted, no actor caching")
    except Exception as err:
        app.logger.exception(f"failed to cache actor for {iri}")
        raise TaskError() from err

    return ""
Example #10
def task_cache_actor() -> _Response:
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload["iri"]
    try:
        activity = ap.fetch_remote_activity(iri)
        app.logger.info(f"activity={activity!r}")

        # Reload the actor without caching (in case it got updated)
        with no_cache():
            actor = ap.fetch_remote_activity(activity.get_actor().id)

        # Fetch the Open Graph metadata if it's a `Create`
        if activity.has_type(ap.ActivityType.CREATE):
            links = opengraph.links_from_note(activity.get_object().to_dict())
            if links:
                Tasks.fetch_og_meta(iri)

        if activity.has_type(ap.ActivityType.FOLLOW):
            if actor.id == config.ID:
                # It's a new following, cache the "object" (which is the actor we follow)
                DB.activities.update_one(
                    by_remote_id(iri),
                    upsert({MetaKey.OBJECT: activity.get_object().to_dict(embed=True)}),
                )

        # Cache the actor info
        update_cached_actor(actor)

        app.logger.info(f"actor cached for {iri}")
        if not activity.has_type([ap.ActivityType.CREATE, ap.ActivityType.ANNOUNCE]):
            return ""

        if activity.get_object()._data.get(
            "attachment", []
        ) or activity.get_object().has_type(ap.ActivityType.VIDEO):
            Tasks.cache_attachments(iri)

    except (ActivityGoneError, ActivityNotFoundError):
        DB.activities.update_one({"remote_id": iri}, {"$set": {"meta.deleted": True}})
        app.logger.exception(f"flagging activity {iri} as deleted, no actor caching")
    except Exception as err:
        app.logger.exception(f"failed to cache actor for {iri}")
        raise TaskError() from err

    return ""
Example #11
def task_forward_activity() -> _Response:
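    """Forward an incoming activity to all the followers."""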
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload
    try:
        activity = ap.fetch_remote_activity(iri)
        recipients = back.followers_as_recipients()
        app.logger.debug(f"Forwarding {activity!r} to {recipients}")
        activity = ap.clean_activity(activity.to_dict())
        payload = json.dumps(activity)
        for recp in recipients:
            app.logger.debug(f"forwarding {activity!r} to {recp}")
            Tasks.post_to_remote_inbox(payload, recp)
    except Exception as err:
        app.logger.exception("task failed")
        raise TaskError() from err

    return ""
Example #12
def post_to_outbox(activity: ap.BaseActivity) -> str:
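    """Wrap the object in a Create if needed, save the activity to the outbox and spawn the delivery tasks."""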
    if activity.has_type(ap.CREATE_TYPES):
        activity = activity.build_create()

    # Assign the activity a random ID
    obj_id = binascii.hexlify(os.urandom(8)).decode("utf-8")
    uri = activity_url(obj_id)
    activity._data["id"] = uri
    if activity.has_type(ap.ActivityType.CREATE):
        activity._data["object"]["id"] = urljoin(
            BASE_URL, url_for("outbox_activity", item_id=obj_id))
        activity._data["object"]["url"] = urljoin(
            BASE_URL, url_for("note_by_id", note_id=obj_id))
        activity.reset_object_cache()

    save(Box.OUTBOX, activity)
    Tasks.cache_actor(activity.id)
    Tasks.finish_post_to_outbox(activity.id)
    return activity.id
Example #13
def post_to_inbox(activity: ap.BaseActivity) -> None:
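    """Save an incoming activity to the inbox unless its actor is blocked or it is a duplicate."""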
    # Check for Block activity
    actor = activity.get_actor()
    if outbox_is_blocked(actor.id):
        logger.info(
            f"actor {actor!r} is blocked, dropping the received activity {activity!r}"
        )
        return

    if DB.activities.find_one({
            "box": Box.INBOX.value,
            "remote_id": activity.id
    }):
        # The activity is already in the inbox
        logger.info(f"received duplicate activity {activity!r}, dropping it")
        return

    save(Box.INBOX, activity)
    Tasks.process_new_activity(activity.id)

    logger.info(f"spawning task for {activity!r}")
    Tasks.finish_post_to_inbox(activity.id)
Example #14
def task_cache_object() -> _Response:
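    """Cache the activity's object (and the object's actor when it differs from the activity's actor)."""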
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload
    try:
        activity = ap.fetch_remote_activity(iri)
        app.logger.info(f"activity={activity!r}")
        obj = activity.get_object()
        Tasks.cache_emojis(obj)

        # Refetch the object actor (without cache)
        obj_actor = ap.fetch_remote_activity(obj.get_actor().id, no_cache=True)

        cache = {MetaKey.OBJECT: obj.to_dict(embed=True)}

        if activity.get_actor().id != obj_actor.id:
            # Cache the object actor
            obj_actor_hash = _actor_hash(obj_actor)
            cache[MetaKey.OBJECT_ACTOR] = obj_actor.to_dict(embed=True)
            cache[MetaKey.OBJECT_ACTOR_ID] = obj_actor.id
            cache[MetaKey.OBJECT_ACTOR_HASH] = obj_actor_hash

            # Update the actor cache for the other activities
            update_cached_actor(obj_actor)

        update_one_activity(by_remote_id(activity.id), upsert(cache))

    except (ActivityGoneError, ActivityNotFoundError, NotAnActivityError):
        DB.activities.update_one({"remote_id": iri},
                                 {"$set": {
                                     "meta.deleted": True
                                 }})
        app.logger.exception(
            f"flagging activity {iri} as deleted, no object caching")
    except Exception as err:
        app.logger.exception(f"failed to cache object for {iri}")
        raise TaskError() from err

    return ""
Example #15
def update_cached_actor(actor: ap.BaseActivity) -> None:
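    """Update the embedded actor data in every activity and reply that references this actor."""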
    actor_hash = _actor_hash(actor)
    update_many_activities(
        {
            **flag(MetaKey.ACTOR_ID, actor.id),
            **flag(MetaKey.ACTOR_HASH, {"$ne": actor_hash}),
        },
        upsert(
            {MetaKey.ACTOR: actor.to_dict(embed=True), MetaKey.ACTOR_HASH: actor_hash}
        ),
    )
    update_many_activities(
        {
            **flag(MetaKey.OBJECT_ACTOR_ID, actor.id),
            **flag(MetaKey.OBJECT_ACTOR_HASH, {"$ne": actor_hash}),
        },
        upsert(
            {
                MetaKey.OBJECT_ACTOR: actor.to_dict(embed=True),
                MetaKey.OBJECT_ACTOR_HASH: actor_hash,
            }
        ),
    )
    DB.replies.update_many(
        {
            **flag(MetaKey.ACTOR_ID, actor.id),
            **flag(MetaKey.ACTOR_HASH, {"$ne": actor_hash}),
        },
        upsert(
            {MetaKey.ACTOR: actor.to_dict(embed=True), MetaKey.ACTOR_HASH: actor_hash}
        ),
    )
    # TODO(tsileo): Also update following (it's in the object)
    # DB.activities.update_many(
    #     {"meta.object_id": actor.id}, {"$set": {"meta.object": actor.to_dict(embed=True)}}
    # )
    _cache_actor_icon(actor)
    Tasks.cache_emojis(actor)
Example #16
def _announce_process_outbox(announce: ap.Announce,
                             new_meta: _NewMeta) -> None:
    _logger.info(f"process_outbox activity={announce!r}")

    obj = announce.get_object()
    if obj.has_type(ap.ActivityType.QUESTION):
        Tasks.fetch_remote_question(obj)

    update_one_activity(
        by_remote_id(announce.id),
        upsert({
            MetaKey.OBJECT: obj.to_dict(embed=True),
            MetaKey.OBJECT_ACTOR: obj.get_actor().to_dict(embed=True),
        }),
    )

    update_one_activity(
        {
            **by_object_id(obj.id),
            **by_type(ap.ActivityType.CREATE)
        },
        upsert({MetaKey.BOOSTED: announce.id}),
    )
Example #17
def task_cache_attachments() -> _Response:
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload
    try:
        activity = ap.fetch_remote_activity(iri)
        app.logger.info(f"caching attachment for activity={activity!r}")
        # Generates thumbnails for the actor's icon and the attachments if any

        if activity.has_type(
            [ap.ActivityType.CREATE, ap.ActivityType.ANNOUNCE]):
            obj = activity.get_object()
        else:
            obj = activity

        if obj.content:
            content_html = BeautifulSoup(obj.content, "html5lib")
            for img in content_html.find_all("img"):
                src = img.attrs.get("src")
                if src:
                    Tasks.cache_attachment({"url": src}, iri)

        if obj.has_type(ap.ActivityType.VIDEO):
            if isinstance(obj.url, list):
                # TODO: filter only videos
                link = select_video_to_cache(obj.url)
                if link:
                    Tasks.cache_attachment({"url": link["href"]}, iri)
            elif isinstance(obj.url, str):
                Tasks.cache_attachment({"url": obj.url}, iri)
            else:
                app.logger.warning(
                    f"failed to parse video link {obj!r} for {iri}")

        # Iter the attachments
        for attachment in obj._data.get("attachment", []):
            Tasks.cache_attachment(attachment, iri)

        app.logger.info(f"attachments cached for {iri}")

    except (ActivityGoneError, ActivityNotFoundError, NotAnActivityError):
        app.logger.exception(f"dropping activity {iri}, no attachment caching")
    except Exception as err:
        app.logger.exception(f"failed to cache attachments for {iri}")
        raise TaskError() from err

    return ""
Example #18
def _announce_process_inbox(announce: ap.Announce, new_meta: _NewMeta) -> None:
    _logger.info(f"process_inbox activity={announce!r}")
    # TODO(tsileo): actually drop it without storing it and better logging, also move the check somewhere else
    # or remove it?
    try:
        obj = announce.get_object()
    except NotAnActivityError:
        _logger.exception(
            f'received an Announce referencing an OStatus notice ({announce._data["object"]}), dropping the message'
        )
        return

    if obj.has_type(ap.ActivityType.QUESTION):
        Tasks.fetch_remote_question(obj)

    # Cache the announced object
    Tasks.cache_object(announce.id)

    # Process the reply of the announced object if any
    in_reply_to = obj.get_in_reply_to()
    if in_reply_to:
        reply = ap.fetch_remote_activity(in_reply_to)
        if reply.has_type(ap.ActivityType.CREATE):
            reply = reply.get_object()

        in_reply_to_data = {MetaKey.IN_REPLY_TO: in_reply_to}
        # Update the activity to save some data about the reply
        if reply.get_actor().id == obj.get_actor().id:
            in_reply_to_data.update({MetaKey.IN_REPLY_TO_SELF: True})
        else:
            in_reply_to_data.update({
                MetaKey.IN_REPLY_TO_ACTOR:
                reply.get_actor().to_dict(embed=True)
            })
        update_one_activity(by_remote_id(announce.id),
                            upsert(in_reply_to_data))
        # Spawn a task to process it (and determine if it needs to be saved)
        Tasks.process_reply(reply.id)

    update_one_activity(
        {
            **by_type(ap.ActivityType.CREATE),
            **by_object_id(obj.id)
        },
        inc(MetaKey.COUNT_BOOST, 1),
    )
Example #19
def _cache_actor_icon(actor: ap.BaseActivity) -> None:
    if actor.icon:
        if isinstance(actor.icon, dict) and "url" in actor.icon:
            Tasks.cache_actor_icon(actor.icon["url"], actor.id)
        else:
            logger.warning(f"failed to parse icon {actor.icon} for {actor!r}")
Example #20
def task_process_reply() -> _Response:
    """Process `Announce`d posts from Pleroma relays in order to process replies of activities that are in the inbox."""
    task = p.parse(flask.request)
    app.logger.info(f"task={task!r}")
    iri = task.payload
    try:
        activity = ap.fetch_remote_activity(iri)
        app.logger.info(f"checking for reply activity={activity!r}")

        # Some AP servers always return a Create when requesting an object
        if activity.has_type(ap.ActivityType.CREATE):
            activity = activity.get_object()

        in_reply_to = activity.get_in_reply_to()
        if not in_reply_to:
            # If it's not a reply, we can drop it
            app.logger.info(
                f"activity={activity!r} is not a reply, dropping it")
            return ""

        root_reply = in_reply_to

        # Fetch the activity reply
        reply = ap.fetch_remote_activity(in_reply_to)
        if reply.has_type(ap.ActivityType.CREATE):
            reply = reply.get_object()

        new_replies = [activity, reply]

        while 1:
            in_reply_to = reply.get_in_reply_to()
            if not in_reply_to:
                break

            root_reply = in_reply_to
            reply = ap.fetch_remote_activity(root_reply)

            if reply.has_type(ap.ActivityType.CREATE):
                reply = reply.get_object()

            new_replies.append(reply)

        app.logger.info(f"root_reply={reply!r} for activity={activity!r}")

        # In case the activity was from the inbox
        update_one_activity(
            {
                **by_object_id(activity.id),
                **by_type(ap.ActivityType.CREATE)
            },
            upsert({MetaKey.THREAD_ROOT_PARENT: root_reply}),
        )

        for (new_reply_idx, new_reply) in enumerate(new_replies):
            if find_one_activity({
                    **by_object_id(new_reply.id),
                    **by_type(ap.ActivityType.CREATE)
            }) or DB.replies.find_one(by_remote_id(new_reply.id)):
                continue

            actor = new_reply.get_actor()
            is_root_reply = new_reply_idx == len(new_replies) - 1
            if is_root_reply:
                reply_flags: Dict[str, Any] = {}
            else:
                reply_actor = new_replies[new_reply_idx + 1].get_actor()
                is_in_reply_to_self = actor.id == reply_actor.id
                reply_flags = {
                    MetaKey.IN_REPLY_TO_SELF.value: is_in_reply_to_self,
                    MetaKey.IN_REPLY_TO.value: new_reply.get_in_reply_to(),
                }
                if not is_in_reply_to_self:
                    reply_flags[
                        MetaKey.IN_REPLY_TO_ACTOR.value
                    ] = reply_actor.to_dict(embed=True)

            # Save the reply with the cached actor and the thread flag/ID
            save_reply(
                new_reply,
                {
                    **reply_flags,
                    MetaKey.THREAD_ROOT_PARENT.value: root_reply,
                    MetaKey.ACTOR.value: actor.to_dict(embed=True),
                    MetaKey.ACTOR_HASH.value: _actor_hash(actor),
                },
            )

            # Update the reply counters
            if new_reply.get_in_reply_to():
                update_one_activity(
                    {
                        **by_object_id(new_reply.get_in_reply_to()),
                        **by_type(ap.ActivityType.CREATE),
                    },
                    inc(MetaKey.COUNT_REPLY, 1),
                )
                DB.replies.update_one(
                    by_remote_id(new_reply.get_in_reply_to()),
                    inc(MetaKey.COUNT_REPLY, 1),
                )

            # Cache the actor icon
            _cache_actor_icon(actor)
            # And cache the attachments
            Tasks.cache_attachments(new_reply.id)
    except (ActivityGoneError, ActivityNotFoundError):
        app.logger.exception(f"dropping activity {iri}, skip processing")
        return ""
    except Exception as err:
        app.logger.exception(f"failed to process new activity {iri}")
        raise TaskError() from err

    return ""
Example #21
def post_to_inbox(activity: ap.BaseActivity) -> None:
    # Check for Block activity
    actor = activity.get_actor()
    if outbox_is_blocked(actor.id):
        logger.info(
            f"actor {actor!r} is blocked, dropping the received activity {activity!r}"
        )
        return

    # If the message is coming from a Pleroma relay, we process it as a possible reply for a stream activity
    if (
        actor.has_type(ap.ActivityType.APPLICATION)
        and actor.id.endswith("/relay")
        and activity.has_type(ap.ActivityType.ANNOUNCE)
        and not find_one_activity(
            {
                **by_object_id(activity.get_object_id()),
                **by_type(ap.ActivityType.CREATE),
            }
        )
        and not DB.replies.find_one(by_remote_id(activity.get_object_id()))
    ):
        Tasks.process_reply(activity.get_object_id())
        return

    # Hubzilla sends Update with the same ID as the actor, and it poisons the cache
    if (
        activity.has_type(ap.ActivityType.UPDATE)
        and activity.id == activity.get_object_id()
    ):
        # Start a task to update the cached actor
        Tasks.cache_actor(activity.id)
        return

    # Honk forwards activities in a Read, process them as replies
    if activity.has_type(ap.ActivityType.READ):
        Tasks.process_reply(activity.get_object_id())
        return

    # TODO(tsileo): support ignore from Honk

    # Hubzilla forwards activities in a Create, process them as possible replies
    if activity.has_type(ap.ActivityType.CREATE) and server(activity.id) != server(
        activity.get_object_id()
    ):
        Tasks.process_reply(activity.get_object_id())
        return

    if DB.activities.find_one({"box": Box.INBOX.value, "remote_id": activity.id}):
        # The activity is already in the inbox
        logger.info(f"received duplicate activity {activity!r}, dropping it")
        return

    save(Box.INBOX, activity)
    logger.info(f"spawning tasks for {activity!r}")
    if not activity.has_type([ap.ActivityType.DELETE, ap.ActivityType.UPDATE]):
        Tasks.cache_actor(activity.id)
    Tasks.process_new_activity(activity.id)
    Tasks.finish_post_to_inbox(activity.id)
Example #22
def handle_replies(create: ap.Create) -> None:
    """Go up to the root reply, store unknown replies in the `threads` DB and set the "meta.thread_root_parent"
    key to make it easy to query a whole thread."""
    in_reply_to = create.get_object().get_in_reply_to()
    if not in_reply_to:
        return

    reply = ap.fetch_remote_activity(in_reply_to)
    if reply.has_type(ap.ActivityType.CREATE):
        reply = reply.get_object()
    # FIXME(tsileo): can be a 403 too, in this case what to do? not error at least

    # Ensure this is a local reply to a question, with a direct "to" addressing
    if (
        reply.id.startswith(BASE_URL)
        and reply.has_type(ap.ActivityType.QUESTION.value)
        and _is_local_reply(create)
        and not create.is_public()
    ):
        return handle_question_reply(create, reply)
    elif (
        create.id.startswith(BASE_URL)
        and reply.has_type(ap.ActivityType.QUESTION.value)
        and not create.is_public()
    ):
        # Keep track of our own votes
        DB.activities.update_one(
            {"activity.object.id": reply.id, "box": "inbox"},
            {
                "$set": {
                    f"meta.poll_answers_sent.{_answer_key(create.get_object().name)}": True
                }
            },
        )
        # Mark our reply as a poll answer, to "hide" it from the UI
        update_one_activity(
            by_remote_id(create.id),
            upsert({MetaKey.POLL_ANSWER: True, MetaKey.POLL_ANSWER_TO: reply.id}),
        )
        return None

    in_reply_to_data = {MetaKey.IN_REPLY_TO: in_reply_to}
    # Update the activity to save some data about the reply
    if reply.get_actor().id == create.get_actor().id:
        in_reply_to_data.update({MetaKey.IN_REPLY_TO_SELF: True})
    else:
        in_reply_to_data.update(
            {MetaKey.IN_REPLY_TO_ACTOR: reply.get_actor().to_dict(embed=True)}
        )
    update_one_activity(by_remote_id(create.id), upsert(in_reply_to_data))

    # It's a regular reply, try to increment the reply counter
    creply = DB.activities.find_one_and_update(
        {**by_object_id(in_reply_to), **by_type(ap.ActivityType.CREATE)},
        inc(MetaKey.COUNT_REPLY, 1),
    )
    if not creply:
        # Maybe it's the reply of a reply?
        DB.replies.find_one_and_update(
            by_remote_id(in_reply_to), inc(MetaKey.COUNT_REPLY, 1)
        )

    # Spawn a task to process it (and determine if it needs to be saved)
    Tasks.process_reply(create.get_object_id())
Example #23
import app  # noqa: F401  # here to init the backend
from core.activitypub import _actor_hash
from core.shared import MY_PERSON
from core.shared import p
from core.tasks import Tasks
from utils.local_actor_cache import is_actor_updated

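# Spawn the send_actor_update task if the local actor profile has changed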
h = _actor_hash(MY_PERSON, local=True)
if is_actor_updated(h):
    Tasks.send_actor_update()

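# Schedule the cleanup task to run every hour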
p.push({}, "/task/cleanup", schedule="@every 1h")