Code Example #1
File: rename_stream.py Project: brockwhittaker/zulip
    def handle(self, *args: Any, **options: str) -> None:
        realm = self.get_realm(options)
        assert realm is not None  # Should be ensured by parser
        old_name = options['old_name']
        new_name = options['new_name']
        encoding = sys.getfilesystemencoding()

        stream = get_stream(force_text(old_name, encoding), realm)
        do_rename_stream(stream, force_text(new_name, encoding))
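Every management-command example on this page follows the same pattern: command-line options may arrive as bytes (notably under Python 2), so they are normalized with force_text before use. A minimal sketch of what that call does, assuming Django's django.utils.encoding.force_text (renamed force_str in Django 4.0):

from django.utils.encoding import force_text  # renamed force_str in Django >= 4.0

# Bytes are decoded with the given encoding; text passes through unchanged.
print(force_text(b'caf\xc3\xa9', 'utf-8'))  # café
print(force_text(u'café', 'utf-8'))         # café (already text, returned as-is)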
Code Example #2
File: rename_stream.py Project: yhl-python/zulip
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        realm = self.get_realm(options)
        old_name = options['old_name']
        new_name = options['new_name']
        encoding = sys.getfilesystemencoding()

        stream = get_stream(force_text(old_name, encoding), realm)
        do_rename_stream(stream, force_text(new_name, encoding))
Code Example #3
File: create_stream.py Project: zulip/zulip
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        string_id = options['realm']
        encoding = sys.getfilesystemencoding()
        stream_name = options['stream_name']

        realm = get_realm_by_string_id(force_text(string_id, encoding))
        if realm is None:
            print("Unknown string_id %s" % (string_id,))
            exit(1)
        else:
            do_create_stream(realm, force_text(stream_name, encoding))
Code Example #4
File: create_stream.py Project: 150vb/zulip
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        domain = options['domain']
        encoding = sys.getfilesystemencoding()
        stream_name = options['stream_name']

        realm = get_realm(force_text(domain, encoding))
        if realm is None:
            print("Unknown domain %s" % (domain,))
            exit(1)
        else:
            do_create_stream(realm, force_text(stream_name, encoding))
Code Example #5
File: rename_stream.py Project: acemaster/zulip
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        string_id = options['string_id']
        old_name = options['old_name']
        new_name = options['new_name']
        encoding = sys.getfilesystemencoding()

        realm = get_realm(force_text(string_id, encoding))
        if realm is None:
            print("Unknown subdomain or string_id %s" % (string_id,))
            exit(1)

        do_rename_stream(realm, force_text(old_name, encoding),
                         force_text(new_name, encoding))
Code Example #6
File: email_mirror.py Project: 150vb/zulip
def process_message(message, rcpt_to=None, pre_checked=False):
    # type: (message.Message, Optional[text_type], bool) -> None
    subject_header = message.get("Subject", "(no subject)")
    encoded_subject, encoding = decode_header(subject_header)[0] # type: ignore # https://github.com/python/typeshed/pull/333
    if encoding is None:
        subject = force_text(encoded_subject) # encoded_subject has type str when encoding is None
    else:
        try:
            subject = encoded_subject.decode(encoding)
        except (UnicodeDecodeError, LookupError):
            subject = u"(unreadable subject)"

    debug_info = {}

    try:
        if rcpt_to is not None:
            to = rcpt_to
        else:
            to = find_emailgateway_recipient(message)
        debug_info["to"] = to

        if is_missed_message_address(to):
            process_missed_message(to, message, pre_checked)
        else:
            process_stream_message(to, subject, message, debug_info)
    except ZulipEmailForwardError as e:
        # TODO: notify sender of error, retry if appropriate.
        log_and_report(message, str(e), debug_info)
Code Example #7
File: email_mirror.py Project: brainwane/zulip
def process_message(message: message.Message, rcpt_to: Optional[str]=None, pre_checked: bool=False) -> None:
    subject_header = str(message.get("Subject", "")).strip()
    if subject_header == "":
        subject_header = "(no topic)"
    encoded_subject, encoding = decode_header(subject_header)[0]
    if encoding is None:
        subject = force_text(encoded_subject)  # encoded_subject has type str when encoding is None
    else:
        try:
            subject = encoded_subject.decode(encoding)
        except (UnicodeDecodeError, LookupError):
            subject = "(unreadable subject)"

    debug_info = {}

    try:
        if rcpt_to is not None:
            to = rcpt_to
        else:
            to = find_emailgateway_recipient(message)
        debug_info["to"] = to

        if is_missed_message_address(to):
            process_missed_message(to, message, pre_checked)
        else:
            process_stream_message(to, subject, message, debug_info)
    except ZulipEmailForwardError as e:
        # TODO: notify sender of error, retry if appropriate.
        log_and_report(message, str(e), debug_info)
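Both versions of process_message branch on the charset reported by the standard library's email.header.decode_header: force_text is only applied when the charset is None, i.e. when the fragment is already plain text rather than an RFC 2047 encoded word. A quick illustration:

from email.header import decode_header

# An encoded word comes back as bytes plus its declared charset...
print(decode_header('=?utf-8?b?44GC44GE44GG?='))
# [(b'\xe3\x81\x82\xe3\x81\x84\xe3\x81\x86', 'utf-8')]

# ...while a plain ASCII subject is returned with charset None.
print(decode_header('hello world'))
# [('hello world', None)]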
Code Example #8
File: create_stream.py Project: brockwhittaker/zulip
    def handle(self, *args: Any, **options: str) -> None:
        realm = self.get_realm(options)
        assert realm is not None  # Should be ensured by parser

        encoding = sys.getfilesystemencoding()
        stream_name = options['stream_name']
        create_stream_if_needed(realm, force_text(stream_name, encoding))
Code Example #9
File: __init__.py Project: krtkmj/zulip
 def get_url_data(self, e):
     # type: (Element) -> Optional[Tuple[text_type, text_type]]
     if e.tag == "a":
         if e.text is not None:
             return (e.get("href"), force_text(e.text))
         return (e.get("href"), e.get("href"))
     return None
Code Example #10
File: utils.py Project: ahmadassaf/Zulip
def make_safe_digest(string, hash_func=hashlib.sha1):
    # type: (text_type, Callable[[binary_type], Any]) -> text_type
    """
    Return a hex digest of `string`.
    """
    # hashlib.sha1, md5, etc. expect bytes, so non-ASCII strings must
    # be encoded.
    return force_text(hash_func(string.encode('utf-8')).hexdigest())
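For illustration, a hypothetical call to the function above: hashlib digests require bytes, so the explicit UTF-8 encode is what makes arbitrary unicode input safe, and force_text normalizes the hex digest back to text.

import hashlib

digest = make_safe_digest(u'café')  # make_safe_digest as defined above
assert digest == hashlib.sha1(u'café'.encode('utf-8')).hexdigest()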
Code Example #11
File: __init__.py Project: Kingedgar/zulip
def list_of_tlds():
    # type: () -> List[text_type]
    # HACK we manually blacklist .py
    blacklist = [u'PY\n', ]

    # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    tlds = [force_text(tld).lower().strip() for tld in open(tlds_file, 'r')
            if tld not in blacklist and not tld.startswith('#')]
    tlds.sort(key=len, reverse=True)
    return tlds
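The closing sort(key=len, reverse=True) is not cosmetic: the returned list is presumably joined into a regex alternation for link detection, and Python's re engine prefers the leftmost alternative, so longer TLDs must come first. A small sketch of the failure mode the sort prevents (illustrative data only):

import re

tlds = ['com', 'community']
tlds.sort(key=len, reverse=True)             # ['community', 'com']
pattern = r'\.(%s)' % '|'.join(tlds)
print(re.search(pattern, 'example.community').group(1))
# 'community'; with the unsorted order, 'com' would win instead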
Code Example #12
File: upload.py Project: Kingedgar/zulip
def get_file_info(request, user_file):
    # type: (HttpRequest, File) -> Tuple[text_type, text_type]

    uploaded_file_name = user_file.name
    content_type = request.GET.get('mimetype')
    if content_type is None:
        content_type = force_text(guess_type(uploaded_file_name)[0])
    else:
        uploaded_file_name = uploaded_file_name + guess_extension(content_type)

    uploaded_file_name = urllib.parse.unquote(uploaded_file_name)
    return uploaded_file_name, content_type
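Example #12 leans on the standard mimetypes module, and it has a sharp edge left intact in the attributed code: when guess_type cannot identify the file it returns None, so force_text(None) above yields the literal string 'None'. Examples #19 and #20 later on this page add the missing None check. The calls involved:

from mimetypes import guess_type, guess_extension

print(guess_type('photo.jpg'))        # ('image/jpeg', None)
print(guess_type('mystery.unknown'))  # (None, None)
print(guess_extension('image/jpeg'))  # an extension such as '.jpg' (exact value varies by Python version)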
Code Example #13
File: codehilite.py Project: 150vb/zulip
 def run(self, root):
     # type: (ElementTree) -> None
     """ Find code blocks and store in htmlStash. """
     blocks = root.getiterator('pre')
     for block in blocks:
         children = block.getchildren()
         if len(children) == 1 and force_text(children[0].tag) == 'code':
             text = force_text(children[0].text)
             code = CodeHilite(text,
                         force_linenos=self.config['force_linenos'],
                         guess_lang=self.config['guess_lang'],
                         css_class=self.config['css_class'],
                         style=self.config['pygments_style'],
                         noclasses=self.config['noclasses'],
                         tab_length=self.markdown.tab_length)
             placeholder = self.markdown.htmlStash.store(code.hilite(),
                                                         safe=True)
             # Clear codeblock in etree instance
             block.clear()
             # Change to p element which will later
             # be removed when inserting raw html
             block.tag = 'p'
             block.text = placeholder
Code Example #14
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None

        string_id = options['string_id']
        bot_email = options['bot_email']
        service_name = options['service_name']
        base_url = options['base_url']

        encoding = sys.getfilesystemencoding()
        realm = get_realm(force_text(string_id, encoding))
        if realm is None:
            print('Unknown subdomain or string_id %s' % (string_id,))
            exit(1)

        if not bot_email:
            print('Email of existing bot must be provided')
            exit(1)

        if not service_name:
            print('Name for Service object must be provided')
            exit(1)

        if not base_url:
            print('Base URL of outgoing webhook must be provided')
            exit(1)

        # TODO: Normalize email?
        try:
            bot_profile = UserProfile.objects.get(email=bot_email)
        except UserProfile.DoesNotExist:
            print('User %s does not exist' % (bot_email,))
            exit(1)
        if not bot_profile.is_bot:
            print('User %s is not a bot' % (bot_email,))
            exit(1)
        if bot_profile.is_outgoing_webhook_bot:
            print('%s is already marked as an outgoing webhook bot' % (bot_email,))
            exit(1)

        Service.objects.create(name=service_name,
                               user_profile=bot_profile,
                               base_url=base_url,
                               token='',
                               interface=1)

        bot_profile.bot_type = UserProfile.OUTGOING_WEBHOOK_BOT
        bot_profile.save()

        print('Successfully converted %s into an outgoing webhook bot' % (bot_email,))
Code Example #15
File: messages.py Project: ghostdevhv/zulip
def highlight_string_bytes_offsets(text, locs):
    # type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
    string = force_bytes(text)
    highlight_start = b'<span class="highlight">'
    highlight_stop = b'</span>'
    pos = 0
    result = b''
    for loc in locs:
        (offset, length) = loc
        result += string[pos:offset]
        result += highlight_start
        result += string[offset:offset + length]
        result += highlight_stop
        pos = offset + length
    result += string[pos:]
    return force_text(result)
Code Example #16
def list_of_tlds():
    # type: () -> List[text_type]
    # HACK we manually blacklist .py
    blacklist = [
        u'PY\n',
    ]

    # tlds-alpha-by-domain.txt comes from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
    tlds_file = os.path.join(os.path.dirname(__file__),
                             'tlds-alpha-by-domain.txt')
    tlds = [
        force_text(tld).lower().strip() for tld in open(tlds_file, 'r')
        if tld not in blacklist and not tld.startswith('#')
    ]
    tlds.sort(key=len, reverse=True)
    return tlds
Code Example #17
File: messages.py Project: souravbadami/zulip
def highlight_string_bytes_offsets(text, locs):
    # type: (AnyStr, Iterable[Tuple[int, int]]) -> Text
    string = force_bytes(text)
    highlight_start = b'<span class="highlight">'
    highlight_stop = b'</span>'
    pos = 0
    result = b''
    for loc in locs:
        (offset, length) = loc
        result += string[pos:offset]
        result += highlight_start
        result += string[offset:offset + length]
        result += highlight_stop
        pos = offset + length
    result += string[pos:]
    return force_text(result)
Code Example #18
File: messages.py Project: galexrt/zulip
def highlight_string_text_offsets(text, locs):
    # type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
    string = force_text(text)
    highlight_start = u'<span class="highlight">'
    highlight_stop = u"</span>"
    pos = 0
    result = u""
    for loc in locs:
        (offset, length) = loc
        result += string[pos:offset]
        result += highlight_start
        result += string[offset : offset + length]
        result += highlight_stop
        pos = offset + length
    result += string[pos:]
    return result
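Examples #15, #17, and #18 come in byte and text flavors because match offsets from a search backend may be expressed in bytes (over the UTF-8 encoding) or in characters, and the two diverge as soon as the string contains a multibyte character. A minimal demonstration:

text = u'café matters'
print(text.find(u'matters'))                  # 5 (character offset)
print(text.encode('utf-8').find(b'matters'))  # 6 ('é' occupies two bytes in UTF-8)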
Code Example #19
File: upload.py Project: shekhirin/zulip
def get_file_info(request, user_file):
    # type: (HttpRequest, File) -> Tuple[text_type, Optional[text_type]]

    uploaded_file_name = user_file.name
    assert isinstance(uploaded_file_name, str)

    content_type = request.GET.get('mimetype')
    if content_type is None:
        guessed_type = guess_type(uploaded_file_name)[0]
        if guessed_type is not None:
            content_type = force_text(guessed_type)
    else:
        uploaded_file_name = uploaded_file_name + guess_extension(content_type)

    uploaded_file_name = urllib.parse.unquote(uploaded_file_name)
    return uploaded_file_name, content_type
Code Example #20
File: upload.py Project: zeeshanqamar/zulip
def get_file_info(request, user_file):
    # type: (HttpRequest, File) -> Tuple[text_type, Optional[text_type]]

    uploaded_file_name = user_file.name
    assert isinstance(uploaded_file_name, str)

    content_type = request.GET.get('mimetype')
    if content_type is None:
        guessed_type = guess_type(uploaded_file_name)[0]
        if guessed_type is not None:
            content_type = force_text(guessed_type)
    else:
        uploaded_file_name = uploaded_file_name + guess_extension(content_type)

    uploaded_file_name = urllib.parse.unquote(uploaded_file_name)
    return uploaded_file_name, content_type
Code Example #21
    def write_translation_strings(self, translation_strings: List[str]) -> None:
        for locale, output_path in zip(self.get_locales(), self.get_output_paths()):
            self.stdout.write("[frontend] processing locale {}".format(locale))
            try:
                with open(output_path, 'r') as reader:
                    old_strings = json.load(reader)
            except (IOError, ValueError):
                old_strings = {}

            new_strings = {
                force_text(k): v
                for k, v in self.get_new_strings(old_strings,
                                                 translation_strings,
                                                 locale).items()
            }
            with open(output_path, 'w') as writer:
                json.dump(new_strings, writer, indent=2, sort_keys=True)
Code Example #22
File: makemessages.py Project: JamesLinus/zulip
    def write_translation_strings(self, translation_strings):
        # type: (Iterable[str]) -> None
        for locale, output_path in zip(self.get_locales(), self.get_output_paths()):
            self.stdout.write("[frontend] processing locale {}".format(locale))
            try:
                with open(output_path, 'r') as reader:
                    old_strings = json.load(reader)
            except (IOError, ValueError):
                old_strings = {}

            new_strings = {
                force_text(k): v
                for k, v in self.get_new_strings(old_strings,
                                                 translation_strings).items()
            }
            with open(output_path, 'w') as writer:
                json.dump(new_strings, writer, indent=2, sort_keys=True)
Code Example #23
File: upload.py Project: zmuhammad261/zulip
def sanitize_name(raw_value):
    # type: (NonBinaryStr) -> Text
    """
    Sanitizes a value to be safe to store in a Linux filesystem, in
    S3, and in a URL.  So unicode is allowed, but not special
    characters other than ".", "-", and "_".

    This implementation is based on django.utils.text.slugify; it is
    modified by:
    * hardcoding allow_unicode=True.
    * adding '.' and '_' to the list of allowed characters.
    * preserving the case of the value.
    """
    value = force_text(raw_value)
    value = unicodedata.normalize('NFKC', value)
    value = re.sub(r'[^\w\s._-]', '', value, flags=re.U).strip()
    return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U))
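A hypothetical call to the sanitizer above, worked through its two re.sub passes by hand: parentheses fall outside the allowed set and are dropped, then the remaining whitespace collapses to hyphens while the non-ASCII 'é' survives.

print(sanitize_name(u'my résumé (final).pdf'))
# my-résumé-final.pdf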
Code Example #24
File: upload.py Project: shekhirin/zulip
def sanitize_name(raw_value):
    # type: (NonBinaryStr) -> text_type
    """
    Sanitizes a value to be safe to store in a Linux filesystem, in
    S3, and in a URL.  So unicode is allowed, but not special
    characters other than ".", "-", and "_".

    This implementation is based on django.utils.text.slugify; it is
    modified by:
    * hardcoding allow_unicode=True.
    * adding '.' and '_' to the list of allowed characters.
    * preserving the case of the value.
    """
    value = force_text(raw_value)
    value = unicodedata.normalize('NFKC', value)
    value = re.sub(r'[^\w\s._-]', '', value, flags=re.U).strip()
    return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U))
Code Example #25
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        rcpt_to = force_text(os.environ.get("ORIGINAL_RECIPIENT", options['recipient']))
        if rcpt_to is not None:
            if is_missed_message_address(rcpt_to):
                try:
                    mark_missed_message_address_as_used(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3
            else:
                try:
                    extract_and_validate(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Please use the address specified "
                          "in your Streams page.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3

            # Read in the message, at most 25MiB. This is the limit enforced by
            # Gmail, which we use here as a decent metric.
            msg_text = sys.stdin.read(25*1024*1024)

            if len(sys.stdin.read(1)) != 0:
                # We're not at EOF, reject large mail.
                print("5.3.4 Message too big for system: Max size is 25MiB")
                exit(posix.EX_DATAERR) # type: ignore # There are no stubs for posix in python 3

            queue_json_publish(
                "email_mirror",
                {
                    "message": msg_text,
                    "rcpt_to": rcpt_to
                },
                lambda x: None
            )
        else:
            # We're probably running from cron, try to batch-process mail
            if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
                not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
                    not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
                print("Please configure the Email Mirror Gateway in /etc/zulip/, "
                      "or specify $ORIGINAL_RECIPIENT if piping a single mail.")
                exit(1)
            for message in get_imap_messages():
                process_message(message)
Code Example #26
File: email_mirror.py Project: Jianchun1/zulip
    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        rcpt_to = force_text(os.environ.get("ORIGINAL_RECIPIENT", options['recipient']))
        if rcpt_to is not None:
            if is_missed_message_address(rcpt_to):
                try:
                    mark_missed_message_address_as_used(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Bad or expired missed message address.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3
            else:
                try:
                    extract_and_validate(rcpt_to)
                except ZulipEmailForwardError:
                    print("5.1.1 Bad destination mailbox address: Please use the address specified "
                          "in your Streams page.")
                    exit(posix.EX_NOUSER) # type: ignore # There are no stubs for posix in python 3

            # Read in the message, at most 25MiB. This is the limit enforced by
            # Gmail, which we use here as a decent metric.
            msg_text = sys.stdin.read(25*1024*1024)

            if len(sys.stdin.read(1)) != 0:
                # We're not at EOF, reject large mail.
                print("5.3.4 Message too big for system: Max size is 25MiB")
                exit(posix.EX_DATAERR) # type: ignore # There are no stubs for posix in python 3

            queue_json_publish(
                    "email_mirror",
                    {
                        "message": msg_text,
                        "rcpt_to": rcpt_to
                    },
                    lambda x: None
            )
        else:
            # We're probably running from cron, try to batch-process mail
            if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
                not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
                    not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
                print("Please configure the Email Mirror Gateway in /etc/zulip/, "
                      "or specify $ORIGINAL_RECIPIENT if piping a single mail.")
                exit(1)
            for message in get_imap_messages():
                process_message(message)
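Both mirror commands share a compact size guard: read exactly the limit, then try to read one more byte; a non-empty result proves the input exceeded the limit without ever buffering more than 25MiB. The same trick in isolation (read_bounded and LIMIT are illustrative names, not Zulip APIs):

import io

LIMIT = 25 * 1024 * 1024

def read_bounded(stream):
    body = stream.read(LIMIT)
    if stream.read(1):
        # Not at EOF, so the input was larger than LIMIT.
        raise ValueError('input exceeds %d bytes' % LIMIT)
    return body

print(read_bounded(io.StringIO(u'small message')))  # fits comfortably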
Code Example #27
File: upload.py Project: shekhirin/zulip
def get_signed_upload_url(path):
    # type: (text_type) -> text_type
    conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
    return force_text(conn.generate_url(15, 'GET', bucket=settings.S3_AUTH_UPLOADS_BUCKET, key=force_str(path)))
Code Example #28
 def fixture_data(self, type, action, file_type='json'):
     # type: (text_type, text_type, text_type) -> text_type
     return force_text(open(os.path.join(os.path.dirname(__file__),
                                         "../fixtures/%s/%s_%s.%s" % (type, type, action, file_type))).read())
Code Example #29
File: test_classes.py Project: christi3k/zulip
 def fixture_data(self, type, action, file_type='json'):
     # type: (Text, Text, Text) -> Text
     return force_text(open(os.path.join(os.path.dirname(__file__),
                                         "../webhooks/%s/fixtures/%s.%s" % (type, action, file_type))).read())
Code Example #30
File: test_helpers.py Project: cotsog/zulip
 def fixture_data(self, type, action, file_type='json'):
     # type: (text_type, text_type, text_type) -> text_type
     return force_text(open(os.path.join(os.path.dirname(__file__),
                                         "../fixtures/%s/%s_%s.%s" % (type, type, action, file_type))).read())
Code Example #31
    def twitter_text(self, text, urls, user_mentions, media):
        # type: (text_type, List[Dict[text_type, text_type]], List[Dict[text_type, Any]], List[Dict[text_type, Any]]) -> Element
        """
        Use data from the twitter API to turn links, mentions and media into A
        tags.

        This works by using the urls, user_mentions and media data from the
        twitter API.

        The first step is finding the locations of the URLs, mentions and media
        in the text. For each match we build a dictionary with the start
        location, end location, the URL to link to, and the text to show in the
        link.

        Next we sort the matches by start location. For each match, we add the
        text from the end of the last link to the start of the current link to
        the output. The text needs to be added to the text attribute of the
        first node (the P tag) or to the tail of the last link created.

        Finally we add any remaining text to the last node.
        """

        to_linkify = [] # type: List[Dict[text_type, Any]]
        # Build dicts for URLs
        for url_data in urls:
            short_url = url_data["url"]
            full_url = url_data["expanded_url"]
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': full_url,
                })
        # Build dicts for mentions
        for user_mention in user_mentions:
            screen_name = user_mention['screen_name']
            mention_string = u'@' + screen_name
            for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': u'https://twitter.com/' + force_text(urllib.parse.quote(force_str(screen_name))),
                    'text': mention_string,
                })
        # Build dicts for media
        for media_item in media:
            short_url = media_item['url']
            expanded_url = media_item['expanded_url']
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': expanded_url,
                })

        to_linkify.sort(key=lambda x: x['start'])
        p = current_node = markdown.util.etree.Element('p')

        def set_text(text):
            # type: (text_type) -> None
            """
            Helper to set the text or the tail of the current_node
            """
            if current_node == p:
                current_node.text = text
            else:
                current_node.tail = text

        current_index = 0
        for link in to_linkify:
            # The text we want to link starts inside already-linked text; skip it
            if link['start'] < current_index:
                continue
            # Add text from the end of last link to the start of the current
            # link
            set_text(text[current_index:link['start']])
            current_index = link['end']
            current_node = a = url_to_a(link['url'], link['text'])
            p.append(a)

        # Add any unused text
        set_text(text[current_index:])
        return p
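The set_text helper encodes an ElementTree convention worth spelling out: interleaved text is not stored in nodes of its own, but in the parent's .text (before the first child) and in each child's .tail (after that child), which is exactly what set_text switches between. A standalone illustration using only the standard library:

import xml.etree.ElementTree as etree

p = etree.Element('p')
p.text = 'see '                      # text before the first child
a = etree.SubElement(p, 'a', href='https://twitter.com/zulip')
a.text = '@zulip'
a.tail = ' for details'              # text after the <a> element
print(etree.tostring(p, encoding='unicode'))
# <p>see <a href="https://twitter.com/zulip">@zulip</a> for details</p>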
Code Example #32
 def handle(self, *args, **options):
     # type: (*Any, **str) -> None
     realm = self.get_realm(options)
     encoding = sys.getfilesystemencoding()
     stream_name = options['stream_name']
     create_stream_if_needed(realm, force_text(stream_name, encoding))
Code Example #33
 def fixture_data(self, type, action, file_type='json'):
     # type: (Text, Text, Text) -> Text
     return force_text(open(os.path.join(os.path.dirname(__file__),
                                         "../webhooks/%s/fixtures/%s.%s" % (type, action, file_type))).read())
Code Example #34
File: create_stream.py Project: yhl-python/zulip
 def handle(self, *args, **options):
     # type: (*Any, **str) -> None
     realm = self.get_realm(options)
     encoding = sys.getfilesystemencoding()
     stream_name = options['stream_name']
     create_stream_if_needed(realm, force_text(stream_name, encoding))
Code Example #35
def get_signed_upload_url(path):
    # type: (text_type) -> text_type
    conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
    return force_text(conn.generate_url(15, 'GET', bucket=settings.S3_AUTH_UPLOADS_BUCKET, key=force_str(path)))
Code Example #36
File: __init__.py Project: 150vb/zulip
    def twitter_text(self, text, urls, user_mentions, media):
        # type: (text_type, List[Dict[text_type, text_type]], List[Dict[text_type, Any]], List[Dict[text_type, Any]]) -> Element
        """
        Use data from the twitter API to turn links, mentions and media into A
        tags.

        This works by using the urls, user_mentions and media data from the
        twitter API.

        The first step is finding the locations of the URLs, mentions and media
        in the text. For each match we build a dictionary with the start
        location, end location, the URL to link to, and the text to show in the
        link.

        Next we sort the matches by start location. For each match, we add the
        text from the end of the last link to the start of the current link to
        the output. The text needs to be added to the text attribute of the
        first node (the P tag) or to the tail of the last link created.

        Finally we add any remaining text to the last node.
        """

        to_linkify = [] # type: List[Dict[text_type, Any]]
        # Build dicts for URLs
        for url_data in urls:
            short_url = url_data["url"]
            full_url = url_data["expanded_url"]
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': full_url,
                })
        # Build dicts for mentions
        for user_mention in user_mentions:
            screen_name = user_mention['screen_name']
            mention_string = u'@' + screen_name
            for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': u'https://twitter.com/' + force_text(urllib.parse.quote(force_str(screen_name))),
                    'text': mention_string,
                })
        # Build dicts for media
        for media_item in media:
            short_url = media_item['url']
            expanded_url = media_item['expanded_url']
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': expanded_url,
                })

        to_linkify.sort(key=lambda x: x['start'])
        p = current_node = markdown.util.etree.Element('p')

        def set_text(text):
            # type: (text_type) -> None
            """
            Helper to set the text or the tail of the current_node
            """
            if current_node == p:
                current_node.text = text
            else:
                current_node.tail = text

        current_index = 0
        for link in to_linkify:
            # The text we want to link starts inside already-linked text; skip it
            if link['start'] < current_index:
                continue
            # Add text from the end of last link to the start of the current
            # link
            set_text(text[current_index:link['start']])
            current_index = link['end']
            current_node = a = url_to_a(link['url'], link['text'])
            p.append(a)

        # Add any unused text
        set_text(text[current_index:])
        return p