Exemplo n.º 1
0
def search_packages_info(query):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.
    """
    # Map canonical name -> distribution for O(1) lookup of query names.
    installed = {}
    for p in pkg_resources.working_set:
        installed[canonicalize_name(p.project_name)] = p

    query_names = [canonicalize_name(name) for name in query]

    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
        package = {
            'name': dist.project_name,
            'version': dist.version,
            'location': dist.location,
            'requires': [dep.project_name for dep in dist.requires()],
        }
        file_list = None
        # BUG FIX: default to '' (not None) so feed_parser.feed(metadata)
        # below cannot fail when the distribution ships no METADATA/PKG-INFO.
        metadata = ''
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                # First CSV column of each RECORD row is the file path.
                paths = [line.split(',')[0] for line in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        # First non-blank line of INSTALLER names the installing tool.
        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary', 'home-page', 'author',
                    'author-email', 'license'):
            # BUG FIX: email.message.Message has no pop_from_serial();
            # get() returns the parsed header value (None when absent).
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser cannot deal with repeated headers
        classifiers = []
        for line in metadata.splitlines():
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if file_list:
            package['files'] = sorted(file_list)
        yield package
Exemplo n.º 2
0
def search_packages_info(query):
    # type: (List[str]) -> Iterator[Dict[str, str]]
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.
    """
    # Index every installed distribution by its canonical project name.
    installed = {
        canonicalize_name(dist.project_name): dist
        for dist in pkg_resources.working_set
    }

    query_names = [canonicalize_name(name) for name in query]
    missing = sorted(
        name for name, pkg in zip(query, query_names) if pkg not in installed
    )
    if missing:
        logger.warning("Package(s) not found: %s", ", ".join(missing))

    def get_requiring_packages(package_name):
        # type: (str) -> List[str]
        """List project names of installed packages that require this one."""
        canonical_name = canonicalize_name(package_name)
        requirers = []
        for pkg in pkg_resources.working_set:
            needed = [canonicalize_name(req.name) for req in pkg.requires()]
            if canonical_name in needed:
                requirers.append(pkg.project_name)
        return requirers

    for dist in (installed[pkg] for pkg in query_names if pkg in installed):
        package = {
            "name": dist.project_name,
            "version": dist.version,
            "location": dist.location,
            "requires": [dep.project_name for dep in dist.requires()],
            "required_by": get_requiring_packages(dist.project_name),
        }
        file_list = None
        metadata = ""
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # Modern .dist-info packages list their files in RECORD.
            if dist.has_metadata("RECORD"):
                record_paths = (
                    row.split(",")[0]
                    for row in dist.get_metadata_lines("RECORD")
                )
                file_list = [
                    os.path.relpath(os.path.join(dist.location, rel),
                                    dist.location)
                    for rel in record_paths
                ]

            if dist.has_metadata("METADATA"):
                metadata = dist.get_metadata("METADATA")
        else:
            # Legacy .egg-info packages rely on pip's installed-files.txt.
            if dist.has_metadata("installed-files.txt"):
                file_list = [
                    os.path.relpath(os.path.join(dist.egg_info, rel),
                                    dist.location)
                    for rel in dist.get_metadata_lines("installed-files.txt")
                ]

            if dist.has_metadata("PKG-INFO"):
                metadata = dist.get_metadata("PKG-INFO")

        if dist.has_metadata("entry_points.txt"):
            package["entry_points"] = dist.get_metadata_lines(
                "entry_points.txt")

        # First non-blank INSTALLER line names the installing tool.
        if dist.has_metadata("INSTALLER"):
            for raw in dist.get_metadata_lines("INSTALLER"):
                installer = raw.strip()
                if installer:
                    package["installer"] = installer
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        parser = FeedParser()
        parser.feed(metadata)
        pkg_info = parser.close()
        for field in (
                "metadata-version",
                "summary",
                "home-page",
                "author",
                "author-email",
                "license",
        ):
            package[field] = pkg_info.get(field)

        # FeedParser keeps a single value per header, so repeated
        # Classifier headers are pulled straight from the raw text.
        package["classifiers"] = [
            line[len("Classifier: "):]
            for line in metadata.splitlines()
            if line.startswith("Classifier: ")
        ]

        if file_list:
            package["files"] = sorted(file_list)
        yield package
Exemplo n.º 3
0
    def execute(self, http=None):
        """Execute all the requests as a single batched HTTP request.

    Args:
      http: httplib2.Http, an http object to be used in place of the one the
        HttpRequest request object was constructed with.  If one isn't supplied
        then use a http object from the requests in this batch.

    Returns:
      None

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occured.
      apiclient.errors.BatchError if the response is the wrong format.
    """
        # No http object supplied: borrow one from the first queued request
        # that carries one.
        if http is None:
            for request_id in self._order:
                request, callback = self._requests[request_id]
                if request is not None:
                    http = request.http
                    break
        if http is None:
            raise ValueError("Missing a valid http object.")

        msgRoot = MIMEMultipart('mixed')
        # msgRoot should not write out it's own headers
        setattr(msgRoot, '_write_headers', lambda self: None)

        # Add all the individual requests.
        for request_id in self._order:
            request, callback = self._requests[request_id]

            # Each request becomes an application/http part; the Content-ID
            # header lets responses be matched back to their requests.
            msg = MIMENonMultipart('application', 'http')
            msg['Content-Transfer-Encoding'] = 'binary'
            msg['Content-ID'] = self._id_to_header(request_id)

            body = self._serialize_request(request)
            msg.set_payload(body)
            msgRoot.attach(msg)

        body = msgRoot.as_string()

        headers = {}
        headers['content-type'] = ('multipart/mixed; '
                                   'boundary="%s"') % msgRoot.get_boundary()

        resp, content = http.request(self._batch_uri,
                                     'POST',
                                     body=body,
                                     headers=headers)

        if resp.status >= 300:
            raise HttpError(resp, content, self._batch_uri)

        # Now break up the response and process each one with the correct postproc
        # and trigger the right callbacks.
        boundary, _ = content.split(None, 1)

        # Prepend with a content-type header so FeedParser can handle it.
        header = 'content-type: %s\r\n\r\n' % resp['content-type']
        for_parser = header + content

        parser = FeedParser()
        parser.feed(for_parser)
        respRoot = parser.close()

        if not respRoot.is_multipart():
            raise BatchError("Response not in multipart/mixed format.", resp,
                             content)

        parts = respRoot.get_payload()
        for part in parts:
            request_id = self._header_to_id(part['Content-ID'])

            headers, content = self._deserialize_response(part.get_payload())

            # TODO(jcgregorio) Remove this temporary hack once the server stops
            # gzipping individual response bodies.
            # NOTE(review): a JSON body always starts with '{'; anything else
            # is assumed to be gzipped (Python 2-era str handling of content).
            if content[0] != '{':
                gzipped_content = content
                content = gzip.GzipFile(
                    fileobj=StringIO.StringIO(gzipped_content)).read()

            request, cb = self._requests[request_id]
            postproc = request.postproc
            response = postproc(resp, content)
            # Per-request callback fires first, then the batch-wide one.
            if cb is not None:
                cb(request_id, response)
            if self._callback is not None:
                self._callback(request_id, response)
Exemplo n.º 4
0
    def read_multi(self, environ, keep_blank_values, strict_parsing):
        """Internal: read a part that is itself multipart."""
        ib = self.innerboundary
        if not valid_boundary(ib):
            raise ValueError('Invalid boundary in multipart form: %r' % (ib, ))
        self.list = []
        # Query-string fields from the URL are merged in ahead of the
        # multipart body fields.
        if self.qs_on_post:
            query = urllib.parse.parse_qsl(self.qs_on_post,
                                           self.keep_blank_values,
                                           self.strict_parsing,
                                           encoding=self.encoding,
                                           errors=self.errors,
                                           max_num_fields=self.max_num_fields)
            self.list.extend(
                MiniFieldStorage(key, value) for key, value in query)

        klass = self.FieldStorageClass or self.__class__
        first_line = self.fp.readline()  # bytes
        if not isinstance(first_line, bytes):
            raise ValueError("%s should return bytes, got %s" \
                             % (self.fp, type(first_line).__name__))
        self.bytes_read += len(first_line)

        # Ensure that we consume the file until we've hit our inner boundary
        while (first_line.strip() != (b"--" + self.innerboundary)
               and first_line):
            first_line = self.fp.readline()
            self.bytes_read += len(first_line)

        # Propagate max_num_fields into the sub class appropriately
        max_num_fields = self.max_num_fields
        if max_num_fields is not None:
            max_num_fields -= len(self.list)

        # Read each part: accumulate its header lines (terminated by a blank
        # line), parse them with FeedParser, then delegate the body to a
        # nested FieldStorage instance.
        while True:
            parser = FeedParser()
            hdr_text = b""
            while True:
                data = self.fp.readline()
                hdr_text += data
                if not data.strip():
                    break
            if not hdr_text:
                break
            # parser takes strings, not bytes
            self.bytes_read += len(hdr_text)
            parser.feed(hdr_text.decode(self.encoding, self.errors))
            headers = parser.close()

            # Some clients add Content-Length for part headers, ignore them
            if 'content-length' in headers:
                del headers['content-length']

            part = klass(self.fp, headers, ib, environ, keep_blank_values,
                         strict_parsing, self.limit - self.bytes_read,
                         self.encoding, self.errors, max_num_fields)

            if max_num_fields is not None:
                max_num_fields -= 1
                if part.list:
                    max_num_fields -= len(part.list)
                if max_num_fields < 0:
                    raise ValueError('Max number of fields exceeded')

            self.bytes_read += part.bytes_read
            self.list.append(part)
            # Stop when the nested part saw the outer boundary, or we have
            # consumed the declared content length.
            if part.done or self.bytes_read >= self.length > 0:
                break
        self.skip_lines()
Exemplo n.º 5
0
def search_packages_info(query, index_url=None, session=None):
    """
    Gather details from installed distributions. Print distribution name,
    version, location, and installed files. Installed files requires a
    pip generated 'installed-files.txt' in the distributions '.egg-info'
    directory.

    If ``session`` is provided, the latest release of each package is also
    queried from the XML-RPC index at ``index_url``.
    """
    installed = dict([(p.project_name.lower(), p)
                      for p in pkg_resources.working_set])
    query_names = [name.lower() for name in query]
    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:

        # Find every installed package (or package extra) that requires
        # this distribution.
        required_by = []
        # BUG FIX: dict.iteritems() exists only on Python 2; items() is
        # equivalent here and works on both Python 2 and 3.
        for _, p in installed.items():
            if dist.project_name.lower() in [
                    dep.project_name.lower() for dep in p.requires()
            ]:
                required_by.append(p.project_name)
            else:
                for e in p.extras:
                    if dist.project_name.lower() in [
                            dep.project_name.lower() for dep in p.requires([e])
                    ]:
                        required_by.append("%s[%s]" % (p.project_name, e))
        extras = {}
        requires = [dep.project_name for dep in dist.requires()]

        # (name, is_installed) pair for an extra's dependency. PEP 8: a def
        # is preferred over assigning a lambda, and the membership test is
        # already a bool ('True if x else False' was redundant).
        def make_ext(pkg_name):
            return (pkg_name, pkg_name in installed)

        for e in dist.extras:
            extras[e] = [
                make_ext(dep.project_name.lower())
                for dep in dist.requires([e])
                if dep.project_name not in requires
            ]

        if session:
            # Look up the newest release on the package index.
            transport = PipXmlrpcTransport(index_url, session)
            pypi = xmlrpc_client.ServerProxy(index_url, transport)
            pypi_releases = pypi.package_releases(dist.project_name)
            pypi_version = pypi_releases[0] if pypi_releases else 'UNKNOWN'
        else:
            pypi_version = 'UNKNOWN'

        package = {
            'name': dist.project_name,
            'version': dist.version,
            'pypi_version': pypi_version,
            'location': dist.location,
            'requires': requires,
            'required_by': required_by,
            'extras': extras
        }
        file_list = None
        # BUG FIX: default to '' (not None) so feed_parser.feed(metadata)
        # below cannot fail when no METADATA/PKG-INFO file is present.
        metadata = ''
        if isinstance(dist, pkg_resources.DistInfoDistribution):
            # RECORDs should be part of .dist-info metadatas
            if dist.has_metadata('RECORD'):
                lines = dist.get_metadata_lines('RECORD')
                # First CSV column of each RECORD row is the file path.
                paths = [line.split(',')[0] for line in lines]
                paths = [os.path.join(dist.location, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]

            if dist.has_metadata('METADATA'):
                metadata = dist.get_metadata('METADATA')
        else:
            # Otherwise use pip's log for .egg-info's
            if dist.has_metadata('installed-files.txt'):
                paths = dist.get_metadata_lines('installed-files.txt')
                paths = [os.path.join(dist.egg_info, p) for p in paths]
                file_list = [os.path.relpath(p, dist.location) for p in paths]
            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary', 'home-page', 'author',
                    'author-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # use and short-circuit to check for None
        package['files'] = file_list and sorted(file_list)
        yield package
Exemplo n.º 6
0
            if dist.has_metadata('PKG-INFO'):
                metadata = dist.get_metadata('PKG-INFO')

        if dist.has_metadata('entry_points.txt'):
            entry_points = dist.get_metadata_lines('entry_points.txt')
            package['entry_points'] = entry_points

        if dist.has_metadata('INSTALLER'):
            for line in dist.get_metadata_lines('INSTALLER'):
                if line.strip():
                    package['installer'] = line.strip()
                    break

        # @todo: Should pkg_resources.Distribution have a
        # `get_pkg_info` method?
        feed_parser = FeedParser()
        feed_parser.feed(metadata)
        pkg_info_dict = feed_parser.close()
        for key in ('metadata-version', 'summary',
                    'home-page', 'author', 'author-email', 'license'):
            package[key] = pkg_info_dict.get(key)

        # It looks like FeedParser cannot deal with repeated headers
        classifiers = []
        for line in metadata.splitlines():
            if line.startswith('Classifier: '):
                classifiers.append(line[len('Classifier: '):])
        package['classifiers'] = classifiers

        if :
            package['files'] = sorted()
Exemplo n.º 7
0
    def _get_metadata_from_entrypoint(cls, entrypoint, extension_id):
        """Return metadata information from an entrypoint.

        This is used internally to parse and validate package information from
        an entrypoint for use in ExtensionInfo.

        Args:
            entrypoint (pkg_resources.EntryPoint):
                The EntryPoint pointing to the extension class.

            extension_id (unicode):
                The extension's ID.

        Returns:
            dict:
            The resulting metadata dictionary.
        """
        dist = entrypoint.dist

        try:
            # Wheel, or other modern package.
            lines = list(dist.get_metadata_lines('METADATA'))
        except IOError:
            try:
                # Egg, or other legacy package.
                lines = list(dist.get_metadata_lines('PKG-INFO'))
            except IOError:
                lines = []
                logger.error(
                    'No METADATA or PKG-INFO found for the package '
                    'containing the %s extension. Information on '
                    'the extension may be missing.', extension_id)

        # pkg_resources on Python 3 will always give us back Unicode strings,
        # but Python 2 may give us back either Unicode or byte strings.
        if lines and isinstance(lines[0], bytes):
            data = b'\n'.join(lines)

            # Try to decode the PKG-INFO content. If no decoding method is
            # successful then the PKG-INFO content will remain unchanged and
            # processing will continue with the parsing.
            for enc in cls.encodings:
                try:
                    data = data.decode(enc)
                    break
                except UnicodeDecodeError:
                    continue
            else:
                logger.warning(
                    'Failed decoding PKG-INFO content for '
                    'extension %s', entrypoint.name)
        else:
            data = '\n'.join(lines)

        p = FeedParser()
        p.feed(data)
        pkg_info = p.close()

        # Convert from a Message to a dictionary. Note that items() is correct
        # here. six.iteritems() will not work.
        return dict(pkg_info.items())
Exemplo n.º 8
0
    def parse_email(self, message):
        """Parse a raw email string into an email.message.Message object."""
        parser = FeedParser()
        parser.feed(message)
        return parser.close()
Exemplo n.º 9
0
for k in range(nombreMailsInbox):

    latest_email_id = listeMailsInbox[k]
    id_message = latest_email_id.decode('utf-8')

    #Vérification que ce mail n'a pas déjà été enregistré
    if (id_message not in liste_id):
        result, data = imap_conn.fetch(latest_email_id, "(RFC822)")
        # fetch the email body (RFC822) for the given ID

        raw_email = data[0][1].decode('utf-8')
        # .decode('utf-8') for python 3.x compatibility (bytes -> str)

        #http://stackoverflow.com/questions/4040074/python-email-encoding-problem
        f = FeedParser()
        f.feed(raw_email)
        rootMessage = f.close()

        if (rootMessage.is_multipart()):
            corps = rootMessage.get_payload(0).get_payload(
                decode=True).decode('utf-8')
            # Récupérer le corps du mail en plain/text bien décodé
        else:
            corps = rootMessage.get_payload(decode=True).decode('utf-8')

        #méthode Alex
        # suppression des entêtes de merde avec une regexp
        subject = rootMessage.get('Subject')
        for i in range(len(subject)):
            if subject[i] == "=":
Exemplo n.º 10
0
# Thanks:
#  https://pthree.org/2011/03/24/hashcash-and-mutt/

from email.generator import Generator
from email.parser import FeedParser
from email.utils import getaddresses
import fileinput
import subprocess
import sys
import os

# Open the draft mail (first CLI argument) in the user's editor before
# processing it.
filename = sys.argv[1]
subprocess.call("%s %s" % (os.environ.get("EDITOR", "vim"), filename),
                shell=True)

# Parse the edited file line by line into an email.message.Message.
parser = FeedParser()
for line in fileinput.FileInput(filename, inplace=1):
    parser.feed(line)
msg = parser.close()

# Harvest all email addresses from the header
# Bcc ignored for privacy reasons / can't do multiple mails from editor script
addrs = lambda h: [m[1].lower() for m in getaddresses(msg.get_all(h, []))]
email_addrs = set(addrs("To")).union(set(addrs("Cc")))

# Check if an appropriate token is already generated for the mail
# NOTE(review): assumes X-Hashcash tokens are colon-separated with the
# resource (recipient address) in field index 3 — confirm token format.
for hash in msg.get_all("X-Hashcash", []):
    email_addrs.discard(hash.split(":")[3])

# Call the hashcash function from the operating system to mint tokens
for email in email_addrs:
Exemplo n.º 11
0
    def send_message(self,
                     issueid,
                     msgid,
                     note,
                     sendto,
                     from_address=None,
                     bcc_sendto=[],
                     subject=None,
                     crypt=False):
        '''Actually send the nominated message from this issue to the sendto
           recipients, with the note appended.

           Arguments:
               issueid: id of the issue the message belongs to.
               msgid: id of the msg node to send, or None (e.g. a web edit
                   with no accompanying message).
               note: change-note text appended to the message body.
               sendto: list of recipient addresses (sorted in place here).
               from_address: sender address; defaults to the tracker's
                   TRACKER_EMAIL configuration value.
               bcc_sendto: extra recipients that do not appear in headers.
               subject: subject line; defaults to "[<class><id>] <title>".
               crypt: if true, each outgoing message is encrypted to its
                   recipients via self.encrypt_to().

           NOTE(review): bcc_sendto has a mutable default argument; it is
           only iterated here, never mutated, but a tuple default would be
           safer.
        '''
        users = self.db.user
        messages = self.db.msg
        files = self.db.file

        # Thread-related ids come from the stored message when there is one.
        if msgid is None:
            inreplyto = None
            messageid = None
        else:
            inreplyto = messages.get(msgid, 'inreplyto')
            messageid = messages.get(msgid, 'messageid')

        # make up a messageid if there isn't one (web edit)
        if not messageid:
            # this is an old message that didn't get a messageid, so
            # create one
            messageid = "<%s.%s.%s%s@%s>" % (
                time.time(), b2s(base64.b32encode(random_.token_bytes(10))),
                self.classname, issueid, self.db.config['MAIL_DOMAIN'])
            if msgid is not None:
                messages.set(msgid, messageid=messageid)

        # compose title
        cn = self.classname
        title = self.get(issueid, 'title') or '%s message copy' % cn

        # figure author information
        if msgid:
            authid = messages.get(msgid, 'author')
        else:
            authid = self.db.getuid()
        authname = users.get(authid, 'realname')
        if not authname:
            authname = users.get(authid, 'username', '')
        authaddr = users.get(authid, 'address', '')

        # Only include the author's address when configuration allows it.
        if authaddr and self.db.config.MAIL_ADD_AUTHOREMAIL:
            authaddr = " <%s>" % formataddr(('', authaddr))
        elif authaddr:
            authaddr = ""

        # make the message body
        m = ['']

        # put in roundup's signature
        if self.db.config.EMAIL_SIGNATURE_POSITION == 'top':
            m.append(self.email_signature(issueid, msgid))

        # add author information
        if authid and self.db.config.MAIL_ADD_AUTHORINFO:
            if msgid and len(self.get(issueid, 'messages')) == 1:
                m.append(
                    _("New submission from %(authname)s%(authaddr)s:") %
                    locals())
            elif msgid:
                m.append(
                    _("%(authname)s%(authaddr)s added the comment:") %
                    locals())
            else:
                m.append(_("Change by %(authname)s%(authaddr)s:") % locals())
            m.append('')

        # add the content
        if msgid is not None:
            m.append(messages.get(msgid, 'content', ''))

        # get the files for this message
        message_files = []
        if msgid:
            for fileid in messages.get(msgid, 'files'):
                # check the attachment size
                filesize = self.db.filesize('file', fileid, None)
                if filesize <= self.db.config.NOSY_MAX_ATTACHMENT_SIZE:
                    message_files.append(fileid)
                else:
                    # Too big to attach: include a download link instead.
                    base = self.db.config.TRACKER_WEB
                    link = "".join((base, files.classname, fileid))
                    filename = files.get(fileid, 'name')
                    m.append(
                        _("File '%(filename)s' not attached - "
                          "you can download it from %(link)s.") % locals())

        # add the change note
        if note:
            m.append(note)

        # put in roundup's signature
        if self.db.config.EMAIL_SIGNATURE_POSITION == 'bottom':
            m.append(self.email_signature(issueid, msgid))

        # figure the encoding
        charset = getattr(self.db.config, 'EMAIL_CHARSET', 'utf-8')

        # construct the content and convert to unicode object
        body = s2u('\n'.join(m))

        # make sure the To line is always the same (for testing mostly)
        sendto.sort()

        # make sure we have a from address
        if from_address is None:
            from_address = self.db.config.TRACKER_EMAIL

        # additional bit for after the From: "name"
        from_tag = getattr(self.db.config, 'EMAIL_FROM_TAG', '')
        if from_tag:
            from_tag = ' ' + from_tag

        if subject is None:
            subject = '[%s%s] %s' % (cn, issueid, title)

        author = (authname + from_tag, from_address)

        # send an individual message per recipient?
        if self.db.config.NOSY_EMAIL_SENDING != 'single':
            sendto = [[address] for address in sendto]
        else:
            sendto = [sendto]

        # tracker sender info
        tracker_name = s2u(self.db.config.TRACKER_NAME)
        tracker_name = nice_sender_header(tracker_name, from_address, charset)

        # now send one or more messages
        # TODO: I believe we have to create a new message each time as we
        # can't fiddle the recipients in the message ... worth testing
        # and/or fixing some day
        # `first` gates the bcc copies so they are only sent once, with the
        # first recipient batch.
        first = True
        for sendto in sendto:
            # create the message
            mailer = Mailer(self.db.config)

            message = mailer.get_standard_message(multipart=message_files)

            # set reply-to as requested by config option TRACKER_REPLYTO_ADDRESS
            replyto_config = self.db.config.TRACKER_REPLYTO_ADDRESS
            if replyto_config:
                if replyto_config == "AUTHOR":
                    # note that authaddr at this point is already surrounded by < >, so
                    # get the original address from the db as nice_send_header adds < >
                    replyto_addr = nice_sender_header(
                        authname, users.get(authid, 'address', ''), charset)
                else:
                    replyto_addr = replyto_config
            else:
                replyto_addr = tracker_name
            message['Reply-To'] = replyto_addr

            # message ids
            if messageid:
                message['Message-Id'] = messageid
            if inreplyto:
                message['In-Reply-To'] = inreplyto

            # Generate a header for each link or multilink to
            # a class that has a name attribute
            for propname, prop in self.getprops().items():
                if not isinstance(prop, (hyperdb.Link, hyperdb.Multilink)):
                    continue
                cl = self.db.getclass(prop.classname)
                label = None
                if 'name' in cl.getprops():
                    label = 'name'
                if prop.msg_header_property in cl.getprops():
                    label = prop.msg_header_property
                if prop.msg_header_property == "":
                    # if msg_header_property is set to empty string
                    # suppress the header entirely. You can't use
                    # 'msg_header_property == None'. None is the
                    # default value.
                    label = None
                if not label:
                    continue
                if isinstance(prop, hyperdb.Link):
                    value = self.get(issueid, propname)
                    if value is None:
                        continue
                    values = [value]
                else:
                    values = self.get(issueid, propname)
                    if not values:
                        continue
                values = [cl.get(v, label) for v in values]
                values = ', '.join(values)
                header = "X-Roundup-%s-%s" % (self.classname, propname)
                # Pure-ASCII values go in directly; anything else must be
                # MIME-encoded with the configured charset.
                try:
                    values.encode('ascii')
                    message[header] = values
                except UnicodeError:
                    message[header] = Header(values, charset)

            if not inreplyto:
                # Default the reply to the first message
                msgs = self.get(issueid, 'messages')
                # Assume messages are sorted by increasing message number here
                # If the issue is just being created, and the submitter didn't
                # provide a message, then msgs will be empty.
                if msgs and msgs[0] != msgid:
                    inreplyto = messages.get(msgs[0], 'messageid')
                    if inreplyto:
                        message['In-Reply-To'] = inreplyto

            # attach files
            if message_files:
                # first up the text as a part
                part = mailer.get_standard_message()
                part.set_payload(body, part.get_charset())
                message.attach(part)

                for fileid in message_files:
                    name = files.get(fileid, 'name')
                    # Fall back to guessing from the filename, then to a
                    # generic binary type.
                    mime_type = (files.get(fileid, 'type')
                                 or mimetypes.guess_type(name)[0]
                                 or 'application/octet-stream')
                    if mime_type == 'text/plain':
                        content = files.get(fileid, 'content')
                        part = MIMEText('')
                        del part['Content-Transfer-Encoding']
                        try:
                            enc = content.encode('ascii')
                            part = mailer.get_text_message('us-ascii')
                            part.set_payload(enc)
                        except UnicodeError:
                            # the content cannot be 7bit-encoded.
                            # use quoted printable
                            # XXX stuffed if we know the charset though :(
                            part = mailer.get_text_message('utf-8')
                            part.set_payload(content, part.get_charset())
                    elif mime_type == 'message/rfc822':
                        content = files.get(fileid, 'content')
                        main, sub = mime_type.split('/')
                        # Parse the attached mail so it nests as a proper
                        # message/rfc822 part.
                        p = FeedParser()
                        p.feed(content)
                        part = MIMEBase(main, sub)
                        part.set_payload([p.close()])
                    else:
                        # some other type, so encode it
                        content = files.get(fileid, 'binary_content')
                        main, sub = mime_type.split('/')
                        part = MIMEBase(main, sub)
                        part.set_payload(content)
                        encoders.encode_base64(part)
                    cd = 'Content-Disposition'
                    part[cd] = 'attachment;\n filename="%s"' % name
                    message.attach(part)

            else:
                message.set_payload(body, message.get_charset())

            if crypt:
                send_msg = self.encrypt_to(message, sendto)
            else:
                send_msg = message
            mailer.set_message_attributes(send_msg, sendto, subject, author)
            # Encryption drops these headers; copy them back onto the
            # encrypted wrapper.
            if crypt:
                send_msg['Message-Id'] = message['Message-Id']
                send_msg['Reply-To'] = message['Reply-To']
                if message.get('In-Reply-To'):
                    send_msg['In-Reply-To'] = message['In-Reply-To']

            if sendto:
                mailer.smtp_send(sendto, send_msg.as_string())
            if first:
                if crypt:
                    # send individual bcc mails, otherwise receivers can
                    # deduce bcc recipients from keys in message
                    for bcc in bcc_sendto:
                        send_msg = self.encrypt_to(message, [bcc])
                        send_msg['Message-Id'] = message['Message-Id']
                        send_msg['Reply-To'] = message['Reply-To']
                        if message.get('In-Reply-To'):
                            send_msg['In-Reply-To'] = message['In-Reply-To']
                        mailer.smtp_send([bcc], send_msg.as_string())
                elif bcc_sendto:
                    mailer.smtp_send(bcc_sendto, send_msg.as_string())
            first = False