def process_stream(self, stream, factories):
    combined_inode = "I%s|S%s/%s" % (stream.fd.name, stream.inode_id, stream.reverse)
    pyflaglog.log(pyflaglog.DEBUG, "Opening %s for SMTP" % combined_inode)

    ## We open the file and scan it for emails:
    fd = self.fsfd.open(inode=combined_inode)
    dbh = DB.DBO(self.case)
    p = SMTP(fd, dbh, self.fsfd)

    ## Iterate over all the messages in this connection
    for f in p.parse():
        if not f: continue

        ## Message number and its offset:
        count, offset, length = f

        ## Create the VFS node:
        path, combined_inode, inode_id = self.fsfd.lookup(inode=combined_inode)
        path = posixpath.normpath(path + "/../../../../../")
        new_inode = "%s|o%s:%s" % (combined_inode, offset, length)
        ds_timestamp = Time.convert(stream.ts_sec, case=self.case, evidence_tz="UTC")
        date_str = ds_timestamp.split(" ")[0]

        self.fsfd.VFSCreate(None, new_inode,
                            "%s/SMTP/%s/Message_%s" % (path, date_str, count),
                            mtime=stream.ts_sec, size=length)

        ## Scan the new file using the scanner train. If the user chose
        ## the RFC2822 scanner, we will be able to understand this:
        self.scan_as_file(new_inode, factories)
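## ---------------------------------------------------------------------
## Illustrative sketch (not part of the original scanner): PyFlag chains
## inode "drivers" with "|", so "...|oOFFSET:LENGTH" names a byte range
## inside a parent inode. The helper names below are hypothetical; they
## only show how such a derived inode string composes and parses back.
def make_offset_inode(parent_inode, offset, length):
    ## Name the byte range [offset, offset+length) inside parent_inode.
    return "%s|o%s:%s" % (parent_inode, offset, length)

def parse_offset_inode(inode):
    ## Split the last "|o..." driver back into (parent, offset, length).
    parent, driver = inode.rsplit("|", 1)
    assert driver.startswith("o")
    offset, length = driver[1:].split(":")
    return parent, int(offset), int(length)

## e.g. parse_offset_inode("Ipcap|S12/13|o100:200")
## -> ("Ipcap|S12/13", 100, 200)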
def process_readmessage(self):
    result = {'type': 'Read', 'message': ''}

    ## We could get several messages in the same response:
    root = self.parser.root
    for message in root.search('message'):
        result['message_id'] = message.find("mid").innerHTML()
        try:
            result['sent'] = Time.parse(message.find("receiveddate").innerHTML())
        except: pass

        result['subject'] = message.find("subject").innerHTML()
        for tag, field in [('from', 'From'), ('to', 'To')]:
            result[field] = self.parse_email_address(message, tag)

        ## Now iterate over all the parts:
        for part in message.search("part"):
            ## Usually text/html are the main body
            try:
                if not result['message'] and part.attributes['type'] == 'text':
                    text = part.find("text")
                    result['message'] = HTML.unquote(HTML.decode_entity(text.innerHTML()))
            except KeyError:
                pass

        self.insert_message(result, "webmail")
def process_message_yahoo2(self, result, header):
    try:
        result['subject'] = header.find(".", {"id": "message_view_subject"}).innerHTML()
    except AttributeError: pass

    try:
        date = header.find("div", {"id": "message_view_date"}).innerHTML()
        result['sent'] = Time.parse(date, case=self.case, evidence_tz=None)
    except AttributeError: pass

    context = None
    for div in header.search("div"):
        try:
            cls = div.attributes['class']
        except KeyError:
            continue

        if cls == "details" and context:
            if context not in result:
                result[context] = div.innerHTML()
            context = None

        if cls == "label":
            a = div.innerHTML().strip()
            if a.startswith("To:"):
                context = "To"
            elif a.startswith("From:"):
                context = "From"

    result['message'] = header.innerHTML()

    return self.insert_message(result, inode_template="y%s")
def external_process(self, fd):
    """ This is run on the extracted file """
    pyflaglog.log(pyflaglog.VERBOSE_DEBUG, "Decompressing Zip File %s" % fd.inode)

    ## Try to read the fd as a zip file:
    z = zipfile.ZipFile(fd)
    pathname, inode, inode_id = self.ddfs.lookup(inode=self.inode)

    ## Retrieve the evidence timezone. This is necessary because zip
    ## files store times in localtime:
    evidence_tz = Time.get_evidence_tz_name(self.case, self.fd)

    ## List all the files in the zip file:
    inodes = []
    for info in z.infolist():
        ## If the entry corresponds to just a directory we ignore it:
        if not posixpath.basename(info.filename):
            continue

        ## Convert the time to the case timezone:
        try:
            t = Time.convert(info.date_time, case=self.case,
                             evidence_tz=evidence_tz)
        except:
            t = 0

        ## Add the file into the VFS:
        inode = "%s|Z%s:%s" % (self.inode, info.header_offset, info.compress_size)
        inodes.append(inode)
        self.ddfs.VFSCreate(None, inode,
                            DB.expand("%s/%s", (pathname, info.filename)),
                            size=info.file_size, mtime=t, _fast=True)

    ## Now call the scanners on the new files (FIXME: limit the
    ## recursion level here):
    for inode in inodes:
        fd = self.ddfs.open(inode=inode)
        Scanner.scanfile(self.ddfs, fd, self.factories)
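## ---------------------------------------------------------------------
## Illustrative sketch (standalone): the fields the zip scanner relies
## on are plain zipfile.ZipInfo attributes. header_offset and
## compress_size locate each member's raw data inside the archive,
## which is exactly what the "|Z<offset>:<size>" inode above encodes.
import zipfile

def list_zip_members(path):
    z = zipfile.ZipFile(path)
    for info in z.infolist():
        print "%s: raw data at offset %d, %d bytes compressed, %d uncompressed" % (
            info.filename, info.header_offset,
            info.compress_size, info.file_size)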
def process_readmessage(self, fd, parser):
    result = {'type': 'Read', 'service': self.service}

    ## Find the subject
    sbj = parser.root.find('div', {'class': 'ReadMsgSubject'})
    if sbj:
        result['subject'] = HTML.decode_entity(sbj.innerHTML())

    context = None
    for td in parser.root.search('td'):
        data = td.innerHTML()
        if context:
            result[context] = HTML.decode_entity(data)
            context = None

        if data.lower().startswith('from:'):
            context = 'From'
        elif data.lower().startswith('to:'):
            context = 'To'
        elif data.lower().startswith('sent:'):
            context = 'Sent'

    msg = parser.root.find('div', {'class': 'ReadMsgContainer'})

    ## Try to detect the message ID
    tag = parser.root.find('div', {'mid': '.'})
    if tag:
        result['message_id'] = tag['mid']
    else:
        result['message_id'] = fd.inode_id

    try:
        result['Sent'] = Time.parse(result['Sent'])
    except: pass

    if msg:
        message_urn = "/WebMail/%s/%s" % (self.service,
                                          result['message_id'].replace("/", "_"))

        ## Make sure we do not store duplicates of the same message:
        fsfd = FileSystem.DBFS(fd.case)
        try:
            if fsfd.lookup(path=message_urn):
                return
        except RuntimeError:
            pass

        message_fd = CacheManager.AFF4_MANAGER.create_cache_data(
            fd.case, message_urn, inherited=fd.urn)
        message_fd.write(msg.innerHTML().encode("utf8"))
        message_fd.insert_to_table("webmail_messages", result)
        message_fd.close()
def process_readmessage(self, fd):
    result = {'type': 'Read', 'message': ''}
    root = self.parser.root

    tag = root.find('div', {'class': 'ReadMsgContainer'})
    if not tag: return

    ## Find the subject:
    sbj = tag.find('td', {'class': 'ReadMsgSubject'})
    if sbj:
        result['subject'] = HTML.decode_entity(sbj.innerHTML())

    ## Fill in all the other fields:
    context = None
    for td in tag.search('td'):
        data = td.innerHTML()
        if context:
            result[context] = HTML.decode_entity(data)
            context = None

        if data.lower().startswith('from:'):
            context = 'From'
        elif data.lower().startswith('to:'):
            context = 'To'
        elif data.lower().startswith('sent:'):
            context = 'sent'

    ## Now the message. On newer sites it is injected using a script:
    for s in root.search('script'):
        m = re.match(r"document\.getElementById\(\"MsgContainer\"\)\.innerHTML='([^']*)'",
                     s.innerHTML())
        if m:
            result['message'] += HTML.decode_unicode(m.group(1).decode("string_escape"))
            break

    try:
        result['sent'] = Time.parse(result['sent'])
    except: pass

    return self.insert_message(result)
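## ---------------------------------------------------------------------
## Illustrative sketch: the script-injected body above is a JavaScript
## string literal, so backslash escapes ("\x3c", "\n", ...) must be
## undone before the HTML can be parsed. In Python 2 the built-in
## "string_escape" codec does this. Hypothetical standalone version:
import re

def extract_injected_body(script_text):
    m = re.match(r"document\.getElementById\(\"MsgContainer\"\)"
                 r"\.innerHTML='([^']*)'", script_text)
    if m:
        return m.group(1).decode("string_escape")

## e.g. extract_injected_body(
##     "document.getElementById(\"MsgContainer\").innerHTML='a \\x3cb\\x3e body'")
## returns "a <b> body"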
def process_readmessage(self, message):
    parser = HTML.HTMLParser(verbose=0)
    parser.feed(message)
    parser.close()

    result = {'type': 'Read', 'Message': ''}

    ## Find the subject
    sbj = parser.root.find('td', {'class': 'ReadMsgSubject'})
    if sbj:
        result['Subject'] = HTML.decode_entity(sbj.innerHTML())

    context = None
    for td in parser.root.search('td'):
        data = td.innerHTML()
        if context:
            result[context] = HTML.decode_entity(data)
            context = None

        if data.lower().startswith('from:'):
            context = 'From'
        elif data.lower().startswith('to:'):
            context = 'To'
        elif data.lower().startswith('sent:'):
            context = 'Sent'

    msg = parser.root.find('div', {'class': 'ReadMsgContainer'})
    if msg:
        result['Message'] = msg.innerHTML()

    ## Try to detect the message ID
    tag = parser.root.find('div', {'mid': '.'})
    if tag:
        result['message_id'] = tag['mid']

    try:
        result['Sent'] = Time.parse(result['Sent'])
    except: pass

    return self.insert_message(result, inode_template='l%s')
def process_readmessage(self, fd):
    result = {'type': 'Read', 'message': ''}
    root = self.parser.root

    tag = root.find('div', {'class': 'ReadMsgContainer'})
    if not tag: return

    ## Find the subject:
    sbj = tag.find('td', {'class': 'ReadMsgSubject'})
    if sbj:
        result['subject'] = HTML.decode_entity(sbj.innerHTML())

    ## Fill in all the other fields:
    context = None
    for td in tag.search('td'):
        data = td.innerHTML()
        if context:
            result[context] = HTML.decode_entity(data)
            context = None

        if data.lower().startswith('from:'):
            context = 'From'
        elif data.lower().startswith('to:'):
            context = 'To'
        elif data.lower().startswith('sent:'):
            context = 'sent'

    ## Now the message. On newer sites it is injected using a script:
    for s in root.search('script'):
        m = re.match(r"document\.getElementById\(\"MsgContainer\"\)\.innerHTML='([^']*)'",
                     s.innerHTML())
        if m:
            result['message'] += HTML.decode_unicode(m.group(1).decode("string_escape"))
            break

    try:
        result['sent'] = Time.parse(result['sent'])
    except: pass

    return self.insert_message(fd, result)
def process_message_yahoo1(self, result, header):
    """ Handle Yahoo mail from old versions (prior to 20080224) """
    ## Look through all its rows:
    context = None
    for td in header.search("td"):
        if context:
            for i in td:
                if type(i) == str:
                    result[context] = HTML.unquote(HTML.decode_entity(i))
                    break
            context = None

        data = td.innerHTML()
        if data.lower().strip().startswith('from:'):
            context = 'From'
        elif data.lower().strip().startswith('to:'):
            context = 'To'
        elif data.lower().strip().startswith('date:'):
            context = 'Sent'
        elif data.lower().strip().startswith('subject:'):
            context = 'Subject'

    ## Now the message:
    msgbody = self.parser.root.find('div', {"class": "msgbody"})
    if msgbody:
        result['message'] = msgbody.innerHTML()

    if 'Sent' in result:
        result['sent'] = Time.parse(result['Sent'], case=self.case, evidence_tz=None)

    ## Find the message id:
    tag = header.find('input', dict(name='MsgId'))
    if tag:
        result['message_id'] = tag['value']

    ## Only insert if we actually managed to parse some fields:
    if len(result.keys()) > 3:
        return self.insert_message(result, inode_template="y%s")
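## ---------------------------------------------------------------------
## Illustrative sketch: the label/value scans used throughout these
## scanners are a tiny state machine - a cell that looks like "From:"
## sets the context, and the next cell is captured under that context.
## A standalone version (names and label set are hypothetical):
def scan_labelled_cells(cells, labels=('from', 'to', 'date', 'subject')):
    ## cells: iterable of already-decoded strings, in document order.
    result = {}
    context = None
    for data in cells:
        if context:
            result[context] = data
            context = None
        stripped = data.lower().strip()
        for label in labels:
            if stripped.startswith(label + ':'):
                context = label.capitalize()
    return result

## e.g. scan_labelled_cells(["From:", "alice@example.com",
##                           "To:", "bob@example.com"])
## -> {'From': 'alice@example.com', 'To': 'bob@example.com'}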
def process_mail_listing(self):
    result = {'type': 'Listed', 'message': ''}
    root = self.parser.root
    folder = root.find("folderinfo")
    if not folder: return

    result['from'] = folder.innerHTML()

    listing = "<table><tr><th>From</th><th>To</th><th>Subject</th><th>Received</th></tr>"
    for message in root.search("messageinfo"):
        listing += "<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>\n" % (
            self.parse_email_address(message, 'from'),
            message.attributes.get("toemail"),
            message.attributes.get("subject"),
            Time.parse(message.attributes.get("receiveddate")),
            )
    listing += "</table>"

    result['message'] = listing
    self.insert_message(result, "webmail")
def process_stream(self, stream, factories):
    """ We look for HTTP requests to identify the stream. This allows
    us to process HTTP connections on unusual ports. This situation
    might arise if HTTP proxies are used for example.
    """
    if stream.reverse:
        combined_inode = "I%s|S%s/%s" % (stream.fd.name, stream.inode_id,
                                         stream.reverse)
        try:
            fd = self.fsfd.open(inode=combined_inode)
        ## If we cant open the combined stream, we quit (this could
        ## happen if we are trying to operate on a combined stream
        ## already):
        except IOError: return
    else:
        ## FIXME: combined_inode is never assigned on this path but is
        ## used below:
        fd = stream

    p = HTTP(fd, self.fsfd)
    ## Check that this is really HTTP
    if not p.identify():
        return

    pyflaglog.log(pyflaglog.DEBUG, "Opening %s for HTTP" % combined_inode)

    ## Iterate over all the messages in this connection
    for f in p.parse():
        if not f: continue
        offset, size = f

        ## Create the VFS node:
        new_inode = "%s|H%s:%s" % (combined_inode, offset, size)

        try:
            if 'chunked' in p.response['transfer-encoding']:
                new_inode += "|c0"
        except KeyError: pass

        try:
            if 'gzip' in p.response['content-encoding']:
                new_inode += "|G1"
        except KeyError: pass

        try:
            if 'deflate' in p.response['content-encoding']:
                new_inode += "|d1"
        except KeyError: pass

        ## stream.ts_sec is already formatted in DB format. We need to
        ## convert back to UTC/GMT as paths are UTC:
        timestamp = fd.get_packet_ts(offset)
        ds_timestamp = Time.convert(timestamp, case=self.case,
                                    evidence_tz="UTC")
        try:
            date_str = ds_timestamp.split(" ")[0]
        except:
            date_str = stream.ts_sec.split(" ")[0]

        path, inode, inode_id = self.fsfd.lookup(inode=combined_inode)

        ## Try to put the HTTP inodes at the mount point. FIXME: This
        ## should not be needed when a http stats viewer is written.
        path = posixpath.normpath(path + "/../../../../../")

        inode_id = self.fsfd.VFSCreate(None, new_inode,
                                       "%s/HTTP/%s/%s" % (path, date_str,
                                                          escape(p.request['url'])),
                                       mtime=timestamp, size=size)

        ## Store information about this request in the http table:
        host = p.request.get("host", IP2str(stream.dest_ip))
        url = HTML.url_unquote(p.request.get("url"))
        try:
            date = p.response["date"]
            date = Time.parse(date, case=self.case, evidence_tz=None)
        except (KeyError, ValueError):
            date = 0

        ## Two forms for the referrer:
        referer = p.request.get('referer', p.request.get('referrer', ''))
        if not url.startswith("http://") and not url.startswith("ftp://"):
            url = "http://%s%s" % (host, url)

        dbh = DB.DBO(self.case)
        args = dict(inode_id = inode_id,
                    request_packet = p.request.get("packet_id", 0),
                    method = p.request.get("method", "-"),
                    url = url,
                    response_packet = p.response.get("packet_id"),
                    status = p.response.get("HTTP_code"),
                    content_type = p.response.get("content-type", "text/html"),
                    referrer = referer[:500],
                    host = host,
                    tld = make_tld(host),
                    useragent = p.request.get('user-agent', '-'),
                    )
        if date:
            args['date'] = date

        dbh.insert('http', **args)

        ## Replicate the information about the subobjects in the
        ## connection_details table - this makes it easier to do some
        ## queries:
        dbh.insert("connection_details",
                   ts_sec = stream.ts_sec,
                   inode_id = inode_id,
                   src_ip = stream.src_ip,
                   src_port = stream.src_port,
                   dest_ip = stream.dest_ip,
                   dest_port = stream.dest_port,
                   )

        ## Handle the request's parameters:
        try:
            self.handle_parameters(p.request, inode_id)
        except (KeyError, TypeError):
            pass

        ## Only scan the new file using the scanner train if its size
        ## is bigger than 0:
        if size > 0:
            self.scan_as_file(new_inode, factories)
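## ---------------------------------------------------------------------
## Illustrative sketch: the "|G1" and "|d1" drivers appended above stand
## for gzip and deflate decoding of the HTTP body. Outside PyFlag the
## same decoding looks like this (zlib is in the standard library). A
## minimal sketch, assuming the whole body is in memory:
import zlib

def decode_http_body(data, content_encoding):
    if 'gzip' in content_encoding:
        ## 16 + MAX_WBITS tells zlib to expect a gzip header:
        return zlib.decompress(data, 16 + zlib.MAX_WBITS)
    if 'deflate' in content_encoding:
        ## Servers usually send raw deflate streams; a negative window
        ## size makes zlib accept them:
        return zlib.decompressobj(-zlib.MAX_WBITS).decompress(data)
    return data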
def external_process(self, fd):
    ## Find the other files we need in order to process the cache:
    s = self.fd.stat()
    filename = "%s%s" % (s['path'], s['name'])
    data_fds = [
        self.ddfs.open("%s_CACHE_001_" % s['path']),
        self.ddfs.open("%s_CACHE_002_" % s['path']),
        self.ddfs.open("%s_CACHE_003_" % s['path'])
        ]

    mozcache = MozCache.MozCache(fd, data_fds)
    dbh = DB.DBO(self.case)

    ## Process each cache record:
    for record in mozcache.records():
        meta = record.get_entry()
        (method, status, header) = parse_response(meta['MetaData'])

        ## Deal with content-encoding (gzip/deflate):
        encoding_driver = ""
        encoding = header.getheader("content-encoding")
        if encoding:
            if "gzip" in encoding.lower():
                encoding_driver = "|G1"
            elif "deflate" in encoding.lower():
                encoding_driver = "|d1"

        ## Locate embedded entries:
        length = 0
        if record.record['DataLocation']['DataFile'] != 0:
            fileidx, offset, length = record.get_data_location()
            inode = '%s|o%s:%s' % (data_fds[fileidx].inode, offset, length)
        else:
            inode = self.ddfs.lookup(path="%s%08Xd01" % (
                s['path'], record.record['HashNumber'].get_value()))[1]
            ## Differentiate the inode from the existing one:
            inode += "|o0"

        ## Add a new entry to the VFS:
        if encoding: length = 0
        inode_id = self.ddfs.VFSCreate(None,
                                       "%s%s" % (inode, encoding_driver),
                                       "%s/%08Xd01" % (filename,
                                                       record.record['HashNumber'].get_value()),
                                       _mtime=meta['LastModified'],
                                       _atime=meta['LastFetched'],
                                       size=length)

        ## Insert a dodgy pcap entry to represent the timestamp of this
        ## request:
        dbh.insert('pcap', _fast=True,
                   _ts_sec='from_unixtime(%d)' % meta['LastModified'],
                   ts_usec=0, offset=0, length=0)
        packet_id = dbh.autoincrement()

        ## Add to the http table. We parse the date - it is
        ## automatically returned in the case timezone. We do not need
        ## to supply an evidence timezone as http date strings contain
        ## a timezone specification.
        try:
            date = Time.parse(header.getheader("date"),
                              case=self.case, evidence_tz=None)
        except TypeError:
            date = 0

        ## Chomp the NULL from the end:
        url = str(meta['KeyData'])[:-1]
        if url.startswith("HTTP:"):
            url = url[len("HTTP:"):]

        args = dict(inode_id=inode_id,
                    ## URLs are always stored normalised in the db:
                    url=url_unquote(url),
                    request_packet=packet_id,
                    response_packet=packet_id,
                    method=method,
                    status=status,
                    content_type=header.getheader("content-type"),
                    date=date)

        host = FlagFramework.find_hostname(url)
        if host:
            args['host'] = host
            args['tld'] = FlagFramework.make_tld(host)

        dbh.insert("http", _fast=True, **args)

        ## Now populate the http parameters from the URL GET parameters:
        try:
            base, query = url.split("?", 1)
            qs = cgi.parse_qs(query)
            for k, values in qs.items():
                for v in values:
                    dbh.insert('http_parameters', _fast=True,
                               inode_id=inode_id,
                               key=k, value=v)
        except ValueError:
            pass

        ## Scan the new file using the scanner train:
        fd = self.ddfs.open(inode_id=inode_id)
        Scanner.scanfile(self.ddfs, fd, self.factories)
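## ---------------------------------------------------------------------
## Illustrative sketch: the GET-parameter extraction above, standalone.
## cgi.parse_qs returns a dict mapping each key to a *list* of values,
## which is why the scanner loops twice:
import cgi

def split_query_params(url):
    try:
        base, query = url.split("?", 1)
    except ValueError:
        return []
    pairs = []
    for key, values in cgi.parse_qs(query).items():
        for value in values:
            pairs.append((key, value))
    return pairs

## e.g. split_query_params("http://example.com/s?q=a&q=b&lang=en")
## -> [('q', 'a'), ('q', 'b'), ('lang', 'en')]  (order may vary)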
def process_readmessage(self, fd):
    ## This is what the message tree looks like (XML):
    ## <GetDisplayMessageResponse>
    ##   <message>
    ##     <header>
    ##     <part>
    ##     <part>
    ##   <message>
    ##   <message>
    ## Each message element is a separate message - therefore the same
    ## HTTP object might relay several messages.
    root = self.parser.root
    for message in root.search('message'):
        result = {'type': 'Read', 'service': self.service}
        result['message_id'] = message.find("mid").innerHTML()

        ## Messages are made unique using the message_id. This ensures
        ## that even if the same message was seen multiple times in the
        ## traffic, we only retain one copy of it.
        message_urn = "/Webmail/%s/%s" % (self.service,
                                          result['message_id'].replace("/", "_"))

        ## Make sure we don't have duplicates of the same message -
        ## duplicates may occur in other connections, so we check the
        ## webmail table for the same yahoo message id:
        fsfd = FileSystem.DBFS(fd.case)
        try:
            if fsfd.lookup(path=message_urn):
                continue
        except RuntimeError:
            pass

        try:
            result['sent'] = Time.parse(message.find("receiveddate").innerHTML())
        except: pass

        result['subject'] = HTML.unquote(HTML.decode_entity(
            message.find("subject").innerHTML()))
        for tag, field in [('from', 'From'), ('to', 'To')]:
            result[field] = self.parse_email_address(message, tag)

        message_fd = CacheManager.AFF4_MANAGER.create_cache_data(
            fd.case, message_urn, inherited=fd.urn)
        message_fd.insert_to_table("webmail_messages", result)

        ## Now iterate over all the parts:
        for part in message.search("part"):
            ## Parts are basically message attachments.
            ct = part.attributes['type']
            part_number = part.attributes['partid']
            part_urn = "/".join((message_urn, part_number))

            ## Usually text/html parts are the main body:
            if "text" in ct:
                text = part.find("text")
                message_fd.write(HTML.unquote(HTML.decode_entity(text.innerHTML())))
            elif "image" in ct:
                message_fd.write(DB.expand("<b>%s</b><br><img src='%s'/>", (
                    self.make_link(part.attributes.get('filename', '')),
                    self.make_link(part.attributes['thumbnailurl']))))

        message_fd.close()
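## ---------------------------------------------------------------------
## Illustrative sketch: the dedup logic above keys messages on a URN
## built from the service name and the message id. The same idea,
## standalone, with a set standing in for the VFS lookup:
def message_urn(service, message_id):
    ## "/" in the id would create spurious path components, so it is
    ## replaced - exactly as the scanner above does.
    return "/Webmail/%s/%s" % (service, message_id.replace("/", "_"))

seen = set()

def is_duplicate(service, message_id):
    urn = message_urn(service, message_id)
    if urn in seen:
        return True
    seen.add(urn)
    return False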
new_inode += "|G1" except KeyError: pass try: if 'deflate' in p.response['content-encoding']: new_inode += "|d1" except KeyError: pass ## stream.ts_sec is already formatted in DB format ## need to convert back to utc/gmt as paths are UTC timestamp = fd.get_packet_ts(offset) ds_timestamp = Time.convert(timestamp, case=self.case, evidence_tz="UTC") try: date_str = ds_timestamp.split(" ")[0] except: date_str = stream.ts_sec.split(" ")[0] path,inode,inode_id=self.fsfd.lookup(inode=combined_inode) ## Try to put the HTTP inodes at the mount point. FIXME: ## This should not be needed when a http stats viewer is ## written. path=posixpath.normpath(path+"/../../../../../") inode_id = self.fsfd.VFSCreate(None,new_inode, "%s/HTTP/%s/%s" % (path,date_str, escape(p.request['url'])),