def do_get(handler):
    """Dispatch an HTTP GET: serve a mapped static file, or handle an AJAX
    command under /r/ (only the "search" command is visible here).

    NOTE(review): Python 2 code (print statement).  ``room`` and ``prev_ts``
    are parsed but unused in this excerpt — presumably consumed by commands
    not shown; confirm.
    """
    parts = urlparse(handler.path)
    query = parse_qs(parts.query)
    if parts.path in STATIC:
        # Known static resource: serve the mapped file from disk.
        with open(STATIC[parts.path], "r") as f:
            mt, enc = mimetypes.guess_type(parts.path)
            if not mt:
                # Fall back to guessing from the on-disk filename.
                mt, enc = mimetypes.guess_type(STATIC[parts.path])
            write_response(handler, 200, ['Content-type: %s' % (mt)], f.read())
    elif parts.path == '/r/':
        cmd = get_value(query, "cmd")
        room = get_value(query, "room")
        prev_ts = float(get_value(query, "since", 0))
        msg_dict = { "since": helpers.get_now() }
        if cmd == "search":
            term = get_value(query, "term")
            print "DOING SEARCH", term
            if term:
                ret = do_search(term)
                msg_dict.update(ret)
        # Always answer /r/ requests with the (possibly empty) result dict.
        write_response(handler, 200, ['Content-type: text/json'], json.dumps(msg_dict))
def static_file(self, filename, static_root, mimetype="auto", download=False, charset="utf-8"):
    """Serve a file from ``static_root`` (resolved relative to ``self.dirname``).

    ``mimetype="auto"`` guesses the type from the filename (or from the
    ``download`` name when one is given).  ``download`` may be True (attach
    with the file's own name) or a string (attach under that name).

    Returns an HTTPResponse wrapping the open file, or an HTTPError for
    forbidden/missing/unreadable paths.
    """
    static_root = self.dirname + os.sep + static_root
    filename = os.path.join(static_root, filename.strip("/\\"))
    headers = {}
    # Refuse paths that escape the static root (directory traversal).
    if not filename.startswith(static_root):
        return HTTPError(HTTPStatus(403))
    if not os.path.exists(filename) or not os.path.isfile(filename):
        return HTTPError(HTTPStatus(404))
    if not os.access(filename, os.R_OK):
        return HTTPError(HTTPStatus(403))
    # BUG FIX: ``encoding`` was only assigned inside the "auto" branch but
    # read unconditionally below, raising UnboundLocalError whenever the
    # caller passed an explicit mimetype.
    encoding = None
    if mimetype == "auto":
        if download and download is not True:
            # Guess from the client-facing download name when one is given.
            mimetype, encoding = mimetypes.guess_type(download)
        else:
            mimetype, encoding = mimetypes.guess_type(filename)
    if encoding is not None:
        headers["Content-Encoding"] = encoding
    if mimetype:
        if (mimetype.startswith("text/") or mimetype == "application/javascript") \
                and charset and "charset" not in mimetype:
            mimetype += "; charset={}".format(charset)
        headers["Content-Type"] = mimetype
    if download:
        # BUG FIX: the original condition always picked ``filename`` and then
        # discarded the computed name; use the explicit name when given and
        # actually send it as Content-Disposition.
        download = os.path.basename(filename if download is True else download)
        headers["Content-Disposition"] = 'attachment; filename="%s"' % download
    stats = os.stat(filename)
    headers["Content-Length"] = stats.st_size
    headers["Last-Modified"] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                             time.gmtime(stats.st_mtime))
    # NOTE(review): the open handle is handed to HTTPResponse, which is
    # assumed to close it after sending — confirm.
    body = open(filename, "rb")
    return HTTPResponse(body, **headers)
def send_file(request, filepath, last_modified=None, filename=None):
    """Return an HttpResponse with the contents of ``filepath``.

    ``filename``, when given, drives both the mimetype guess and a
    Content-Disposition attachment header.  ``last_modified`` may be a
    datetime or an epoch-seconds value; otherwise the file's mtime is used.
    """
    fullpath = filepath
    # Respect the If-Modified-Since header.
    # NOTE(review): despite the comment above, If-Modified-Since is never
    # checked and no 304 is ever returned.
    statobj = os.stat(fullpath)
    if filename:
        mimetype, encoding = mimetypes.guess_type(filename)
    else:
        mimetype, encoding = mimetypes.guess_type(fullpath)
    mimetype = mimetype or 'application/octet-stream'
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released promptly.
    with open(fullpath, 'rb') as f:
        response = HttpResponse(f.read(), mimetype=mimetype)
    if not last_modified:
        response["Last-Modified"] = http_date(statobj.st_mtime)
    else:
        if isinstance(last_modified, datetime):
            # Convert a datetime to a unix timestamp via dateformat's 'U'.
            last_modified = float(dateformat.format(last_modified, 'U'))
        response["Last-Modified"] = http_date(epoch_seconds=last_modified)
    response["Content-Length"] = statobj.st_size
    if encoding:
        response["Content-Encoding"] = encoding
    # TODO: Escape filename
    if filename:
        response["Content-Disposition"] = "attachment; filename=%s" % filename.encode('utf-8')
    return response
def get_file_name_mime(self, url):
    """Determine (content, filename, mimetype) for a fetched URL.

    Prefers the header-supplied name/mimetype, then falls back to guessing
    from the header name, the URL path, and finally a synthesized name with
    an extension derived from the mimetype.
    """
    pgctnt, hName, mime = self.wg.getFileNameMime(url)
    parsed = urllib.parse.urlparse(url)
    pathname = os.path.split(parsed.path)[-1]
    if not hName and not mime and not pathname:
        self.log.error("cannot figure out content type for url: %s", url)
        return pgctnt, "unknown.unknown", "application/octet-stream"
    # empty path with mimetype of text/html generally means it's a directory
    # index (or some horrible dynamic shit).
    if not hName and not pathname and mime == "text/html":
        self.log.info("No path and root location. Assuming index.html")
        return pgctnt, "index.html", "text/html"
    # BUG FIX: mimetypes.guess_type() returns (type, encoding); the original
    # unpacked it as (ftype, guessed_mime) and therefore returned the
    # *encoding* (e.g. 'gzip' or None) as the mimetype.  Use the type itself.
    guessed_mime, _encoding = mimetypes.guess_type(hName)
    if guessed_mime:
        return pgctnt, hName, mime if mime else guessed_mime
    guessed_mime, _encoding = mimetypes.guess_type(pathname)
    if guessed_mime:
        return pgctnt, pathname, mime if mime else guessed_mime
    # No guessable extension: synthesize a name from whatever parts we have.
    chunks = [hName, pathname]
    chunks = [chunk for chunk in chunks if chunk]
    outname = " - ".join(chunks)
    if mime and mimetypes.guess_extension(mime):
        newext = mimetypes.guess_extension(mime)
    else:
        newext = ".unknown"
    if not outname:
        outname = "unknown"
    return pgctnt, outname + newext, mime if mime else "application/octet-stream"
def put(self, file='', content_type='', content_enc='',
        isbin=re.compile(r'[\000-\006\177-\277]').search, **kw):
    """HTTP PUT ``file`` to ``self.uri``.  (Python 2 code.)

    ``file`` may be a filename, an open file object, or a raw body string;
    ``isbin`` sniffs for binary bytes to tell filenames apart from binary
    bodies.  Explicit ``content_type``/``content_enc`` override the guesses.
    """
    headers = self.__get_headers(kw)
    filetype = type(file)
    # A printable string naming an existing file: read it from disk and
    # guess the type from the filename.
    if filetype is type('') and (isbin(file) is None) and \
       os.path.exists(file):
        ob = open(file, 'rb')
        body = ob.read()
        ob.close()
        c_type, c_enc = guess_type(file)
    elif filetype is FileType:
        # An already-open file object: guess from its .name attribute.
        body = file.read()
        c_type, c_enc = guess_type(file.name)
    elif filetype is type(''):
        # Any other string is the literal request body; guess from the URL.
        body = file
        c_type, c_enc = guess_type(self.url)
    else:
        raise ValueError, 'File must be a filename, file or string.'
    content_type = content_type or c_type
    content_enc = content_enc or c_enc
    if content_type:
        headers['Content-Type'] = content_type
    if content_enc:
        headers['Content-Encoding'] = content_enc
    headers['Content-Length'] = str(len(body))
    return self.__snd_request('PUT', self.uri, headers, body)
def _guess_type(self, full_path): """Guess the mime type magically or using the mimetypes module.""" magic = self._match_magic(full_path) if magic is not None: return mimetypes.guess_type(magic.old_path(full_path))[0] or "text/plain" else: return mimetypes.guess_type(full_path)[0] or "text/plain"
def _upload_plot(client, bucket, plot):
    """Upload a plot's content, thumbnail and any ./resources files to S3
    under <plot_id>/<version>/, publicly readable.

    Returns a pandas Series of the two public URLs (content, thumbnail),
    or False when S3 rejects an upload.
    """
    extra_args = dict(ACL='public-read')
    url_template = '//{0}.s3.amazonaws.com/{1}/{2}/{3}'
    # Work inside the plot's directory so the relative paths in
    # plot.content / plot.thumbnail / 'resources' resolve.
    with DirectoryContext(plot.directory) as dir_ctx:
        try:
            extra_args['ContentType'] = mime.guess_type(plot.content)[0]
            client.upload_file(plot.content, bucket,
                               path.join(plot.plot_id, plot.version, plot.content),
                               ExtraArgs=extra_args)
            extra_args['ContentType'] = mime.guess_type(plot.thumbnail)[0]
            client.upload_file(plot.thumbnail, bucket,
                               path.join(plot.plot_id, plot.version, plot.thumbnail),
                               ExtraArgs=extra_args)
            # Mirror the optional resources/ tree file-by-file.
            if path.exists('resources'):
                for dir_path, subdir_list, file_list in walk('resources'):
                    for fname in file_list:
                        full_path = path.join(dir_path, fname)
                        extra_args['ContentType'] = mime.guess_type(full_path)[0]
                        client.upload_file(full_path, bucket,
                                           path.join(plot.plot_id, plot.version, full_path),
                                           ExtraArgs=extra_args)
            results = [url_template.format(bucket, plot.plot_id, plot.version, plot.content),
                       url_template.format(bucket, plot.plot_id, plot.version, plot.thumbnail)]
            return pd.Series(results)
        except botocore.exceptions.ClientError as e:
            print(e.response)
            return False
def _static(path):
    """Serve ``www/<path>`` with a guessed Content-Type, 404 when missing.
    (Python 2 / Flask-style code: ``abort`` raises.)"""
    try:
        with open("www/%s" % path) as f:
            # NOTE(review): debug print left in; Content-Type may be None
            # for unknown extensions.
            print guess_type(path)[0]
            return f.read(), 200, {"Content-Type": guess_type(path)[0]}
    except IOError:
        abort(404)
def _get_resource(resource_url: str) -> (str, bytes): """Download or reads a file (online or local). Parameters: resource_url (str): URL or path of resource to load Returns: str, bytes: Tuple containing the resource's MIME type and its data. Raises: NameError: If an HTTP request was made and ``requests`` is not available. ValueError: If ``resource_url``'s protocol is invalid. """ url_parsed = urlparse(resource_url) if url_parsed.scheme in ['http', 'https']: # Requests might not be installed if requests_get is not None: request = requests_get(resource_url) data = request.content if 'Content-Type' in request.headers: mimetype = request.headers['Content-Type'] else: mimetype = mimetypes.guess_type(resource_url) else: raise NameError("HTTP URL found but requests not available") elif url_parsed.scheme == '': # '' is local file with open(resource_url, 'rb') as f: data = f.read() mimetype, _ = mimetypes.guess_type(resource_url) elif url_parsed.scheme == 'data': raise ValueError("Resource path is a data URI", url_parsed.scheme) else: raise ValueError("Not local path or HTTP/HTTPS URL", url_parsed.scheme) return mimetype, data
def emailReturn(eAddress, eBody, eAttach):
    """Build a multipart email to ``eAddress`` with body ``eBody`` and the
    attachments listed in ``eAttach`` (file paths).

    Image attachments are read from disk; for text attachments the *string
    itself* (the path) is attached as the text body — NOTE(review): preserved
    from the original, but looks suspicious; confirm against callers.
    No 'From' header is set here — presumably added by the caller.

    Returns the assembled MIMEMultipart message.
    """
    msg = MIMEMultipart()
    msg['To'] = eAddress
    msgBody = MIMEText(eBody)
    msg.attach(msgBody)
    for attachment in eAttach:
        # BUG FIX: guess_type() returns (None, None) for unknown extensions;
        # the original crashed on None.split('/').  Guess once per file and
        # skip attachments whose type cannot be determined.
        mime_type = mimetypes.guess_type(attachment)[0]
        if mime_type is None:
            continue
        mimeMainType, mimeSubType = mime_type.split('/', 1)
        if mimeMainType == 'image':
            # Read the image bytes and attach with a filename hint.
            with open(attachment, 'rb') as imgAttachment:
                img = MIMEImage(imgAttachment.read())
            img.add_header('Content-Disposition', 'attachment',
                           filename=os.path.basename(attachment))
            msg.attach(img)
        elif mimeMainType == 'text':
            if mimeSubType == 'plain':
                plainText = MIMEText(attachment, 'plain')
                msg.attach(plainText)
            if mimeSubType == 'html':
                htmlText = MIMEText(attachment, 'html')
                msg.attach(htmlText)
    return msg
def get_detail(self, request, **kwargs):
    """
    Returns a single serialized resource.

    Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
    set and serializes it.

    Should return a HttpResponse (200 OK).

    Guess the mimetype of the file and return it as an attachment
    object
    """
    basic_bundle = self.build_bundle(request=request)
    obj = self.obj_get(basic_bundle, **kwargs)
    bundle = self.build_bundle(obj=obj, request=request)
    bundle = self.full_dehydrate(bundle)
    bundle = self.alter_detail_data_to_serialize(request, bundle)
    #return our response here
    #get extension from the FlowFile object
    #match this to a dictionary of mimetypes with extensions
    fb = obj.file.read()
    # NOTE(review): guess_type may return None for unknown extensions, which
    # would crash the .count() call below — confirm full_path always carries
    # a known extension.
    mimetype = mimetypes.guess_type(obj.full_path)[0]
    #if mimetype.index('spreadsheetml') > 0:
    response = http.HttpResponse(fb, content_type=mimetypes.guess_type(obj.full_path)[0])
    #if it's not an image, it is a download link - add the necessary content disposition info
    if(mimetype.count('image') == 0):
        response['Content-Disposition'] = 'attachment; filename=%s' % obj.original_filename
    return response
def load_file(self):
    """Pick an image via a file dialog and upload it to a WordPress site over
    XML-RPC, showing the resulting URL in the status widget.

    NOTE(review): the WP endpoint and credentials are hard-coded
    placeholders — must be configured before use.
    """
    fname = askopenfilename(filetypes=(("PNG Image", "*.png"),
                                       ("JPG Image", "*.jpg;*.jpeg"),
                                       ("GIF Image", "*.gif"),
                                       ("Bitmap Image", "*.bmp"),
                                       ("All Files", "*")))
    print(mimetypes.guess_type(fname)[0])
    try:
        wp = Client('https://your.wordpress.installation/xmlrpc.php', 'Username', 'password')
    except TimeoutError:
        self.status.delete(0, END)
        self.status.insert(0, 'Unable to connect to WP')
    except gaierror:
        # DNS failure: show it in the (normally disabled) status box, then
        # re-raise so the caller sees the failure too.
        self.status.config(state=NORMAL)
        self.status.delete(1.0, END)
        self.status.insert(1.0, 'DNS lookup failed')
        self.status.config(state=DISABLED)
        raise
    print(MyFrame.path_leaf(fname))
    data = {'name': MyFrame.path_leaf(fname),
            'type': mimetypes.guess_type(fname)[0]}
    with open(fname, 'rb') as img:
        data['bits'] = xmlrpc_client.Binary(img.read())
    response = wp.call(media.UploadFile(data))
    print(response['url'])
    self.status.config(state=NORMAL)
    self.status.delete(1.0, END)
    self.status.insert(1.0, 'Link: '+response['url'])
    self.status.config(state=DISABLED)
def push_file(self, pfile, body=None, file_type=None, file_name=None):
    """Push a file through the Pushbullet API.

    ``pfile`` is either an open file object (uploaded via the upload-request
    endpoint) or a string URL to an already-hosted file.  ``file_type`` and
    ``file_name`` default to guesses from the name/URL.
    """
    # FP
    if not isinstance(pfile, str):
        # Open file object: refuse anything over the service's upload limit.
        if os.fstat(pfile.fileno()).st_size > self.UPLOAD_LIMIT:
            return 'File too big'
        if not file_type:
            file_type, __ = mimetypes.guess_type(pfile.name)
        if not file_name:
            file_name = pfile.name
        # Ask the API for an upload slot, then POST the file to it.
        payload = {'file_type': file_type, 'file_name': file_name}
        r = self._s.get(self.UPLOAD_URL, params=payload)
        _pushbullet_responses(r)
        file_url = r.json()['file_url']
        upload_url = r.json()['upload_url']
        data = r.json()['data']
        files = {'file': pfile}
        _pushbullet_responses(requests.post(upload_url, files=files, data=data))
    # String/url
    else:
        if not file_type:
            file_type, __ = mimetypes.guess_type(pfile)
        if not file_name:
            # Take the last URL path segment as the display name.
            __, file_name = pfile.rsplit('/', 1)
        file_url = pfile
    data = {'type': 'file', 'file_type': file_type, 'file_name': file_name,
            'file_url': file_url, 'body': body}
    return self._push(data)
def s3write(bucket, root, path):
    """Write local file ``path`` back to S3 under its mapped key, with a
    guessed Content-Type; skip the upload when the local MD5 already matches
    the S3 etag."""
    #key.set_metadata('mode', str(stat[0]))
    #key.set_metadata('gid', str(stat[5]))
    #key.set_metadata('uid', str(stat[4]))
    #key.set_metadata('mtime', str(stat[8]))
    # GUESS MIME TYPE
    #if is_dir:
    #    key.set_contents_from_string("", headers={'Content-Type': 'application/x-directory'})
    s3name = FastS3.local_to_s3(root, path)
    log.debug('Executing S3 WRITE BACK for %s' % s3name)
    # Serialize writers working on the same local file.
    with FileLock(path):
        key = bucket.get_key(s3name)
        if key is None:
            #New File
            content_type, encoding = mimetypes.guess_type(s3name)
            key = bucket.new_key(s3name)
            log.debug('Writing new file to S3 %s' % s3name)
            key.set_contents_from_filename(path, replace=True,
                                           headers={'Content-Type': content_type})
        else:
            #Check existing file
            content_type, encoding = mimetypes.guess_type(s3name)
            with open(path, 'rb') as f:
                localmd5 = key.compute_md5(f)[0]
            # etag is quoted; strip the surrounding quotes before comparing.
            if key.etag[1:-1] != localmd5:
                log.info('Overwriting File on S3 %s.' % s3name)
                key.set_contents_from_filename(path, replace=True,
                                               headers={'Content-Type': content_type})
    log.debug('S3 WRITE BACK complete')
def movieView(request, id, movie):
    """Stream a movie from case ``id``'s MovieGallery feature.
    (Python 2 / Django code; note the leftover debug print.)"""
    feature = get_object_or_404(CaseFeature, case__pk=id, name='MovieGallery')
    path = feature.getMoviePath(movie)
    print mimetypes.guess_type(path)[0]
    # NOTE(review): the file is opened in text mode and handed to FileWrapper
    # without an explicit close; content_type may be None for odd extensions.
    response = HttpResponse(FileWrapper(open(path)),
                            content_type=mimetypes.guess_type(path)[0])
    response['Content-Length'] = os.path.getsize(path)
    return response
def run_and_interpret(lookup, module, handler, **params):
    """Invoke a route ``handler`` and turn its return value into a
    (status, headers, body) triple.

    A dict renders the module's companion <module>.html template with the
    dict as context; TemplateT renders an explicit template path; Raw passes
    through untouched; False maps to 404; anything else is a server error.
    """
    v = handler(**params)
    module_path = os.path.dirname(module.__file__)
    if isinstance(v, dict):
        fn = os.path.join(module_path, module.__name__.split(".")[-1] + ".html")
        body = mako.template.Template(filename=fn, lookup=lookup).render(**v).encode("utf-8")
        return organic.status.OK, {"Content-Type": mimetypes.guess_type(fn)[0]}, body
    elif isinstance(v, organic.response.TemplateT):
        fn = os.path.join(module_path, v.path)
        if os.path.isfile(fn):
            # Only fill in Content-Type when the handler didn't set one.
            if "Content-Type" not in v.headers:
                v.headers["Content-Type"] = mimetypes.guess_type(fn)[0]
            body = mako.template.Template(filename=fn, lookup=lookup).render(**v.data).encode("utf-8")
            return v.status, v.headers, body
        else:
            raise organic.exception.RouteException(organic.status.SERVER_ERROR)
    elif isinstance(v, organic.response.Raw):
        # Raw responses must declare their own Content-Type.
        if "Content-Type" not in v.headers:
            raise organic.exception.RouteException(organic.status.SERVER_ERROR)
        else:
            return v.status, v.headers, v.body
    elif v is False:
        raise organic.exception.RouteException(organic.status.NOT_FOUND)
    else:
        raise organic.exception.RouteException(organic.status.SERVER_ERROR)
def add_image(self, album_id, imagename, title, comment, reduce_size, size, colors):
    """Upload an image to a Picasa album, optionally converting/resizing it
    to a temporary PNG first.  (Python 2 code — integer division in the
    resize math.)

    Returns the inserted photo entry, or None for unsupported mime types.
    """
    album_url = "/data/feed/api/user/%s/albumid/%s" % (self.email, album_id)
    mime = mimetypes.guess_type(imagename)[0]
    if mime in SUPPORTED_MIMES or mime in CONVERTED_MIMES:
        temp = None
        # Convert to a temporary PNG when resizing, palette reduction, or a
        # format conversion is requested.
        if reduce_size is True or colors is True or mime in CONVERTED_MIMES:
            image = Image.open(imagename)
            w, h = image.size
            temp = tempfile.mkstemp(suffix=".png", prefix="picapy_tmp", dir="/tmp")[1]
            print(("converting from %s to %s" % (imagename, temp)))
            if reduce_size is True and (w > size or h > size):
                # Scale the longest edge down to ``size``, keeping aspect.
                maximo = size
                if w > h:
                    h = h * maximo / w
                    w = maximo
                else:
                    w = w * maximo / h
                    h = maximo
                image = image.resize([w, h], Image.ANTIALIAS)
            if colors is True:
                image = image.convert("P", palette=Image.WEB)
            image.save(temp)
            imagename = temp
            mime = mimetypes.guess_type(imagename)[0]
        try:
            photo = self.gd_client.InsertPhotoSimple(album_url, title, comment,
                                                     imagename, content_type=mime)
        # BUG FIX: the exception name was misspelled 'GooglePhotosExceptio',
        # which would raise NameError instead of catching the error.
        # NOTE(review): confirm the imported name is GooglePhotosException.
        except GooglePhotosException:
            # Session likely expired: log in again and retry once.
            self.gd_client = gdata.photos.service.PhotosService()
            self.gd_client.ProgrammaticLogin()
            photo = self.gd_client.InsertPhotoSimple(album_url, title, comment,
                                                     imagename, content_type=mime)
        if temp is not None and os.path.exists(temp):
            os.remove(temp)
        return photo
def clean(self): cleaned_data = super(CreateSoundForm, self).clean() url = cleaned_data.get('url') image = cleaned_data.get('image') if image is None: raise forms.ValidationError("The image you uploaded is not an image.") image_type = guess_type(image.name)[0] if 'image' not in image_type: raise forms.ValidationError("The image you uploaded is not an image.") flag = [False, False] content = cleaned_data.get('content') if content is not None: content_type = guess_type(content.name)[0] print content_type if 'video' in content_type or 'audio' in content_type: flag[0] = True if url and 'https://www.youtube.com/watch?v=' in url: flag[1] = True if not any(flag): raise forms.ValidationError('Neither url nor content is valid, or they are not provided.') return cleaned_data
def createThumbnail(path):
    """Return the thumbnail image created for the file at ``path``.

    Known image extensions are thumbnailed with PIL (down to 256x256);
    other types are handed to an external thumbnailer command looked up by
    mime type.  Returns False when no thumbnail can be created.
    """
    fileName, ext = os.path.splitext(path)
    thumbnailers = loadThumbnailers()
    if ext.lower() in [".png", ".jpg", ".bmp", ".eps", ".gif", ".im", ".jpeg",
                       ".mps", ".pcx", ".ppm", ".tiff", ".webp", ".ico"]:
        try:
            image = Image.open(path)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            return False
        # If the image is big, scale down to 512x512 first using a faster
        # but lower quality scaling algorithm (Image.NEAREST)
        if image.size[0] > 512:
            image.thumbnail((512, 512), Image.NEAREST)
        image.thumbnail((256, 256), Image.ANTIALIAS)
        image.save(path2thumbnail(path))
    elif guess_type(path)[0] in thumbnailers.keys():
        # Delegate to a configured external thumbnailer command, filling in
        # %o (output), %i (input), %u (file URL) and %s (pixel size).
        o = path2thumbnail(path)
        u = path2url(path)
        s = "256"
        command = thumbnailers[guess_type(path)[0]]
        for (pat, sub) in [("%o", o), ("%i", path), ("%u", u), ("%s", s)]:
            command = re.sub(pat, sub, command)
        # SECURITY NOTE(review): the command is built from file paths and run
        # via os.system — shell metacharacters in filenames will execute.
        os.system(command)
        try:
            return Image.open(o)
        except:
            return False
    else:
        return False
    return image
def create_message_with_attchments_local_files(cls, sender, to, cc, bcc, subject, message_html):
    """Build a Gmail-API-ready message dict: the HTML body plus two ioGrow
    logo images attached (referenced as cid:logo_cid and cid:user_cid).

    Returns {'raw': <urlsafe base64 of the RFC 2822 message>}.
    """
    message = MIMEMultipart()
    message['to'] = to
    message['cc'] = cc
    message['bcc'] = bcc
    message['from'] = sender
    message['subject'] = subject
    message_html += '<p>Sent from my <a href="http://www.iogrow.com/">ioGrow account </a></p>'
    msg = MIMEText(smart_str(message_html), 'html')
    message.attach(msg)
    path = os.path.join('static/src/img/mail_images', 'sm-iogrow-true.png')
    content_type, encoding = mimetypes.guess_type(path)
    path2 = os.path.join('static/src/img/mail_images', 'Logo-iogrow.png')
    content_type2, encoding2 = mimetypes.guess_type(path2)
    main_type, sub_type = content_type.split('/', 1)
    main_type2, sub_type2 = content_type2.split('/', 1)
    if main_type == 'image':
        # BUG FIX: this binary image was opened in text mode ('r').
        fp = open(path, 'rb')
        msg = MIMEImage(fp.read(), _subtype=sub_type)
        fp.close()
    if main_type2 == 'image':
        fp2 = open(path2, 'rb')
        # BUG FIX: the second image reused ``sub_type`` (the first file's
        # subtype) instead of ``sub_type2``.
        msg2 = MIMEImage(fp2.read(), _subtype=sub_type2)
        fp2.close()
    msg.add_header('Content-Disposition', 'attachment', filename="logo")
    msg.add_header("Content-ID", "<logo_cid>")
    message.attach(msg)
    msg2.add_header('Content-Disposition', 'attachment', filename="user")
    msg2.add_header("Content-ID", "<user_cid>")
    message.attach(msg2)
    return {'raw': base64.urlsafe_b64encode(message.as_string())}
def do_GET(self):
    """HTTP GET dispatcher: JSON status endpoints, static files under
    root_dir, and reboot/scale command URLs."""
    # NOTE(review): ``mime`` and ``aJSON`` are never used below; the
    # Content-Type actually comes from mimetypes.guess_type.
    mime = {"html": "text/html", "css": "text/css", "png": "image/png",
            "jpg": "image/jpg", "js": "application/javascript",
            "json": "application/json"}
    RequestedFileType = mimetypes.guess_type(self.path)[0] if mimetypes.guess_type(self.path)[0] != None else 'text/html'
    aJSON = "{}"
    try:
        if self.path == '/':
            # Root maps to the dashboard index page.
            self.path = self.path + 'index.html'
        if self.path == '/getOverviewData':
            self.send_response(200)
            self.send_header("Content-type", "application/json")
            self.end_headers()
            global GUI_dict
            global extraVerbose
            if extraVerbose:
                log.debug(json.dumps(GUI_dict))
            self.wfile.write(bytes(json.dumps(GUI_dict), 'UTF-8'))
            return
        elif self.path == '/getEvacEvents':
            self.send_response(200)
            self.send_header("Content-type", "application/json")
            self.end_headers()
            self.wfile.write(bytes(json.dumps(mlp.eventsMap), 'UTF-8'))
            return
        elif self.path == '/frontEndEventStack':
            # Drain the pending front-end events after sending them.
            self.send_response(200)
            self.send_header("Content-type", "application/json")
            self.end_headers()
            self.wfile.write(bytes(json.dumps(frontEndEventStack), 'UTF-8'))
            frontEndEventStack.clear()
            return
        elif os.path.isfile(root_dir + self.path):
            # Static file under the web root.
            self.send_response(200)
            self.send_header("Content-type", RequestedFileType)
            self.end_headers()
            fp = open(root_dir + self.path, 'rb')
            self.wfile.write(fp.read())
            fp.close()
            return
        elif len(self.path.split('reboot--')) > 1:
            # '...reboot--<target>' triggers an async reboot of <target>.
            Thread(target=button_action_reboot, args=(self.path.split('reboot--')[1],), name="rebooterThread").start()
            log.info('Got command to reboot %r' % (self.path.split('reboot--')[1]))
            return
        elif len(self.path.split('actionScaleIn')) > 1:
            Thread(target=button_action_scale, args=('in',), name="scalerThread").start()
            log.info('Got command to scale-in')
            return
        elif len(self.path.split('actionScaleOut')) > 1:
            Thread(target=button_action_scale, args=('out',), name="scalerThread").start()
            log.info('Got command to scale-out')
            return
        else:
            self.send_response(404, 'File not found')
            self.send_header("Content-type", 'text/html')
            self.end_headers()
            self.wfile.write(bytes('File not found', 'UTF-8'))
            return
    except BrokenPipeError:
        # Client went away mid-response.
        log.error('Failed to complete request in "do_GET"')
    except KeyboardInterrupt:
        log.info('KeyboardInterrupt received, quitting.')
        return
def test_isbinary(self):
    """Exercise viewer._is_binary on text vs binary extensions and on
    content sniffing for extensionless files."""
    binary = self.viewer._is_binary
    # BUG FIX: 'foo.py' and 'foo.html' were accidentally concatenated into
    # the single meaningless name 'foo.pyfoo.html' by a missing comma.
    for f in ['foo.rdf', 'foo.xml', 'foo.js', 'foo.py', 'foo.html',
              'foo.txt', 'foo.dtd', 'foo.xul', 'foo.properties',
              'foo.json', 'foo.src', 'CHANGELOG']:
        m, encoding = mimetypes.guess_type(f)
        assert not binary(m, f), '%s should not be binary' % f
    # These extensions must be treated as text even without a mimetype.
    for f in ['foo.dtd', 'foo.xul', 'foo.properties']:
        m, encoding = mimetypes.guess_type(f)
        assert not binary(None, f), '%s should not be binary' % f
    for f in ['foo.png', 'foo.gif', 'foo.exe', 'foo.swf']:
        m, encoding = mimetypes.guess_type(f)
        assert binary(m, f), '%s should be binary' % f
    # Content sniffing: write sample bytes and classify by contents.
    filename = tempfile.mktemp()
    for txt in ['#python', u'\0x2']:
        open(filename, 'w').write(txt)
        m, encoding = mimetypes.guess_type(filename)
        assert not binary(m, filename), '%s should not be binary' % txt
    for txt in ['#!/usr/bin/python', 'MZ']:
        open(filename, 'w').write(txt)
        m, encoding = mimetypes.guess_type(filename)
        assert binary(m, filename), '%s should be binary' % txt
def save_uploaded(): fname, content = get_uploaded_content() print mimetypes.guess_type(content) if content: return save_file(fname, content); else: raise Exception
def theme_create_static_file(request, name):
    """AJAX view: create a StaticFile on theme ``name`` from POSTed data.

    Either stores a remote URL or saves an empty placeholder file; the mime
    type is guessed from the name in both cases.  Responds with JSON.
    """
    theme = get_object_or_404(Theme, name=name)
    ret = {}
    if request.method == 'POST':
        name = request.POST['name']
        if theme.static_files.filter(name=name).count():
            ret = {'result':'error', 'message':'Static File already exists.'}
        else:
            sf = theme.static_files.create(name=name)
            if request.POST.get('url', None):
                # Remote asset: store the URL and guess its mime type.
                sf.url = request.POST['url']
                sf.mime_type = mimetypes.guess_type(sf.url)[0] or ''
                sf.save()
            else:
                # Saves an empty file as a starting point
                file_name = '%s-%s-%s'%(theme.pk, sf.pk, name)
                content = ContentFile('')
                sf.file.save(file_name, content)
                # Detects the mimetype for the given name
                sf.mime_type = mimetypes.guess_type(file_name)[0] or ''
                sf.save()
            ret = {'result':'ok', 'info':{'pk':sf.pk, 'url':sf.get_url()}}
    return HttpResponse(simplejson.dumps(ret), mimetype='text/javascript')
def _get_rss_item(page):
    """Render one <item> element of the RSS feed for ``page``.

    Draft/queued pages and pages without a date yield an empty string.
    Attached files and illustrations become <enclosure> elements with a
    guessed mime type.
    """
    labels = get_page_labels(page)
    if "draft" in labels or "queue" in labels:
        return ""
    if "date" not in page:
        return ""
    xml = u"<item>\n"
    xml += u"\t<title>%s</title>\n" % _escape_xml(page["title"])
    xml += u"\t<guid>%s</guid>\n" % _full_url(page["url"])
    xml += u"\t<pubDate>%s</pubDate>\n" % _format_rfc_date(page["date"])
    if "file" in page:
        # Podcast-style enclosure: type guessed from the file name.
        _filename = page["file"].split("/")[-1]
        mime_type = mimetypes.guess_type(_filename)[0]
        xml += u"\t<enclosure url='%s' type='%s' length='%s'/>\n" % (page["file"], mime_type, page.get("filesize", "0"))
    if "illustration" in page:
        _filename = page["illustration"].split("/")[-1]
        mime_type = mimetypes.guess_type(_filename)[0]
        xml += u"\t<enclosure url='%s' type='%s' length='%s'/>\n" % (page["illustration"], mime_type, 0)
    if get_config("rss_with_bodies") != False:
        xml += u"\t<description>%s</description>\n" % _escape_xml(_fix_rss_item_description(page.html, page))
    author = get_page_author(page)
    if author is not None:
        # Fall back to a masked placeholder address when none is configured.
        xml += u"\t<author>%s</author>\n" % author.get("email", "*****@*****.**")
    xml += u"</item>\n"
    return xml
def add_image_set_by_array(images, parents, name):
    """Store an image set (parent images + categorized images) in GridFS.

    ``parents`` is a list of file paths; ``images`` is a list of dicts with
    'path' and 'category' (an index into ``parents``).  Raises ValueError on
    a duplicate set name and TypeError when a mime type can't be guessed.
    """
    # check for duplicate name
    if db.images.find_one({'name': name}) != None:
        raise ValueError(('An image set with the name %s already exists. Please ' +
                          'change the folder name and try uploading again.') % name)
    # put all the parent images into gridFS, save their object IDs
    parent_list = []
    for image in parents:
        with open(image, 'rb') as f:
            data = f.read()
        content_type = guess_type(image)[0]
        if content_type == None:
            raise TypeError(('Couldn\'t guess the file extension for %s. ' +
                             'Check the filename.') % image)
        parent_id = fs.put(data, content_type=content_type)
        parent_list.append(parent_id)
    # put all the images into gridFS, save their object IDs
    image_list = []
    for image in images:
        with open(image['path'], 'rb') as f:
            data = f.read()
        content_type = guess_type(image['path'])[0]
        if content_type == None:
            raise TypeError(('Couldn\'t guess the file extension for %s. ' +
                             'Check the filename.') % image['path'])
        image_id = fs.put(data, content_type=content_type)
        # Each image links back to the parent of its category.
        image_list.append({'image_id': image_id,
                           'parent': parent_list[image['category']],
                           'category': image['category']})
    # save the image set, return the
    return db.images.insert({'name': name, 'parents': parent_list, 'images': image_list})
def TextFileChecker(FileFolder, FileName):
    """Return True when ``FileFolder + FileName`` looks like a text file.

    Accepts plain text ('text/plain'), proxy auto-config files, and files
    whose mime type cannot be guessed at all (None); everything else is
    considered non-text.
    """
    guessed = mimetypes.guess_type(FileFolder + FileName)[0]
    return guessed in ('text/plain', 'application/x-ns-proxy-autoconfig', None)
def create_file_message(recipient_address, *args):
    """
    Creates a Python email with file attachments as part of a MIME-class message.
    :param recipient_address: Email address of the recipient
    :param *args: List parameter containing filenames
    :return: MIMEMultipart instance
    """
    message = MIMEMultipart()
    message["to"] = recipient_address
    # Only attach image, plain and html text file types, according to the
    # first element of mimetypes' guess_type method
    for fn in args:
        fn = os.path.normpath(fn)
        # IMPROVED: guess once per file (the original called guess_type up to
        # four times per attachment) and skip unknown types.
        mime_type = mimetypes.guess_type(fn)[0]
        if not mime_type:
            continue
        if mime_type.find("image") >= 0:
            with open(fn, "rb") as f:
                message.attach(MIMEImage(f.read()))
        elif mime_type.find("plain") >= 0:
            with open(fn, "r") as f:
                message.attach(MIMEText(f.read(), "plain"))
        elif mime_type.find("html") >= 0:
            with open(fn, "r") as f:
                message.attach(MIMEText(f.read(), "html"))
    return message
def clean_file(self):
    """Validate an uploaded solution file against the task's size limit and
    supported mime types; zip archives are checked member-by-member.

    Returns the cleaned file; raises forms.ValidationError on any violation.
    """
    data = self.cleaned_data['file']
    task = self.cleaned_data['solution'].task
    max_file_size_kb = task.max_file_size
    max_file_size = 1024 * max_file_size_kb
    supported_types_re = re.compile(task.supported_file_types)
    if data:
        contenttype = mimetypes.guess_type(data.name)[0]
        # don't rely on the browser: data.content_type could be wrong or empty
        if (contenttype is None) or (not (supported_types_re.match(contenttype) or ziptype_re.match(contenttype))):
            raise forms.ValidationError(_('The file of type %s is not supported.' % contenttype))
        if ziptype_re.match(contenttype):
            try:
                # FIX: renamed the local from ``zip`` — it shadowed the builtin.
                archive = zipfile.ZipFile(data)
                if archive.testzip():
                    raise forms.ValidationError(_('The zip file seams to be corrupt.'))
                # check whole zip instead of contained files
                if sum(fileinfo.file_size for fileinfo in archive.infolist()) > 1000000:
                    raise forms.ValidationError(_('The zip file is to big.'))
                for fileinfo in archive.infolist():
                    (mtype, encoding) = mimetypes.guess_type(fileinfo.filename)
                    ignorred = SolutionFile.ignorred_file_names_re.search(fileinfo.filename)
                    supported = mtype and supported_types_re.match(mtype)
                    if not ignorred and not supported:
                        raise forms.ValidationError(_("The file '%(file)s' of guessed mime type '%(type)s' in this zip file is not supported." % {'file': fileinfo.filename, 'type': mtype}))
            except forms.ValidationError:
                raise
            # FIX: was a bare ``except:`` which also caught SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            except Exception:
                raise forms.ValidationError(_('Uhoh - something unexpected happened.'))
        if data.size > max_file_size:
            raise forms.ValidationError(_("The file '%(file)s' is bigger than %(size)iKB which is not suported." % {'file': data.name, 'size': max_file_size_kb}))
    return data
def mimeTypeGuesser(name=None, data=None, content_type=None):
    """Best-effort mime type detection from a filename, raw data, and/or a
    declared content type.  Returns None when nothing at all is supplied.
    """
    if name is None and data is None and content_type is None:
        return None
    # Start from the stricter getter, then fall back to filename-based
    # guessing (strict, then non-strict), and finally to prefix sniffing.
    mimeType = mimeTypeGetter(name=name, data=data, content_type=content_type)
    if name and not mimeType:
        mimeType, encoding = mimetypes.guess_type(name, strict=True)
        if not mimeType:
            mimeType, encoding = mimetypes.guess_type(name, strict=False)
        #
        # XXX If `encoding` is not None, we should re-consider the
        # guess, since the encoding here is Content-Encoding, not
        # charset. In particular, things like .tar.gz map to
        # ('application/x-tar', 'gzip'), which may require different
        # handling, or at least a separate content-type.
    if data and not mimeType:
        # no idea, really, but let's sniff a few common things:
        for prefix, type, charset in _prefix_table:
            if data.startswith(prefix):
                mimeType = type
                break
    return mimeType
def _serve_asset(self, path, gzipped_asset_bytes, request):
    """Serves a pre-gzipped static asset from the zip file."""
    # Unknown extensions fall back to a generic binary content type.
    guessed, _encoding = mimetypes.guess_type(path)
    content_type = guessed if guessed else "application/octet-stream"
    return http_util.Respond(
        request, gzipped_asset_bytes, content_type, content_encoding="gzip"
    )
def record_photo(path, library, inotify_event_type=None):
    """Create or refresh the PhotoFile/Photo database records for the image
    at ``path`` inside ``library``.

    Returns the Photo on success, False when the file is unchanged since the
    last scan, or the result of delete_photo_record() for delete/move-away
    inotify events.
    """
    # ``library`` may be a Library instance or a raw id.
    if type(library) == Library:
        library_id = library.id
    else:
        library_id = str(library)
    try:
        photo_file = PhotoFile.objects.get(path=path)
    except PhotoFile.DoesNotExist:
        photo_file = PhotoFile()
    if inotify_event_type in ['DELETE', 'MOVED_FROM']:
        # File disappeared: drop its records instead of updating them.
        return delete_photo_record(photo_file)
    file_modified_at = datetime.fromtimestamp(os.stat(path).st_mtime, tz=utc)
    # Unchanged since the last scan: nothing to do.
    if photo_file and photo_file.file_modified_at == file_modified_at:
        return False
    metadata = PhotoMetadata(path)
    # Try the metadata date fields from most to least specific.
    date_taken = None
    possible_date_keys = ['Date/Time Original', 'Date Time Original', 'Date/Time', 'Date Time', 'GPS Date/Time', 'Modify Date', 'File Modification Date/Time']
    for date_key in possible_date_keys:
        date_taken = parse_datetime(metadata.get(date_key))
        if date_taken:
            break
    # Find or create the Camera, widening its earliest/latest photo range.
    camera = None
    camera_make = metadata.get('Make')
    camera_model = metadata.get('Camera Model Name')
    if camera_model:
        # Some vendors repeat the make inside the model string.
        camera_model = camera_model.replace(camera_make, '').strip()
    if camera_make and camera_model:
        try:
            camera = Camera.objects.get(library_id=library_id, make=camera_make, model=camera_model)
            if date_taken < camera.earliest_photo:
                camera.earliest_photo = date_taken
                camera.save()
            if date_taken > camera.latest_photo:
                camera.latest_photo = date_taken
                camera.save()
        except Camera.DoesNotExist:
            camera = Camera(library_id=library_id, make=camera_make, model=camera_model,
                            earliest_photo=date_taken, latest_photo=date_taken)
            camera.save()
    # Same range-widening find-or-create for the Lens.
    lens = None
    lens_name = metadata.get('Lens ID')
    if lens_name:
        try:
            lens = Lens.objects.get(name=lens_name)
            if date_taken < lens.earliest_photo:
                lens.earliest_photo = date_taken
                lens.save()
            if date_taken > lens.latest_photo:
                lens.latest_photo = date_taken
                lens.save()
        except Lens.DoesNotExist:
            lens = Lens(library_id=library_id, name=lens_name,
                        earliest_photo=date_taken, latest_photo=date_taken)
            lens.save()
    photo = None
    if date_taken:
        try:
            # TODO: Match on file number/file name as well
            photo = Photo.objects.get(taken_at=date_taken)
        except Photo.DoesNotExist:
            pass
    latitude = None
    longitude = None
    if metadata.get('GPS Position'):
        latitude, longitude = parse_gps_location(metadata.get('GPS Position'))
    iso_speed = None
    if metadata.get('ISO'):
        try:
            # Take the first run of digits; ISO strings can carry extra text.
            iso_speed = int(re.search(r'[0-9]+', metadata.get('ISO')).group(0))
        except AttributeError:
            pass
    if not photo:
        # Save Photo
        aperture = None
        aperturestr = metadata.get('Aperture')
        if aperturestr:
            try:
                aperture = Decimal(aperturestr)
                # 'inf' parses as a valid Decimal but is meaningless here.
                if aperture.is_infinite():
                    aperture = None
            except:
                pass
        photo = Photo(
            library_id=library_id,
            taken_at=date_taken,
            taken_by=metadata.get('Artist') or None,
            aperture=aperture,
            exposure=metadata.get('Exposure Time') or None,
            iso_speed=iso_speed,
            focal_length=metadata.get('Focal Length') and metadata.get('Focal Length').split(' ', 1)[0] or None,
            flash=metadata.get('Flash') and 'on' in metadata.get('Flash').lower() or False,
            metering_mode=metadata.get('Metering Mode') or None,
            drive_mode=metadata.get('Drive Mode') or None,
            shooting_mode=metadata.get('Shooting Mode') or None,
            camera=camera,
            lens=lens,
            latitude=latitude,
            longitude=longitude,
            altitude=metadata.get('GPS Altitude') and metadata.get('GPS Altitude').split(' ')[0]
        )
        photo.save()
    width = metadata.get('Image Width')
    height = metadata.get('Image Height')
    # Rotated images report swapped dimensions.
    if metadata.get('Orientation') in ['Rotate 90 CW', 'Rotate 270 CCW', 'Rotate 90 CCW', 'Rotate 270 CW']:
        old_width = width
        width = height
        height = old_width
    # Save PhotoFile
    photo_file.photo = photo
    photo_file.path = path
    photo_file.width = width
    photo_file.height = height
    photo_file.mimetype = mimetypes.guess_type(path)[0]
    photo_file.file_modified_at = file_modified_at
    photo_file.bytes = os.stat(path).st_size
    photo_file.preferred = False  # TODO
    photo_file.save()
    # Create task to ensure JPEG version of file exists (used for thumbnailing, analysing etc.)
    Task(
        type='ensure_raw_processed',
        subject_id=photo.id,
        complete_with_children=True
    ).save()
    return photo
def upload_artifact(self, name, artifact_object=None, metadata=None, delete_after_upload=False):
    # type: (str, Optional[object], Optional[dict], bool) -> bool
    """Serialize ``artifact_object`` to a temporary file (when needed),
    upload it, and register it on the task's artifact list.

    Supported object types: numpy array (.npz), pandas DataFrame (CSV),
    PIL image (.png), dict (JSON), a remote URL string (registered
    without uploading), and a local file/folder path (folders are
    zipped).  Returns True on success, False on failure.
    """
    if not Session.check_min_api_version('2.3'):
        LoggerRoot.get_base_logger().warning(
            'Artifacts not supported by your TRAINS-server version, '
            'please upgrade to the latest server version')
        return False
    if name in self._artifacts_container:
        raise ValueError(
            "Artifact by the name of {} is already registered, use register_artifact"
            .format(name))
    artifact_type_data = tasks.ArtifactTypeData()
    override_filename_in_uri = None
    override_filename_ext_in_uri = None
    uri = None
    if np and isinstance(artifact_object, np.ndarray):
        # numpy array -> compressed .npz temp file.
        artifact_type = 'numpy'
        artifact_type_data.content_type = 'application/numpy'
        artifact_type_data.preview = str(artifact_object.__repr__())
        override_filename_ext_in_uri = '.npz'
        override_filename_in_uri = name + override_filename_ext_in_uri
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                     suffix=override_filename_ext_in_uri)
        os.close(fd)
        np.savez_compressed(local_filename, **{name: artifact_object})
        delete_after_upload = True
    elif pd and isinstance(artifact_object, pd.DataFrame):
        # DataFrame -> CSV temp file (extension/compression per settings).
        artifact_type = 'pandas'
        artifact_type_data.content_type = 'text/csv'
        artifact_type_data.preview = str(artifact_object.__repr__())
        override_filename_ext_in_uri = self._save_format
        override_filename_in_uri = name
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                     suffix=override_filename_ext_in_uri)
        os.close(fd)
        artifact_object.to_csv(local_filename, compression=self._compression)
        delete_after_upload = True
    elif isinstance(artifact_object, Image.Image):
        # PIL image -> .png temp file; preview is the repr minus address.
        artifact_type = 'image'
        artifact_type_data.content_type = 'image/png'
        desc = str(artifact_object.__repr__())
        artifact_type_data.preview = desc[1:desc.find(' at ')]
        override_filename_ext_in_uri = '.png'
        override_filename_in_uri = name + override_filename_ext_in_uri
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                     suffix=override_filename_ext_in_uri)
        os.close(fd)
        artifact_object.save(local_filename)
        delete_after_upload = True
    elif isinstance(artifact_object, dict):
        # dict -> pretty-printed JSON temp file; the JSON doubles as preview.
        artifact_type = 'JSON'
        artifact_type_data.content_type = 'application/json'
        preview = json.dumps(artifact_object, sort_keys=True, indent=4)
        override_filename_ext_in_uri = '.json'
        override_filename_in_uri = name + override_filename_ext_in_uri
        fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                     suffix=override_filename_ext_in_uri)
        os.write(fd, bytes(preview.encode()))
        os.close(fd)
        artifact_type_data.preview = preview
        delete_after_upload = True
    elif (isinstance(artifact_object, six.string_types)
          and urlparse(artifact_object).scheme in remote_driver_schemes):
        # Already-remote URL: register as-is, nothing to upload.
        local_filename = None
        uri = artifact_object
        artifact_type = 'custom'
        artifact_type_data.content_type = mimetypes.guess_type(
            artifact_object)[0]
    elif isinstance(
            artifact_object,
            six.string_types + (
                Path,
                pathlib_Path,
            ) if pathlib_Path is not None else (Path, )):
        # Local path: a single file is uploaded directly, anything else
        # (folder or wildcard) is zipped first.
        artifact_object = Path(artifact_object)
        # NOTE(review): the expanded/absolute result is discarded here
        # (Path methods return a new object) — confirm intent.
        artifact_object.expanduser().absolute()
        # noinspection PyBroadException
        try:
            create_zip_file = not artifact_object.is_file()
        except Exception:
            # Hack for windows pathlib2 bug, is_file isn't valid.
            create_zip_file = True
        else:
            # We assume that this is not Windows os
            if artifact_object.is_dir():
                # change to wildcard
                artifact_object /= '*'
        if create_zip_file:
            folder = Path('').joinpath(*artifact_object.parts[:-1])
            if not folder.is_dir() or not folder.parts:
                raise ValueError(
                    "Artifact file/folder '{}' could not be found".format(
                        artifact_object.as_posix()))
            wildcard = artifact_object.parts[-1]
            files = list(Path(folder).rglob(wildcard))
            override_filename_ext_in_uri = '.zip'
            override_filename_in_uri = folder.parts[
                -1] + override_filename_ext_in_uri
            fd, zip_file = mkstemp(
                prefix=quote(folder.parts[-1], safe="") + '.',
                suffix=override_filename_ext_in_uri)
            try:
                artifact_type_data.content_type = 'application/zip'
                # Build a human-readable listing of the archive as preview.
                artifact_type_data.preview = 'Archive content {}:\n'.format(
                    artifact_object.as_posix())
                with ZipFile(zip_file, 'w', allowZip64=True,
                             compression=ZIP_DEFLATED) as zf:
                    for filename in sorted(files):
                        if filename.is_file():
                            relative_file_name = filename.relative_to(
                                folder).as_posix()
                            artifact_type_data.preview += '{} - {}\n'.format(
                                relative_file_name,
                                humanfriendly.format_size(
                                    filename.stat().st_size))
                            zf.write(filename.as_posix(),
                                     arcname=relative_file_name)
            except Exception as e:
                # failed uploading folder:
                # NOTE(review): message placeholders look swapped — the
                # exception is formatted where the folder belongs.
                LoggerRoot.get_base_logger().warning(
                    'Exception {}\nFailed zipping artifact folder {}'.
                    format(folder, e))
                return False
            finally:
                os.close(fd)
            artifact_object = zip_file
            artifact_type = 'archive'
            artifact_type_data.content_type = mimetypes.guess_type(
                artifact_object)[0]
            local_filename = artifact_object
            delete_after_upload = True
        else:
            if not artifact_object.is_file():
                raise ValueError(
                    "Artifact file '{}' could not be found".format(
                        artifact_object.as_posix()))
            override_filename_in_uri = artifact_object.parts[-1]
            artifact_object = artifact_object.as_posix()
            artifact_type = 'custom'
            artifact_type_data.content_type = mimetypes.guess_type(
                artifact_object)[0]
            local_filename = artifact_object
    else:
        raise ValueError("Artifact type {} not supported".format(
            type(artifact_object)))
    # remove from existing list, if exists
    for artifact in self._task_artifact_list:
        if artifact.key == name:
            if artifact.type == self._pd_artifact_type:
                raise ValueError(
                    "Artifact of name {} already registered, "
                    "use register_artifact instead".format(name))
            self._task_artifact_list.remove(artifact)
            break
    if not local_filename:
        # Remote-URL registration: no local file, no size/hash.
        file_size = None
        file_hash = None
    else:
        # check that the file to upload exists
        local_filename = Path(local_filename).absolute()
        if not local_filename.exists() or not local_filename.is_file():
            LoggerRoot.get_base_logger().warning(
                'Artifact upload failed, cannot find file {}'.format(
                    local_filename.as_posix()))
            return False
        file_hash, _ = self.sha256sum(local_filename.as_posix())
        file_size = local_filename.stat().st_size
        uri = self._upload_local_file(
            local_filename,
            name,
            delete_after_upload=delete_after_upload,
            override_filename=override_filename_in_uri,
            override_filename_ext=override_filename_ext_in_uri)
    timestamp = int(time())
    artifact = tasks.Artifact(
        key=name,
        type=artifact_type,
        uri=uri,
        content_size=file_size,
        hash=file_hash,
        timestamp=timestamp,
        type_data=artifact_type_data,
        display_data=[(str(k), str(v))
                      for k, v in metadata.items()] if metadata else None)
    # update task artifacts
    with self._task_edit_lock:
        self._task_artifact_list.append(artifact)
        self._task.set_artifacts(self._task_artifact_list)
    return True
def create(self): if self.wizard: self.run_wizard() self.generate_projectpath() elif not self.validate_projectname(): err = " %(red)sInvalid Project Name%(reset)s" % values print err elif self.validate_projectname(): self.generate_projectpath() self.generate_secret() if self.template and os.path.isdir(self.template): source_path = os.path.abspath(self.template) else: source_path = os.path.join(SOURCE, "templates") dest_path = os.path.join(self.dest, self.project) if self.verbose: msg = "%(cyan)sGenerating Project...%(reset)s\n" % self.colorize print msg render_dict = { "PROJECT": self.project, "SECRET": self.secret, "HOSTNAME": self.hostname, "PORT": self.port, "DATABASE": self.database, 'CONFIG': self.config, "PATH": self.path, "PROJECTPATH": self.projectpath, "LOCALPATH": self.dest, "VERSION": self.version, "APACHENAME": 'apache2', # TODO: apache2 vs httpd. } for root, dirs, files in os.walk(source_path): folder = root.replace(source_path, '') # If the current directory selected is trunk rename it # to the project name. if '/trunk' in folder and self.project != 'trunk': folder = folder.replace('trunk', self.project) dest = "%s%s" % (dest_path, folder) if self.verbose: values = dict(self.colorize, dest=dest) print "%(cyan)sCopying...%(reset)s %(dest)s" % values if not os.path.isdir(dest): os.mkdir(dest) for f in files: if ".pyc" in f: continue # When config setting 1 or 2 is selected ignore # the nginx.conf since it's not needed. if self.config in [1, 2] and "nginx.conf" in f: continue old_location = os.path.join(root, f) # to avoid installation errors, these django template # files had to be renamed a different extension. 
if f in ["manage.pyt", "settings.pyt"]: f = f.replace('.pyt', '.py') new_location = os.path.join(dest, f) try: shutil.copy(old_location, new_location) except IOError: pass if self.verbose: values = dict(self.colorize, loc=new_location) msg = "%(cyan)sCopying...%(reset)s %(loc)s" % values print msg # A feature default and custom template configurations. # Since the templates folder might contain template code # don't allow files within the folder to be rendered. if 'templates' in new_location: continue # Is the current file safe for template rendering? file_type = mimetypes.guess_type(f)[0] _fn, ext = os.path.splitext(f) if ext in ALLOWED_EXTS or file_type and 'text' in file_type: file_in = open(new_location) template = Template(file_in.read()) rendered_template = template.render(render_dict) file_in.close() file_out = open(new_location, "w") file_out.write(rendered_template) if self.verbose or self.wizard: print "\n%(cyan)sDone!%(reset)s" % self.colorize
def data_from_file(self, filename): content_type, _encoding = mimetypes.guess_type(filename) with open(filename, 'rb') as f: return f.read(), content_type
def get_content_type(self, filename): return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
emailfrom = "XXX" emailto = "XXX" filesToSend = ["take_off_hold.csv", "trial_expired.csv", "missing_info.csv"] username = "******" password = "******" msg = MIMEMultipart() msg["From"] = emailfrom msg["To"] = emailto msg["Subject"] = "Free Trial Queries" msg["Some Body Text"] msg.preamble = "Free Trial Queries" for fileToSend in filesToSend: ctype, encoding = mimetypes.guess_type(fileToSend) if ctype is None or encoding is not None: ctype = "application/octet-stream" maintype, subtype = ctype.split("/", 1) if maintype == "text": fp = open(fileToSend) # Note: we should handle calculating the charset attachment = MIMEText(fp.read(), _subtype=subtype) fp.close() elif maintype == "image": fp = open(fileToSend, "rb") attachment = MIMEImage(fp.read(), _subtype=subtype) fp.close() elif maintype == "audio":
async def get_mimeType(name): """ - Check mimeType given file - """ mimeType = guess_type(name)[0] if not mimeType: mimeType = 'text/plain' return mimeType
def sendmail(filename='c:/1.xlsx', receiver=['*****@*****.**'], ccreceiver=[], subject="擎天柱基金资产净值明细表", pwfile='./emailpanna.json'): mailinfo = file2dict(pwfile) sender = mailinfo['sender'] smtpserver = mailinfo['smtpserver'] username = mailinfo['username'] password = mailinfo['password'] # Create message container - the correct MIME type is multipart/alternative. msg = MIMEMultipart('alternative') msg['Subject'] = subject msg['From'] = sender msg['To'] = ';'.join(receiver) msg['Cc'] = ';'.join(ccreceiver) # 定义发送时间(不定义的可能有的邮件客户端会不显示发送时间) msg['date'] = time.strftime('%a, %d %b %Y %H:%M:%S %z') # Create the body of the message (a plain-text and an HTML version). text = "你好,\n 附件是擎天柱基金资产净值明细表,请查收。" html = """\ <html> <head></head> <body> <p>Hi!<br> How are you?<br> Here is the <a href="http://www.python.org">link</a> you wanted. </p> </body> </html> """ # Record the MIME types of both parts - text/plain and text/html. part1 = MIMEText(text, 'plain') part2 = MIMEText(html, 'html') msg.attach(part1) # msg.attach(part2) basename = os.path.basename(filename) ctype, encoding = mimetypes.guess_type(filename) if ctype is None or encoding is not None: # No guess could be made, or the file is encoded (compressed), so # use a generic bag-of-bits type. ctype = 'application/octet-stream' print("none.......") maintype, subtype = ctype.split('/', 1) fp = open(filename, 'rb') part3 = MIMEBase(maintype, subtype) part3.set_payload(fp.read()) encoders.encode_base64(part3) part3.add_header('Content-Disposition', 'attachment', filename=('gbk', '', basename)) msg.attach(part3) try: smtp = smtplib.SMTP() smtp.connect(smtpserver) smtp.login(username, password) smtp.sendmail(sender, receiver + ccreceiver, msg.as_string()) smtp.quit() except Exception as e: print(str(e))
def file_ops(file_path): mime_type = guess_type(file_path)[0] mime_type = mime_type if mime_type else "text/plain" file_name = file_path.split("/")[-1] return file_name, mime_type
async def parse_tanner_response(self, requested_name, detection): content_type = None content = None status_code = 200 headers = {} # Creating a regex object for the pattern of multiple contiguous forward slashes p = re.compile('/+') # Substituting all occurrences of the pattern with single forward slash requested_name = p.sub('/', requested_name) if detection['type'] == 1: query_start = requested_name.find('?') if query_start != -1: requested_name = requested_name[:query_start] if requested_name == '/': requested_name = self.run_args.index_page try: if requested_name[-1] == '/': requested_name = requested_name[:-1] requested_name = unquote(requested_name) file_name = self.meta[requested_name]['hash'] content_type = self.meta[requested_name]['content_type'] except KeyError: status_code = 404 else: path = os.path.join(self.dir, file_name) if os.path.isfile(path): with open(path, 'rb') as fh: content = fh.read() if content_type: if 'text/html' in content_type: content = await self.html_handler.handle_content(content) elif detection['type'] == 2: payload_content = detection['payload'] if payload_content['page']: try: file_name = self.meta[payload_content['page']]['hash'] content_type = self.meta[payload_content['page']]['content_type'] page_path = os.path.join(self.dir, file_name) with open(page_path, encoding='utf-8') as p: content = p.read() except KeyError: content = '<html><body></body></html>' content_type = r'text\html' soup = BeautifulSoup(content, 'html.parser') script_tag = soup.new_tag('div') script_tag.append(BeautifulSoup(payload_content['value'], 'html.parser')) soup.body.append(script_tag) content = str(soup).encode() else: content_type = mimetypes.guess_type(payload_content['value'])[0] content = payload_content['value'].encode('utf-8') if 'headers' in payload_content: headers = payload_content['headers'] else: payload_content = detection['payload'] status_code = payload_content['status_code'] return (content, content_type, headers, status_code)
async def send_file(
    filename_or_io: Union[FilePath, BytesIO],
    mimetype: Optional[str]=None,
    as_attachment: bool=False,
    attachment_filename: Optional[str]=None,
    add_etags: bool=True,
    cache_timeout: Optional[int]=None,
    conditional: bool=False,
    last_modified: Optional[datetime]=None,
) -> Response:
    """Return a Reponse to send the filename given.

    Arguments:
        filename_or_io: The filename (path) to send, remember to use
            :func:`safe_join`.
        mimetype: Mimetype to use, by default it will be guessed or
            revert to the DEFAULT_MIMETYPE.
        as_attachment: If true use the attachment filename in a
            Content-Disposition attachment header.
        attachment_filename: Name for the filename, if it differs
        add_etags: Set etags based on the filename, size and modification time.
        last_modified: Used to override the last modified value.
        cache_timeout: Time in seconds for the response to be cached.

    Raises:
        ValueError: when no mimetype is given and none can be guessed
            from the attachment filename.
    """
    file_body: ResponseBody
    etag: Optional[str] = None
    if isinstance(filename_or_io, BytesIO):
        # In-memory source: no path, so no mtime/etag/cache metadata.
        file_body = current_app.response_class.io_body_class(filename_or_io)
    else:
        file_path = file_path_to_path(filename_or_io)
        if attachment_filename is None:
            attachment_filename = file_path.name
        file_body = current_app.response_class.file_body_class(file_path)
        if last_modified is None:
            last_modified = datetime.fromtimestamp(file_path.stat().st_mtime)
        if cache_timeout is None:
            cache_timeout = current_app.get_send_file_max_age(file_path)
        # Weak validator built from mtime, size and a checksum of the
        # path bytes (bytes(Path) encodes the filesystem path).
        etag = "{}-{}-{}".format(
            file_path.stat().st_mtime,
            file_path.stat().st_size,
            adler32(bytes(file_path)),
        )
    if mimetype is None and attachment_filename is not None:
        mimetype = mimetypes.guess_type(attachment_filename)[0] or DEFAULT_MIMETYPE
    if mimetype is None:
        raise ValueError(
            "The mime type cannot be infered, please set it manually via the mimetype argument."
        )
    response = current_app.response_class(file_body, mimetype=mimetype)
    if as_attachment:
        response.headers.add('Content-Disposition', 'attachment', filename=attachment_filename)
    if last_modified is not None:
        response.last_modified = last_modified
    response.cache_control.public = True
    if cache_timeout is not None:
        response.cache_control.max_age = cache_timeout
        response.expires = datetime.utcnow() + timedelta(seconds=cache_timeout)
    if add_etags and etag is not None:
        response.set_etag(etag)
    if conditional:
        # May downgrade to 304/206 based on the request's range headers.
        await response.make_conditional(request.range)
    return response
def content_type(self): return guess_type(self.displayname)[0]
me = 'gslee@localhost' # 내 주소 receiver = ['gslee@localhost', 'jangc@localhost'] # 받을 사람 주소 리스트 subject = '첨부 파일 메일 보내기' outer = MIMEBase('multipart', 'mixed') outer['Subject'] = Header(subject.encode('utf-8'), 'utf-8') outer['From'] = me outer['To'] = ', '.join(receiver) # 수신자 문자열 만들기 outer.preamble = 'This is a multi-part message in MIME format.\n\n' outer.epilogue = '' # 이렇게 하면 멀티파트 경계 다음에 줄 바꿈 코드가 삽입 됨 msg = MIMEText('파일들을 첨부합니다.'.encode('utf-8'), _charset='utf-8') outer.attach(msg) files = glob.glob('*.*') for fileName in files: ctype, encoding = mimetypes.guess_type(fileName) if ctype is None or encoding is not None: ctype = 'application/octet-stream' maintype, subtype = ctype.split('/', 1) if maintype == 'text': fd = open(fileName, encoding='utf-8') # utf-8이라 가정.. msg = MIMEText(fd.read().encode('utf-8'), _subtype=subtype, _charset='utf-8') elif maintype == 'image': fd = open(fileName, 'rb') msg = MIMEImage(fd.read(), _subtype=subtype) elif maintype == 'audio': fd = open(fileName, 'rb') msg = MIMEAudio(fd.read(), _subtype=subtype) else:
def is_image(path): mt, me = mimetypes.guess_type(path) return mt is not None and mt.startswith("image/")
def _GetMimetype(local_path): mime_type, _ = mimetypes.guess_type(local_path) return mime_type or 'application/octet-stream'
def item_enclosure_mime_type(self, item): return mimetypes.guess_type(item.audio_file.path)[0]
def method(self, **kwargs):
    """Validate ``kwargs`` against the API method's declared parameters,
    build the request URL (expanding the URI template), attach an
    optional media upload, and dispatch via the request builder.

    NOTE(review): ``argmap``, ``required_params``, ``pattern_params``,
    ``enum_params``, ``param_type``, ``repeated_params``, ``query_params``,
    ``path_params``, ``pathUrl``, ``mediaPathUrl``, ``accept``, ``maxSize``,
    ``httpMethod`` and ``methodId`` are closed over from the enclosing
    method-factory scope, which is not visible in this chunk.
    """
    # Reject unknown keyword arguments.
    for name in kwargs.iterkeys():
        if name not in argmap:
            raise TypeError('Got an unexpected keyword argument "%s"' % name)
    # Ensure every required parameter was supplied.
    for name in required_params:
        if name not in kwargs:
            raise TypeError('Missing required parameter "%s"' % name)
    # Enforce per-parameter regex constraints.
    for name, regex in pattern_params.iteritems():
        if name in kwargs:
            if re.match(regex, kwargs[name]) is None:
                raise TypeError(
                    'Parameter "%s" value "%s" does not match the pattern "%s"' %
                    (name, kwargs[name], regex))
    # Enforce per-parameter enumeration constraints.
    for name, enums in enum_params.iteritems():
        if name in kwargs:
            if kwargs[name] not in enums:
                raise TypeError(
                    'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                    (name, kwargs[name], str(enums)))
    # Split supplied values into query-string vs URL-path parameters,
    # casting each to its declared wire type.
    actual_query_params = {}
    actual_path_params = {}
    for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
            cast_value = [_cast(x, to_type) for x in value]
        else:
            cast_value = _cast(value, to_type)
        if key in query_params:
            actual_query_params[argmap[key]] = cast_value
        if key in path_params:
            actual_path_params[argmap[key]] = cast_value
    body_value = kwargs.get('body', None)
    media_filename = kwargs.get('media_body', None)
    if self._developerKey:
        actual_query_params['key'] = self._developerKey
    headers = {}
    headers, params, query, body = self._model.request(
        headers, actual_path_params, actual_query_params, body_value)
    expanded_url = uritemplate.expand(pathUrl, params)
    url = urlparse.urljoin(self._baseUrl, expanded_url + query)
    if media_filename:
        # Validate the upload's MIME type and size before sending.
        (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
        if media_mime_type is None:
            raise UnknownFileType(media_filename)
        if not mimeparse.best_match([media_mime_type], ','.join(accept)):
            raise UnacceptableMimeTypeError(media_mime_type)
        # Check the maxSize
        if maxSize > 0 and os.path.getsize(media_filename) > maxSize:
            raise MediaUploadSizeError(media_filename)
        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)
        if body is None:
            # Simple upload: the file contents become the request body.
            headers['content-type'] = media_mime_type
            # make the body the contents of the file
            f = file(media_filename, 'rb')
            body = f.read()
            f.close()
        else:
            # Multipart upload: metadata body + media as related parts.
            msgRoot = MIMEMultipart('related')
            # msgRoot should not write out it's own headers
            setattr(msgRoot, '_write_headers', lambda self: None)
            # attach the body as one part
            msg = MIMENonMultipart(*headers['content-type'].split('/'))
            msg.set_payload(body)
            msgRoot.attach(msg)
            # attach the media as the second part
            msg = MIMENonMultipart(*media_mime_type.split('/'))
            msg['Content-Transfer-Encoding'] = 'binary'
            f = file(media_filename, 'rb')
            msg.set_payload(f.read())
            f.close()
            msgRoot.attach(msg)
            body = msgRoot.as_string()
            # must appear after the call to as_string() to get the right boundary
            headers['content-type'] = (
                'multipart/related; '
                'boundary="%s"') % msgRoot.get_boundary()
    logging.info('URL being requested: %s' % url)
    return self._requestBuilder(self._http,
                                self._model.response,
                                url,
                                method=httpMethod,
                                body=body,
                                headers=headers,
                                methodId=methodId)
def on_get(self, req, resp, name): resp.content_type = mimetypes.guess_type(name)[0] resp.stream = io.open(self._image_store + '/' + name, 'rb')
def import_compressed_theme_data(self, data):
    """Import one or more themes from a zip archive's raw bytes.

    Expects a top-level directory containing one sub-directory per theme
    namespace, each with ``templates/``, ``css/``, ``js/`` and ``images/``
    folders.  Images are written to the blobstore; everything else is
    stored inline on the ``Theme`` entity.
    """
    compressed = zipfile.ZipFile(StringIO(data), 'r')
    paths = compressed.namelist()
    namespaces = []
    main_dir = None
    # A path with exactly two slashes is '<main_dir>/<namespace>/' — use
    # those entries to discover the archive's namespaces.
    for nl in paths:
        if nl.count('/') == 2:
            main_dir, nl, _ = nl.split('/')
            namespaces.append(nl)
    if not namespaces:
        raise Exception('No namespaces indentified')
    else:
        messages = []
        for namespace in namespaces:
            # De-duplicate against built-in and already-imported theme
            # namespaces by appending ' 2', ' 3', ...
            i = 2
            suffix = ''
            while is_local_theme_namespace(namespace + suffix):
                suffix = ' ' + str(i)
                i += 1
            while namespace + suffix in self.theme_namespaces:
                suffix = ' ' + str(i)
                i += 1
            calculated_namespace = namespace + suffix
            theme = Theme(namespace=calculated_namespace)
            for p in paths:
                if p.startswith(main_dir + '/' + namespace + '/templates/') and p.endswith('.body'):
                    # Strip the '.body' extension from the template name.
                    filename = p.split(main_dir + '/' + namespace + '/templates/')[1][:-5]
                    theme.body_template_names.append(filename)
                    theme.body_template_contents.append(
                        validated_body_template(
                            zipfile.ZipFile.read(compressed, p)))
                elif p.startswith(main_dir + '/' + namespace + '/css/') and p.endswith('.css'):
                    filename = p.split(main_dir + '/' + namespace + '/css/')[1]
                    theme.css_filenames.append(filename)
                    theme.css_contents.append(
                        zipfile.ZipFile.read(compressed, p))
                elif p.startswith(main_dir + '/' + namespace + '/js/') and p.endswith('.js'):
                    filename = p.split(main_dir + '/' + namespace + '/js/')[1]
                    theme.js_filenames.append(filename)
                    theme.js_contents.append(
                        zipfile.ZipFile.read(compressed, p))
                elif p.startswith(
                        main_dir + '/' + namespace + '/images/'
                ) and p != main_dir + '/' + namespace + '/images/':
                    filename = p.split(main_dir + '/' + namespace + '/images/')[1]
                    content_type, _ = mimetypes.guess_type(filename)
                    # NOTE(review): this rebinding shadows the ``data``
                    # parameter (the whole archive) with one file's bytes.
                    data = zipfile.ZipFile.read(compressed, p)
                    # Store image bytes in the blobstore and keep the key.
                    handle = files.blobstore.create(
                        mime_type=content_type,
                        _blobinfo_uploaded_filename=filename)
                    with files.open(handle, 'a') as f:
                        f.write(data)
                    files.finalize(handle)
                    key = files.blobstore.get_blob_key(handle)
                    theme.image_filenames.append(filename)
                    theme.image_keys.append(key)
            key = theme.put()
            self.theme_keys.append(key)
            self.theme_namespaces.append(calculated_namespace)
        self.update()
        if messages:
            # NOTE(review): ``messages`` is never appended to in the
            # visible code, so this raise appears unreachable — confirm.
            raise Exception(messages)
msg.attach(img) for to_address in to_addresses: msg["To"] = to_address if args.track: tracking_uuid = create_tracking_uuid(to_address) altered_email_text = inject_tracking_uuid( email_text, tracking_uuid) msg.attach(MIMEText(altered_email_text, 'html', 'utf-8')) else: msg.attach(MIMEText(email_text, 'html', 'utf-8')) if args.attachment_filename is not None: ctype, encoding = mimetypes.guess_type( args.attachment_filename) if ctype is None or encoding is not None: # No guess could be made, or the file is encoded (compressed), so # use a generic bag-of-bits type. ctype = 'application/octet-stream' maintype, subtype = ctype.split('/', 1) with open(args.attachment_filename, "rb") as attachment_file: inner = MIMEBase(maintype, subtype) inner.set_payload(attachment_file.read()) encoders.encode_base64(inner) inner.add_header('Content-Disposition', 'attachment', filename=args.attachment_filename) msg.attach(inner) server.sendmail(args.from_address, to_address, msg.as_string())
req.respond(HTTP_OK, web.ctype) return content try: fctx = webutil.filectx(web.repo, req) except error.LookupError, inst: try: content = manifest(web, req, tmpl) req.respond(HTTP_OK, web.ctype) return content except ErrorResponse: raise inst path = fctx.path() text = fctx.data() mt = mimetypes.guess_type(path)[0] if mt is None: mt = binary(text) and 'application/octet-stream' or 'text/plain' req.respond(HTTP_OK, mt, path, len(text)) return [text] def _filerevision(web, tmpl, fctx): f = fctx.path() text = fctx.data() parity = paritygen(web.stripecount) if binary(text): mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' text = '(binary:%s)' % mt
def on_get(self, req, resp, name): resp.content_type = mimetypes.guess_type(name)[0] try: resp.stream, resp.stream_len = self._image_store.open(name) except IOError: raise falcon.HTTPNotFound()
def stream_individual_file(self, filesystem_path):
    """
    Return a flask response that's streaming the download of an individual file, and gzip
    compressing it if the browser supports it.
    """
    use_gzip = self.should_use_gzip()
    # gzip compress the individual file, if it hasn't already been compressed
    if use_gzip:
        if filesystem_path not in self.gzip_individual_files:
            gzip_filename = tempfile.mkstemp("wb+")[1]
            self._gzip_compress(filesystem_path, gzip_filename, 6, None)
            # Cache the compressed copy for subsequent requests.
            self.gzip_individual_files[filesystem_path] = gzip_filename
            # Make sure the gzip file gets cleaned up when onionshare stops
            self.cleanup_filenames.append(gzip_filename)
        file_to_download = self.gzip_individual_files[filesystem_path]
        filesize = os.path.getsize(self.gzip_individual_files[filesystem_path])
    else:
        file_to_download = filesystem_path
        filesize = os.path.getsize(filesystem_path)
    path = request.path
    # Tell GUI the individual file started
    history_id = self.cur_history_id
    self.cur_history_id += 1
    # Only GET requests are allowed, any other method should fail
    if request.method != "GET":
        return self.web.error405(history_id)
    self.web.add_request(
        self.web.REQUEST_INDIVIDUAL_FILE_STARTED,
        path,
        {"id": history_id, "filesize": filesize},
    )

    def generate():
        # Generator yielding the file in chunks so Flask can stream it;
        # reports progress to the GUI after every chunk.
        chunk_size = 102400  # 100kb
        fp = open(file_to_download, "rb")
        done = False
        while not done:
            chunk = fp.read(chunk_size)
            if chunk == b"":
                done = True
            else:
                try:
                    yield chunk
                    # Tell GUI the progress
                    downloaded_bytes = fp.tell()
                    percent = (1.0 * downloaded_bytes / filesize) * 100
                    if (
                        not self.web.is_gui
                        or self.common.platform == "Linux"
                        or self.common.platform == "BSD"
                    ):
                        sys.stdout.write(
                            "\r{0:s}, {1:.2f}% ".format(
                                self.common.human_readable_filesize(
                                    downloaded_bytes
                                ),
                                percent,
                            )
                        )
                        sys.stdout.flush()
                    self.web.add_request(
                        self.web.REQUEST_INDIVIDUAL_FILE_PROGRESS,
                        path,
                        {
                            "id": history_id,
                            "bytes": downloaded_bytes,
                            "filesize": filesize,
                        },
                    )
                    done = False
                except:
                    # Looks like the download was canceled
                    done = True
                    # Tell the GUI the individual file was canceled
                    self.web.add_request(
                        self.web.REQUEST_INDIVIDUAL_FILE_CANCELED,
                        path,
                        {"id": history_id},
                    )
        fp.close()
        if self.common.platform != "Darwin":
            sys.stdout.write("\n")

    basename = os.path.basename(filesystem_path)
    r = Response(generate())
    if use_gzip:
        r.headers.set("Content-Encoding", "gzip")
    r.headers.set("Content-Length", filesize)
    # Provide both a plain-ASCII and an RFC 5987 UTF-8 filename variant.
    filename_dict = {
        "filename": unidecode(basename),
        "filename*": "UTF-8''%s" % url_quote(basename),
    }
    r.headers.set("Content-Disposition", "inline", **filename_dict)
    r = self.web.add_security_headers(r)
    # Non-strict guessing so uncommon extensions still get a type.
    (content_type, _) = mimetypes.guess_type(basename, strict=False)
    if content_type is not None:
        r.headers.set("Content-Type", content_type)
    return r
def _get_content_type(filename, real_filename=''): _content_type, encoding = mimetypes.guess_type(real_filename) if not _content_type: _content_type = subprocess.check_output('file -b --mime-type %s' % filename, shell=True) _content_type = re.search(r'([\S]+)', _content_type).group() return _content_type
def do_GET(self):
    """Serve a GET request: dispatch to a ``_<name>`` handler method when
    one exists, otherwise serve a static file from ``HTML_DIR`` with
    conditional-GET, templating (``<!NAME!>`` markers) and optional gzip.
    """
    path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
    params = {}
    content = None
    skip = False
    # Merge POST-style data (when present) and query-string parameters,
    # keeping only the last value for each key.
    if hasattr(self, "data"):
        params.update(_urllib.parse.parse_qs(self.data))
    if query:
        params.update(_urllib.parse.parse_qs(query))
    for key in params:
        if params[key]:
            params[key] = params[key][-1]
    if path == '/':
        path = "index.html"
    path = path.strip('/')
    extension = os.path.splitext(path)[-1].lower()
    if hasattr(self, "_%s" % path):
        # Dynamic endpoint: a method named after the path handles it.
        content = getattr(self, "_%s" % path)(params)
    else:
        path = path.replace('/', os.path.sep)
        path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()
        if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
            path = "%s.html" % path
        if any((config.IP_ALIASES,)) and self.path.split('?')[0] == "/js/main.js":
            # Rewrite the IP_ALIASES table in main.js from configuration.
            content = open(path, "rb").read()
            content = re.sub(r"\bvar IP_ALIASES =.+", "var IP_ALIASES = {%s};" % ", ".join('"%s": "%s"' % (_.split(':', 1)[0].strip(), _.split(':', 1)[-1].strip()) for _ in config.IP_ALIASES), content)
            self.send_response(_http_client.OK)
        elif ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
            # Static file inside HTML_DIR (path-traversal guarded).
            mtime = time.gmtime(os.path.getmtime(path))
            if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)
            # Honour If-Modified-Since for non-HTML assets.
            if if_modified_since and extension not in (".htm", ".html"):
                if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
                if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
                    self.send_response(_http_client.NOT_MODIFIED)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    skip = True
            if not skip:
                content = open(path, "rb").read()
                last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
                self.send_response(_http_client.OK)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
                self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
                # For CSP policy directives see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/
                self.send_header(HTTP_HEADER.CONTENT_SECURITY_POLICY, "default-src 'self'; style-src 'self' 'unsafe-inline'; img-src *; " + "script-src 'self' 'unsafe-eval' https://stat.ripe.net; " + "frame-src *; object-src 'none'; block-all-mixed-content;")
                if extension not in (".htm", ".html"):
                    self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT")  # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
                    self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate")  # Reference: http://stackoverflow.com/a/5084555
                else:
                    self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
        else:
            self.send_response(_http_client.NOT_FOUND)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            # NOTE(review): bytes %-formatting with a str argument fails
            # on Python 3 — confirm self.path is bytes here, or encode it.
            content = b'<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]
    if content is not None:
        if isinstance(content, six.text_type):
            content = content.encode(UNICODE_ENCODING)
        # Expand <!NAME!> template markers via matching _name() methods.
        for match in re.finditer(b"<\\!(\\w+)\\!>", content):
            name = match.group(1).decode(UNICODE_ENCODING)
            _ = getattr(self, "_%s" % name.lower(), None)
            if _:
                content = self._format(content, **{ name: _() })
        # NOTE(review): headers.get() returns None when the header is
        # absent, which would make this 'in' test raise — confirm clients
        # always send Accept-Encoding.
        if "gzip" in self.headers.get(HTTP_HEADER.ACCEPT_ENCODING):
            self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
            _ = six.BytesIO()
            compress = gzip.GzipFile("", "w+b", 9, _)
            compress._stream = _
            compress.write(content)
            compress.flush()
            compress.close()
            content = compress._stream.getvalue()
        self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))
    self.end_headers()
    try:
        if content:
            self.wfile.write(content)
        self.wfile.flush()
    except:
        # Client may have disconnected mid-response; nothing to do.
        pass
for name in files: # Create the enclosing (outer) message outer = MIMEMultipart() outer['Subject'] = 'MESSAGE ' + str(cnt) outer['To'] = 'test' + str(cnt) + '@test.local' outer['From'] = '*****@*****.**' outer.preamble = 'MIME mail okuyucuda gozukmez.\n' attach_file = os.path.join(dirpath, name) filename, file_extension = os.path.splitext(attach_file) eml_file = os.path.join(EML_DIR, os.path.basename(filename) + '.eml') if os.path.isfile(attach_file): print(name, ' dosyasindan eml dosyasi', EML_DIR, ' dizininde olusturuluyor..') path = os.path.join(dirpath, name) ctype, encoding = mimetypes.guess_type(path) if ctype is None or encoding is not None: # No guess could be made, or the file is encoded # (compressed), so # use a generic bag-of-bits type. ctype = 'application/octet-stream' maintype, subtype = ctype.split('/', 1) if maintype == 'text': fp = open(path) # Note: we should handle calculating the charset msg = MIMEText(fp.read(), _subtype=subtype) fp.close() elif maintype == 'image': fp = open(path, 'rb') msg = MIMEImage(fp.read(), _subtype=subtype) fp.close()
def send_file_response(rw, message, content_type='text/plain', status=200, status_remark='OK'):
    """Write a minimal HTTP/1.1 response over *rw*.

    The status line and headers go through ``rw.write`` (text), the body is
    sent raw through ``rw.socket.sendall``.

    Args:
        rw: reader/writer wrapper exposing ``write(str)`` and ``socket.sendall(bytes)``.
        message: response body; ``len(message)`` is used for Content-Length,
            so it should be the exact bytes to send.
        content_type: value for the content-type header.
        status: numeric HTTP status code.
        status_remark: reason phrase for the status line.
    """
    # Status line and headers — HTTP/1.1 requires CRLF line terminators
    # (bare "\n" is only tolerated by lenient clients).
    rw.write("HTTP/1.1 {} {}\r\n".format(status, status_remark))
    rw.write("content-type: {}\r\n".format(content_type))
    rw.write("content-length: {}\r\n".format(len(message)))
    # Blank line ends the header section.
    rw.write("\r\n")
    # Body is sent as raw bytes, bypassing the text writer.
    rw.socket.sendall(message)


if __name__ == "__main__":
    listener = socket.socket()
    listener.bind(("", 8080))
    listener.listen(0)
    while True:
        (sock, addr) = listener.accept()
        rw = reader_writer(sock)
        filename = "Webpage.html"
        with open(filename, 'rb') as file:
            data = file.read()
        # BUG FIX: guess_type() returns a (type, encoding) tuple; the old code
        # passed the whole tuple, producing a header like
        # "content-type: ('text/html', None)". Unpack it and fall back to a
        # generic binary type when no guess could be made.
        mtype, encoding = mimetypes.guess_type(filename)
        print(mtype)
        send_file_response(rw, data, content_type=mtype or 'application/octet-stream')
        sock.close()
def create_Message_with_attachment(sender, to, cc, subject, message_text_plain, attached_file):
    """Build a Gmail-API-ready message dict with one attachment.

    Args:
        sender: value for the From header.
        to: value for the To header.
        cc: value for the CC header.
        subject: value for the Subject header.
        message_text_plain: plain-text body of the message.
        attached_file: path to the file to attach.

    Returns:
        dict with a single key ``'raw'`` holding the urlsafe-base64-encoded
        RFC 2822 message, as expected by the Gmail API ``messages.send``.

    Raises:
        OSError: if *attached_file* cannot be opened or read.
    """
    message = MIMEMultipart()
    message['to'] = to
    message['CC'] = cc
    message['from'] = sender
    message['subject'] = subject
    message.attach(MIMEText(message_text_plain, 'plain'))

    # Guess the attachment's MIME type from its name; fall back to a generic
    # binary type when no guess is possible or the file is itself encoded
    # (e.g. gzip-compressed).
    my_mimetype, encoding = mimetypes.guess_type(attached_file)
    if my_mimetype is None or encoding is not None:
        my_mimetype = 'application/octet-stream'
    main_type, sub_type = my_mimetype.split('/', 1)

    # FIX: use context managers so the file handle is closed even if read()
    # or the MIME constructor raises (the old open()/close() pairs leaked on
    # error). Behavior is otherwise unchanged.
    if main_type == 'text':
        # Text mode ('r') is deliberate: MIMEText wants str, not bytes
        # ('rb' raised: 'bytes' object has no attribute 'encode').
        with open(attached_file, 'r') as temp:
            attachement = MIMEText(temp.read(), _subtype=sub_type)
    elif main_type == 'image':
        with open(attached_file, 'rb') as temp:
            attachement = MIMEImage(temp.read(), _subtype=sub_type)
    elif main_type == 'audio':
        with open(attached_file, 'rb') as temp:
            attachement = MIMEAudio(temp.read(), _subtype=sub_type)
    elif main_type == 'application' and sub_type == 'pdf':
        with open(attached_file, 'rb') as temp:
            attachement = MIMEApplication(temp.read(), _subtype=sub_type)
    else:
        # Unknown type: raw payload, base64-encoded for safe transport.
        attachement = MIMEBase(main_type, sub_type)
        with open(attached_file, 'rb') as temp:
            attachement.set_payload(temp.read())
        encoders.encode_base64(attachement)

    filename = os.path.basename(attached_file)
    # name preview in email
    attachement.add_header('Content-Disposition', 'attachment', filename=filename)
    message.attach(attachement)

    # The Gmail API wants the whole RFC 2822 message as urlsafe base64 text
    # (str, not bytes — hence the final decode for JSON serializability).
    message_as_bytes = message.as_bytes()
    message_as_base64 = base64.urlsafe_b64encode(message_as_bytes)
    raw = message_as_base64.decode()
    return {'raw': raw}
def route(conn,request):
    """Serve a parsed HTTP request from the filesystem rooted at ROOT.

    Renders a directory listing for "/" and for existing directories,
    streams existing files with conditional-request (304) and client-cache
    headers, and renders a 404 page otherwise.  All output goes through the
    project helper ``send_answer``.
    """
    charset = DEFAULT_CHARSET
    headers = []
    # Map the request path onto the filesystem under ROOT.
    filepath = os.path.normpath( os.path.join(ROOT,request.address.strip('/')) )
    typ,enc = mimetypes.guess_type(filepath)
    if typ is None:
        typ = 'application/octet-stream'
    # NOTE(review): unquote runs AFTER guess_type, so percent-encoded
    # extensions are type-guessed in encoded form — confirm this is intended.
    filepath = unquote(filepath)
    print('-'* 10)
    print(filepath,typ)
    if request.address == "/":
        # Root: render a listing of ROOT itself.
        answer = render_html(ROOT)
        return send_answer(conn, typ="text/html", charset=charset, data=answer)
    else:
        # if the requested resource exists, serve it
        if os.path.exists(filepath):
            # if the path is a directory, render a file listing
            if os.path.isdir(filepath):
                # generate HTML for rendering the file listing
                answer = render_html(request.address,charset=DEFAULT_CHARSET)
                # send the data to the client (browser); listings are never cached
                send_answer(conn, typ="text/html", charset=DEFAULT_CHARSET,
                            data=answer,
                            headers=[("Cache-Control", "no-cache")] )
            # otherwise display the resource in the browser
            else:
                # Conditional-request handling: either validator failing
                # means the client's cached copy is still fresh.
                modified = True
                headers = dict(request.headers)
                if 'If-Modified-Since' in headers:
                    if not is_modified_since(headers['If-Modified-Since'],filepath):
                        modified = False
                if 'If-None-Match' in headers:
                    if not is_none_match(headers['If-None-Match'],filepath):
                        modified = False
                if 'Cache-Control' in headers:
                    # NOTE(review): get_params_from_header may return a string,
                    # in which case `== 0` never matches — confirm its return type.
                    max_age = get_params_from_header(headers['Cache-Control'], "max-age", delim=',')
                    if max_age == 0:
                        pass # ??
                if not modified:
                    # if the resource has not changed, send 304 so the client
                    # (browser) uses its cached copy
                    gmdate = time_to_rfc2616()
                    timetuple = time_last_modified_source(filepath).timetuple()
                    last_modified = time_to_rfc2616(timetuple)
                    headers = [ ("Date", gmdate),
                                ("ETag",etag(filepath)),
                                ("Last-Modified",last_modified) ]
                    # NOTE(review): the headers list built just above is never
                    # used; `headers=-1` looks like it should be
                    # `headers=headers` — verify against send_answer's contract.
                    return send_answer(conn, status="304 Not Modified", headers=-1 )
                # if the file is text, detect its encoding so the browser
                # can display it correctly
                if text_types.match(typ):
                    charset = detect_encoding(filepath)
                # otherwise show a file-save dialog
                else:
                    # add headers so the browser shows a save-file dialog
                    if not browser_types.match(typ):
                        headers = [ ('Content-Description', 'File Transfer'),
                                    ('Content-Transfer-Encoding','binary'),
                                    ('Content-Disposition', 'attachment;filename=%s' % quote( os.path.basename(filepath) )) ]
                        charset = None
                #------------------------------------
                data,size = read_file(filepath)
                timetuple = time_last_modified_source(filepath).timetuple()
                #print(timetuple)
                last_modified = time_to_rfc2616(timetuple)
                # add client-side caching headers
                # NOTE(review): this rebuild discards the Content-Disposition
                # headers set in the branch above — confirm downloads still work.
                headers = []
                headers.append(("ETag",etag(filepath)))
                headers.append(("Last-Modified",last_modified))
                headers.append(("Cache-Control",
                    # "max-age=%s" % MAX_AGE))
                    "max-age=%s, must-revalidate" % MAX_AGE))
                    #"max-age=%s, must-revalidate, private, no-cache" % 600)) # with no-cache it does not cache
                send_answer(conn, typ=typ, charset=charset, data=data,
                            binary=True, headers=headers )
                #-------------------------------------
        # if the file does not exist
        else:
            # generate HTML for rendering the error page
            answer = render_error( charset=charset,
                                   title='Ой! Ошибочка вышла...',
                                   status_code=404,
                                   message='Page Not Found',
                                   traceback=filepath)
            # send the data to the client (browser)
            send_answer(conn, typ="text/html", status="404 Not Found", charset=charset, data=answer)