def guess_type(self, url, strict=True):
    scheme, url = urllib.parse.splittype(url)
    if scheme == 'data':
        comma = url.find(',')
        if comma < 0:
            return (None, None)
        semi = url.find(';', 0, comma)
        if semi >= 0:
            type = url[:semi]
        else:
            type = url[:comma]
        if '=' in type or '/' not in type:
            type = 'text/plain'
        return (type, None)
    base, ext = posixpath.splitext(url)
    while ext in self.suffix_map:
        base, ext = posixpath.splitext(base + self.suffix_map[ext])
    if ext in self.encodings_map:
        encoding = self.encodings_map[ext]
        base, ext = posixpath.splitext(base)
    else:
        encoding = None
    types_map = self.types_map[True]
    if ext in types_map:
        return (types_map[ext], encoding)
    if ext.lower() in types_map:
        return (types_map[ext.lower()], encoding)
    if strict:
        return (None, encoding)
    types_map = self.types_map[False]
    if ext in types_map:
        return (types_map[ext], encoding)
    if ext.lower() in types_map:
        return (types_map[ext.lower()], encoding)
    return (None, encoding)
def _extract_file_format(self, url, headers):
    """
    Makes a best guess at the file format.

    /path/to/a_file.csv has format "CSV"
    /path/to/a_file.csv.zip has format "CSV / Zip"

    First this function tries to extract the file-extensions from the url,
    and deduce the format from there. If no file-extension is found, then
    the mimetype from the headers is passed to `mimetypes.guess_extension()`.
    """
    formats = []
    parsed_url = urlparse.urlparse(url)
    path = parsed_url.path
    base, extension = posixpath.splitext(path)
    while extension:
        formats.append(extension[1:].upper())  # strip leading '.' from extension
        base, extension = posixpath.splitext(base)
    if formats:
        extension = ".".join(formats[::-1]).lower()
        format_tuple = ckan_helpers.resource_formats().get(extension)
        if format_tuple:
            return format_tuple[1]
        return " / ".join(formats[::-1])

    # No file extension found, attempt to extract format using the mimetype
    stripped_mimetype = self._extract_mimetype(headers)  # stripped of charset
    format_tuple = ckan_helpers.resource_formats().get(stripped_mimetype)
    if format_tuple:
        return format_tuple[1]
    extension = mimetypes.guess_extension(stripped_mimetype)
    if extension:
        return extension[1:].upper()
def _extract_file_format(self, url, headers):
    """
    Makes a best guess at the file format.

    Returns a list of strings, with formats[0] being the outermost format.
    If no format can be found, then returns an empty list.

    /path/to/a_file.csv has format "csv"
    /path/to/a_file.csv.gz.torrent has format "torrent:gz:csv" (and inner-form "csv")

    First this function tries to extract the file-extensions from the url,
    and deduce the format from there. If no file-extension is found, then
    the mimetype from the headers is passed to `mimetypes.guess_extension()`.
    """
    # Try to extract format from the file extension(s)
    formats = []
    parsed_url = urlparse.urlparse(url)
    path = parsed_url.path
    base, extension = posixpath.splitext(path)
    while extension:
        formats.append(extension[1:])  # strip leading '.' from extension
        base, extension = posixpath.splitext(base)
    if formats:
        return formats

    # No file extension found, attempt to extract format using the mimetype
    stripped_mimetype = self._extract_mimetype(headers)  # stripped of charset
    extension = mimetypes.guess_extension(stripped_mimetype)
    return [extension[1:]] if extension else []
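# Editor's sketch (not part of the original corpus): the extension-peeling
# loop shared by both _extract_file_format variants above, shown standalone.
# posixpath.splitext only strips the last suffix, so it is applied
# repeatedly to the shrinking base until nothing is left.
import posixpath

path = '/path/to/a_file.csv.gz.torrent'
formats = []
base, extension = posixpath.splitext(path)
while extension:
    formats.append(extension[1:])  # drop the leading '.'
    base, extension = posixpath.splitext(base)
print(formats)  # ['torrent', 'gz', 'csv'] -- outermost format first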
def check_make_bibliography(engine, filenames):
    allowed_exts = {'.bst', '.bib', '.aux'}
    filenames_by_ext = dict(
        (posixpath.splitext(filename)[1], filename) for filename in filenames
    )
    engine_name = engine.__name__.rsplit('.', 1)[-1]
    for ext in filenames_by_ext:
        if ext not in allowed_exts:
            raise ValueError(ext)
    with cd_tempdir() as tempdir:
        copy_files(filenames)
        bib_name = posixpath.splitext(filenames_by_ext['.bib'])[0]
        bst_name = posixpath.splitext(filenames_by_ext['.bst'])[0]
        if '.aux' not in filenames_by_ext:
            write_aux('test.aux', bib_name, bst_name)
            filenames_by_ext['.aux'] = 'test.aux'
        with errors.capture() as captured_errors:
            # FIXME check error messages
            engine.make_bibliography(filenames_by_ext['.aux'])
        result_name = posixpath.splitext(filenames_by_ext['.aux'])[0] + '.bbl'
        with io.open_unicode(result_name) as result_file:
            result = result_file.read()
        correct_result_name = '{0}_{1}.{2}.bbl'.format(bib_name, bst_name, engine_name)
        correct_result = pkgutil.get_data(
            'pybtex.tests.data', correct_result_name
        ).decode(io.get_default_encoding())
        assert result == correct_result, diff(correct_result, result)
def write_latex(outdir, images, prefix, query_image):
    otex = posixpath.join(outdir, '{}.tex'.format(prefix))
    with open(otex, 'w') as f:
        print(r'''\documentclass{article}
\usepackage{graphicx}
\usepackage{fullpage}
\usepackage{paralist}
\usepackage{multirow}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{amssymb,amsmath}
\usepackage{tikz}
\usetikzlibrary{arrows}
\begin{document}''', file=f)
        x = query_image
        pname = posixpath.join(outdir, '{}query{}'.format(prefix, posixpath.splitext(x)[1]))
        shutil.copyfile(x, pname)
        print(r'''\begin{figure}[h]
\centering
\includegraphics[width=2.0in]{%s}
\caption{query}
\label{fig:%s}
\end{figure}''' % (posixpath.split(pname)[1], prefix + 'query'), file=f)
        print(r'\begin{figure}', file=f)
        for i, x in enumerate(images):
            pname = posixpath.join(outdir, '{}{:03}{}'.format(prefix, i, posixpath.splitext(x)[1]))
            shutil.copyfile(x, pname)
            print(r'''\begin{minipage}[b]{.5\linewidth}
\centering
\includegraphics[width=1.0in]{%s}
\subcaption{A subfigure}\label{fig:%s}
\end{minipage}''' % (posixpath.split(pname)[1], prefix + str(i)), file=f)
        print(r'\end{figure}', file=f)
        print(r'''\end{document}''', file=f)
def dnd_get_image(md, image_exts=IMAGE_EXTENSIONS):
    '''
    Get the image in the QMimeData object md.

    :return: None, None if no image is found
             QPixmap, None if an image is found, the pixmap is guaranteed not null
             url, filename if a URL that points to an image is found
    '''
    if dnd_has_image(md):
        for x in md.formats():
            x = unicode(x)
            if x.startswith('image/'):
                cdata = bytes(md.data(x))
                pmap = QPixmap()
                pmap.loadFromData(cdata)
                if not pmap.isNull():
                    return pmap, None
                break

    # No image, look for a URL pointing to an image
    if md.hasUrls():
        urls = [unicode(u.toString()) for u in md.urls()]
        purls = [urlparse(u) for u in urls]
        # First look for a local file
        images = [u2p(x) for x in purls if x.scheme in ('', 'file')]
        images = [x for x in images if
                  posixpath.splitext(urllib.unquote(x))[1][1:].lower() in image_exts]
        images = [x for x in images if os.path.exists(x)]
        p = QPixmap()
        for path in images:
            try:
                with open(path, 'rb') as f:
                    p.loadFromData(f.read())
            except:
                continue
            if not p.isNull():
                return p, None

        # No local images, look for remote ones

        # First, see if this is from Firefox
        rurl, fname = get_firefox_rurl(md, image_exts)
        if rurl and fname:
            return rurl, fname
        # Look through all remaining URLs
        remote_urls = [x for x in purls if
                       x.scheme in ('http', 'https', 'ftp') and
                       posixpath.splitext(x.path)[1][1:].lower() in image_exts]
        if remote_urls:
            rurl = remote_urls[0]
            fname = posixpath.basename(urllib.unquote(rurl.path))
            return urlunparse(rurl), fname

    return None, None
def guess_type(self, url, strict=True):
    """Guess the type of a file based on its URL.

    Return value is a tuple (type, encoding) where type is None if the
    type can't be guessed (no or unknown suffix) or a string of the form
    type/subtype, usable for a MIME Content-type header; and encoding is
    None for no encoding or the name of the program used to encode
    (e.g. compress or gzip). The mappings are table driven. Encoding
    suffixes are case sensitive; type suffixes are first tried case
    sensitive, then case insensitive.

    The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
    to '.tar.gz'. (This is table-driven too, using the dictionary
    suffix_map.)

    Optional `strict' argument when False adds a bunch of commonly found,
    but non-standard types.
    """
    scheme, url = urllib.splittype(url)
    if scheme == 'data':
        comma = url.find(',')
        if comma < 0:
            return (None, None)
        semi = url.find(';', 0, comma)
        if semi >= 0:
            type = url[:semi]
        else:
            type = url[:comma]
        if '=' in type or '/' not in type:
            type = 'text/plain'
        return (type, None)
    else:
        base, ext = posixpath.splitext(url)
        while ext in self.suffix_map:
            base, ext = posixpath.splitext(base + self.suffix_map[ext])
        if ext in self.encodings_map:
            encoding = self.encodings_map[ext]
            base, ext = posixpath.splitext(base)
        else:
            encoding = None
        types_map = self.types_map[True]
        if ext in types_map:
            return (types_map[ext], encoding)
        if ext.lower() in types_map:
            return (types_map[ext.lower()], encoding)
        if strict:
            return (None, encoding)
        types_map = self.types_map[False]
        if ext in types_map:
            return (types_map[ext], encoding)
        if ext.lower() in types_map:
            return (types_map[ext.lower()], encoding)
        return (None, encoding)
def songbook_rename(path, title):
    """Rename songbook at path to new-style path based off title.

    Returns new path"""
    old_path = posixpath.normpath(path)
    old_path_base = posixpath.splitext(old_path)[0]
    new_path = posixpath.normpath(
        c.gen_unique_path('songbooks/%s.xml', title, orig_path=old_path))
    new_path_base = posixpath.splitext(new_path)[0]
    for fn in glob.glob(old_path_base + '.*'):  # glob because of comments
        fn = posixpath.normpath(fn)
        os.rename(fn, fn.replace(old_path_base, new_path_base))
    return new_path
def Extract(path, Result):
    for Files in Result:
        try:
            name = use.splitext(use.basename(Files))[0]
            os.mkdir(use.join(path, name))
            os.chdir(use.join(path, name))
            for files in zipfile.ZipFile(Files).namelist():
                if use.dirname(files) == '':
                    if use.splitext(files)[1] == '.txt':
                        zipfile.ZipFile(Files).extract(files)
            os.chdir(path)
        except OSError:
            pass
def guess_type(url):
    """Guess the type of a file based on its URL.

    Return value is a tuple (type, encoding) where type is None if the
    type can't be guessed (no or unknown suffix) or a string of the form
    type/subtype, usable for a MIME Content-type header; and encoding is
    None for no encoding or the name of the program used to encode
    (e.g. compress or gzip). The mappings are table driven. Encoding
    suffixes are case sensitive; type suffixes are first tried case
    sensitive, then case insensitive.

    The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
    to ".tar.gz". (This is table-driven too, using the dictionary
    suffix_map).
    """
    if not inited:
        init()
    scheme, url = urllib.splittype(url)
    if scheme == 'data':
        # syntax of data URLs:
        # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
        # mediatype := [ type "/" subtype ] *( ";" parameter )
        # data      := *urlchar
        # parameter := attribute "=" value
        # type/subtype defaults to "text/plain"
        comma = string.find(url, ',')
        if comma < 0:
            # bad data URL
            return None, None
        semi = string.find(url, ';', 0, comma)
        if semi >= 0:
            type = url[:semi]
        else:
            type = url[:comma]
        if '=' in type or '/' not in type:
            type = 'text/plain'
        return type, None  # never compressed, so encoding is None
    base, ext = posixpath.splitext(url)
    while suffix_map.has_key(ext):
        base, ext = posixpath.splitext(base + suffix_map[ext])
    if encodings_map.has_key(ext):
        encoding = encodings_map[ext]
        base, ext = posixpath.splitext(base)
    else:
        encoding = None
    if types_map.has_key(ext):
        return types_map[ext], encoding
    elif types_map.has_key(string.lower(ext)):
        return types_map[string.lower(ext)], encoding
    else:
        return None, encoding
def HTML(path):
    file_ = open(use.join(path, use.basename(path) + '.htm'), "w")
    Str = "<html><title>" + use.basename(path) + "</title><body>"
    Dict = {}
    for files in os.listdir(path):
        if use.splitext(files)[1] == '.txt':
            Dict[int(use.splitext(files)[0])] = files
    list_ = Dict.keys()
    list_.sort()
    for files in list_:
        Str += "<img src=" + Dict[files] + ">"
    file_.write(Str + "<br><br><font color='red'>© AUTOMATIC file generator</font></body></center></html>")
    file_.close()
def _MaybeMarkdown(self, path):
    if posixpath.splitext(path)[1] != '.html':
        return path

    dirname, file_name = posixpath.split(path)
    if dirname != '':
        dirname = dirname + '/'
    file_list = self.file_system.ReadSingle(dirname).Get()
    if file_name in file_list:
        return path

    if posixpath.splitext(file_name)[0] + '.md' in file_list:
        return posixpath.splitext(path)[0] + '.md'
    return path
def _MaybeMarkdown(self, path): if posixpath.splitext(path)[1] != ".html": return path dirname, file_name = posixpath.split(path) if dirname != "": dirname = dirname + "/" file_list = self.file_system.ReadSingle(dirname).Get() if file_name in file_list: return path if posixpath.splitext(file_name)[0] + ".md" in file_list: return posixpath.splitext(path)[0] + ".md" return path
def decode_mtl_texture(line, effect, aux_file_loader):
    texture_data = aux_file_loader(line)
    if texture_data is None:
        return (None, None)

    texture_slug = slugify(posixpath.splitext(line)[0])
    texture_path = texture_slug + posixpath.splitext(line)[1]

    cimage = collada.material.CImage(texture_slug, "./%s" % texture_path)
    cimage.data = texture_data
    surface = collada.material.Surface(texture_slug + "-surface", cimage)
    sampler = collada.material.Sampler2D(texture_slug + "-sampler", surface)
    _map = collada.material.Map(sampler, "TEX0")
    effect.params.append(surface)
    effect.params.append(sampler)
    return (cimage, _map)
def splitext(path, force_posix=False):
    """
    Return a tuple of strings (basename, extension) for a path. The basename is
    the file name minus its extension. Return an empty extension string for a
    directory. A directory is identified by ending with a path separator. Not
    the same as os.path.splitext.

    For example:
    >>> expected = 'path', '.ext'
    >>> assert expected == splitext('C:\\dir\path.ext')

    Directories even with dotted names have no extension:
    >>> import ntpath
    >>> expected = 'path.ext', ''
    >>> assert expected == splitext('C:\\dir\\path.ext' + ntpath.sep)

    >>> expected = 'path.ext', ''
    >>> assert expected == splitext('/dir/path.ext/')

    >>> expected = 'file', '.txt'
    >>> assert expected == splitext('/some/file.txt')

    Composite extensions for tarballs are properly handled:
    >>> expected = 'archive', '.tar.gz'
    >>> assert expected == splitext('archive.tar.gz')
    """
    base_name = EMPTY_STRING
    extension = EMPTY_STRING
    if not path:
        return base_name, extension

    ppath = as_posixpath(path)
    name = resource_name(path, force_posix)
    name = name.strip(POSIX_PATH_SEP + WIN_PATH_SEP)
    if ppath.endswith(POSIX_PATH_SEP):
        # directories never have an extension
        base_name = name
        extension = EMPTY_STRING
    elif name.startswith(DOT) and DOT not in name[1:]:
        # .dot files base name is the full name and they do not have an extension
        base_name = name
        extension = EMPTY_STRING
    else:
        base_name, extension = posixpath.splitext(name)
        # handle composed extensions of tar.gz, bz, xz, etc.
        if base_name.endswith(b'.tar' if on_linux else '.tar'):
            base_name, extension2 = posixpath.splitext(base_name)
            extension = extension2 + extension
    return base_name, extension
def unzip(f):
    z = mkZipFileRd(f)
    names = z.namelist()
    if len(names) != 1:
        raise IOError('more than one item in zip file; which to use? %s' % names)  # noqa
    member = names[0]
    log.info('extracting %s from %s', member, f)
    # x.zip -> x -> x
    # x.db.zip -> x.db -> x
    destdir = splitext(splitext(f)[0])[0]
    dest = destdir + '.db'
    z.extract(member, destdir)
    rename(path_join(destdir, member), dest)
    rmdir(destdir)
    return dest
def minify_sources(sources, ext, fs_root='', timestamp=False):
    """Use utilities to minify javascript or css.

    :param sources: Paths of source files
    :param ext: Type of files
    :param fs_root: root of file (normally public dir)
    :type sources: string
    :type ext: js or css
    :type fs_root: string

    :returns: List of paths to minified sources
    """
    if 'js' in ext:
        js_minify = JavascriptMinify()
    minified_sources = []

    for source in sources:
        # generate full path to source
        no_ext_source = path.splitext(source)[0]
        full_source = path.join(fs_root, source.lstrip('/'))
        # generate minified source path
        no_ext_full_source = path.splitext(full_source)[0]
        minified = no_ext_full_source + ext

        f_minified_source = open(minified, 'w')
        try:
            # minify js source (read stream is auto-closed inside)
            if 'js' in ext:
                js_minify.minify(open(full_source, 'r'), f_minified_source)
            # minify css source
            if 'css' in ext:
                sheet = cssutils.parseFile(full_source)
                cssutils.setSerializer(CSSUtilsMinificationSerializer())
                cssutils.ser.prefs.useMinified()
                f_minified_source.write(sheet.cssText)
        finally:
            f_minified_source.close()

        if no_ext_source.endswith('COMBINED'):
            minified_sources.append(no_ext_source + ext)
        else:
            minified_sources.append(no_ext_source + generate_timestamp(timestamp) + ext)
    return minified_sources
def guess_type(self, path):
    """Guess the type of a file.

    Argument is a PATH (a filename).

    Return value is a string of the form type/subtype,
    usable for a MIME Content-type header.

    The default implementation looks the file's extension
    up in the table self.extensions_map, using application/octet-stream
    as a default; however it would be permissible (if
    slow) to look inside the data to make a better guess.
    """
    base, ext = posixpath.splitext(path)
    if ext in self.extensions_map:
        return self.extensions_map[ext]
    ext = ext.lower()
    if ext in self.extensions_map:
        return self.extensions_map[ext]
    else:
        return self.extensions_map['']
def _CompileContent(self, path, text):
    assert text is not None, path
    try:
        _, ext = posixpath.splitext(path)
        mimetype = _MIMETYPE_OVERRIDES.get(ext, mimetypes.guess_type(path)[0])
        if ext == '.md':
            # See http://pythonhosted.org/Markdown/extensions
            # for details on "extensions=".
            content = markdown(ToUnicode(text),
                               extensions=('extra', 'headerid', 'sane_lists'))
            mimetype = 'text/html'
            if self._supports_templates:
                content = Motemplate(content, name=path)
        elif mimetype is None:
            content = text
            mimetype = 'text/plain'
        elif mimetype == 'text/html':
            content = ToUnicode(text)
            if self._supports_templates:
                content = Motemplate(content, name=path)
        elif (mimetype.startswith('text/') or
              mimetype in ('application/javascript', 'application/json')):
            content = ToUnicode(text)
        else:
            content = text
        return ContentAndType(content,
                              mimetype,
                              self.file_system.Stat(path).version)
    except Exception as e:
        logging.warn('In file %s: %s' % (path, e.message))
        return ContentAndType('', mimetype, self.file_system.Stat(path).version)
def _StartClient(vm, server_ip, client_thread_count):
    """Pushes and starts the client workload script.

    Args:
      vm: The client VM.
      server_ip: The server's ip address.
      client_thread_count: The client thread count used for this particular run.

    Returns:
      Stdout from CLIENT_SCRIPT

    Raises:
      ClientWorkloadScriptExecutionError: if an error occurred during execution
        of CLIENT_SCRIPT (detected by looking at stderr).
    """
    stdout, stderr = vm.RemoteCommand(
        'python {0} --server={1}:{2} --image_directory={3} '
        '--runtime={4} --num_threads={5}'.format(
            posixpath.join(INSTALL_DIR, CLIENT_SCRIPT), server_ip, SERVER_PORT,
            posixpath.join(INSTALL_DIR,
                           posixpath.splitext(ILSVRC_VALIDATION_IMAGES_TAR)[0]),
            FLAGS.tf_serving_runtime, client_thread_count),
        should_log=True)

    # Ensure that stderr from the client script is empty.
    # If it is, stderr from the remote command should contain a single line:
    # Warning: Permanently added {ip} (ECDSA) to the list of known hosts.
    if len(stderr.splitlines()) > 1:
        raise ClientWorkloadScriptExecutionError(
            'Exception occurred during execution of client script: {0}'.format(
                stderr))

    return stdout
def _get(self, key):
    if not self._filter(key):
        raise KeyError(key)
    try:
        path = self.frs.joinpath(self.vpath, key)
        st = self.frs.stat(path)
    except OSError:
        raise KeyError(key)

    if stat.S_ISDIR(st.st_mode):
        obj = Folder(self.frs, path)
    elif stat.S_ISREG(st.st_mode):
        ext = posixpath.splitext(path)[1].lower()
        if ext in [".gif", ".bmp", ".jpg", ".jpeg", ".png"]:
            obj = Image(self.frs, path)
        elif ext in [".html", ".htm", ".rst", ".md"]:
            obj = Page(self.frs, path)
        else:
            obj = File(self.frs, path)
    else:
        raise KeyError(key)

    obj.__parent__ = self
    obj.__name__ = key
    return obj
def IsVirus(mi, log):
    """Test: a virus is any message with an attached executable

    I've also noticed the viruses come in as wav and midi attachements
    so I trigger on those as well.

    This is a very paranoid detector, since someone might send me a
    binary for valid reasons. I white-list everyone who's sent me
    email before so it doesn't affect me.
    """
    for part in mi.msg.walk():
        if part.get_main_type() == 'multipart':
            continue
        filename = part.get_filename()
        if filename is None:
            if part.get_type() in ["application/x-msdownload",
                                   "audio/x-wav", "audio/x-midi"]:
                log.pass_test(VIRUS)
                return ("it has a virus-like content-type (%s)" %
                        part.get_type())
        else:
            extensions = "bat com exe pif ref scr vbs wsh".split()
            base, ext = posixpath.splitext(filename)
            if ext[1:].lower() in extensions:
                log.pass_test(VIRUS)
                return "it has a virus-like attachment (%s)" % ext[1:]
    return False
def _get(self, key):
    key = key.encode('utf-8')  # key is unicode by default
    if not self._filter(key):
        raise KeyError(key)
    try:
        path = self.frs.joinpath(self.vpath, key)
        st = self.frs.stat(path)
    except OSError:
        raise KeyError(key)

    if stat.S_ISDIR(st.st_mode):
        obj = Folder(self.frs_name, path)
    elif stat.S_ISREG(st.st_mode):
        obj = File(self.frs_name, path)
        ext = posixpath.splitext(path)[1].lower()
        if ext in ['.gif', '.bmp', '.jpg', '.jpeg', '.png']:
            alsoProvides(obj, IFRSImage)
        elif ext in ['.html', '.htm', '.txt', '.rst']:
            alsoProvides(obj, IFRSDocument)
        else:
            metadata = obj.metadata
            dc_type = metadata.get('main', {}).get('contenttype', '')
            if dc_type == 'Document':
                alsoProvides(obj, IFRSDocument)
            elif dc_type == 'Image':
                alsoProvides(obj, IFRSImage)
    else:
        raise KeyError(key)

    obj.__parent__ = self
    obj.__name__ = key
    return obj
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    base, ext = posixpath.splitext(path)
    if base.lower().endswith('.tar'):
        ext = base[-4:] + ext
        base = base[:-4]
    return base, ext
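# Editor's sanity check (not from the source) for the tarball-aware splitext
# above: a trailing '.tar' on the base is folded back into the extension.
assert splitext('archive.tar.gz') == ('archive', '.tar.gz')
assert splitext('archive.zip') == ('archive', '.zip')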
def render(self, context):
    """
    Build the filepath by appending the extension.
    """
    module_path = self.path.resolve(context)
    if not settings.SYSTEMJS_ENABLED:
        if settings.SYSTEMJS_DEFAULT_JS_EXTENSIONS:
            name, ext = posixpath.splitext(module_path)
            if not ext:
                module_path = '{}.js'.format(module_path)

        if settings.SYSTEMJS_SERVER_URL:
            tpl = """<script src="{url}{app}" type="text/javascript"></script>"""
        else:
            tpl = """<script type="text/javascript">System.import('{app}');</script>"""
        return tpl.format(app=module_path, url=settings.SYSTEMJS_SERVER_URL)

    # else: create a bundle
    rel_path = System.get_bundle_path(module_path)
    url = staticfiles_storage.url(rel_path)

    tag_attrs = {'type': 'text/javascript'}
    for key, value in self.tag_attrs.items():
        if not isinstance(value, bool):
            value = value.resolve(context)
        tag_attrs[key] = value

    return """<script{attrs} src="{url}"></script>""".format(
        url=url, attrs=flatatt(tag_attrs)
    )
def WriteSourceVariables(out, target, project):
    # gn separates the sheep from the goats based on file extensions.
    # A full separation is done here because of flag handling (see Compile flags).
    source_types = {'cxx': [], 'c': [], 'asm': [], 'obj': [], 'obj_target': [],
                    'input': [], 'other': []}

    # TODO .def files on Windows
    for source in target.properties.get('sources', []):
        _, ext = posixpath.splitext(source)
        source_abs_path = project.GetAbsolutePath(source)
        source_types[source_file_types.get(ext, 'other')].append(source_abs_path)

    for input_path in target.properties.get('inputs', []):
        input_abs_path = project.GetAbsolutePath(input_path)
        source_types['input'].append(input_abs_path)

    # OBJECT library dependencies need to be listed as sources.
    # Only executables and non-OBJECT libraries may reference an OBJECT library.
    # https://gitlab.kitware.com/cmake/cmake/issues/14778
    if target.gn_type in gn_target_types_that_absorb_objects:
        object_dependencies = set()
        project.GetObjectSourceDependencies(target.gn_name, object_dependencies)
        for dependency in object_dependencies:
            cmake_dependency_name = GetCMakeTargetName(dependency)
            obj_target_sources = '$<TARGET_OBJECTS:' + cmake_dependency_name + '>'
            source_types['obj_target'].append(obj_target_sources)

    sources = {}
    for source_type, sources_of_type in source_types.items():
        if sources_of_type:
            sources[source_type] = '${target}__' + source_type + '_srcs'
            SetVariableList(out, sources[source_type], sources_of_type)
    return sources
def get_type(self):
    """
    Guess type according to the file extension. Might not be the best way
    to do it, but it works for now.

    Items can be of type:
      - ITEM_UNKNOWN = 0
      - ITEM_IMAGE = 1
      - ITEM_STYLE = 2
      - ITEM_SCRIPT = 3
      - ITEM_NAVIGATION = 4
      - ITEM_VECTOR = 5
      - ITEM_FONT = 6
      - ITEM_VIDEO = 7
      - ITEM_AUDIO = 8
      - ITEM_DOCUMENT = 9

    We map type according to the extensions which are defined in
    ebooklib.EXTENSIONS.

    :Returns:
      Returns type of the item as number.
    """
    _, ext = zip_path.splitext(self.get_name())
    ext = ext.lower()

    for uid, ext_list in six.iteritems(ebooklib.EXTENSIONS):
        if ext in ext_list:
            return uid

    return ebooklib.ITEM_UNKNOWN
def organization_avatar_upload(instance, filename):
    import posixpath
    import uuid

    _, file_ext = posixpath.splitext(filename)
    final_filename = uuid.uuid4().hex + file_ext
    return posixpath.join('avatars', 'organization', instance.name,
                          final_filename)
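# Illustrative only (an editor's note, not from the source): as a Django
# ``upload_to`` callback, the instance and filename arrive from the
# framework, and the uuid4 hex makes the result non-deterministic. For an
# organization named 'acme' and an upload 'logo.PNG' the result has the
# shape 'avatars/organization/acme/<32-hex-chars>.PNG' -- note the original
# extension keeps its case.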
def compute_fingerprint(self, path, filename):
    normalized_filename = posixpath.basename(filename).lower()
    date_bytes = str(self.extract_date(path)).encode('utf8')
    filename_root, filename_ext = posixpath.splitext(normalized_filename)  # @UnusedVariable
    return (hashlib.md5(normalized_filename.encode('utf8') + date_bytes).digest()[:4]
            + filename_root[-2:].encode('utf8'))
def _launch_simulations(self, remote_filepaths):
    jobids = set()
    ftp = None

    try:
        ftp = self._client.open_sftp()

        for remote_filepath in remote_filepaths:
            bsub_data = self._create_bsub(remote_filepath)
            bsub_filepath = posixpath.splitext(remote_filepath)[0] + '.lsf'
            ftp.open(bsub_filepath, 'w').write(bsub_data)

            command = 'bsub < %s' % bsub_filepath
            _, stdout, _ = self._client.exec_command(command)
            logging.debug('"%s" sent', command)

            stdout = list(stdout)
            if not stdout:
                raise IOError("Problem in submitting job")

            jobid = int(self._JOBID_PATTERN.findall(stdout[0])[0])
            logging.debug("jobid: %i", jobid)

            jobids.add(jobid)
            self._jobids.add(jobid)  # Add to allow job kill on exception
    finally:
        if ftp is not None:
            ftp.close()

    return jobids
def guess_type(self, path):
    """Guess the type of a file.

    Argument is a PATH (a filename).

    Return value is a string of the form type/subtype,
    usable for a MIME Content-type header.

    The default implementation looks the file's extension
    up in the table self.extensions_map, using application/octet-stream
    as a default; however it would be permissible (if
    slow) to look inside the data to make a better guess.
    """
    base, ext = posixpath.splitext(path)
    if ext in self.extensions_map:
        return self.extensions_map[ext]
    ext = ext.lower()
    if ext in self.extensions_map:
        return self.extensions_map[ext]
    guess, _ = mimetypes.guess_type(path)
    if guess:
        return guess
    return 'application/octet-stream'
def file_ext(self) -> str:
    return posixpath.splitext(self._file_path)[1].lower()
def get_dependent_url(url_path, suffix, ext=None):
    url_directory, url_filename = posixpath.split(url_path)
    url_base, url_ext = posixpath.splitext(url_filename)
    if ext is None:
        ext = url_ext
    return posixpath.join(url_directory, url_base + u'@' + suffix + ext)
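# Editor's examples (not from the source) of the '@suffix' convention
# implemented above; the extension is inherited unless overridden.
assert get_dependent_url('/blog/hero.png', 'thumb') == '/blog/hero@thumb.png'
assert get_dependent_url('/blog/hero.png', 'small', ext='.jpg') == '/blog/hero@small.jpg'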
def run(self, nodes, master, user, user_shell, volumes):
    log.info("Installing mysql-cluster-server on all nodes...")
    for node in nodes:
        self.pool.simple_job(self._install_mysql_cluster, (node),
                             jobid=node.alias)
    self.pool.wait(len(nodes))
    mconn = master.ssh
    mconn.execute('rm -f /usr/mysql-cluster/*')
    # Get IPs for all nodes
    self.mgm_ip = master.private_ip_address
    if not self._dedicated_query:
        self.storage_ips = [x.private_ip_address for x in nodes[1:]]
        self.query_ips = self.storage_ips
        self.data_nodes = nodes[1:]
        self.query_nodes = nodes
    else:
        self.data_nodes = nodes[1:self._num_data_nodes + 1]
        self.query_nodes = nodes[self._num_data_nodes + 1:]
        self.query_nodes.append(master)
        self.storage_ips = [x.private_ip_address for x in self.data_nodes]
        self.query_ips = [x.private_ip_address for x in self.query_nodes]
    # Create backup dir and change ownership of mysql-cluster dir
    log.info('Backing up and stopping all mysql processes on all nodes')
    for node in nodes:
        self.pool.simple_job(self._backup_and_reset, (node),
                             jobid=node.alias)
    self.pool.wait(len(nodes))
    # Generate and place ndb_mgmd configuration file
    log.info('Generating ndb_mgmd.cnf...')
    ndb_mgmd = mconn.remote_file('/etc/mysql/ndb_mgmd.cnf')
    ndb_mgmd.write(self.generate_ndb_mgmd())
    ndb_mgmd.close()
    # Generate and place my.cnf configuration file on each data node
    log.info('Generating my.cnf on all nodes')
    for node in nodes:
        self.pool.simple_job(self._write_my_cnf, (node), jobid=node.alias)
    self.pool.wait(len(nodes))
    # Restart mysql-ndb-mgm on master
    log.info('Restarting mysql-ndb-mgm on master node...')
    mconn.execute('/etc/init.d/mysql-ndb-mgm restart')
    # Start mysqld-ndb on data nodes
    log.info('Restarting mysql-ndb on all data nodes...')
    for node in self.data_nodes:
        self.pool.simple_job(node.ssh.execute,
                             ('/etc/init.d/mysql-ndb restart'),
                             jobid=node.alias)
    self.pool.wait(len(self.data_nodes))
    # Start mysql on query nodes
    log.info('Starting mysql on all query nodes')
    for node in self.query_nodes:
        self.pool.simple_job(node.ssh.execute, ('/etc/init.d/mysql restart'),
                             dict(ignore_exit_status=True),
                             jobid=node.alias)
    self.pool.wait(len(self.query_nodes))
    # Import sql dump
    dump_file = self._dump_file
    dump_dir = '/mnt/mysql-cluster-backup'
    if posixpath.isabs(self._dump_file):
        dump_dir, dump_file = posixpath.split(self._dump_file)
    else:
        log.warn("%s is not an absolute path, defaulting to %s" %
                 (self._dump_file, posixpath.join(dump_dir, dump_file)))
    name, ext = posixpath.splitext(dump_file)
    sc_path = posixpath.join(dump_dir, name + '.sc' + ext)
    orig_path = posixpath.join(dump_dir, dump_file)
    if not mconn.isdir(dump_dir):
        log.info("Directory %s does not exist, creating..." % dump_dir)
        mconn.makedirs(dump_dir)
    if mconn.isfile(sc_path):
        mconn.execute('mysql < %s' % sc_path)
    elif mconn.isfile(orig_path):
        mconn.execute('mysql < %s' % orig_path)
    else:
        log.info('No dump file found, not importing.')
    log.info('Adding MySQL dump cronjob to master node')
    cronjob = self.generate_mysqldump_crontab(sc_path)
    mconn.remove_lines_from_file('/etc/crontab', '#starcluster-mysql')
    crontab_file = mconn.remote_file('/etc/crontab', 'a')
    crontab_file.write(cronjob)
    crontab_file.close()
    log.info('Management Node: %s' % master.alias)
    log.info('Data Nodes: \n%s' % '\n'.join([x.alias for x in self.data_nodes]))
    log.info('Query Nodes: \n%s' % '\n'.join([x.alias for x in self.query_nodes]))
def is_ok(path):
    ext = posixpath.splitext(path)[1][1:].lower()
    if allow_all_extensions and ext and ext not in filter_exts:
        return True
    return ext in exts and ext not in filter_exts
def __setup(self):
    """Construct the series of shell commands, i.e., fill in
       self.__commands"""

    if self.__url:
        # Set the name of the tarball, untarred package directory, and
        # source directory inside the extracted directory
        tarball = posixpath.basename(self.__url)
        match = re.search(r'(.*)(?:(?:\.tar)|(?:\.tar\.gz)|(?:\.tgz)|(?:\.tar\.bz2)|(?:\.tar\.xz))$',
                          tarball)
        if match:
            pkgdir = match.group(1)
        else:
            raise RuntimeError('unrecognized package format')

        # directory containing the unarchived package
        if self.__directory:
            if posixpath.isabs(self.__directory):
                directory = self.__directory
            else:
                directory = posixpath.join(self.__wd, self.__directory)
        else:
            directory = posixpath.join(self.__wd, pkgdir)

        # Download source from web
        self.__commands.append(self.download_step(url=self.__url,
                                                  directory=self.__wd))

        # Untar source package
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball), directory=self.__wd))

    if self.__repository:
        # Clone git repository
        self.__commands.append(self.clone_step(
            branch=self.__branch, commit=self.__commit, path=self.__wd,
            repository=self.__repository))

        directory = posixpath.join(self.__wd, posixpath.splitext(
            posixpath.basename(self.__repository))[0])

    # Preconfigure setup
    if self.__preconfigure:
        # Assume the preconfigure commands should be run from the
        # source directory
        self.__commands.append('cd {}'.format(directory))
        self.__commands.extend(self.__preconfigure)

    # Configure
    environment = []
    if self.__environment:
        for key, val in sorted(self.__environment.items()):
            environment.append('{0}={1}'.format(key, val))
    self.__commands.append(self.configure_step(
        build_directory=self.__build_directory,
        directory=directory, environment=environment,
        toolchain=self.__toolchain))

    # Build
    if self.__make:
        self.__commands.append(self.build_step())

    # Check the build
    if self.__check:
        self.__commands.append(self.check_step())

    # Install
    if self.__install:
        self.__commands.append(self.install_step())

    if self.__postinstall:
        # Assume the postinstall commands should be run from the
        # install directory
        self.__commands.append('cd {}'.format(self.prefix))
        self.__commands.extend(self.__postinstall)

    # Cleanup
    remove = [directory]
    if self.__url:
        remove.append(posixpath.join(self.__wd, tarball))
    if self.__build_directory:
        if posixpath.isabs(self.__build_directory):
            remove.append(self.__build_directory)
    self.__commands.append(self.cleanup_step(items=remove))
def _locate_code(self, zf, path_to_zip_file):
    names = [x if isinstance(x, unicode) else x.decode('utf-8')
             for x in zf.namelist()]
    names = [x[1:] if x[0] == '/' else x for x in names]

    plugin_name = None
    for name in names:
        name, ext = posixpath.splitext(name)
        if name.startswith('plugin-import-name-') and ext == '.txt':
            plugin_name = name.rpartition('-')[-1]

    if plugin_name is None:
        c = 0
        while True:
            c += 1
            plugin_name = 'dummy%d' % c
            if plugin_name not in self.loaded_plugins:
                break
    else:
        if self._identifier_pat.match(plugin_name) is None:
            raise InvalidPlugin(
                'The plugin at %r uses an invalid import name: %r' %
                (path_to_zip_file, plugin_name))

    pynames = [x for x in names if x.endswith('.py')]

    candidates = [posixpath.dirname(x) for x in pynames
                  if x.endswith('/__init__.py')]
    candidates.sort(key=lambda x: x.count('/'))
    valid_packages = set()

    for candidate in candidates:
        parts = candidate.split('/')
        parent = '.'.join(parts[:-1])
        if parent and parent not in valid_packages:
            continue
        valid_packages.add('.'.join(parts))

    names = OrderedDict()

    for candidate in pynames:
        parts = posixpath.splitext(candidate)[0].split('/')
        package = '.'.join(parts[:-1])
        if package and package not in valid_packages:
            continue
        name = '.'.join(parts)
        names[name] = zf.getinfo(candidate)

    # Legacy plugins
    if '__init__' not in names:
        for name in list(names.iterkeys()):
            if '.' not in name and name.endswith('plugin'):
                names['__init__'] = names[name]
                break

    if '__init__' not in names:
        raise InvalidPlugin('The plugin in %r is invalid. It does not '
                            'contain a top-level __init__.py file' %
                            path_to_zip_file)

    with self._lock:
        self.loaded_plugins[plugin_name] = (path_to_zip_file, names)

    return plugin_name
def splitext(npath):
    return posixpath.splitext(npath)
def load_team(self, team_url, team_name):
    team_d = self.visit_url(team_url)
    # load team data
    team, created = Team.objects.get_or_create(
        tournament=self.tournament, name=team_name,
        slug=slugify(team_name), defaults={'rank': 1})
    print(created, file=self.stdout)
    photo_url = team_d.cssselect('.content-section-1 img')[0].get('src')
    filename = slugify(team_name) + posixpath.splitext(photo_url)[1]
    team.photo.save(filename,
                    ContentFile(urllib2.urlopen(photo_url).read()))
    charity_p = team_d.cssselect('.content-section-3 p')[
        0 if self.tournament.slug == "starcraft-2-season-1" else 1]
    charity_name = charity_p.cssselect('a')[0].text
    charity, created = Charity.objects.get_or_create(name=charity_name)
    charity.link = charity_p.cssselect('a')[0].get('href')
    if not charity.desc:
        print("charity desc")
        charity.desc = charity_p.text_content().strip()[2:]
        # "".join(list(charity_p.itertext())[1:])
    if not charity.logo:
        try:
            charity_photo_url = team_d.cssselect(
                '.content-section-4 img')[0].get('src')
            filename = slugify(charity_name) + posixpath.splitext(
                charity_photo_url)[1]
            charity.logo.save(
                filename,
                ContentFile(urllib2.urlopen(charity_photo_url).read()))
        except IndexError:
            print("{team} did not have expected image section for charity, "
                  "leaving blank".format(team=team_name), file=self.stderr)
    charity.full_clean()
    charity.save()
    team.charity = charity
    # load profiles of members
    for member_li in team_d.cssselect("ul.player-list-1.cf li"):
        char_name = member_li.cssselect("h2")[0].text
        member_a = member_li.cssselect("a")[0]
        member_url = member_a.get("href")
        player = self.load_player(member_url, team, char_name)
        if player:
            profile, membership = player
            member_thumbnail_url = member_a.cssselect('img')[0].get('src')
            if member_thumbnail_url != self.unknown_photo:
                filename = slugify(profile.name) + posixpath.splitext(
                    member_thumbnail_url)[1]
                profile.custom_thumb.save(
                    filename,
                    ContentFile(urllib2.urlopen(member_thumbnail_url).read()))
    team.full_clean()
    team.save()
    return team
def splitextTest(self, path: str, filename: str, ext: str) -> None:
    self.assertEqual(posixpath.splitext(path), (filename, ext))
    self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
    self.assertEqual(posixpath.splitext("abc/" + path),
                     ("abc/" + filename, ext))
    self.assertEqual(posixpath.splitext("abc.def/" + path),
                     ("abc.def/" + filename, ext))
    self.assertEqual(posixpath.splitext("/abc.def/" + path),
                     ("/abc.def/" + filename, ext))
    self.assertEqual(posixpath.splitext(path + "/"),
                     (filename + ext + "/", ""))

    pathb = bytes(path, "ASCII")
    filenameb = bytes(filename, "ASCII")
    extb = bytes(ext, "ASCII")

    self.assertEqual(posixpath.splitext(pathb), (filenameb, extb))
    self.assertEqual(posixpath.splitext(b"/" + pathb),
                     (b"/" + filenameb, extb))
    self.assertEqual(posixpath.splitext(b"abc/" + pathb),
                     (b"abc/" + filenameb, extb))
    self.assertEqual(posixpath.splitext(b"abc.def/" + pathb),
                     (b"abc.def/" + filenameb, extb))
    self.assertEqual(posixpath.splitext(b"/abc.def/" + pathb),
                     (b"/abc.def/" + filenameb, extb))
    self.assertEqual(posixpath.splitext(pathb + b"/"),
                     (filenameb + extb + b"/", b""))
def guess_type(self, path):
    base, ext = posixpath.splitext(path)
    if not mimetypes.inited:  # pragma: no cover
        mimetypes.init()
    return mimetypes.types_map.get(ext, 'application/octet-stream')
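# Editor's sketch of the lookup above; the values reflect a typical
# mimetypes table and can vary slightly by platform.
import mimetypes
import posixpath

if not mimetypes.inited:
    mimetypes.init()
print(mimetypes.types_map.get('.html', 'application/octet-stream'))  # text/html
print(mimetypes.types_map.get('.xyz', 'application/octet-stream'))   # application/octet-stream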
def getObj(self):
    f = self.filename
    if f and splitext(f)[-1] in self._allowed_exts:
        return self if self._imgobj else None
def load_player(self, member_url, team, char_name=None):
    """
    Loads player and team membership data, and adds as member to team.

    Return profile, membership
    """
    try:
        member_d = self.visit_url(member_url)
    except IOError:
        profile_name = " ".join(
            (word.capitalize()
             for word in member_url.strip("/").split("/")[-1].split("-")))
        print("Page not found, constructing from {0} name and {1} charname"
              .format(profile_name, char_name))
        # create profile and membership
        profile, created = Profile(name=profile_name,
                                   user=self.master_user), True
        profile.save()
        membership = TeamMembership(team=team, profile=profile,
                                    char_name=char_name, active=False)
        membership.save()
        return profile, membership
    if "Player not found in database" in tostring(member_d):
        print("Player not found...skipping", file=self.stdout)
        return
    info_ps = member_d.cssselect('.content-section-1 p')
    info_h3s = member_d.cssselect('.content-section-1 h3')
    profile_name = info_ps[1].text
    if char_name is None:
        char_name = info_ps[4].text
    if "." in char_name:
        char_name = char_name.split(".", 1)[0]
    if Profile.objects.filter(name=profile_name).count():
        profile, created = Profile.objects.get(name=profile_name), False
        membership, membership_created = TeamMembership.objects.get_or_create(
            team=team, profile=profile, defaults={'char_name': char_name})
        membership.char_name = char_name
    else:
        try:
            membership = TeamMembership.objects.get(team=team,
                                                    char_name=char_name)
            profile, created = membership.profile, False
        except TeamMembership.DoesNotExist:
            # create profile and membership
            profile, created = Profile(name=profile_name,
                                       user=self.master_user), True
            profile.save()
            membership = TeamMembership(team=team, profile=profile,
                                        char_name=char_name)
    print(created, file=self.stdout)
    profile.name = profile_name
    member_photo_url = info_ps[0].cssselect('img')[0].get('src')
    if member_photo_url != self.unknown_photo:
        filename = slugify(profile.name) + posixpath.splitext(member_photo_url)[1]
        profile.photo.save(
            filename, ContentFile(urllib2.urlopen(member_photo_url).read()))
    if info_ps[3].text:
        profile.title = info_ps[3].text
    if info_ps[-1].text:  # deal with blank race
        if info_h3s[-1].text.strip().lower() == "champion":
            membership.champion = info_ps[-1].text.strip()
        else:
            try:
                membership.race = info_ps[-1].text[0].upper()
                membership.clean_fields()
            except ValidationError:
                membership.race = None
    try:
        membership.questions_answers = tostring(
            member_d.cssselect('div.content-section-2 ol')[0])
    except:
        pass
    try:
        profile.full_clean()
    except ValidationError as e:
        print("Profile did not validate! {profile_name} {e}".format(
            profile_name=profile_name.encode('ascii', 'ignore'), e=e),
            file=self.stderr)
    else:
        profile.save()
        membership.save()
        return profile, membership
def load_match(self, match_url, week=None):
    match_d = self.visit_url(match_url)
    if not match_d.cssselect('a.first-title'):
        print("Not a real match....skipping", file=self.stderr)
        return
    home_team = Team.objects.get(
        slug=slugify(match_d.cssselect('a.first-title')[0].text.strip()),
        tournament=self.tournament)
    away_team = Team.objects.get(
        slug=slugify(match_d.cssselect('a.second-title')[0].text.strip()),
        tournament=self.tournament)
    if week is None:
        week = int(re.search('week[^/]*([\d]+)[^/]*/', match_url).group(1)) - 1
    print("{0} week".format(week), file=self.stdout)
    creation_date = self.first_week_match + self.a_week * week
    round = self.find_round(home_team, away_team, creation_date)
    match, match_created = Match.objects.get_or_create(
        home_team=home_team, away_team=away_team,
        creation_date=creation_date, tournament=self.tournament,
        defaults={'tournament_round': round})
    match.published = True
    match.publish_date = match.creation_date + self.a_week + datetime.timedelta(days=5)
    match.home_submitted = True
    match.away_submitted = True
    match.save(notify=False)
    # add games
    for order, game_li in enumerate(match_d.cssselect('li.cf'), start=1):
        map = " ".join(
            game_li.cssselect('.video-link-container h3')[0].text.split()[3:])
        map = self.coerse_mapname(map.strip())
        # Map creation
        map, created = Map.objects.get_or_create(name=map)
        if created or not map.photo:
            print("Created map {name}".format(
                name=map.name.encode('ascii', 'ignore')), file=self.stdout)
            map_photo_url = game_li.cssselect('a.video-link > img')[0].get('src')
            filename = slugify(map.name) + posixpath.splitext(map_photo_url)[1]
            map.photo.save(
                filename, ContentFile(urllib2.urlopen(map_photo_url).read()))
            map.full_clean()
            map.save()
        self.tournament.map_pool.add(map)
        # Game creation
        game, game_created = Game.objects.get_or_create(
            match=match, order=order, defaults={"map": map})
        game.map = map  # just assure the current coersed version
        # if game_created:
        #     print("Created game {order}".format(order=order), file=self.stdout)

        # only load players if players play individual games
        if not self.options['whole_team']:
            home_player = game_li.cssselect(
                '.video-player-link-container h3')[0].text
            away_player = game_li.cssselect(
                '.video-player-link-container.last h3')[0].text
            home_player_url = game_li.cssselect(
                '.video-player-link-container a')[0].get('href')
            away_player_url = game_li.cssselect(
                '.video-player-link-container.last a')[0].get('href')
            members = ("home", "away")
            for team, char_name, url, member in zip(
                    (match.home_team, match.away_team),
                    (home_player, away_player),
                    (home_player_url, away_player_url), members):
                try:
                    setattr(game, "_".join((member, "player")),
                            TeamMembership.objects.get(
                                team=team, char_name__iexact=char_name))
                except TeamMembership.DoesNotExist:
                    if char_name == "???" or "#" in url:
                        if char_name != "???":
                            print("Player {0} not found...ignoring".format(
                                char_name), file=self.stderr)
                    else:
                        profile, membership = self.load_player(
                            url, team, char_name)
                        setattr(game, "_".join((member, "player")), membership)
        vod = game_li.cssselect(
            '.video-link-container > a.video-link')[0].get('href')
        if vod and "afterhoursgaming.tv" not in vod:
            game.vod = vod
        replay_a = game_li.cssselect('.video-link-container > p > a')
        if replay_a:
            self.save_replay(game, replay_a)
        if game.order == 5:
            game.is_ace = True
        game.save()
    match.save(notify=False)
    return match
def download_step(self, allow_unknown_filetype=True, recursive=False,
                  unpack=True, wd=hpccm.config.g_wd):
    """Get source code"""

    if not self.repository and not self.package and not self.url:
        raise RuntimeError('must specify a package, repository, or a URL')

    if sum([bool(self.package), bool(self.repository), bool(self.url)]) > 1:
        raise RuntimeError(
            'must specify exactly one of a package, repository, or a URL')

    # Check if the caller inherits from the annotate template
    annotate = getattr(self, 'add_annotation', None)

    commands = []

    if self.url:
        # Download package
        commands.append(hpccm.templates.wget().download_step(url=self.url,
                                                             directory=wd))

        if unpack:
            commands.append(self.__unpack(
                self.url, wd,
                allow_unknown_filetype=allow_unknown_filetype))

        if callable(annotate):
            self.add_annotation('url', self.url)

    elif self.package:
        # Use an already available package
        if unpack:
            commands.append(self.__unpack(
                self.package, wd,
                allow_unknown_filetype=allow_unknown_filetype))

        if callable(annotate):
            self.add_annotation('package', self.package)

    elif self.repository:
        # Clone git repository
        commands.append(hpccm.templates.git().clone_step(
            branch=self.branch, commit=self.commit, path=wd,
            recursive=recursive, repository=self.repository))

        # Set directory where to find source
        self.src_directory = posixpath.join(
            wd, posixpath.splitext(posixpath.basename(self.repository))[0])

        # Add annotations
        if callable(annotate):
            self.add_annotation('repository', self.repository)
            if self.branch:
                self.add_annotation('branch', self.branch)
            if self.commit:
                self.add_annotation('commit', self.commit)

    if hpccm.config.g_ctype == container_type.DOCKER:
        return ' && \\\n '.join(commands)
    elif hpccm.config.g_ctype == container_type.SINGULARITY:
        return '\n '.join(commands)
    elif hpccm.config.g_ctype == container_type.BASH:
        return '\n'.join(commands)
    else:
        raise RuntimeError('Unknown container type')
def nameonly(s):
    import posixpath
    return posixpath.splitext(posixpath.split(s)[1])[0]
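# Editor's examples (not from the source): only the final extension is
# stripped, since posixpath.splitext runs once.
# nameonly('/docs/guide.txt')  -> 'guide'
# nameonly('report.tar.gz')    -> 'report.tar'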
def calculate_canonical_name(self):
    name_without_ext = posixpath.splitext(self.doc.name)[0]
    return "%s%s" % (name_without_ext, self.ext)
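# Illustrative only (self.doc.name and self.ext come from the surrounding
# class): with doc.name == 'report.pdf' and ext == '.html', the canonical
# name would be 'report.html'.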
def printindex(outfilename, headfilename, levels, titles, tables):
    # Read in the header file
    headbuf = ''
    if posixpath.exists(headfilename):
        try:
            fd = open(headfilename, 'r')
        except:
            print 'Error reading file', headfilename
            exit()
        headbuf = fd.read()
        headbuf = replace(headbuf, 'PETSC_DIR', '../../../')
        fd.close()
    else:
        print 'Header file \'' + headfilename + '\' does not exist'

    # Now open the output file.
    try:
        fd = open(outfilename, 'w')
    except:
        print 'Error writing to file', outfilename
        exit()

    # Add the HTML Header info here.
    fd.write(headbuf)
    # Add some HTML separators
    fd.write('\n<P>\n')
    fd.write('<TABLE>\n')

    for i in range(len(levels)):
        level = levels[i]
        title = titles[i]

        if len(tables[i]) == 0:
            # If no functions in 'None' category, then don't print
            # this category.
            if level == 'none':
                continue
            else:
                # If no functions in any other category, then print
                # the header saying no functions in this category.
                fd.write('<TR><TD WIDTH=250 COLSPAN="3">')
                fd.write('<B>' + 'No ' + level + ' routines' + '</B>')
                fd.write('</TD></TR>\n')
                continue

        fd.write('<TR><TD WIDTH=250 COLSPAN="3">')
        #fd.write('<B>' + upper(title[0]) + title[1:] + '</B>')
        fd.write('<B>' + title + '</B>')
        fd.write('</TD></TR>\n')

        # Now make the entries in the table column oriented
        tables[i] = maketranspose(tables[i], 3)

        for filename in tables[i]:
            path, name = posixpath.split(filename)
            func_name, ext = posixpath.splitext(name)
            mesg = ' <TD WIDTH=250><A HREF="' + './' + name + '">' + \
                   func_name + '</A></TD>\n'
            fd.write(mesg)
            if tables[i].index(filename) % 3 == 2:
                fd.write('<TR>\n')

    fd.write('</TABLE>\n')
    # Add HTML tail info here
    fd.write('<BR><A HREF="../../index.html">Table of Contents</A>\n')
    fd.close()
def getLVCPosition(self):
    res = self.favor2.query(
        "SELECT * FROM scheduler_targets WHERE type='LVC' AND status=get_scheduler_target_status_id('active') ORDER BY time_created DESC",
        simplify=False)

    # If there are no unobserved targets, we may try to re-observe already completed ones
    # We try to re-observe targets last observed not later than 2 hours ago and created no more than day ago
    # if not res:
    #     res = self.favor2.query("SELECT * FROM scheduler_targets WHERE type='LVC' AND status=get_scheduler_target_status_id('completed') AND (time_completed < favor2_timestamp()-'2 hours'::interval AND time_created > favor2_timestamp()-'1 day'::interval) ORDER BY time_created DESC", simplify=False)

    for r in res:
        base = posixpath.join('TRIGGERS', 'LVC', str(r['external_id']))
        #print base

        mapnames = glob.glob(posixpath.join(base, '*.fits.gz'))
        mapnames.sort()
        mapnames.reverse()  # So the lalinference.fits is before bayestar.fits

        for mapname in mapnames:
            # Map scaled for scheduler resolution
            scaledname = posixpath.split(mapname)[-1]
            if '.gz' in scaledname:
                scaledname = posixpath.splitext(scaledname)[0]
            scaledname = posixpath.join(base, "scaled_" + scaledname)
            #print mapname

            if posixpath.exists(scaledname):
                skymap, header = hp.read_map(scaledname, h=True, verbose=False)
            else:
                skymap, header = hp.read_map(mapname, h=True, verbose=False)
                header = dict(header)
                skymap = hp.ud_grade(skymap, self.nside, power=-2)
                hp.write_map(scaledname, skymap)

            # print "start"
            # res2 = self.favor2.query("SELECT * FROM images WHERE type='LVC' and keywords->'TARGET UUID'=%s", (r['uuid'],))
            # print len(res2)
            # for r2 in res2:
            #     dist = ang_sep(r2['ra0'], r2['dec0'], self.ra, self.dec)
            #     skymap[dist<7.5] *= 0.9
            # print "done"

            #print hp.npix2nside(len(skymap)), self.nside
            #print "Absolute:", np.sum(skymap), self.getMaximumRADec(skymap)
            #print "Restricted:", np.sum(skymap*self.restrictions), self.getMaximumRADec(skymap*self.restrictions)

            if np.sum(skymap * self.restrictions_weak) < 0.05:
                # The target is most probably unobservable
                continue

            suggestion = {}
            suggestion['id'] = r['id']
            suggestion['uuid'] = r['uuid']
            suggestion['name'] = r['name']
            suggestion['type'] = r['type']
            suggestion['ra'], suggestion['dec'] = self.getMaximumRADec(
                skymap * self.restrictions_weak)
            # TODO: this should depend on the size of probability region and time since trigger
            suggestion['filter'] = 'Clear'
            suggestion['exposure'] = 60.0
            suggestion['repeat'] = 10
            suggestion['priority'] = r['priority']

            return suggestion

    return None
def process_filename(self):
    self.filename = locale_decode(self.info.filename)
    self.is_file = not self.filename.endswith("/")
    self.name = posixpath.split(self.filename)[1]
    self.ext = (posixpath.splitext(self.filename)[1].lower()[1:]
                if self.is_file else "dir")
def do_common(self):
    self.cliargs = cliargs = self.server.cliargs  # cache lookup
    logging.debug('-' * 50)
    logging.debug('server path is: %s', cliargs._spath)

    host = self.headers['Host']
    if host is None:  # was not present
        sname = cliargs.sname or self.server_name
        host = '{}:{}'.format(sname, self.server.server_port)

    self._netloc = 'http://{}'.format(host)
    self._url = '{}{}'.format(self._netloc, self.path)

    urlparts = urlparse(self.path)
    rootpath = urlparts.path
    rp = posixpath.normpath(rootpath)
    query = urlparts.query
    relpath = rootpath[1:]  # remove leading slash
    logging.debug('self.path is: %s', self.path)
    logging.debug('rootpath: %s', rootpath)
    logging.debug('relpath: %s', relpath)
    logging.debug('query: %s', query)

    target = posixpath.join(cliargs._spath, relpath)
    targetname = posixpath.basename(target)
    logging.debug('target: %s', target)

    if cliargs.api_url:
        logging.debug('checking api_url: %s', cliargs.api_url)
        if rootpath.startswith(cliargs.api_url):
            logging.debug('api url matched for get. Returning data')
            if query:
                logging.debug('api: get with query: %s', query)
                qd = parse_qs(query)
                res = list(cliargs.api_idata.values())
                for k, vs in qd.items():  # ret is key / list of values
                    igetter = operator.itemgetter(k)
                    for v in vs:
                        res = [x for x in res if v in igetter(x)]

                content = json.dumps(res)
            elif len(rp) > len(cliargs.api_url):  # api_url is normalized
                # return the id (only thing left in url)
                epath = posixpath.basename(rootpath)
                logging.debug('api: get with extra path: %s', epath)
                key = int(epath)
                content = json.dumps(cliargs.api_idata.get(int(key), {}))
            else:
                logging.debug('api: get ... mean and lean')
                content = json.dumps(list(cliargs.api_idata.values()))

            return self._sendcontent(content, 'application/json')

    is_anpylar = targetname == 'anpylar.js'
    if cliargs.auto_serve:
        if targetname == 'index.py':
            logging.debug('serving auto_script')
            return self._checkfile(cliargs.auto_serve)

        if not is_anpylar and rootpath == '/':
            # return the index file in any other case
            logging.debug('Serving auto index.html')
            return self._sendcontent(Template_Auto_Index, 'text/html')

    if rootpath == '/':  # root directory is only valid directory
        logging.debug('Root directory sought: %s', target)
        tfile = posixpath.join(target, cliargs.index)
        logging.debug('looking for: %s', tfile)
        if os.path.isfile(tfile):
            logging.debug('Index in directory')
            return self._checkfile(tfile)

        return self._notfound()

    elif os.path.isfile(target) or is_anpylar:  # is a file, return it
        logging.debug('Found file: %s', target)
        if targetname == cliargs.index:
            # index file directly sought - Send to containing directory
            logging.debug('Index file, redirecting')
            return self._redir(posixpath.dirname(rootpath), query)

        if cliargs.dev:
            logging.debug('Checking serving of anpylar.js')
            if targetname == 'anpylar.js':
                logging.debug('serving development anpylar.js')
                anpylar_js = self._make_bundle()
                return self._sendcontent(anpylar_js, 'text/javascript')

        logging.debug('Other file, returning')
        return self._checkfile(target)  # no index file, return it

    # else
    logging.debug('Neither root nor real file sought, checking imports')
    bname = targetname
    _, ext = posixpath.splitext(bname)
    logging.debug('bname is: %s and ext %s:', bname, ext)
    if ext == '.py' and query:  # import attempt and was no file
        logging.debug('Failed .py import attempt: %s', self.path)
        return self._notfound()
    elif ext == '.js' or bname.endswith('.pyc.js'):
        logging.debug('Failed .js or .pyc.js import: %s', self.path)
        return self._notfound()
    elif bname in ['favicon.ico']:  # avoid redirects
        logging.debug('Skipping file: %s', bname)
        return self._notfound()

    # no file, no root dir and no import ... redirect to root with route
    qs0 = {'route': self.path}
    localquery = urlencode(qs0)
    logging.debug('Redir to root with query: %s - %s', query, localquery)
    return self._redir('/', '&'.join((query or '', localquery)))
def _StoryNameFromUrl(url):
    """Turns e.g. 'file://path/to/name.html' into just 'name'."""
    # Strip off URI scheme, params and query; keep only netloc and path.
    uri = urlparse.urlparse(url)
    filepath = posixpath.basename(uri.netloc + uri.path)
    return posixpath.splitext(posixpath.basename(filepath))[0]
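# Editor's standalone version of the same logic (Python 3 spelling of the
# urlparse import; otherwise unchanged), with a quick check:
import posixpath
from urllib import parse as urlparse

def story_name(url):
    uri = urlparse.urlparse(url)
    filepath = posixpath.basename(uri.netloc + uri.path)
    return posixpath.splitext(posixpath.basename(filepath))[0]

assert story_name('file://path/to/name.html?foo=1') == 'name'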
def remote_urls_from_qurl(qurls, allowed_exts):
    for qurl in qurls:
        if (qurl.scheme() in {'http', 'https', 'ftp'} and
                posixpath.splitext(qurl.path())[1][1:].lower() in allowed_exts):
            yield (bytes(qurl.toEncoded()).decode('utf-8'),
                   posixpath.basename(qurl.path()))
def format_emote_filename(filename):
    """format a filename to an emote name as discord does when you upload an emote image"""
    left, sep, right = posixpath.splitext(filename)[0].rpartition('-')
    return (left or right).replace(' ', '')
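# Editor's examples (not from the source): the extension goes, spaces are
# squeezed out, and anything after the last '-' is treated as a droppable
# suffix, falling back to the suffix itself when nothing precedes it.
assert format_emote_filename('blob smile-1234.png') == 'blobsmile'
assert format_emote_filename('happy.png') == 'happy'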
def splitextTest(self, path, filename, ext):
    self.assertEqual(posixpath.splitext(path), (filename, ext))
    self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
    self.assertEqual(posixpath.splitext("abc/" + path),
                     ("abc/" + filename, ext))
    self.assertEqual(posixpath.splitext("abc.def/" + path),
                     ("abc.def/" + filename, ext))
    self.assertEqual(posixpath.splitext("/abc.def/" + path),
                     ("/abc.def/" + filename, ext))
    self.assertEqual(posixpath.splitext(path + "/"),
                     (filename + ext + "/", ""))

    path = bytes(path, "ASCII")
    filename = bytes(filename, "ASCII")
    ext = bytes(ext, "ASCII")

    self.assertEqual(posixpath.splitext(path), (filename, ext))
    self.assertEqual(posixpath.splitext(b"/" + path), (b"/" + filename, ext))
    self.assertEqual(posixpath.splitext(b"abc/" + path),
                     (b"abc/" + filename, ext))
    self.assertEqual(posixpath.splitext(b"abc.def/" + path),
                     (b"abc.def/" + filename, ext))
    self.assertEqual(posixpath.splitext(b"/abc.def/" + path),
                     (b"/abc.def/" + filename, ext))
    self.assertEqual(posixpath.splitext(path + b"/"),
                     (filename + ext + b"/", b""))
def testSiteVerificationFile(self):
    # The site verification file should not redirect.
    self._AssertIdentity(SITE_VERIFICATION_FILE)
    self._AssertRedirect(SITE_VERIFICATION_FILE,
                         posixpath.splitext(SITE_VERIFICATION_FILE)[0])
def parseCompany(path, company):
    for root, dirs, files in os.walk(posixpath.join(path, company)):
        for file in files:
            filename, file_extension = posixpath.splitext(file)
            if file_extension == ".svd":
                parseFile(company, filename)
def url_has_any_extension(url, extensions):
    return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
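# Editor's note: parse_url here is presumably a urlparse-style helper from
# the surrounding project. The comparison keeps the leading dot, so the
# extensions collection must include it too:
# url_has_any_extension('http://example.com/doc/file.PDF?dl=1', {'.pdf'})  -> True
# url_has_any_extension('http://example.com/doc/file', {'.pdf'})           -> False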
def guess_type(self, url, strict=True):
    """Guess the type of a file which is either a URL or a path-like object.

    Return value is a tuple (type, encoding) where type is None if the
    type can't be guessed (no or unknown suffix) or a string of the form
    type/subtype, usable for a MIME Content-type header; and encoding is
    None for no encoding or the name of the program used to encode
    (e.g. compress or gzip). The mappings are table driven. Encoding
    suffixes are case sensitive; type suffixes are first tried case
    sensitive, then case insensitive.

    The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
    to '.tar.gz'. (This is table-driven too, using the dictionary
    suffix_map.)

    Optional `strict' argument when False adds a bunch of commonly found,
    but non-standard types.
    """
    url = os.fspath(url)
    scheme, url = urllib.parse._splittype(url)
    if scheme == 'data':
        # syntax of data URLs:
        # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
        # mediatype := [ type "/" subtype ] *( ";" parameter )
        # data      := *urlchar
        # parameter := attribute "=" value
        # type/subtype defaults to "text/plain"
        comma = url.find(',')
        if comma < 0:
            # bad data URL
            return None, None
        semi = url.find(';', 0, comma)
        if semi >= 0:
            type = url[:semi]
        else:
            type = url[:comma]
        if '=' in type or '/' not in type:
            type = 'text/plain'
        return type, None  # never compressed, so encoding is None
    base, ext = posixpath.splitext(url)
    while ext in self.suffix_map:
        base, ext = posixpath.splitext(base + self.suffix_map[ext])
    if ext in self.encodings_map:
        encoding = self.encodings_map[ext]
        base, ext = posixpath.splitext(base)
    else:
        encoding = None
    types_map = self.types_map[True]
    if ext in types_map:
        return types_map[ext], encoding
    elif ext.lower() in types_map:
        return types_map[ext.lower()], encoding
    elif strict:
        return None, encoding
    types_map = self.types_map[False]
    if ext in types_map:
        return types_map[ext], encoding
    elif ext.lower() in types_map:
        return types_map[ext.lower()], encoding
    else:
        return None, encoding
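# Editor's usage sketch against the stdlib class this method belongs to
# (mimetypes.MimeTypes); results reflect a typical default table.
import mimetypes

m = mimetypes.MimeTypes()
print(m.guess_type('archive.tgz'))  # ('application/x-tar', 'gzip') via suffix_map
print(m.guess_type('data:,Hello'))  # ('text/plain', None) -- data URL default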
def splitext(path):
    return posixpath.splitext(normsep(path))