def dashboard_asf_jira_links(project_key, project_id, components=None):
    # Python 2/3 compatible import of quote_plus.
    try:
        from urllib.parse import quote_plus as url_escape
    except ImportError:
        from urllib import quote_plus as url_escape

    links = []
    all_issues_jql = "project = {}".format(project_key)
    open_issues_jql = all_issues_jql + " and resolution is null"
    if components is not None:
        # Restrict both queries to the given components.
        components = ", ".join(["\"{}\"".format(x) for x in components])
        constraint = " and component in ({})".format(components)
        open_issues_jql += constraint
        all_issues_jql += constraint
    summary_url = "https://issues.apache.org/jira/projects/{}".format(project_key)
    open_issues_url = "https://issues.apache.org/jira/issues/?jql={}".format(
        url_escape(open_issues_jql))
    all_issues_url = "https://issues.apache.org/jira/issues/?jql={}".format(
        url_escape(all_issues_jql))
    create_issue_url = (
        "https://issues.apache.org/jira/secure/CreateIssue!default.jspa"
        "?pid={}".format(project_id))
    links.append("<a href=\"{}\">Summary</a>".format(summary_url))
    links.append("<a href=\"{}\">Open issues</a>".format(open_issues_url))
    links.append("<a href=\"{}\">All issues</a>".format(all_issues_url))
    links.append("<a href=\"{}\">Create issue</a>".format(create_issue_url))
    return " • ".join(links)
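# Usage sketch (hedged): the project key "FOO", project id "12345", and the
# component name "client" are illustrative placeholders, not values from the
# source. The call only builds strings, so it has no side effects.
demo_links = dashboard_asf_jira_links("FOO", "12345", components=["client"])
# The first link looks like:
#   <a href="https://issues.apache.org/jira/projects/FOO">Summary</a>
# and the open-issues JQL is quote_plus-escaped into the query string, e.g.
#   project+%3D+FOO+and+resolution+is+null+and+component+in+%28%22client%22%29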
def _json_for_name(name: str, kind: str):
    # Check the name: strip a trailing '.json' so callers can pass either form.
    name = name.replace('.json', '')

    def match_in_index(name, index, kind):
        return [(obj['organism'], obj[kind + '_name'])
                for obj in index[kind + 's']
                if obj[kind + '_name'] == name]

    try:
        index = server_index()
    except URLError:
        raise Exception('Could not connect to the Escher server')
    # Reuse the index fetched above instead of hitting the server a second time.
    match = match_in_index(name, index, kind)
    if len(match) == 0:
        raise Exception(f'Could not find the {kind} {name} on the server')
    org, name = match[0]
    url = (get_url(kind + '_download')
           + '/'.join([url_escape(x) for x in [org, name + '.json']]))
    print('Downloading %s from %s' % (kind.title(), url))
    try:
        download = urlopen(url)
    except URLError:
        raise ValueError('No %s found at %s' % (kind, url))
    data = _decode_response(download)
    return data
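# Usage sketch (hedged): _json_for_name relies on the module helpers it calls
# (server_index, get_url, url_escape, _decode_response) and on network access
# to the Escher server. The names below are illustrative; valid names are
# whatever the server index currently lists.
map_data = _json_for_name('e_coli_core.Core metabolism', 'map')
model_data = _json_for_name('e_coli_core', 'model')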
def _listobjs(self, objs, name):
    if objs:
        yield "<p>{} {}(s):</p>".format(len(objs), name)
        yield "<ul>"
        for o in sorted(objs, key=attrgetter("name")):
            title = html_escape(o.name)
            url = html_escape("{}/{}".format(self.url, url_escape(o.name)))
            yield '<li><a href="{}">{}</a></li>'.format(url, title)
        yield "</ul><br/>"
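# Usage sketch (hedged): _listobjs is a generator, so callers join its HTML
# chunks. `entry` stands for an instance whose _dirs/_files were already
# scanned; the variable name is illustrative, not from the source.
#   body = "".join(entry._listobjs(entry._dirs, "directory"))
#   body += "".join(entry._listobjs(entry._files, "file"))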
def add_anchor(match):
    uuid = match.group(1)
    if uuid not in tokens:
        return match.group(0)
    original = match.group(0)
    entry_type = tokens[uuid][0]
    entry_name = url_escape(tokens[uuid][1])
    anchor_name = f"//apple_ref/cpp/{entry_type}/{entry_name}"
    return f"{original}<a name=\"{anchor_name}\" class=\"dashAnchor\"></a>"
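# Wiring sketch (hedged): one way add_anchor could be driven with re.sub. The
# `tokens` table and the marker pattern below are assumptions for illustration;
# the real ones live elsewhere in the source. url_escape is assumed to be
# urllib.parse.quote, as elsewhere in these snippets.
import re
from urllib.parse import quote as url_escape

tokens = {
    "123e4567-e89b-12d3-a456-426614174000": ("Function", "url_escape"),
}
marker_re = re.compile(r"<!-- token:([0-9a-fA-F-]{36}) -->")  # hypothetical marker
page = '<h2>url_escape</h2><!-- token:123e4567-e89b-12d3-a456-426614174000 -->'
annotated = marker_re.sub(add_anchor, page)
# annotated now ends with:
#   <a name="//apple_ref/cpp/Function/url_escape" class="dashAnchor"></a>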
def links(self):
    if self.is_dir:
        for f in self._files:
            yield dict(
                rel="enclosure",
                href="{}/{}".format(self.url, url_escape(f.name)),
                length=str(f.stat().st_size),
                type=get_mimetype(f.name),
            )
    else:
        yield dict(
            rel="enclosure",
            href=self.url,
            length=str(self.size),
            type=get_mimetype(self.url),
        )
def populate_config(self):
    config = self.config
    for s in self.CONFIG_SPECIALS:
        config[s] = {}
    for opt, val in self.parser.items(u'global'):
        if opt in self.CONFIG_SPECIALS:
            raise ValueError('"%s" is an invalid '
                             '[global] option' % opt)
        config[opt] = val
    config['tls_verify_client'] = self.parser.getboolean(
        'global', 'tls_verify_client', fallback=False)
    config['debug'] = self.parser.getboolean('global', 'debug',
                                             fallback=False)
    config['makedirs'] = self.parser.getboolean('global', 'makedirs',
                                                fallback=False)
    if self.args.debug:
        config['debug'] = self.args.debug
    config['auditlog'] = os.path.abspath(config.get('auditlog'))
    config['umask'] = int(config.get('umask', '027'), 8)

    url = config.get('server_url')
    sock = config.get('server_socket')
    if url and sock:
        raise ValueError(
            "'server_url' and 'server_socket' are mutually exclusive.")
    if not url and not sock:
        # Neither option was given; fall back to the default socket path.
        socketdir = self.parser.get(u'DEFAULT', u'socketdir')
        name = self.args.instance if self.args.instance else 'custodia'
        sock = os.path.join(socketdir, name + '.sock')
    if sock:
        server_socket = os.path.abspath(sock)
        config['server_url'] = 'http+unix://{}/'.format(
            url_escape(server_socket, ''))
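# Sketch (hedged) of the server_url produced for a unix socket, assuming
# url_escape is urllib.parse.quote. Passing safe='' makes the slashes escape
# too, so the whole path fits into the authority part of the http+unix:// URL.
from urllib.parse import quote

print('http+unix://{}/'.format(quote('/var/run/custodia.sock', safe='')))
# -> http+unix://%2Fvar%2Frun%2Fcustodia.sock/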
def __init__(self, base_url, start_path, rel_path):
    self.title = os.path.basename(rel_path)
    self.url = "{}/{}".format(base_url, url_escape(rel_path))
    self.path = os.path.join(start_path, rel_path)
    self.is_dir = os.path.isdir(self.path)
    stat = os.stat(self.path)
    self._files, self._dirs = [], []
    if self.is_dir:
        self.size = None
        for x in os.scandir(self.path):
            if x.is_file():
                self._files.append(x)
            elif x.is_dir():
                self._dirs.append(x)
    else:
        self.size = stat.st_size
    self.date = datetime.utcfromtimestamp(
        stat.st_mtime).replace(tzinfo=timezone.utc)
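# Hypothetical usage, assuming this __init__ belongs to the feed-entry class
# whose links() generator appears above. The class name FeedEntry and the
# paths are illustrative, not from the source.
#   entry = FeedEntry("https://example.org/media", "/srv/media", "show/ep01.mp3")
#   entry.title   -> "ep01.mp3"
#   entry.url     -> "https://example.org/media/show/ep01.mp3"
#   entry.is_dir  -> False, so entry.size holds the file size in bytes
#   entry.date    -> the file's mtime as a timezone-aware UTC datetime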
def tack_on_reports(self):
    """ Copy every file into the data_dir and bung in a link to it here.
    """
    links = OrderedDict()
    # Go through the already-sorted list of samples.
    for s_name in self.samples_list:
        links[s_name] = "<span class='alt_col_link'>"
        # Cycle through the reports. Presumably there are one or two.
        reps = sorted([r for r in self.html_reports if r.sample == s_name],
                      key=lambda r: r.read)
        for n, rep in enumerate(reps):
            # This is a little funky but seems most legible...
            rep_label = ("{}_{}".format(rep.sample, rep.read)
                         if len(reps) > 1 else rep.sample)
            link_label = rep_label if n == 0 else "..._{}".format(rep.read)
            fname = os.path.basename(rep.file)
            shutil.copy(rep.file, os.path.join(config.data_dir, fname))
            file_relpath = os.path.join(config.data_dir_name, fname)
            links[s_name] += (
                "<a href='{f}' title='{rl} FastQC Report'>{ll}</a> ".format(
                    f=url_escape(file_relpath),
                    rl=html_escape(rep_label),
                    ll=html_escape(link_label)))
        links[s_name] += "</span>"
    # Output in sorted order.
    if not links:
        links['error'] = "No FastQC HTML plots were found."
    return "<div>" + " ".join(links[k] for k in sorted(links)) + "</div>"