def _parse_log(self, line):
    """Parse a 'log' operation record and dispatch its fields to handle_log."""
    # limit=<N>?
    pLIMIT = r'limit=(\d+)'
    # revprops=all|(<REVPROP> ...)?
    pREVPROPS = r'revprops=(all|\(([^)]+)\))'
    m = _match(line, pPATHS, pREVRANGE,
               [pLIMIT, 'discover-changed-paths', 'strict',
                'include-merged-revisions', pREVPROPS])
    paths = [urllib_parse_unquote(p) for p in m.group(1).split()]
    left, right = int(m.group(2)), int(m.group(3))
    # Absent limit means "no limit", encoded as 0.
    limit = 0 if m.group(5) is None else int(m.group(5))
    discover_changed_paths = m.group(6) is not None
    strict = m.group(7) is not None
    include_merged_revisions = m.group(8) is not None
    # revprops: None = all, [] = none requested, else an explicit list.
    if m.group(10) == 'all':
        revprops = None
    elif m.group(11) is None:
        revprops = []
    else:
        revprops = [urllib_parse_unquote(p) for p in m.group(11).split()]
    self.handle_log(paths, left, right, limit, discover_changed_paths,
                    strict, include_merged_revisions, revprops)
    return line[m.end():]
def _parse_log(self, line):
    """Parse a 'log' record and hand its decoded fields to handle_log."""
    # limit=<N>?
    pLIMIT = r'limit=(\d+)'
    # revprops=all|(<REVPROP> ...)?
    pREVPROPS = r'revprops=(all|\(([^)]+)\))'
    options = [pLIMIT, 'discover-changed-paths', 'strict',
               'include-merged-revisions', pREVPROPS]
    m = _match(line, pPATHS, pREVRANGE, options)
    paths = [urllib_parse_unquote(chunk) for chunk in m.group(1).split()]
    left = int(m.group(2))
    right = int(m.group(3))
    if m.group(5) is not None:
        limit = int(m.group(5))
    else:
        # No explicit limit on the record: report 0.
        limit = 0
    discover_changed_paths, strict, include_merged_revisions = (
        m.group(i) is not None for i in (6, 7, 8))
    # revprops: None means "all", [] means none listed, else decode each name.
    if m.group(10) == 'all':
        revprops = None
    else:
        raw = m.group(11)
        if raw is None:
            revprops = []
        else:
            revprops = [urllib_parse_unquote(chunk) for chunk in raw.split()]
    self.handle_log(paths, left, right, limit, discover_changed_paths,
                    strict, include_merged_revisions, revprops)
    return line[m.end():]
def _parse_switch(self, line):
    """Parse a 'switch' record: from-path, to-path@rev, optional depth."""
    m = _match(line, pPATH, pPATHREV, [pDEPTH])
    self.handle_switch(urllib_parse_unquote(m.group(1)),
                       urllib_parse_unquote(m.group(2)),
                       int(m.group(3)),
                       _parse_depth(m.group(5)))
    return line[m.end():]
def _parse_diff_2paths(self, line, m):
    """Dispatch a two-path diff (path@rev vs path@rev) from a pre-made match."""
    from_path = urllib_parse_unquote(m.group(1))
    to_path = urllib_parse_unquote(m.group(3))
    from_rev, to_rev = int(m.group(2)), int(m.group(4))
    self.handle_diff_2paths(from_path, from_rev, to_path, to_rev,
                            _parse_depth(m.group(6)),
                            m.group(7) is not None)
    return line[m.end():]
def _parse_diff_2paths(self, line, m):
    """Report a diff between two path@revision pairs to the handler."""
    args = (urllib_parse_unquote(m.group(1)),   # from_path
            int(m.group(2)),                    # from_rev
            urllib_parse_unquote(m.group(3)),   # to_path
            int(m.group(4)),                    # to_rev
            _parse_depth(m.group(6)),           # depth
            m.group(7) is not None)             # ignore_ancestry
    self.handle_diff_2paths(*args)
    return line[m.end():]
def _parse_open(self, line):
    """Parse the session-open record: protocol, capabilities, path, clients."""
    pINT = r'(\d+)'
    pCAP = r'cap=\(([^)]*)\)'
    pCLIENT = pWORD
    m = _match(line, pINT, pCAP, pPATH, pCLIENT, pCLIENT)
    caps = m.group(2)
    capabilities = [] if caps is None else caps.split()
    # NOTE: the path (group 3) is deliberately passed through un-decoded,
    # matching the original behavior.
    self.handle_open(int(m.group(1)),
                     capabilities,
                     m.group(3),
                     urllib_parse_unquote(m.group(4)),
                     urllib_parse_unquote(m.group(5)))
    return line[m.end():]
def _parse_status(self, line):
    """Parse a 'status' record: path, revision, optional depth."""
    m = _match(line, pPATH, pREVNUM, [pDEPTH])
    self.handle_status(urllib_parse_unquote(m.group(1)),
                       int(m.group(2)),
                       _parse_depth(m.group(4)))
    return line[m.end():]
def _parse_get_file_revs(self, line):
    """Parse 'get-file-revs': path, revision range, merged-revisions flag."""
    m = _match(line, pPATH, pREVRANGE, ['include-merged-revisions'])
    path = urllib_parse_unquote(m.group(1))
    left, right = int(m.group(2)), int(m.group(3))
    self.handle_get_file_revs(path, left, right, m.group(4) is not None)
    return line[m.end():]
def _parse_get_location_segments(self, line):
    """Parse 'get-location-segments': a path@peg plus a revision range."""
    m = _match(line, pPATHREV, pREVRANGE)
    peg, left, right = (int(m.group(g)) for g in (2, 3, 4))
    self.handle_get_location_segments(urllib_parse_unquote(m.group(1)),
                                      peg, left, right)
    return line[m.end():]
def handle_starttag(self, tag, attrs):
    """Collect unquoted href targets of <a> tags into PL_anchors, skipping
    values already present (dedupe is on the raw, still-quoted value)."""
    if tag != "a":
        return
    for name, value in attrs:
        if name == "href" and value not in self.PL_anchors:
            self.PL_anchors.append(urllib_parse_unquote(value))
def _parse_diff_1path(self, line, m):
    """Dispatch a single-path diff across a revision range."""
    path = urllib_parse_unquote(m.group(1))
    left, right = int(m.group(2)), int(m.group(3))
    self.handle_diff_1path(path, left, right,
                           _parse_depth(m.group(5)),
                           m.group(6) is not None)
    return line[m.end():]
def _parse_update(self, line):
    """Parse an 'update' record: path, revision, depth, copyfrom-args flag."""
    m = _match(line, pPATH, pREVNUM, [pDEPTH, 'send-copyfrom-args'])
    self.handle_update(urllib_parse_unquote(m.group(1)),
                       int(m.group(2)),
                       _parse_depth(m.group(4)),
                       m.group(5) is not None)
    return line[m.end():]
def _parse_diff_1path(self, line, m):
    """Report a diff of one path between two revisions."""
    args = (urllib_parse_unquote(m.group(1)),  # path
            int(m.group(2)),                   # left revision
            int(m.group(3)),                   # right revision
            _parse_depth(m.group(5)),          # depth
            m.group(6) is not None)            # ignore_ancestry
    self.handle_diff_1path(*args)
    return line[m.end():]
def task_update(request, task_id=None):
    """Update a task's description or completion state for the current user.

    If ``task_id`` is not given it is read from the parsed request params.
    On a client-side-render request (``is_csr``) an inline <script> response
    is returned; otherwise the task page is re-rendered with a message.
    """
    import json  # local import: used to safely embed user text in the script

    context = {}
    # Both branches of the original code called h.get_parsed_params(request);
    # hoist the duplicated call.
    parsed_params = h.get_parsed_params(request)
    if not task_id:
        task_id = int(parsed_params['id'])
    task = get_object_or_404(Task, id=task_id, user=request.user)
    updated_task_description = \
        urllib_parse_unquote(parsed_params.get('description', ''))
    updated_task_is_complete = \
        urllib_parse_unquote(parsed_params.get('is_complete', ''))
    is_csr = \
        urllib_parse_unquote(parsed_params.get('is_csr', ''))

    if updated_task_description \
            and updated_task_description != task.description:
        task.description = updated_task_description
        task.save()
    elif updated_task_is_complete:
        if updated_task_is_complete == 'true':
            task.is_complete = True
        elif updated_task_is_complete == 'false':
            task.is_complete = False
        task.save()

    if is_csr:
        # SECURITY: the original interpolated raw user input directly into
        # the inline <script>, allowing XSS. JSON-encode the values and break
        # any embedded "</" so a "</script>" payload cannot close the tag.
        def _js(value):
            return json.dumps(value).replace('</', '<\\/')

        return HttpResponse(f"""
            <script>
                hDispatch('task-update-csr', {{
                    id: {int(task_id)},
                    description: {_js(updated_task_description)},
                    is_complete: {_js(updated_task_is_complete)}
                }});
                hStatusMessageDisplay("Task updated", 'success');
            </script>
        """)
    messages.success(request, 'Task updated')
    context.update({'task': task})
    return h.render_with_messages(request, 'tasks/get_task.html', context)
def _parse_get_mergeinfo(self, line):
    """Parse a 'get-mergeinfo' record: paths, inheritance mode, and the
    include-descendants flag.

    Fix: the original declared an unused local ``pINCLUDE_DESCENDANTS``
    (the literal option string is passed to ``_match`` instead) — removed.
    """
    # <I> — mergeinfo inheritance word
    pMERGEINFO_INHERITANCE = pWORD
    m = _match(line, pPATHS, pMERGEINFO_INHERITANCE, ["include-descendants"])
    paths = [urllib_parse_unquote(x) for x in m.group(1).split()]
    inheritance = _parse_mergeinfo_inheritance(m.group(2))
    include_descendants = m.group(3) is not None
    self.handle_get_mergeinfo(paths, inheritance, include_descendants)
    return line[m.end():]
def _parse_get_mergeinfo(self, line):
    """Parse a 'get-mergeinfo' record: paths, inheritance mode, and the
    include-descendants flag.

    Fix: dropped the unused local ``pINCLUDE_DESCENDANTS``; the literal
    option string is what is actually passed to ``_match``.
    """
    # <I> — mergeinfo inheritance word
    pMERGEINFO_INHERITANCE = pWORD
    m = _match(line, pPATHS, pMERGEINFO_INHERITANCE, ['include-descendants'])
    paths = [urllib_parse_unquote(x) for x in m.group(1).split()]
    inheritance = _parse_mergeinfo_inheritance(m.group(2))
    include_descendants = m.group(3) is not None
    self.handle_get_mergeinfo(paths, inheritance, include_descendants)
    return line[m.end():]
def _parse_get_locks(self, line):
    """Parse 'get-locks', which carries only a single path."""
    m = _match(line, pPATH)
    path = urllib_parse_unquote(m.group(1))
    self.handle_get_locks(path)
    return line[m.end():]
def _parse_replay(self, line):
    """Parse 'replay': path plus the revision to replay."""
    m = _match(line, pPATH, pREVNUM)
    self.handle_replay(urllib_parse_unquote(m.group(1)), int(m.group(2)))
    return line[m.end():]
def _parse_stat(self, line):
    """Parse 'stat': a single path@revision pair."""
    m = _match(line, pPATHREV)
    self.handle_stat(urllib_parse_unquote(m.group(1)), int(m.group(2)))
    return line[m.end():]
def _parse_unlock(self, line):
    """Parse 'unlock': a path list plus an optional break flag."""
    m = _match(line, pPATHS, ['break'])
    unquoted_paths = [urllib_parse_unquote(p) for p in m.group(1).split()]
    break_lock = m.group(2) is not None
    self.handle_unlock(unquoted_paths, break_lock)
    return line[m.end():]
def _parse_rev_prop(self, line):
    """Parse 'rev-prop': a revision number and a property name."""
    m = _match(line, pREVNUM, pPROPERTY)
    revnum = int(m.group(1))
    propname = urllib_parse_unquote(m.group(2))
    self.handle_rev_prop(revnum, propname)
    return line[m.end():]
def _parse_get_file(self, line):
    """Parse 'get-file': path, revision, and the text/props flags."""
    m = _match(line, pPATH, pREVNUM, ['text', 'props'])
    path = urllib_parse_unquote(m.group(1))
    want_text = m.group(3) is not None
    want_props = m.group(4) is not None
    self.handle_get_file(path, int(m.group(2)), want_text, want_props)
    return line[m.end():]
def handle_starttag(self, tag, attrs):
    """Record each new, unquoted href target of an <a> tag in PL_anchors.

    Deduplication compares the raw (still-quoted) value, as the original did.
    """
    if tag != "a":
        return
    for name, value in attrs:
        if name == 'href' and value not in self.PL_anchors:
            self.PL_anchors.append(urllib_parse_unquote(value))
def _parse_get_locations(self, line):
    """Parse 'get-locations': a path plus a list of revision numbers."""
    m = _match(line, pPATH, pREVNUMS)
    revnums = list(map(int, m.group(2).split()))
    self.handle_get_locations(urllib_parse_unquote(m.group(1)), revnums)
    return line[m.end():]
def _parse_get_dir(self, line):
    """Parse 'get-dir': path, revision, and the text/props flags."""
    m = _match(line, pPATH, pREVNUM, ["text", "props"])
    path = urllib_parse_unquote(m.group(1))
    rev = int(m.group(2))
    self.handle_get_dir(path, rev,
                        m.group(3) is not None,
                        m.group(4) is not None)
    return line[m.end():]