def getRawChanges(self):
    """Collect new changes from the bzr branch at ``self.url``.

    Returns a list of change dicts in chronological (oldest-first)
    order.  The branch label attached to each change is the URL, the
    branch nick, or a caller-supplied string, selected by comparing
    ``self.branch_name`` against the ``FULL`` / ``SHORT`` sentinels.
    When ``self.last_revision`` is set, every revision after it is
    reported; otherwise only the branch tip is examined.
    """
    branch = bzrlib.branch.Branch.open_containing(self.url)[0]
    if self.branch_name is FULL:
        branch_name = self.url
    elif self.branch_name is SHORT:
        branch_name = branch.nick
    else:
        # presumably a string or maybe None
        branch_name = self.branch_name
    changes = []
    change = generate_change(
        branch, blame_merge_author=self.blame_merge_author)
    if (self.last_revision is None or
            change['revision'] > self.last_revision):
        change['branch'] = branch_name
        change['category'] = self.category
        changes.append(change)
        if self.last_revision is not None:
            # Walk backwards through any revisions missed since the
            # last poll so none are skipped.
            while self.last_revision + 1 < change['revision']:
                change = generate_change(
                    branch, new_revno=change['revision'] - 1,
                    blame_merge_author=self.blame_merge_author)
                change['branch'] = branch_name
                # Bug fix: changes recovered by this catch-up loop were
                # missing the 'category' key that the tip change
                # receives above.
                change['category'] = self.category
                changes.append(change)
    changes.reverse()
    return changes
def getRawChanges(self):
    """Poll the bzr branch at ``self.url`` for unseen changes.

    Each returned change dict carries a branch label (URL, branch nick,
    or a literal string, depending on ``self.branch_name`` versus the
    ``FULL``/``SHORT`` sentinels) and the configured category.  The
    list is ordered oldest-first.
    """
    branch = bzrlib.branch.Branch.open_containing(self.url)[0]
    if self.branch_name is FULL:
        branch_name = self.url
    elif self.branch_name is SHORT:
        branch_name = branch.nick
    else:
        # presumably a string or maybe None
        branch_name = self.branch_name
    changes = []
    change = generate_change(branch,
                             blame_merge_author=self.blame_merge_author)
    if (self.last_revision is None or
            change['revision'] > self.last_revision):
        change['branch'] = branch_name
        change['category'] = self.category
        changes.append(change)
        if self.last_revision is not None:
            while self.last_revision + 1 < change['revision']:
                change = generate_change(
                    branch, new_revno=change['revision'] - 1,
                    blame_merge_author=self.blame_merge_author)
                change['branch'] = branch_name
                # Fix: also categorize changes recovered by this
                # catch-up loop; previously only the tip change got
                # a category.
                change['category'] = self.category
                changes.append(change)
    changes.reverse()
    return changes
def _parse_changes(self, query):
    """Parse an RSS document read from *query* into change dicts.

    Only items whose publication time is newer than ``self.lastChange``
    are kept; the result is returned oldest-first.
    """
    dom = minidom.parseString(query.read())
    changes = []
    for item in dom.getElementsByTagName("item"):
        entry = {}
        for field in ("description", "link", "author", "pubDate"):
            entry[field] = item.getElementsByTagName(field)[0].firstChild.wholeText
        # Drop the HTML line breaks embedded in the description.
        entry["description"] = entry["description"].replace("<br/>", "")
        # Turn the timezone-qualified date into a UTC timestamp.
        entry["pubDate"] = rfc822.mktime_tz(rfc822.parsedate_tz(entry["pubDate"]))
        if entry["pubDate"] > self.lastChange:
            changes.append(entry)
    # Flip the feed's ordering (RSS feeds are typically newest-first).
    changes.reverse()
    return changes
def _parse_changes(self, query):
    """Extract change dicts from the RSS feed read from *query*.

    Items older than ``self.lastChange`` are discarded; the remaining
    items are returned with the feed order reversed.
    """
    dom = minidom.parseString(query.read())
    items = dom.getElementsByTagName("item")
    parsed = []
    for node in items:
        rec = dict((k, node.getElementsByTagName(k)[0].firstChild.wholeText)
                   for k in ["description", "link", "author", "pubDate"])
        # Strip the HTML newlines out of the description text.
        rec["description"] = rec["description"].replace("<br/>", "")
        # Parse the date (with timezone) into a UTC timestamp.
        rec["pubDate"] = rfc822.mktime_tz(rfc822.parsedate_tz(rec["pubDate"]))
        parsed.append(rec)
    recent = [c for c in parsed if c["pubDate"] > self.lastChange]
    recent.reverse()
    return recent
def _parse_changes(query, lastChange):
    """Parse an Atom document (*query*, an XML string) into change dicts.

    Entries whose update time is not newer than *lastChange* are
    skipped; the survivors are returned in chronological order.
    """
    dom = minidom.parseString(query)
    changes = []
    for entry in dom.getElementsByTagName("entry"):
        rec = {}
        for tag in ("title", "updated"):
            rec[tag] = entry.getElementsByTagName(tag)[0].firstChild.wholeText
        rec["updated"] = parse_date_string(rec["updated"])
        # The changeset id is the second word of the entry title.
        rec["changeset"] = rec["title"].split(" ")[1]
        # The author's name lives in the second child of <author>.
        name_node = entry.getElementsByTagName("author")[0].childNodes[1]
        rec["author"] = name_node.firstChild.wholeText
        rec["link"] = entry.getElementsByTagName("link")[0].getAttribute("href")
        if rec["updated"] > lastChange:
            changes.append(rec)
    changes.reverse()  # want them in chronological order
    return changes
def _parse_changes(self, query):
    """Parse a Google Code Atom feed (*query*, an XML string) into
    change dicts.

    Entries arrive newest-first; parsing stops as soon as the revision
    in ``self.lastChange`` is seen, and the collected changes are
    returned oldest-first.
    """
    dom = minidom.parseString(query)
    entries = dom.getElementsByTagName("entry")
    changes = []
    # Entries come in reverse chronological order
    for i in entries:
        d = {}
        # revision is the last part of the 'id' url
        d["revision"] = i.getElementsByTagName(
            "id")[0].firstChild.data.split('/')[-1]
        if d["revision"] == self.lastChange:
            break  # no more new changes
        d["when"] = timegm(strptime(
            i.getElementsByTagName("updated")[0].firstChild.data,
            "%Y-%m-%dT%H:%M:%SZ"))
        d["author"] = i.getElementsByTagName(
            "author")[0].getElementsByTagName("name")[0].firstChild.data
        # files and commit msg are separated by 2 consecutive <br/>
        content = i.getElementsByTagName(
            "content")[0].firstChild.data.split("<br/>\n <br/>")
        # Remove the action keywords from the file list
        fl = content[0].replace(
            u' \xa0\xa0\xa0\xa0Add\xa0\xa0\xa0\xa0', '').replace(
            u' \xa0\xa0\xa0\xa0Delete\xa0\xa0\xa0\xa0', '').replace(
            u' \xa0\xa0\xa0\xa0Modify\xa0\xa0\xa0\xa0', '')
        # Get individual files and remove the 'header'
        d["files"] = fl.encode("ascii", "replace").split("<br/>")[1:]
        d["files"] = [f.strip() for f in d["files"]]
        # Bug fix: this was a bare "except:", which swallowed every
        # error (even KeyboardInterrupt/SystemExit).  The only expected
        # failure is IndexError when the entry has no message part.
        try:
            d["comments"] = content[1].encode("ascii", "replace")
        except IndexError:
            d["comments"] = "No commit message provided"
        changes.append(d)
    changes.reverse()  # want them in chronological order
    return changes
def _parse_changes(self, query):
    """Turn a Google Code Atom feed (*query*, an XML string) into a
    list of change dicts, oldest-first.

    The feed lists entries newest-first, so iteration stops when the
    revision equal to ``self.lastChange`` is reached.
    """
    dom = minidom.parseString(query)
    entries = dom.getElementsByTagName("entry")
    changes = []
    # Entries come in reverse chronological order
    for i in entries:
        d = {}
        # revision is the last part of the 'id' url
        d["revision"] = i.getElementsByTagName(
            "id")[0].firstChild.data.split('/')[-1]
        if d["revision"] == self.lastChange:
            break  # no more new changes
        d["when"] = timegm(strptime(
            i.getElementsByTagName("updated")[0].firstChild.data,
            "%Y-%m-%dT%H:%M:%SZ"))
        d["author"] = i.getElementsByTagName(
            "author")[0].getElementsByTagName("name")[0].firstChild.data
        # files and commit msg are separated by 2 consecutive <br/>
        content = i.getElementsByTagName(
            "content")[0].firstChild.data.split("<br/>\n <br/>")
        # Remove the action keywords from the file list
        fl = content[0].replace(
            u' \xa0\xa0\xa0\xa0Add\xa0\xa0\xa0\xa0', '').replace(
            u' \xa0\xa0\xa0\xa0Delete\xa0\xa0\xa0\xa0', '').replace(
            u' \xa0\xa0\xa0\xa0Modify\xa0\xa0\xa0\xa0', '')
        # Get individual files and remove the 'header'
        d["files"] = fl.encode("ascii", "replace").split("<br/>")[1:]
        d["files"] = [f.strip() for f in d["files"]]
        # Fix: narrowed from a bare "except:" — only the "entry has no
        # commit message" case (IndexError on content[1]) is expected;
        # anything else should propagate rather than be hidden.
        try:
            d["comments"] = content[1].encode("ascii", "replace")
        except IndexError:
            d["comments"] = "No commit message provided"
        changes.append(d)
    changes.reverse()  # want them in chronological order
    return changes
def _parse_changes(data): pushes = json.loads(data) changes = [] for push_id, push_data in pushes.iteritems(): push_time = push_data['date'] push_user = push_data['user'] for cset in push_data['changesets']: change = {} change['updated'] = push_time change['author'] = push_user change['changeset'] = cset['node'] change['files'] = cset['files'] change['branch'] = cset['branch'] change['comments'] = cset['desc'] changes.append(change) # Sort by push date # Changes in the same push have their order preserved because python list # sorts are stable. The leaf of each push is sorted at the end of the list # of changes for that push. changes.sort(key=lambda c: c['updated']) return changes
def _parse_changes(data): pushes = json.loads(data) changes = [] for push_id, push_data in pushes.iteritems(): push_time = push_data['date'] push_user = push_data['user'] for cset in push_data['changesets']: change = {} change['updated'] = push_time change['author'] = push_user change['changeset'] = cset['node'] change['files'] = cset['files'] change['branch'] = cset['branch'] change['comments'] = cset['desc'] changes.append(change) # Sort by push date # Changes in the same push have their order preserved because python list # sorts are stable. The leaf of each push is sorted at the end of the list # of changes for that push. changes.sort(key=lambda c:c['updated']) return changes
def getRawChanges(self):
    """Return new changes on the bzr branch at ``self.url``,
    oldest-first, each tagged with branch name, branch id and category.
    """
    branch = bzrlib.branch.Branch.open_containing(self.url)[0]
    label = self.branch_name
    collected = []
    cursor = generate_change(
        branch, blame_merge_author=self.blame_merge_author)
    # Guard clause: nothing new since the last recorded revision.
    if (self.last_revision is not None and
            cursor['revision'] == self.last_revision):
        return collected
    cursor['branch'] = label
    cursor['branch_id'] = self.branch_id
    cursor['category'] = self.category
    collected.append(cursor)
    if self.last_revision is not None:
        # Walk backwards until the revision right after last_revision,
        # tagging each recovered change the same way.
        while self.last_revision + 1 < cursor['revision']:
            cursor = generate_change(
                branch, new_revno=cursor['revision'] - 1,
                blame_merge_author=self.blame_merge_author)
            cursor['branch'] = label
            cursor['branch_id'] = self.branch_id
            cursor.setdefault('category', self.category)
            collected.append(cursor)
    collected.reverse()
    return collected
def getRawChanges(self):
    """Poll the bzr branch at ``self.url`` and return unseen changes
    in chronological order, annotated with branch metadata.
    """
    branch = bzrlib.branch.Branch.open_containing(self.url)[0]
    branch_name = self.branch_name

    def _annotate(ch, is_tip):
        # Attach branch metadata; the tip change always gets the
        # configured category, catch-up changes keep any they have.
        ch['branch'] = branch_name
        ch['branch_id'] = self.branch_id
        if is_tip:
            ch['category'] = self.category
        else:
            ch.setdefault('category', self.category)
        return ch

    results = []
    tip = generate_change(
        branch, blame_merge_author=self.blame_merge_author)
    if self.last_revision is None or tip['revision'] != self.last_revision:
        results.append(_annotate(tip, True))
        if self.last_revision is not None:
            prev = tip
            while self.last_revision + 1 < prev['revision']:
                prev = generate_change(
                    branch, new_revno=prev['revision'] - 1,
                    blame_merge_author=self.blame_merge_author)
                results.append(_annotate(prev, False))
    results.reverse()
    return results