Example #1
0
    def _deserialize(self, stream):
        """Parse a raw git commit object from *stream* and populate this
        instance's attributes from it.

        :param stream: readable stream positioned at the start of the commit
            object's data; the first line is expected to be ``tree <hexsha>``
        :return: self"""
        readline = stream.readline
        # first header line: "tree <hexsha>"
        self.tree = Tree(self.odb, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')

        # zero or more "parent <hexsha>" lines follow; the first non-parent
        # line is kept in next_line for the author parsing below
        self.parents = list()
        next_line = None
        while True:
            parent_line = readline()
            if not parent_line.startswith('parent'):
                next_line = parent_line
                break
            # END abort reading parents
            self.parents.append(type(self)(self.odb, hex_to_bin(parent_line.split()[-1])))
        # END for each parent line
        self.parents = tuple(self.parents)

        # "author <actor> <date> <offset>" then "committer <actor> <date> <offset>"
        self.author, self.authored_date, self.author_tz_offset = parse_actor_and_date(next_line)
        self.committer, self.committed_date, self.committer_tz_offset = parse_actor_and_date(readline())

        # now we can have the encoding line, or an empty line followed by the optional
        # message.
        self.encoding = self.default_encoding
        # read encoding or empty line to separate message
        enc = readline()
        enc = enc.strip()
        if enc:
            # "encoding <name>" header - keep everything after the first space
            self.encoding = enc[enc.find(' ') + 1:]
            # now comes the message separator
            readline()
        # END handle encoding

        # decode the authors name
        try:
            self.author.name = self.author.name.decode(self.encoding)
        except UnicodeDecodeError:
            # best-effort: report and keep the undecoded name rather than failing
            print >> sys.stderr, "Failed to decode author name '%s' using encoding %s" % (
                self.author.name, self.encoding)
        # END handle author's encoding

        # decode committer name
        try:
            self.committer.name = self.committer.name.decode(self.encoding)
        except UnicodeDecodeError:
            print >> sys.stderr, "Failed to decode committer name '%s' using encoding %s" % (
                self.committer.name, self.encoding)
        # END handle author's encoding

        # a stream from our data simply gives us the plain message
        # The end of our message stream is marked with a newline that we strip
        self.message = stream.read()
        try:
            self.message = self.message.decode(self.encoding)
        except UnicodeDecodeError:
            print >> sys.stderr, "Failed to decode message '%s' using encoding %s" % (self.message, self.encoding)
        # END exception handling
        return self
Example #2
0
	def _deserialize(self, stream):
		"""Parse a raw git commit object from *stream* and populate this
		instance's attributes from it.

		:param stream: readable stream positioned at the start of the commit
			object's data; the first line is expected to be ``tree <hexsha>``
		:return: self"""
		readline = stream.readline
		# first header line: "tree <hexsha>"
		self.tree = Tree(self.odb, hex_to_bin(readline().split()[1]), Tree.tree_id<<12, '')

		# zero or more "parent <hexsha>" lines follow; the first non-parent
		# line is kept in next_line for the author parsing below
		self.parents = list()
		next_line = None
		while True:
			parent_line = readline()
			if not parent_line.startswith('parent'):
				next_line = parent_line
				break
			# END abort reading parents
			self.parents.append(type(self)(self.odb, hex_to_bin(parent_line.split()[-1])))
		# END for each parent line
		self.parents = tuple(self.parents)
		
		# "author <actor> <date> <offset>" then "committer <actor> <date> <offset>"
		self.author, self.authored_date, self.author_tz_offset = parse_actor_and_date(next_line)
		self.committer, self.committed_date, self.committer_tz_offset = parse_actor_and_date(readline())
		
		
		# now we can have the encoding line, or an empty line followed by the optional
		# message.
		self.encoding = self.default_encoding
		# read encoding or empty line to separate message
		enc = readline()
		enc = enc.strip()
		if enc:
			# "encoding <name>" header - keep everything after the first space
			self.encoding = enc[enc.find(' ')+1:]
			# now comes the message separator 
			readline()
		# END handle encoding
		
		# decode the authors name
		try:
			# best-effort: report and keep the undecoded name rather than failing
			self.author.name = self.author.name.decode(self.encoding) 
		except UnicodeDecodeError:
			print >> sys.stderr, "Failed to decode author name '%s' using encoding %s" % (self.author.name, self.encoding)
		# END handle author's encoding
		
		# decode committer name
		try:
			self.committer.name = self.committer.name.decode(self.encoding) 
		except UnicodeDecodeError:
			print >> sys.stderr, "Failed to decode committer name '%s' using encoding %s" % (self.committer.name, self.encoding)
		# END handle author's encoding
		
		# a stream from our data simply gives us the plain message
		# The end of our message stream is marked with a newline that we strip
		self.message = stream.read()
		try:
			self.message = self.message.decode(self.encoding)
		except UnicodeDecodeError:
			print >> sys.stderr, "Failed to decode message '%s' using encoding %s" % (self.message, self.encoding)
		# END exception handling 
		return self
Example #3
0
    def _set_cache_(self, attr):
        """Read the tag object's raw data once and fill in every cached attribute."""
        if attr not in TagObject.__slots__:
            # not one of our attributes - delegate to the base implementation
            super(TagObject, self)._set_cache_(attr)
            return
        # END check our attributes

        raw_lines = self.repo.odb.stream(self.binsha).read().splitlines()

        # line 0: "object <hexsha>"
        _object_token, target_hexsha = raw_lines[0].split(" ")
        # line 1: "type <type_name>"
        _type_token, target_type_name = raw_lines[1].split(" ")
        self.object = get_object_type_by_name(target_type_name)(
            self.repo, hex_to_bin(target_hexsha))

        # line 2: "tag <tag name>"
        self.tag = raw_lines[2][4:]

        # line 3: "tagger <actor> <date>"
        self.tagger, self.tagged_date, self.tagger_tz_offset = parse_actor_and_date(
            raw_lines[3][7:])

        # line 4 empty - it could mark the beginning of the next header
        # in case there really is no message, it would not exist. Otherwise
        # a newline separates header from message
        self.message = "\n".join(raw_lines[5:]) if len(raw_lines) > 5 else ''
Example #4
0
	def _set_cache_(self, attr):
		"""Parse the tag's object data and cache all of our attributes in one go."""
		if attr not in TagObject.__slots__:
			# attribute is handled by the base class
			super(TagObject, self)._set_cache_(attr)
			return
		# END check our attributes
		
		data_lines = self.odb.stream(self.binsha).read().splitlines()
		
		# line 0: "object <hexsha>"
		_obj_token, hexsha = data_lines[0].split(" ")
		# line 1: "type <type_name>"
		_type_token, type_name = data_lines[1].split(" ")
		self.object = get_object_type_by_name(type_name)(self.odb, hex_to_bin(hexsha))
		
		# line 2: "tag <tag name>"
		self.tag = data_lines[2][4:]
		
		# line 3: "tagger <actor> <date>"
		self.tagger, self.tagged_date, self.tagger_tz_offset = parse_actor_and_date(data_lines[3][7:])
		
		# line 4 empty - it could mark the beginning of the next header
		# in case there really is no message, it would not exist. Otherwise 
		# a newline separates header from message
		if len(data_lines) > 5:
			self.message = "\n".join(data_lines[5:])
		else:
			self.message = ''
Example #5
0
    def _deserialize(self, stream):
        """Parse a raw git commit object from *stream* and populate this
        instance's attributes (tree, parents, author/committer information,
        encoding, gpg signature and message).

        :param stream: readable stream positioned at the start of the commit
            object's data; the first line is expected to be ``tree <hexsha>``
        :return: self"""
        readline = stream.readline
        # first header line: "tree <hexsha>"
        self.tree = Tree(self.repo, hex_to_bin(readline().split()[1]), Tree.tree_id << 12, '')

        # zero or more "parent <hexsha>" lines follow; the first non-parent
        # line is kept in next_line for the author parsing below
        self.parents = list()
        next_line = None
        while True:
            parent_line = readline()
            if not parent_line.startswith('parent'):
                next_line = parent_line
                break
            # END abort reading parents
            self.parents.append(type(self)(self.repo, hex_to_bin(parent_line.split()[-1])))
        # END for each parent line
        self.parents = tuple(self.parents)

        self.author, self.authored_date, self.author_tz_offset = parse_actor_and_date(next_line)
        self.committer, self.committed_date, self.committer_tz_offset = parse_actor_and_date(readline())

        # we might run into one or more mergetag blocks, skip those for now
        next_line = readline()
        while next_line.startswith('mergetag '):
            next_line = readline()
            # continuation lines of a mergetag block are indented by one space
            while next_line.startswith(' '):
                next_line = readline()

        # now we can have the encoding line, or an empty line followed by the optional
        # message.
        self.encoding = self.default_encoding

        # read remaining header lines until the empty separator line
        enc = next_line
        buf = enc.strip()
        while buf != "":
            # BUGFIX: was `buf[0:10] == "encoding "`, comparing a 10-character
            # slice against the 9-character literal, which could never match -
            # the encoding header was silently ignored and the default used
            if buf.startswith("encoding "):
                self.encoding = buf[buf.find(' ') + 1:]
            elif buf.startswith("gpgsig "):
                # the signature continues on space-indented lines until either
                # EOF, the header/message separator, or the next header
                sig = buf[buf.find(' ') + 1:] + "\n"
                is_next_header = False
                while True:
                    sigbuf = readline()
                    if sigbuf == "":
                        break
                    if sigbuf[0:1] != " ":
                        # hit the next header line - hand it back to the outer loop
                        buf = sigbuf.strip()
                        is_next_header = True
                        break
                    sig += sigbuf[1:]
                self.gpgsig = sig.rstrip("\n")
                if is_next_header:
                    continue
            buf = readline().strip()

        # decode the authors name
        try:
            self.author.name = self.author.name.decode(self.encoding)
        except UnicodeDecodeError:
            # best-effort: report and keep the undecoded name rather than failing
            print >> sys.stderr, "Failed to decode author name '%s' using encoding %s" % (self.author.name, self.encoding)
        # END handle author's encoding

        # decode committer name
        try:
            self.committer.name = self.committer.name.decode(self.encoding)
        except UnicodeDecodeError:
            print >> sys.stderr, "Failed to decode committer name '%s' using encoding %s" % (self.committer.name, self.encoding)
        # END handle committer's encoding

        # a stream from our data simply gives us the plain message
        # The end of our message stream is marked with a newline that we strip
        self.message = stream.read()
        try:
            self.message = self.message.decode(self.encoding)
        except UnicodeDecodeError:
            print >> sys.stderr, "Failed to decode message '%s' using encoding %s" % (self.message, self.encoding)
        # END exception handling
        return self