def urlcrawlstatus(self):
  if self.urlcrawlstatus_ is None:
    self.lazy_init_lock_.acquire()
    try:
      if self.urlcrawlstatus_ is None: self.urlcrawlstatus_ = URLCrawlStatusTag()
    finally:
      self.lazy_init_lock_.release()
  return self.urlcrawlstatus_
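
This accessor lazily creates the URLCrawlStatusTag submessage with double-checked locking: the common case returns the cached object without touching lazy_init_lock_, and the check is repeated after acquiring the lock so that two threads racing on the first call cannot each allocate a separate submessage. The generated code uses the low-level Python 2 thread module; the sketch below is a minimal, hypothetical stand-alone version of the same pattern using threading.Lock, with Payload and LazyHolder as illustrative names rather than anything from this module.

import threading

class Payload(object):
  """Stand-in for the lazily created submessage (e.g. URLCrawlStatusTag)."""

class LazyHolder(object):
  def __init__(self):
    self.payload_ = None
    self.lock_ = threading.Lock()   # plays the role of lazy_init_lock_

  def payload(self):
    if self.payload_ is None:            # fast path: no locking once created
      with self.lock_:
        if self.payload_ is None:        # re-check under the lock
          self.payload_ = Payload()
    return self.payload_

holder = LazyHolder()
assert holder.payload() is holder.payload()   # every call returns the same object
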
class UrlHistoryTag_FetchData(ProtocolBuffer.ProtocolMessage):
  def __init__(self, contents=None):
    self.timestamp_ = 0
    self.lastmodified_ = 0
    self.pagesize_ = 0
    self.timetofetch_ = 0
    self.contentchecksum_ = 0
    self.linkchecksum_ = 0
    self.newlinks_ = 0
    self.deprecated_pagerank_ = 0
    self.deprecated_sourcetag_ = 0
    self.docid_ = 0
    self.deprecated_segment_ = 0
    self.deprecated_fetchtype_ = 0
    self.documentarchived_ = 0
    self.urlcrawlstatus_ = None
    self.contentdups_ = None
    self.canonicalfp_ = 0
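    # The has_* flags below record field presence separately from the stored
    # values; the urlcrawlstatus_ and contentdups_ submessages above start as
    # None and are only built lazily, under lazy_init_lock_, on first access.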
    self.has_timestamp_ = 0
    self.has_lastmodified_ = 0
    self.has_pagesize_ = 0
    self.has_timetofetch_ = 0
    self.has_contentchecksum_ = 0
    self.has_linkchecksum_ = 0
    self.has_newlinks_ = 0
    self.has_deprecated_pagerank_ = 0
    self.has_deprecated_sourcetag_ = 0
    self.has_docid_ = 0
    self.has_deprecated_segment_ = 0
    self.has_deprecated_fetchtype_ = 0
    self.has_documentarchived_ = 0
    self.has_urlcrawlstatus_ = 0
    self.has_contentdups_ = 0
    self.has_canonicalfp_ = 0
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def timestamp(self): return self.timestamp_

  def set_timestamp(self, x):
    self.has_timestamp_ = 1
    self.timestamp_ = x

  def clear_timestamp(self):
    self.has_timestamp_ = 0
    self.timestamp_ = 0

  def has_timestamp(self): return self.has_timestamp_

  def lastmodified(self): return self.lastmodified_

  def set_lastmodified(self, x):
    self.has_lastmodified_ = 1
    self.lastmodified_ = x

  def clear_lastmodified(self):
    self.has_lastmodified_ = 0
    self.lastmodified_ = 0

  def has_lastmodified(self): return self.has_lastmodified_

  def pagesize(self): return self.pagesize_

  def set_pagesize(self, x):
    self.has_pagesize_ = 1
    self.pagesize_ = x

  def clear_pagesize(self):
    self.has_pagesize_ = 0
    self.pagesize_ = 0

  def has_pagesize(self): return self.has_pagesize_

  def timetofetch(self): return self.timetofetch_

  def set_timetofetch(self, x):
    self.has_timetofetch_ = 1
    self.timetofetch_ = x

  def clear_timetofetch(self):
    self.has_timetofetch_ = 0
    self.timetofetch_ = 0

  def has_timetofetch(self): return self.has_timetofetch_

  def contentchecksum(self): return self.contentchecksum_

  def set_contentchecksum(self, x):
    self.has_contentchecksum_ = 1
    self.contentchecksum_ = x

  def clear_contentchecksum(self):
    self.has_contentchecksum_ = 0
    self.contentchecksum_ = 0

  def has_contentchecksum(self): return self.has_contentchecksum_

  def linkchecksum(self): return self.linkchecksum_

  def set_linkchecksum(self, x):
    self.has_linkchecksum_ = 1
    self.linkchecksum_ = x

  def clear_linkchecksum(self):
    self.has_linkchecksum_ = 0
    self.linkchecksum_ = 0

  def has_linkchecksum(self): return self.has_linkchecksum_

  def newlinks(self): return self.newlinks_

  def set_newlinks(self, x):
    self.has_newlinks_ = 1
    self.newlinks_ = x

  def clear_newlinks(self):
    self.has_newlinks_ = 0
    self.newlinks_ = 0

  def has_newlinks(self): return self.has_newlinks_

  def deprecated_pagerank(self): return self.deprecated_pagerank_

  def set_deprecated_pagerank(self, x):
    self.has_deprecated_pagerank_ = 1
    self.deprecated_pagerank_ = x

  def clear_deprecated_pagerank(self):
    self.has_deprecated_pagerank_ = 0
    self.deprecated_pagerank_ = 0

  def has_deprecated_pagerank(self): return self.has_deprecated_pagerank_

  def deprecated_sourcetag(self): return self.deprecated_sourcetag_

  def set_deprecated_sourcetag(self, x):
    self.has_deprecated_sourcetag_ = 1
    self.deprecated_sourcetag_ = x

  def clear_deprecated_sourcetag(self):
    self.has_deprecated_sourcetag_ = 0
    self.deprecated_sourcetag_ = 0

  def has_deprecated_sourcetag(self): return self.has_deprecated_sourcetag_

  def docid(self): return self.docid_

  def set_docid(self, x):
    self.has_docid_ = 1
    self.docid_ = x

  def clear_docid(self):
    self.has_docid_ = 0
    self.docid_ = 0

  def has_docid(self): return self.has_docid_

  def deprecated_segment(self): return self.deprecated_segment_

  def set_deprecated_segment(self, x):
    self.has_deprecated_segment_ = 1
    self.deprecated_segment_ = x

  def clear_deprecated_segment(self):
    self.has_deprecated_segment_ = 0
    self.deprecated_segment_ = 0

  def has_deprecated_segment(self): return self.has_deprecated_segment_

  def deprecated_fetchtype(self): return self.deprecated_fetchtype_

  def set_deprecated_fetchtype(self, x):
    self.has_deprecated_fetchtype_ = 1
    self.deprecated_fetchtype_ = x

  def clear_deprecated_fetchtype(self):
    self.has_deprecated_fetchtype_ = 0
    self.deprecated_fetchtype_ = 0

  def has_deprecated_fetchtype(self): return self.has_deprecated_fetchtype_

  def documentarchived(self): return self.documentarchived_

  def set_documentarchived(self, x):
    self.has_documentarchived_ = 1
    self.documentarchived_ = x

  def clear_documentarchived(self):
    self.has_documentarchived_ = 0
    self.documentarchived_ = 0

  def has_documentarchived(self): return self.has_documentarchived_

  def urlcrawlstatus(self):
    if self.urlcrawlstatus_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.urlcrawlstatus_ is None: self.urlcrawlstatus_ = URLCrawlStatusTag()
      finally:
        self.lazy_init_lock_.release()
    return self.urlcrawlstatus_

  def mutable_urlcrawlstatus(self): self.has_urlcrawlstatus_ = 1; return self.urlcrawlstatus()
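  # Note: urlcrawlstatus() builds the submessage lazily but leaves the has bit
  # untouched; only mutable_urlcrawlstatus() marks the field as present.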

  def clear_urlcrawlstatus(self):
    # Warning: this method does not acquire the lock.
    self.has_urlcrawlstatus_ = 0
    if self.urlcrawlstatus_ is not None: self.urlcrawlstatus_.Clear()

  def has_urlcrawlstatus(self): return self.has_urlcrawlstatus_

  def contentdups(self):
    if self.contentdups_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.contentdups_ is None: self.contentdups_ = UrlHistoryTag_FetchDataContentdups()
      finally:
        self.lazy_init_lock_.release()
    return self.contentdups_

  def mutable_contentdups(self): self.has_contentdups_ = 1; return self.contentdups()

  def clear_contentdups(self):
    # Warning: this method does not acquire the lock.
    self.has_contentdups_ = 0
    if self.contentdups_ is not None: self.contentdups_.Clear()

  def has_contentdups(self): return self.has_contentdups_

  def canonicalfp(self): return self.canonicalfp_

  def set_canonicalfp(self, x):
    self.has_canonicalfp_ = 1
    self.canonicalfp_ = x

  def clear_canonicalfp(self):
    self.has_canonicalfp_ = 0
    self.canonicalfp_ = 0

  def has_canonicalfp(self): return self.has_canonicalfp_


  def MergeFrom(self, x):
    assert x is not self
    if (x.has_timestamp()): self.set_timestamp(x.timestamp())
    if (x.has_lastmodified()): self.set_lastmodified(x.lastmodified())
    if (x.has_pagesize()): self.set_pagesize(x.pagesize())
    if (x.has_timetofetch()): self.set_timetofetch(x.timetofetch())
    if (x.has_contentchecksum()): self.set_contentchecksum(x.contentchecksum())
    if (x.has_linkchecksum()): self.set_linkchecksum(x.linkchecksum())
    if (x.has_newlinks()): self.set_newlinks(x.newlinks())
    if (x.has_deprecated_pagerank()): self.set_deprecated_pagerank(x.deprecated_pagerank())
    if (x.has_deprecated_sourcetag()): self.set_deprecated_sourcetag(x.deprecated_sourcetag())
    if (x.has_docid()): self.set_docid(x.docid())
    if (x.has_deprecated_segment()): self.set_deprecated_segment(x.deprecated_segment())
    if (x.has_deprecated_fetchtype()): self.set_deprecated_fetchtype(x.deprecated_fetchtype())
    if (x.has_documentarchived()): self.set_documentarchived(x.documentarchived())
    if (x.has_urlcrawlstatus()): self.mutable_urlcrawlstatus().MergeFrom(x.urlcrawlstatus())
    if (x.has_contentdups()): self.mutable_contentdups().MergeFrom(x.contentdups())
    if (x.has_canonicalfp()): self.set_canonicalfp(x.canonicalfp())

  def _CMergeFromString(self, s):
    _net_proto___parse__python.MergeFromString(self, 'UrlHistoryTag', s)

  def _CEncode(self):
    return _net_proto___parse__python.Encode(self, 'UrlHistoryTag')

  def _CToASCII(self, output_format):
    return _net_proto___parse__python.ToASCII(self, 'UrlHistoryTag', output_format)


  def ParseASCII(self, s):
    _net_proto___parse__python.ParseASCII(self, 'UrlHistoryTag', s)


  def ParseASCIIIgnoreUnknown(self, s):
    _net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'UrlHistoryTag', s)


  def Equals(self, x):
    if x is self: return 1
    if self.has_timestamp_ != x.has_timestamp_: return 0
    if self.has_timestamp_ and self.timestamp_ != x.timestamp_: return 0
    if self.has_lastmodified_ != x.has_lastmodified_: return 0
    if self.has_lastmodified_ and self.lastmodified_ != x.lastmodified_: return 0
    if self.has_pagesize_ != x.has_pagesize_: return 0
    if self.has_pagesize_ and self.pagesize_ != x.pagesize_: return 0
    if self.has_timetofetch_ != x.has_timetofetch_: return 0
    if self.has_timetofetch_ and self.timetofetch_ != x.timetofetch_: return 0
    if self.has_contentchecksum_ != x.has_contentchecksum_: return 0
    if self.has_contentchecksum_ and self.contentchecksum_ != x.contentchecksum_: return 0
    if self.has_linkchecksum_ != x.has_linkchecksum_: return 0
    if self.has_linkchecksum_ and self.linkchecksum_ != x.linkchecksum_: return 0
    if self.has_newlinks_ != x.has_newlinks_: return 0
    if self.has_newlinks_ and self.newlinks_ != x.newlinks_: return 0
    if self.has_deprecated_pagerank_ != x.has_deprecated_pagerank_: return 0
    if self.has_deprecated_pagerank_ and self.deprecated_pagerank_ != x.deprecated_pagerank_: return 0
    if self.has_deprecated_sourcetag_ != x.has_deprecated_sourcetag_: return 0
    if self.has_deprecated_sourcetag_ and self.deprecated_sourcetag_ != x.deprecated_sourcetag_: return 0
    if self.has_docid_ != x.has_docid_: return 0
    if self.has_docid_ and self.docid_ != x.docid_: return 0
    if self.has_deprecated_segment_ != x.has_deprecated_segment_: return 0
    if self.has_deprecated_segment_ and self.deprecated_segment_ != x.deprecated_segment_: return 0
    if self.has_deprecated_fetchtype_ != x.has_deprecated_fetchtype_: return 0
    if self.has_deprecated_fetchtype_ and self.deprecated_fetchtype_ != x.deprecated_fetchtype_: return 0
    if self.has_documentarchived_ != x.has_documentarchived_: return 0
    if self.has_documentarchived_ and self.documentarchived_ != x.documentarchived_: return 0
    if self.has_urlcrawlstatus_ != x.has_urlcrawlstatus_: return 0
    if self.has_urlcrawlstatus_ and self.urlcrawlstatus_ != x.urlcrawlstatus_: return 0
    if self.has_contentdups_ != x.has_contentdups_: return 0
    if self.has_contentdups_ and self.contentdups_ != x.contentdups_: return 0
    if self.has_canonicalfp_ != x.has_canonicalfp_: return 0
    if self.has_canonicalfp_ and self.canonicalfp_ != x.canonicalfp_: return 0
    return 1

  def __eq__(self, other):
    return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)

  def __ne__(self, other):
    return not (self == other)

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (self.has_urlcrawlstatus_ and not self.urlcrawlstatus_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_contentdups_ and not self.contentdups_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
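    # The byte counts added below are tag sizes: fields 4..15 carry a 1-byte
    # tag, the boolean at field 32 a 2-byte tag plus 1 byte of payload (3),
    # the field 34 submessage a 2-byte tag plus its length-prefixed body, the
    # field 35 group two 2-byte start/end tags (4), and the fixed64 at field
    # 37 a 2-byte tag plus 8 bytes (10).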
    n = 0
    if (self.has_timestamp_): n += 1 + self.lengthVarInt64(self.timestamp_)
    if (self.has_lastmodified_): n += 1 + self.lengthVarInt64(self.lastmodified_)
    if (self.has_pagesize_): n += 1 + self.lengthVarInt64(self.pagesize_)
    if (self.has_timetofetch_): n += 1 + self.lengthVarInt64(self.timetofetch_)
    if (self.has_contentchecksum_): n += 1 + self.lengthVarInt64(self.contentchecksum_)
    if (self.has_linkchecksum_): n += 1 + self.lengthVarInt64(self.linkchecksum_)
    if (self.has_newlinks_): n += 1 + self.lengthVarInt64(self.newlinks_)
    if (self.has_deprecated_pagerank_): n += 1 + self.lengthVarInt64(self.deprecated_pagerank_)
    if (self.has_deprecated_sourcetag_): n += 1 + self.lengthVarInt64(self.deprecated_sourcetag_)
    if (self.has_docid_): n += 1 + self.lengthVarInt64(self.docid_)
    if (self.has_deprecated_segment_): n += 1 + self.lengthVarInt64(self.deprecated_segment_)
    if (self.has_deprecated_fetchtype_): n += 1 + self.lengthVarInt64(self.deprecated_fetchtype_)
    if (self.has_documentarchived_): n += 3
    if (self.has_urlcrawlstatus_): n += 2 + self.lengthString(self.urlcrawlstatus_.ByteSize())
    if (self.has_contentdups_): n += 4 + self.contentdups_.ByteSize()
    if (self.has_canonicalfp_): n += 10
    return n + 0

  def Clear(self):
    self.clear_timestamp()
    self.clear_lastmodified()
    self.clear_pagesize()
    self.clear_timetofetch()
    self.clear_contentchecksum()
    self.clear_linkchecksum()
    self.clear_newlinks()
    self.clear_deprecated_pagerank()
    self.clear_deprecated_sourcetag()
    self.clear_docid()
    self.clear_deprecated_segment()
    self.clear_deprecated_fetchtype()
    self.clear_documentarchived()
    self.clear_urlcrawlstatus()
    self.clear_contentdups()
    self.clear_canonicalfp()

  def OutputUnchecked(self, out):
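    # Each putVarInt32(tag) below writes a standard protobuf wire-format tag,
    # tag = (field_number << 3) | wire_type: 32 is field 4 as a varint, 96 is
    # field 12, 256 is field 32 (the boolean), 274 is field 34 as a length
    # delimited submessage, 283/284 open and close field 35 as a group, and
    # 297 is field 37 as a fixed 64-bit value.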
    if (self.has_timestamp_):
      out.putVarInt32(32)
      out.putVarInt64(self.timestamp_)
    if (self.has_pagesize_):
      out.putVarInt32(40)
      out.putVarInt64(self.pagesize_)
    if (self.has_timetofetch_):
      out.putVarInt32(48)
      out.putVarInt64(self.timetofetch_)
    if (self.has_contentchecksum_):
      out.putVarInt32(56)
      out.putVarInt64(self.contentchecksum_)
    if (self.has_linkchecksum_):
      out.putVarInt32(64)
      out.putVarInt64(self.linkchecksum_)
    if (self.has_newlinks_):
      out.putVarInt32(72)
      out.putVarInt64(self.newlinks_)
    if (self.has_deprecated_pagerank_):
      out.putVarInt32(80)
      out.putVarInt64(self.deprecated_pagerank_)
    if (self.has_deprecated_sourcetag_):
      out.putVarInt32(88)
      out.putVarInt64(self.deprecated_sourcetag_)
    if (self.has_lastmodified_):
      out.putVarInt32(96)
      out.putVarInt64(self.lastmodified_)
    if (self.has_docid_):
      out.putVarInt32(104)
      out.putVarInt64(self.docid_)
    if (self.has_deprecated_segment_):
      out.putVarInt32(112)
      out.putVarInt64(self.deprecated_segment_)
    if (self.has_deprecated_fetchtype_):
      out.putVarInt32(120)
      out.putVarInt64(self.deprecated_fetchtype_)
    if (self.has_documentarchived_):
      out.putVarInt32(256)
      out.putBoolean(self.documentarchived_)
    if (self.has_urlcrawlstatus_):
      out.putVarInt32(274)
      out.putVarInt32(self.urlcrawlstatus_.ByteSize())
      self.urlcrawlstatus_.OutputUnchecked(out)
    if (self.has_contentdups_):
      out.putVarInt32(283)
      self.contentdups_.OutputUnchecked(out)
      out.putVarInt32(284)
    if (self.has_canonicalfp_):
      out.putVarInt32(297)
      out.put64(self.canonicalfp_)

  def TryMerge(self, d):
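    # Decode tags until 28, the end-group marker for field 3 ((3 << 3) | 4):
    # this FetchData message is itself serialized as a group inside its parent.
    # Unrecognized tags fall through to skipData() and are skipped.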
    while 1:
      tt = d.getVarInt32()
      if tt == 28: break
      if tt == 32:
        self.set_timestamp(d.getVarInt64())
        continue
      if tt == 40:
        self.set_pagesize(d.getVarInt64())
        continue
      if tt == 48:
        self.set_timetofetch(d.getVarInt64())
        continue
      if tt == 56:
        self.set_contentchecksum(d.getVarInt64())
        continue
      if tt == 64:
        self.set_linkchecksum(d.getVarInt64())
        continue
      if tt == 72:
        self.set_newlinks(d.getVarInt64())
        continue
      if tt == 80:
        self.set_deprecated_pagerank(d.getVarInt64())
        continue
      if tt == 88:
        self.set_deprecated_sourcetag(d.getVarInt64())
        continue
      if tt == 96:
        self.set_lastmodified(d.getVarInt64())
        continue
      if tt == 104:
        self.set_docid(d.getVarInt64())
        continue
      if tt == 112:
        self.set_deprecated_segment(d.getVarInt64())
        continue
      if tt == 120:
        self.set_deprecated_fetchtype(d.getVarInt64())
        continue
      if tt == 256:
        self.set_documentarchived(d.getBoolean())
        continue
      if tt == 274:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_urlcrawlstatus().TryMerge(tmp)
        continue
      if tt == 283:
        self.mutable_contentdups().TryMerge(d)
        continue
      if tt == 297:
        self.set_canonicalfp(d.get64())
        continue
      # tag 0 is special: it's used to indicate an error.
      # so if we see it we raise an exception.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_timestamp_: res+=prefix+("Timestamp: %s\n" % self.DebugFormatInt64(self.timestamp_))
    if self.has_lastmodified_: res+=prefix+("LastModified: %s\n" % self.DebugFormatInt64(self.lastmodified_))
    if self.has_pagesize_: res+=prefix+("PageSize: %s\n" % self.DebugFormatInt64(self.pagesize_))
    if self.has_timetofetch_: res+=prefix+("TimeToFetch: %s\n" % self.DebugFormatInt64(self.timetofetch_))
    if self.has_contentchecksum_: res+=prefix+("ContentChecksum: %s\n" % self.DebugFormatInt64(self.contentchecksum_))
    if self.has_linkchecksum_: res+=prefix+("LinkChecksum: %s\n" % self.DebugFormatInt64(self.linkchecksum_))
    if self.has_newlinks_: res+=prefix+("NewLinks: %s\n" % self.DebugFormatInt64(self.newlinks_))
    if self.has_deprecated_pagerank_: res+=prefix+("DEPRECATED_PageRank: %s\n" % self.DebugFormatInt64(self.deprecated_pagerank_))
    if self.has_deprecated_sourcetag_: res+=prefix+("DEPRECATED_SourceTag: %s\n" % self.DebugFormatInt64(self.deprecated_sourcetag_))
    if self.has_docid_: res+=prefix+("DocId: %s\n" % self.DebugFormatInt64(self.docid_))
    if self.has_deprecated_segment_: res+=prefix+("DEPRECATED_Segment: %s\n" % self.DebugFormatInt64(self.deprecated_segment_))
    if self.has_deprecated_fetchtype_: res+=prefix+("DEPRECATED_FetchType: %s\n" % self.DebugFormatInt64(self.deprecated_fetchtype_))
    if self.has_documentarchived_: res+=prefix+("DocumentArchived: %s\n" % self.DebugFormatBool(self.documentarchived_))
    if self.has_urlcrawlstatus_:
      res+=prefix+"UrlCrawlStatus <\n"
      res+=self.urlcrawlstatus_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_contentdups_:
      res+=prefix+"Contentdups {\n"
      res+=self.contentdups_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
    if self.has_canonicalfp_: res+=prefix+("CanonicalFp: %s\n" % self.DebugFormatFixed64(self.canonicalfp_))
    return res
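
Taken together, the generated accessors form a small, uniform API: each set_*() assigns a value and flips the matching has_*() bit, clear_*() resets both, MergeFrom() copies only the fields the source has set, and Equals()/__eq__ compare presence bits before values. A short usage sketch, assuming this module and the URLCrawlStatusTag class it references are importable (the values are made up for illustration):

a = UrlHistoryTag_FetchData()
a.set_timestamp(1262304000)
a.set_docid(42)
a.mutable_urlcrawlstatus()    # lazily allocates the submessage and marks it present

b = UrlHistoryTag_FetchData()
b.MergeFrom(a)                # copies only the fields a has set
assert b == a
assert b.has_docid() and b.docid() == 42

b.clear_docid()
assert not b.has_docid() and b.docid() == 0   # cleared fields read back as the default

print(a)    # __str__ renders the ASCII debug form, e.g. "Timestamp: 1262304000"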