def test_summary(self):
    """
    :func:`otter.indexer.atom.summary` finds "compute.instance.update"
    as the summary of the first entry in the sample simple atom feed.
    """
    extracted = summary(self.simple_entry)
    self.assertEqual(extracted, "compute.instance.update")
def extract_clb_drained_at(feed):
    """
    Extract the time at which a node was changed to DRAINING from a CLB
    atom feed. Will return the node's creation time if the node was
    created with DRAINING. Returns None if it couldn't be found for any
    reason.

    :param list feed: ``list`` of atom entry :class:`Elements`
    :returns: drained_at EPOCH in seconds, or None
    :rtype: float
    """
    # Scan lazily for the first entry whose summary matches the DRAINING
    # pattern; entries after the first match are never inspected.
    draining_entries = (
        entry for entry in feed
        if _DRAINING_RE.match(atom.summary(entry)) is not None)
    match = next(draining_entries, None)
    if match is None:
        return None
    return timestamp_to_epoch(atom.updated(match))
def test_no_link(self, rel):
    """
    Returns entries collected till now if there is no rel link.
    """
    # A single-entry feed with no rel link at all.
    feedstr = (
        '<feed xmlns="http://www.w3.org/2005/Atom">'
        '<entry><summary>summary</summary></entry></feed>')
    intents = [
        (self.svc_intent({"a": "b"}), const(stub_json_response(feedstr)))
    ]
    eff = cf.read_entries(
        self.service_type, self.url, {"a": "b"}, self.directions[rel])
    entries, params = perform_sequence(intents, eff)
    self.assertEqual(atom.summary(entries[0]), "summary")
    self.assertEqual(params, {"a": "b"})
def extract_CLB_drained_at(feed):
    """
    Extract the time at which the node was changed to DRAINING from a CLB
    atom feed.

    :param str feed: Atom feed of the node
    :returns: EPOCH in seconds
    :rtype: float
    :raises ValueError: if the inspected entry's summary does not describe
        a successful update to DRAINING
    """
    # TODO: This function temporarily only looks at last entry assuming that
    # it was draining operation. May need to look at all entries in reverse
    # order and check for draining operation. This could include paging to
    # further entries
    # NOTE(review): index [0] is taken as the "last" operation here —
    # presumably the feed is newest-first; confirm against the CLB API.
    newest = atom.entries(atom.parse(feed))[0]
    text = atom.summary(newest)
    # Guard clause: anything other than a successful DRAINING update is
    # unexpected and surfaced to the caller.
    if 'Node successfully updated' not in text or 'DRAINING' not in text:
        raise ValueError('Unexpected summary: {}'.format(text))
    return timestamp_to_epoch(atom.updated(newest))
def test_single_page(self, rel):
    """
    Collects entries and goes to the next link if there are entries, and
    returns once the next page is empty.
    """
    page_one = self.feed(rel, "https://url?page=2", ["summary1", "summ2"])
    empty_page = self.feed(rel, "link", [])
    sequence = [
        (self.svc_intent({"a": "b"}),
         const(stub_json_response(page_one))),
        (self.svc_intent({"page": ['2']}),
         const(stub_json_response(empty_page)))
    ]
    entries, params = perform_sequence(
        sequence,
        cf.read_entries(
            self.service_type, self.url, {"a": "b"},
            self.directions[rel]))
    summaries = [atom.summary(entry) for entry in entries]
    self.assertEqual(summaries, ["summary1", "summ2"])
    self.assertEqual(params, {"page": ["2"]})
def test_follow_limit(self, rel):
    """
    Collects entries and keeps following the rel link until
    `follow_limit` is reached.
    """
    # Five pages exist, but only the first three may be fetched.
    feeds = [
        self.feed(rel, "https://url?page={}".format(page + 1),
                  ["summ{}".format(page + 1)])
        for page in range(5)]
    sequence = [
        (self.svc_intent(), const(stub_json_response(feeds[0]))),
        (self.svc_intent({"page": ['1']}),
         const(stub_json_response(feeds[1]))),
        (self.svc_intent({"page": ['2']}),
         const(stub_json_response(feeds[2]))),
    ]
    entries, params = perform_sequence(
        sequence,
        cf.read_entries(
            self.service_type, self.url, {}, self.directions[rel], 3))
    collected = [atom.summary(entry) for entry in entries]
    self.assertEqual(collected, ["summ1", "summ2", "summ3"])
    self.assertEqual(params, {"page": ["3"]})
def test_multiple_pages(self, rel):
    """
    Collects entries and goes to the next link if there are entries, and
    continues until the next link returns an empty list.
    """
    page_one = self.feed(rel, "https://url?page=2", ["summ1", "summ2"])
    page_two = self.feed(rel, "https://url?page=3", ["summ3", "summ4"])
    empty_page = self.feed(rel, "link", [])
    sequence = [
        (self.svc_intent(), const(stub_json_response(page_one))),
        (self.svc_intent({"page": ['2']}),
         const(stub_json_response(page_two))),
        (self.svc_intent({"page": ['3']}),
         const(stub_json_response(empty_page))),
    ]
    entries, params = perform_sequence(
        sequence,
        cf.read_entries(
            self.service_type, self.url, {}, self.directions[rel]))
    collected = [atom.summary(entry) for entry in entries]
    self.assertEqual(collected, ["summ1", "summ2", "summ3", "summ4"])
    self.assertEqual(params, {"page": ["3"]})