@defer.inlineCallbacks
def select_ids(connection, query, skip=0, limit=None,
               include_responses=False):
    temp, responses = yield _get_query_response(connection, query)
    total_count = len(temp)
    if limit is not None:
        stop = skip + limit
    else:
        stop = None
    name, direction = query.sorting
    index = first(v.entries for k, v in responses.iteritems()
                  if k.field == name)
    if direction == Direction.DESC:
        index = reversed(index)
    if query.aggregate:
        # we have to copy the collection, because _get_sorted_slice()
        # treats it as a buffer and modifies the content
        aggregate_index = set(temp)
    r = Result(_get_sorted_slice(index, temp, skip, stop))
    r.total_count = total_count

    # count reductions for aggregated fields based on the view index
    if query.aggregate:
        r.aggregations = list()
        for handler, field in query.aggregate:
            value_index = first(v for k, v in responses.iteritems()
                                if k.field == field)
            r.aggregations.append(
                handler(x for x in value_iterator(aggregate_index,
                                                  value_index)))
    if include_responses:
        defer.returnValue((r, responses))
    else:
        defer.returnValue(r)

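# All of these snippets lean on `first` from feat.common. For reference,
# a minimal sketch of the behaviour they rely on (the real helper may
# differ in detail): return the first element of an iterable, or None
# when the iterable is empty.
def first(iterable):
    for item in iterable:
        return item
    return None
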
def get_basic_queries(self):
    if not hasattr(self, "_cached_basic_queries"):
        temp = list()
        for part in self.parts:
            temp.extend(part.get_basic_queries())

        # if we want to sort by a field which is not available in
        # the query, we need to query for the full range of the index
        if self.sorting:
            sortby = self.sorting[0]
            if not first(x for x in temp if sortby == x.field):
                temp.append(Condition(sortby, Evaluator.none, None))

        # if we want the value of some field included in the result,
        # we need to make sure it's also fetched along with the query
        for part in self.include_value + [x[1] for x in self.aggregate]:
            included = first(x.field for x in temp if x.field == part)
            if not included:
                temp.append(Condition(part, Evaluator.none, None))

        # remove duplicates, preserving order
        self._cached_basic_queries = resp = list()
        while temp:
            x = temp.pop(0)
            if x not in resp:
                resp.append(x)

    return self._cached_basic_queries

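# The dedup loop above is an order-preserving uniquification. A standalone
# sketch of the same idea (the function name is illustrative):
def unique_in_order(items):
    seen = list()
    for item in items:
        if item not in seen:
            seen.append(item)
    return seen

assert unique_in_order([3, 1, 3, 2, 1]) == [3, 1, 2]
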
@defer.inlineCallbacks
def prolog(self):
    setup = text_helper.format_block("""
    agency = spawn_agency()
    host1 = agency.get_host_agent()
    agency = spawn_agency()
    host1.start_agent(descriptor_factory('test_exportable_agent'))
    wait_for_idle()
    agency = spawn_agency()
    agency = spawn_agency()
    host2 = agency.get_host_agent()
    host2.start_agent(descriptor_factory('export_agent'))
    wait_for_idle()
    host2.start_agent(descriptor_factory('migration_agent'))
    wait_for_idle()
    """)
    yield self.process(setup)
    self.export = first(
        self.driver.iter_agents('export_agent')).get_agent()
    self.migration = first(
        self.driver.iter_agents('migration_agent')).get_agent()
    self.assertEqual(1, self.count_agents('test_exportable_agent'))
    self.assertEqual(2, self.count_agents('test_child_agent'))
    self.host1 = self.get_local('host1')
    self.host2 = self.get_local('host2')

@defer.inlineCallbacks
def prolog(self):
    hostdef = host.HostDef(categories=dict(address=Address.fixed),
                           ports_ranges=dict(dns=(8000, 8000)))
    self.set_local('hostdef', hostdef)
    setup = text_helper.format_block("""
    a = spawn_agency()
    a.disable_protocol('setup-monitoring', 'Task')
    medium = a.start_agent(descriptor_factory('host_agent'), \
                           hostdef=hostdef)
    host = medium.get_agent()
    wait_for_idle()
    """)
    dns = text_helper.format_block("""
    host.start_agent(descriptor_factory('dns_agent'))
    """)
    api = text_helper.format_block("""
    host.start_agent(descriptor_factory('api_agent'))
    wait_for_idle()
    """)
    yield self.process(setup)
    yield self.process(dns)
    yield self.process(api)
    self.dns = first(self.driver.iter_agents('dns_agent')).get_agent()
    self.api = first(self.driver.iter_agents('api_agent')).get_agent()

@defer.inlineCallbacks
def prolog(self):
    setup = format_block("""
    agency = spawn_agency()
    agency = spawn_agency()
    """)
    yield self.process(setup)
    yield self.wait_for_idle(20)
    self.raage_medium = first(self.driver.iter_agents('raage_agent'))
    self.shard_medium = first(self.driver.iter_agents('shard_agent'))

@defer.inlineCallbacks
def prolog(self):
    setup1 = text_helper.format_block("""
    agency = spawn_agency()
    host1 = agency.get_host_agent()
    wait_for_idle()
    agency = spawn_agency()
    host1.start_agent(descriptor_factory('test_exportable_agent'))
    wait_for_idle()
    agency = spawn_agency()
    agency = spawn_agency()
    host2 = agency.get_host_agent()
    host2.start_agent(descriptor_factory('export_agent'))
    wait_for_idle()
    """)
    yield self.process(self.drivers[0], setup1)

    setup2 = text_helper.format_block("""
    agency = spawn_agency(hostdef=hostdef1)
    host = agency.get_host_agent()
    host.start_agent(descriptor_factory('alert_agent'))
    host.start_agent(descriptor_factory('migration_agent'))
    wait_for_idle()
    """)
    hd1 = host.HostDef()
    hd1.resources["epu"] = self.epu_in_new_cluster
    self.drivers[1].set_local("hostdef1", hd1)
    hd2 = host.HostDef()
    hd2.resources["epu"] = 500
    self.drivers[1].set_local("hostdef2", hd2)
    yield self.process(self.drivers[1], setup2)

    self.export = first(
        self.drivers[0].iter_agents("export_agent")).get_agent()
    self.migration = first(
        self.drivers[1].iter_agents("migration_agent")).get_agent()
    self.host1 = self.drivers[0].get_local("host1")
    self.host2 = self.drivers[0].get_local("host2")
    self.alert = first(
        self.drivers[1].iter_agents("alert_agent")).get_agent()

    recp = yield self.export.get_own_address()
    self.assertIsInstance(recp, recipient.Recipient)
    url = self.export.get_tunneling_url()
    yield self.migration.add_tunneling_route(recp, url)
    yield self.migration.handshake(recp)
    yield self.migration.set_current("testing_site")

@defer.inlineCallbacks
def values(connection, query, field, unique=True):
    if field not in query.fields:
        raise ValueError("%r doesn't have a %s field defined"
                         % (type(query), field))
    query.include_value.append(field)
    query.reset()  # ensures the field condition gets included
    temp, responses = yield _get_query_response(connection, query)
    index = first(v for k, v in responses.iteritems() if k.field == field)
    if not index.includes_values:
        raise ValueError(
            "The query controller of the %s field of the %s query is not "
            "marked to keep the value in the cache. You have to enable "
            "it to make query.values() work." % (field, query.name))
    if unique:
        resp = set()
        for x in temp:
            resp.add(index.get_value(x))
        defer.returnValue(list(resp))
    else:
        resp = list()
        for x in temp:
            resp.append(index.get_value(x))
        defer.returnValue(resp)

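# A hedged usage sketch (the function and field names are illustrative,
# not from the source): fetch the unique values of a cached field.
# Assumes the 'name' field's query controller keeps values in the cache.
@defer.inlineCallbacks
def list_unique_names(connection, query):
    names = yield values(connection, query, 'name', unique=True)
    defer.returnValue(sorted(names))
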
@defer.inlineCallbacks
def prolog(self):
    setup = format_block("""
    test_desc = descriptor_factory('contract-running-agent')
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent', shard='s1'), \
                       run_startup=False)
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent', shard='s1'), \
                       run_startup=False)
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent', shard='s1'), \
                       run_startup=False)
    agent = _.get_agent()
    agent.wait_for_ready()
    agent.start_agent(test_desc)
    """)
    yield self.process(setup)
    medium = first(self.driver.iter_agents('contract-running-agent'))
    self.agent = medium.get_agent()

def _topology_fixes(self, state, migration):
    """
    This is the part of the algorithm's logic which is not static.
    It combines information from the shard view and the migration to
    figure out whether the migration involves terminating shard(s).
    If so, it removes the steps for migrating structural agents
    (strategy "locally") from the migration plan.
    """
    kill_list = migration.get_kill_list()
    for shard, hosts in kill_list.iteritems():
        shard_view = first(x for x in state.shards if x.shard == shard)
        if shard_view is None:
            self.warning(
                "Shard %r has not been found in the shard view. "
                "This is really strange! Shard structure taken "
                "for analyzing:\n%s", shard, state.shards)
            continue
        shard_is_terminating = set(hosts) == set(shard_view.hosts)
        if shard_is_terminating:
            self.log("Detected that shard %r will be terminating, "
                     "removing local steps of the migration.", shard)
            migration = migration.remove_local_migrations(shard)
    return migration

@defer.inlineCallbacks
def testMigrateOutShard(self):
    exports = self.migration._get_exports()
    self.assertEqual(1, len(exports.entries))
    self.assertIn("testing_site", exports.entries)
    yield self.migration.set_current("testing_site")
    shards = yield self.migration.get_structure()
    # we will migrate the first shard (with only migratable agents)
    shard = recipient.IRecipient(self.host1).route
    to_migrate = first(x for x in shards if x.shard == shard)
    self.assertIsNot(None, to_migrate)

    migration = yield self.migration.prepare_shard_migration(to_migrate)
    self.assertTrue(migration.completable)
    self.assertFalse(migration.completed)
    show = yield self.migration.show_migration(migration.ident)
    self.assertIsInstance(show, str)

    # apply the first step manually just to check that it works
    yield self.migration.apply_migration_step(migration, 0)
    yield self.migration.apply_migration(migration)
    yield self.wait_for_idle(10)

    self.assertEqual(1, self.count_agents("test_exportable_agent"))
    self.assertEqual(2, self.count_agents("test_child_agent"))
    self.assertEqual(2, self.count_agents("host_agent"))
    self.assertEqual(1, self.count_agents("shard_agent"))
    self.assertEqual(1, self.count_agents("raage_agent"))
    self.assertEqual(1, self.count_agents("monitor_agent"))

@defer.inlineCallbacks
def iterate(self):
    if not self.query:
        self.info("Design document id: %s has no views to rebuild",
                  self.design_doc.doc_id)
        self.stop()
        return
    self.count += 1
    if self.count == 1:
        self.trigger_rebuild()
    else:
        from feat.common import first
        active_tasks = yield self.db.couchdb_call(
            self.db.couchdb.get, '/_active_tasks')
        relevant = first(
            x for x in active_tasks
            if (x.get('type') == 'indexer'
                and x.get('database') == self.db.db_name
                and x.get('design_document') == self.design_doc.doc_id))
        if relevant:
            self.info('The progress of updating %s is %s%%',
                      self.design_doc.doc_id, relevant.get('progress'))
            # if the updater is in progress, don't wait for the
            # view result
            if self.query_defer:
                self.debug('Not waiting for the result of the query.')
                self.query_defer.cancel()
        else:
            self.trigger_rebuild()

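# For context, an illustrative shape of a CouchDB /_active_tasks entry that
# the predicate above matches on (all field values here are made up):
sample_tasks = [{'type': 'indexer',
                 'database': 'feat',
                 'design_document': '_design/documents',
                 'progress': 42}]
relevant = first(x for x in sample_tasks
                 if (x.get('type') == 'indexer'
                     and x.get('database') == 'feat'
                     and x.get('design_document') == '_design/documents'))
assert relevant.get('progress') == 42
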
@defer.inlineCallbacks
def testKillHost(self):
    self.assertEqual(1, self.count_agents('host_agent'))
    self.assertEqual(1, self.count_agents('shard_agent'))
    self.assertEqual(1, self.count_agents('raage_agent'))
    self.assertEqual(1, self.count_agents('monitor_agent'))
    yield self.wait_for_idle(10)

    medium = self.get_local('medium')
    desc = medium.get_descriptor()

    @defer.inlineCallbacks
    def has_monitor():
        p = yield medium.agent.query_partners('monitors')
        defer.returnValue(len(p) > 0)

    yield self.wait_for(has_monitor, 10)
    yield medium.terminate_hard()
    self.assertEqual(0, self.count_agents('host_agent'))

    agency = self.get_local('agency')
    self.assertEqual(1, desc.instance_id)
    yield agency.start_agent(desc)
    yield self.wait_for_idle(10)

    new_desc = yield self.driver._database_connection.get_document(
        desc.doc_id)
    self.assertEqual(2, new_desc.instance_id)
    self.assertEqual(1, self.count_agents('shard_agent'))
    self.assertEqual(1, self.count_agents('raage_agent'))
    self.assertEqual(1, self.count_agents('monitor_agent'))

    monitor = first(self.driver.iter_agents('monitor_agent')).get_agent()
    hosts = yield monitor.query_partners('hosts')
    self.assertEqual(2, hosts[0].instance_id)

@defer.inlineCallbacks
def do_cleanup(connection, host_agent_id):
    '''
    Performs cleanup after a host agent that left its descriptor in the
    database. Deletes that descriptor and the descriptors of the partners
    it was hosting.
    '''
    desc = yield safe_get(connection, host_agent_id)
    if isinstance(desc, host.Descriptor):
        for partner in desc.partners:
            partner_desc = yield safe_get(connection, partner.recipient.key)
            if partner_desc:
                host_part = first(x for x in partner_desc.partners
                                  if x.role == 'host')
                if host_part is None:
                    log.warning('host_restart',
                                'Agent id: %s type: %s did not have any '
                                'host partner, so we are leaving it be.',
                                partner_desc.doc_id,
                                partner_desc.type_name)
                elif host_part.recipient.key == host_agent_id:
                    log.info('host_restart',
                             "Deleting document with ID: %s",
                             partner_desc.doc_id)
                    yield connection.delete_document(partner_desc)
                else:
                    log.warning('host_restart',
                                "Not deleting the descriptor of the agent "
                                "id: %s, agent_type: %s, as it seems to be "
                                "hosted by the host agent: %s. Keep in "
                                "mind, though, that it will not receive "
                                "the goodbye notification from us!",
                                partner_desc.doc_id,
                                partner_desc.type_name,
                                host_part.recipient.key)
        log.info('host_restart', "Deleting document with ID: %s",
                 desc.doc_id)
        yield connection.delete_document(desc)

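# `safe_get` above is assumed to fetch a document while swallowing
# "not found" errors. A minimal sketch of that behaviour (the exact
# exception type is an assumption, not taken from the source):
@defer.inlineCallbacks
def safe_get(connection, doc_id):
    try:
        doc = yield connection.get_document(doc_id)
        defer.returnValue(doc)
    except NotFoundError:
        defer.returnValue(None)
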
def _get_own_address(self):
    res = first(x.recipient for x in self._bindings
                if x.recipient.type == RecipientType.agent)
    if res is None:
        raise ValueError("We have been asked for our address, but so far "
                         "no personal binding has been created.")
    return res

def _get_contractor(self, *_):
    self.medium = first(x for x in self.agent._protocols.itervalues()
                        if isinstance(x, AgencyContractor))
    if self.medium is None:
        self.fail('Contractor not found')
    self.contractor = self.medium.get_agent_side()
    self.remote_id = self.medium.guid
    return self.contractor

def closed_handler(s, state):
    s.log('Contracts closed, terminating.')
    bidded = state.medium.contractors.with_state(ContractorState.bid)
    to_elect = first(x.bid for x in bidded if x.bid.payload['cost'] == 3)
    state.medium.elect(to_elect)
    state.medium.terminate()

def _select_reference_handler(self, model, reference):
    if reference is None:
        self._highlight_selected()
        return
    src = self._get_node(str(reference.owner_url))
    dst = self._get_node(str(reference.url))
    edge = first(x for x in self._xdot.graph.edges
                 if x.src == src and x.dst == dst)
    if edge:
        self._xdot.set_highlight([src, edge])

def include_values(docs, responses, query):
    # dict: field_name -> ParsedIndex
    lookup = dict(
        (field, first(v for k, v in responses.iteritems()
                      if k.field == field))
        for field in query.include_value)
    for doc in docs:
        for name, cache_entry in lookup.iteritems():
            setattr(doc, name, cache_entry.get_value(doc.doc_id))
    return docs

def find_agent(self, desc):
    '''find_agent(agent_id_or_descriptor) -> returns the medium class
    of the agent if the agency hosts it.'''
    agent_id = (desc.doc_id
                if isinstance(desc, descriptor.Descriptor)
                else desc)
    self.log("I'm trying to find the agent with id: %s", agent_id)
    result = first(x for x in self._agents
                   if x._descriptor.doc_id == agent_id)
    return defer.succeed(result)

@defer.inlineCallbacks
def prolog(self):
    setup = format_block("""
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'))
    wait_for_idle()
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'))
    wait_for_idle()
    """)
    yield self.process(setup)
    yield self.wait_for_idle(20)
    self.raage_medium = first(self.driver.iter_agents('raage_agent'))
    self.shard_medium = first(self.driver.iter_agents('shard_agent'))

def __init__(self, friend, **opts):
    if self.stages is None:
        raise NotImplementedError("stages attribute needs to be set")
    log.Logger.__init__(self, friend)
    initial_state = first(iter(self.stages))
    StateMachineMixin.__init__(self, initial_state)
    self.friend = friend
    self.opts = opts
    self._observer = observer.Observer(self._initiate)

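# For reference, how the initial stage is derived above: first(iter(...))
# simply takes the first element of whatever ordered collection `stages`
# holds, so the machine starts in its first declared stage. Illustrative
# values only:
stages = ('setup', 'migration', 'cleanup')
assert first(iter(stages)) == 'setup'
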
@defer.inlineCallbacks
def testStartStandaloneAgent(self):
    yield self.agency.initiate()
    yield self.wait_for_host_agent(20)
    host_a = self.agency.get_host_agent()
    yield host_a.wait_for_ready()
    self.info("Host agent is ready, starting standalone agent.")
    yield self.agency.spawn_agent("standalone")
    yield self.wait_for_standalone()
    part = host_a.query_partners_with_role('all', 'standalone')
    agent_ids = [host_a.get_own_address().key, part[0].recipient.key]

    # check that journaling works as it should
    yield self.assert_journal_contains(agent_ids)

    # now test the find_agent logic
    host = yield self.agency.find_agent(agent_ids[0])
    self.assertIsInstance(host, base_agency.AgencyAgent)
    stand = yield self.agency.find_agent(agent_ids[1])
    self.assertIsInstance(stand, broker.AgentReference)

    slave = first(x for x in self.agency._broker.slaves.itervalues()
                  if x.is_standalone)
    self.assertIsInstance(slave, broker.SlaveReference)
    host = yield slave.callRemote('find_agent', agent_ids[0])
    self.assertIsInstance(host, base_agency.AgencyAgent)
    stand = yield slave.callRemote('find_agent', agent_ids[1])
    self.assertIsInstance(stand, broker.AgentReference)
    not_found = yield slave.callRemote('find_agent', 'unknown id')
    self.assertIs(None, not_found)

    # asserts on the slaves registry
    self.assertEqual(2, len(self.agency._broker.slaves))
    self.assertEqual(1, len(slave.agents))
    self.assertEqual(agent_ids[1], slave.agents.keys()[0])

    # asserts on logs and journal entries in the journal database
    jour = self.agency._journaler._writer
    yield self.wait_for(jour.is_idle, 10)
    categories = yield jour.get_log_categories()
    self.assertTrue(set(['host_agent', 'standalone', 'agency']).issubset(
        set(categories)))
    log_names = yield jour.get_log_names('host_agent')
    self.assertEqual([agent_ids[0]], log_names)
    log_names = yield jour.get_log_names('standalone')
    self.assertEqual([agent_ids[1]], log_names)
    yield self.assert_has_logs('host_agent', agent_ids[0])
    yield self.assert_has_logs('standalone', agent_ids[1])

    self.info("Just before validating models.")
    yield self.validate_model_tree(self.agency)

@defer.inlineCallbacks
def testMigrateAgentWhileNotHavingResource(self):
    yield self.migration.set_current('testing_site')
    shards = yield self.migration.get_structure()
    # we will migrate the first shard (with only migratable agents)
    shard = recipient.IRecipient(self.host1).route
    to_migrate = first(x for x in shards if x.shard == shard)
    self.assertIsNot(None, to_migrate)
    migration = yield self.migration.prepare_shard_migration(to_migrate)

    # here we don't have enough epu in the new cluster, so the first
    # attempt to spawn the agent should fail. Cluster 1 should stabilize
    # with the exported agent terminated and the rest untouched. We
    # should also get an alert about the failure.
    d = self.migration.apply_migration(migration)

    def condition():
        return len(self.alert.get_alerts()) > 0

    # the migration retries 3 times to find an allocation before giving up
    yield self.wait_for(condition, 200)
    self.assertEqual(
        0, self.drivers[0].count_agents('test_exportable_agent'))
    self.assertEqual(2, self.drivers[0].count_agents('test_child_agent'))

    spawn_host = text_helper.format_block("""
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'), hostdef=hostdef2)
    """)
    yield self.process(self.drivers[1], spawn_host)
    self.migration.spawn_next_agent()
    yield d

    yield self.wait_for_idle(10)
    self.assertEqual(
        1, self.drivers[1].count_agents('test_exportable_agent'))
    self.assertEqual(2, self.drivers[1].count_agents('test_child_agent'))
    self.assertEqual(2, self.drivers[1].count_agents('host_agent'))
    self.assertEqual(1, self.drivers[1].count_agents('shard_agent'))
    self.assertEqual(1, self.drivers[1].count_agents('raage_agent'))
    self.assertEqual(1, self.drivers[1].count_agents('monitor_agent'))
    self.assertEqual(
        0, self.drivers[0].count_agents('test_exportable_agent'))
    self.assertEqual(0, self.drivers[0].count_agents('test_child_agent'))
    self.assertEqual(2, self.drivers[0].count_agents('host_agent'))
    self.assertEqual(1, self.drivers[0].count_agents('shard_agent'))
    self.assertEqual(1, self.drivers[0].count_agents('raage_agent'))
    self.assertEqual(1, self.drivers[0].count_agents('monitor_agent'))

@defer.inlineCallbacks
def prolog(self):
    setup = format_block("""
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    descriptor_factory('test-agent')
    agency.start_agent(_, host='127.0.0.1', port=%(port)s, \
                       exchange=%(exchange)s, \
                       exchange_type=%(type)s)
    """) % dict(port=1234, exchange="'exchange'", type="'direct'")
    yield self.process(setup)
    self.agent = first(self.driver.iter_agents('test-agent')).get_agent()

@defer.inlineCallbacks
def prolog(self):
    setup = text_helper.format_block("""
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'))
    host1 = _.get_agent()
    wait_for_idle()
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'))
    wait_for_idle()
    host1.start_agent(descriptor_factory('test_exportable_agent'))
    wait_for_idle()
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'))
    wait_for_idle()
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'))
    host2 = _.get_agent()
    host2.start_agent(descriptor_factory('export_agent'))
    wait_for_idle()
    host2.start_agent(descriptor_factory('migration_agent'))
    wait_for_idle()
    """)
    yield self.process(setup)
    self.export = first(
        self.driver.iter_agents('export_agent')).get_agent()
    self.migration = first(
        self.driver.iter_agents('migration_agent')).get_agent()
    self.assertEqual(1, self.count_agents('test_exportable_agent'))
    self.assertEqual(2, self.count_agents('test_child_agent'))
    self.host1 = self.get_local('host1')
    self.host2 = self.get_local('host2')

@defer.inlineCallbacks
def prolog(self):
    setup = format_block("""
    spawn_agency(start_host=False, \
                 'feat.agents.base.amqp.interface.IAMQPClientFactory')
    agency = _
    descriptor_factory('test-agent')
    agency.start_agent(_, host='127.0.0.1', port=%(port)s, \
                       exchange=%(exchange)s, \
                       exchange_type=%(type)s)
    """) % dict(port=self.rabbit.get_config()['port'],
                exchange="'exchange'",
                type="'direct'")
    yield self.process(setup)
    self.agent = first(self.driver.iter_agents('test-agent'))

@defer.inlineCallbacks
def testStoringEntries(self):
    l = self._generate_log
    e = self._generate_entry
    yield self.journalers[1].insert_entries([
        l(message="some cool msg"),
        e(agent_id='standalone_agent')])
    histories = yield self.sql_writer.get_histories()
    self.assertEqual(1, len(histories))
    self.assertEqual('standalone_agent', histories[0].agent_id)
    logs = yield self.sql_writer.get_log_entries()
    self.assertTrue(
        first(x for x in logs if x['message'] == 'some cool msg'))

@defer.inlineCallbacks
def testArmagedon(self):
    '''
    In this test we kill the host running the Raage and Shard agents,
    then assert that they were recreated.
    '''
    d1 = self.raage_medium._terminate()
    d2 = self.shard_medium._terminate()
    d3 = first(self.driver.iter_agents('host_agent'))._terminate()
    yield defer.DeferredList([d1, d2, d3])
    yield self.wait_for_idle(30)
    self.assertEqual(1, self.count_agents('raage_agent'))
    self.assertEqual(1, self.count_agents('shard_agent'))
    self.assertEqual(1, self.count_agents('host_agent'))

@defer.inlineCallbacks
def prolog(self):
    setup = format_block("""
    test_desc = descriptor_factory('contract-running-agent')
    agency = spawn_agency()
    agency = spawn_agency()
    agency = spawn_agency()
    agent = agency.get_host_agent()
    agent.wait_for_ready()
    agent.start_agent(test_desc)
    """)
    yield self.process(setup)
    medium = first(self.driver.iter_agents('contract-running-agent'))
    self.agent = medium.get_agent()

@defer.inlineCallbacks
def prolog(self):
    setup = format_block("""
    # Host 1 will run the Raage, Host, Shard and Requesting agents
    load('feat.test.integration.resource')
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    host_desc = descriptor_factory('host_agent')
    req_desc = descriptor_factory('requesting_agent')
    agency.start_agent(host_desc, hostdef=hostdef1)
    host = _.get_agent()
    wait_for_idle()
    host.start_agent(req_desc)

    # Host 2 runs only a host agent
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'), hostdef=hostdef1)
    wait_for_idle()

    # Host 3 will run the Shard, Host and Raage agents
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'), hostdef=hostdef2)
    wait_for_idle()

    # Host 4 will run only a host agent
    agency = spawn_agency()
    agency.disable_protocol('setup-monitoring', 'Task')
    agency.start_agent(descriptor_factory('host_agent'), hostdef=hostdef2)
    """)

    # host definition in the first shard (no space to allocate)
    hostdef1 = host.HostDef(resources=dict(host=0, epu=10, local=1))
    self.set_local("hostdef1", hostdef1)
    # host definition in the second shard (space for one host allocation)
    hostdef2 = host.HostDef(resources=dict(host=1, epu=10))
    self.set_local("hostdef2", hostdef2)

    yield self.process(setup)
    yield self.wait_for_idle(20)
    raage_mediums = self.driver.iter_agents('raage_agent')
    self.raage_agents = [x.get_agent() for x in raage_mediums]
    host_mediums = self.driver.iter_agents('host_agent')
    self.host_agents = [x.get_agent() for x in host_mediums]
    self.req_agent = first(
        self.driver.iter_agents('requesting_agent')).get_agent()

def get_item(self, job_id):
    return first(x for x in self.items if x[0] == job_id)
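
# A hedged usage sketch (the class name and item tuples are illustrative,
# not from the source): `first` yields None when nothing matches, so
# callers of get_item must handle the missing case.
class JobQueue(object):

    def __init__(self):
        self.items = [('job-1', 'pending'), ('job-2', 'done')]

    def get_item(self, job_id):
        return first(x for x in self.items if x[0] == job_id)

queue = JobQueue()
assert queue.get_item('job-2') == ('job-2', 'done')
assert queue.get_item('missing') is None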