def setup_resources():
    """Create node2 plus its solar bootstrap resource, wire and validate them.

    Exits the process with status 1 if any created resource fails validation.
    """
    ModelMeta.remove_all()
    node2 = vr.create(
        'node2', 'resources/ro_node/',
        {'ip': '10.0.0.4',
         'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key',
         'ssh_user': '******'})[0]
    solar_bootstrap2 = vr.create(
        'solar_bootstrap2', 'resources/solar_bootstrap',
        {'master_ip': '10.0.0.2'})[0]
    signals.connect(node2, solar_bootstrap2)

    # Validate every Resource bound to a local name above.
    has_errors = False
    for r in locals().values():
        if not isinstance(r, resource.Resource):
            continue
        print('Validating {}'.format(r.name))
        errors = validation.validate_resource(r)
        if errors:
            has_errors = True
            print('ERROR: %s: %s' % (r.name, errors))
    if has_errors:
        sys.exit(1)
def test_update_action_after_commit():
    """Updating an operational resource must stage an 'update' log entry."""
    res = resource.load(create_resource('1').name)
    res.set_operational()
    res.update({'a': 10})
    ModelMeta.save_all_lazy()
    entries = change.staged_log()
    assert entries[0].action == 'update'
def setup_resources():
    """Build the ro_node/solar_bootstrap pair and abort on validation errors."""
    ModelMeta.remove_all()
    node_args = {
        'ip': '10.0.0.4',
        'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key',
        'ssh_user': '******',
    }
    node2 = vr.create('node2', 'resources/ro_node/', node_args)[0]
    solar_bootstrap2 = vr.create('solar_bootstrap2',
                                 'resources/solar_bootstrap',
                                 {'master_ip': '10.0.0.2'})[0]
    signals.connect(node2, solar_bootstrap2)

    has_errors = False
    # Only Resource instances among the locals are validated.
    for r in locals().values():
        if not isinstance(r, resource.Resource):
            continue
        print('Validating {}'.format(r.name))
        errors = validation.validate_resource(r)
        if errors:
            has_errors = True
            print('ERROR: %s: %s' % (r.name, errors))
    if has_errors:
        sys.exit(1)
def _end_start_session(cls, uid, identity):
    """Restart the DB session at locking hooks.

    Concurrent sessions see isolated versions of the data, so the session
    must be closed and re-opened at certain points of the locking logic to
    pick up commits made by other sessions.

    ``uid`` and ``identity`` are not used here; presumably the signature
    matches the hook interface this callback is registered with — TODO
    confirm against the caller.
    """
    ModelMeta.session_end()
    ModelMeta.session_start()
def test_discard_all_pending_changes_resources_created():
    """discard_all on freshly created resources drops the log and the data."""
    for name, value in (('test1', '9'), ('test2', '0')):
        obj = DBResource.from_dict(name, {
            'name': name,
            'base_path': 'x',
            'state': RESOURCE_STATE.created.name,
            'meta_inputs': {'a': {'value': None, 'schema': 'str'}},
        })
        obj.inputs['a'] = value
        obj.save_lazy()
    ModelMeta.save_all_lazy()

    assert len(change.stage_changes()) == 2
    change.discard_all()
    assert len(change.stage_changes()) == 0
    assert resource.load_all() == []
def test_revert_create():
    """Reverting a committed creation stages a removal that empties the DB."""
    res = DBResource.from_dict('test1',
                               {'name': 'test1',
                                'base_path': 'x',
                                'state': RESOURCE_STATE.created.name,
                                'meta_inputs': {'a': {'value': None,
                                                      'schema': 'str'}}})
    res.inputs['a'] = '9'
    res.save_lazy()
    ModelMeta.save_all_lazy()

    pending = change.stage_changes()
    assert len(pending) == 1
    entry = pending[0]
    operations.move_to_commited(entry.log_action)
    assert entry.diff == [['add', '', [['a', '9']]]]
    assert CommitedResource.get('test1').inputs == {'a': '9'}

    change.revert(entry.uid)
    pending = change.stage_changes()
    assert len(pending) == 1
    for entry in pending:
        operations.move_to_commited(entry.log_action)
    assert resource.load_all() == []
def create_plan_from_graph(dg):
    """Persist *dg* under a fresh unique uid and return the stored graph."""
    uid = "{0}:{1}".format(dg.graph['name'], str(uuid.uuid4()))
    dg.graph['uid'] = uid
    # FIXME change save_graph api to return new graph with Task objects
    # included
    save_graph(dg)
    ModelMeta.save_all_lazy()
    return get_graph(uid)
def wait_finish(uid, timeout):
    """Yield task-status summaries for graph *uid* until it settles.

    Returns (stops iteration) once no PENDING or INPROGRESS tasks remain;
    otherwise keeps yielding a Counter of state name -> task count.  Raises
    ExecutionTimeout if *timeout* seconds elapse first.
    """
    start_time = time.time()
    while start_time + timeout >= time.time():
        dg = get_graph(uid)
        # Pre-seed every known state with 0 so consumers may index any state
        # even when no task is currently in it.
        summary = Counter()
        summary.update({s.name: 0 for s in states})
        summary.update([task.status for task in dg.nodes()])
        yield summary
        if summary[states.PENDING.name] + summary[states.INPROGRESS.name] == 0:
            return
        else:
            # on db backends with snapshot isolation level and higher
            # updates wont be visible after start of transaction,
            # in order to report state correctly we will "refresh" transaction
            ModelMeta.session_end()
            ModelMeta.session_start()
    else:
        # while-else: reached only when the deadline passed without the
        # graph reaching a settled state.
        raise errors.ExecutionTimeout(
            'Run %s wasnt able to finish' % uid)
def test_concurrent_sequences_with_no_handler(scale, clients):
    """All staged resources must complete successfully once scheduled."""
    total_resources = scale * 3
    timeout = scale * 2
    scheduler = clients['scheduler']

    assert len(change.staged_log()) == total_resources
    ModelMeta.save_all_lazy()
    plan = change.send_to_orchestration()
    ModelMeta.save_all_lazy()
    scheduler.next({}, plan.graph['uid'])

    def wait_function(timeout):
        try:
            for summary in wait_finish(plan.graph['uid'], timeout):
                assert summary[states.ERROR.name] == 0
                time.sleep(0.5)
        except ExecutionTimeout:
            pass
        # relies on the loop variable surviving the for loop
        return summary

    worker = gevent.spawn(wait_function, timeout)
    worker.join(timeout=timeout)
    res = worker.get(block=True)
    assert res[states.SUCCESS.name] == total_resources
    assert len(data.CL()) == total_resources
    clear_cache()
    assert len(change.staged_log()) == 0
def tagged_resources():
    """Create t1-t3 with shared base tags plus t4 tagged only node=t3.

    Returns the first three resources; t4 exists only as a tag decoy.
    """
    base_tags = ['n1=x', 'n2']
    created = []
    for name in ('t1', 't2', 't3'):
        obj = Resource.from_dict(name, {
            'name': name,
            'tags': base_tags + ['node=%s' % name],
            'base_path': 'x',
        })
        obj.save_lazy()
        created.append(obj)
    t4 = Resource.from_dict('t4', {
        'name': 't4',
        'tags': ['node=t3'],
        'base_path': 'x',
    })
    t4.save_lazy()
    ModelMeta.save_all_lazy()
    return created
def test_discard_connection():
    """Discarding a staged connection restores the child's own value."""
    for name, value in (('test1', '9'), ('test2', '0')):
        obj = DBResource.from_dict(name, {
            'name': name,
            'base_path': 'x',
            'state': RESOURCE_STATE.created.name,
            'meta_inputs': {'a': {'value': None, 'schema': 'str'}},
        })
        obj.inputs['a'] = value
        obj.save_lazy()
    ModelMeta.save_all_lazy()
    for entry in change.stage_changes():
        operations.move_to_commited(entry.log_action)

    res1 = resource.load('test1')
    res2 = resource.load('test2')
    res1.connect(res2, {'a': 'a'})
    staged = change.stage_changes()
    assert len(staged) == 1
    assert res2.args == {'a': '9'}
    change.discard_all()
    assert res2.args == {'a': '0'}
    assert len(change.stage_changes()) == 0
def test_revert_create():
    """Revert of a committed create ends with an empty resource set."""
    spec = {
        "name": "test1",
        "base_path": "x",
        "state": RESOURCE_STATE.created.name,
        "meta_inputs": {"a": {"value": None, "schema": "str"}},
    }
    res = DBResource.from_dict("test1", spec)
    res.inputs["a"] = "9"
    res.save_lazy()
    ModelMeta.save_all_lazy()

    pending = change.stage_changes()
    assert len(pending) == 1
    entry = pending[0]
    operations.move_to_commited(entry.log_action)
    assert entry.diff == [["add", "", [["a", "9"]]]]
    assert CommitedResource.get("test1").inputs == {"a": "9"}

    change.revert(entry.uid)
    pending = change.stage_changes()
    assert len(pending) == 1
    for entry in pending:
        operations.move_to_commited(entry.log_action)
    assert resource.load_all() == []
def test_concurrent_sequences_with_no_handler(scale, clients):
    """Scheduled plan must run every staged resource to SUCCESS."""
    total_resources = scale * 3
    timeout = scale * 2
    scheduler = clients['scheduler']

    assert len(change.staged_log()) == total_resources
    # refresh the transaction before building the plan
    ModelMeta.session_end()
    plan = change.send_to_orchestration()
    scheduler.next({}, plan.graph['uid'])

    def wait_function(timeout):
        try:
            for summary in wait_finish(plan.graph['uid'], timeout):
                assert summary[states.ERROR.name] == 0
                time.sleep(0.5)
        except ExecutionTimeout:
            pass
        # relies on the loop variable surviving the for loop
        return summary

    worker = gevent.spawn(wait_function, timeout)
    worker.join(timeout=timeout)
    res = worker.get(block=True)
    assert res[states.SUCCESS.name] == total_resources
    assert len(data.CL()) == total_resources
    clear_cache()
    assert len(change.staged_log()) == 0
def test_discard_all_pending_changes_resources_created():
    """discard_all removes both the staged log and the created resources."""
    def make(name, value):
        obj = DBResource.from_dict(name, {
            "name": name,
            "base_path": "x",
            "state": RESOURCE_STATE.created.name,
            "meta_inputs": {"a": {"value": None, "schema": "str"}},
        })
        obj.inputs["a"] = value
        obj.save_lazy()

    make("test1", "9")
    make("test2", "0")
    ModelMeta.save_all_lazy()

    assert len(change.stage_changes()) == 2
    change.discard_all()
    assert len(change.stage_changes()) == 0
    assert resource.load_all() == []
def test_discard_removed():
    """Discarding a pending removal keeps the committed resource alive."""
    res = DBResource.from_dict("test1", {
        "name": "test1",
        "base_path": "x",
        "state": RESOURCE_STATE.created.name,
        "meta_inputs": {"a": {"value": None, "schema": "str"}},
    })
    res.inputs["a"] = "9"
    res.save_lazy()
    ModelMeta.save_all_lazy()
    for entry in change.stage_changes():
        operations.move_to_commited(entry.log_action)

    loaded = resource.load("test1")
    loaded.remove()
    assert len(change.stage_changes()) == 1
    assert loaded.to_be_removed()
    change.discard_all()
    assert len(change.stage_changes()) == 0
    assert not resource.load("test1").to_be_removed()
def run():
    """Create two nodes and two hosts files, connecting every node to both."""
    ModelMeta.remove_all()
    resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 2})
    node1, node2 = [x for x in resources if x.name.startswith('node')]
    hosts1, hosts2 = [x for x in resources if x.name.startswith('hosts_file')]
    # Same connection order as spelling each pair out by hand.
    for hosts in (hosts1, hosts2):
        for node in (node1, node2):
            node.connect(hosts, {
                'name': 'hosts:name',
                'ip': 'hosts:ip',
            })
def set_states(uid, tasks):
    """Force every task in *tasks* of plan *uid* into the NOOP state.

    Raises Exception if any named task is missing from the plan; the
    persisted status changes are flushed at the end.
    """
    plan = get_graph(uid)
    for t in tasks:
        if t not in plan.node:
            # Bug fix: the original passed logging-style extra args to
            # Exception ("No task %s in plan %s", t, uid), so the message
            # was never interpolated.  Format it explicitly.
            raise Exception('No task %s in plan %s' % (t, uid))
        plan.node[t]['task'].status = states.NOOP.name
        plan.node[t]['task'].save_lazy()
    ModelMeta.save_all_lazy()
def run():
    """Wire a node with ssh and solar-agent transports plus a hosts file."""
    ModelMeta.remove_all()
    node = vr.create('node', 'resources/ro_node', {
        'name': 'first' + str(time.time()),
        'ip': '10.0.0.3',
        'node_id': 'node1',
    })[0]

    transports = vr.create('transports_node1', 'resources/transports')[0]
    agent_transports = vr.create('transports_for_solar_agent',
                                 'resources/transports')[0]
    ssh_transport = vr.create('ssh_transport', 'resources/transport_ssh', {
        'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key',
        'ssh_user': '******',
    })[0]
    solar_agent_transport = vr.create(
        'solar_agent_transport', 'resources/transport_solar_agent', {
            'solar_agent_user': '******',
            'solar_agent_password': '******',
        })[0]

    agent_transports.connect(solar_agent_transport, {})
    ssh_transport.connect(agent_transports, {
        'ssh_key': 'transports:key',
        'ssh_user': '******',
        'ssh_port': 'transports:port',
        'name': 'transports:name',
    })
    # set transports_id
    transports.connect(node, {})
    # it uses reverse mappings
    ssh_transport.connect(transports, {
        'ssh_key': 'transports:key',
        'ssh_user': '******',
        'ssh_port': 'transports:port',
        'name': 'transports:name',
    })
    solar_agent_transport.connect(transports, {
        'solar_agent_user': '******',
        'solar_agent_port': 'transports:port',
        'solar_agent_password': '******',
        'name': 'transports:name',
    })

    hosts = vr.create('hosts_file', 'resources/hosts_file', {})[0]
    node.connect(hosts, {'ip': 'hosts:ip', 'name': 'hosts:name'})
def test_childs_added_on_stage():
    """Staging a parent's run also stages its connected child with 'run'."""
    parent, child = [create_resource(str(n)) for n in range(2)]
    parent.connect(child, {'a': 'a'})
    ModelMeta.save_all_lazy()

    created = stage_resources(parent.name, 'run')
    assert len(created) == 1
    assert created[0].resource == parent.name

    staged = change.staged_log()
    assert len(staged) == 2
    child_entry = next(e for e in staged if e.resource == child.name)
    assert child_entry.action == 'run'
def undeploy():
    """Remove the rabbitmq stack resources, wiping the DB before and after."""
    ModelMeta.remove_all()
    by_name = {r.name: r for r in resource.load_all()}
    # Tear down in reverse dependency order.
    for name in ('openstack_rabbitmq_user', 'openstack_vhost',
                 'rabbitmq_service1'):
        actions.resource_action(by_name[name], 'remove')
    ModelMeta.remove_all()
def undeploy():
    """Run 'remove' on every resource in reverse run order, best effort."""
    by_name = {r.name: r for r in resource.load_all()}
    for name in reversed(resources_to_run):
        try:
            actions.resource_action(by_name[name], 'remove')
        except errors.SolarError as e:
            # best-effort teardown: report and continue
            print('WARNING: %s' % str(e))
    ModelMeta.remove_all()
def test_discard_update():
    """discard_single on an update restores the committed input value."""
    res = create_resource('test1')
    res.db_obj.inputs['a'] = '9'
    operations.commit_log_item(change.create_run(res))

    res.update({'a': '11'})
    ModelMeta.save_all_lazy()
    assert len(change.staged_log()) == 1
    assert res.args == {'a': '11'}
    change.discard_single(change.staged_log()[0])
    assert res.args == {'a': '9'}
def tagged_resources():
    """Create three resources sharing the tags n1/n2/n3 and return them."""
    shared_tags = ['n1', 'n2', 'n3']
    created = []
    for name in ('t1', 't2', 't3'):
        obj = Resource.from_dict(name, {'name': name,
                                        'tags': shared_tags,
                                        'base_path': 'x'})
        obj.save_lazy()
        created.append(obj)
    ModelMeta.save_all_lazy()
    return created
def test_discard_update():
    """discard_all after an update rolls the input back to its old value."""
    res = create_resource('test1')
    res.inputs['a'] = '9'
    res.save_lazy()
    operations.commit_log_item(change.create_run(res.name))

    res = resource.load('test1')
    res.update({'a': '11'})
    ModelMeta.save_all_lazy()
    assert len(change.staged_log()) == 1
    assert res.args == {'a': '11'}
    change.discard_all()
    assert res.args == {'a': '9'}
def create_all():
    """Open a DB session for standalone python runs, closing it at exit.

    Bug fix: ``sys.executable`` is normally an absolute path (e.g.
    ``/usr/bin/python``), so ``startswith(('python',))`` almost never
    matched — and the guard returned early exactly when the interpreter
    *was* python, inverting the comment's intent.  Compare the executable's
    basename instead, matching the corrected variant of this helper.
    """
    import sys
    import os
    if os.path.basename(sys.executable) not in ('python', 'python2'):
        # auto add session to only standalone python runs
        return
    from solar.dblayer.model import ModelMeta
    import atexit
    ModelMeta.session_start()
    atexit.register(ModelMeta.session_end)
def create_all():
    """Start a DB session for standalone python runs; close it at exit."""
    import sys
    interpreter = sys.executable.split('/')[-1]
    if interpreter not in ('python', 'python2'):
        # auto add session to only standalone python runs
        return
    import atexit
    from solar.dblayer.model import ModelMeta
    ModelMeta.session_start()
    atexit.register(ModelMeta.session_end)
def test_discard_removed():
    """Discarding a staged removal must leave the resource in place."""
    res = create_resource('test1')
    res.inputs['a'] = '9'
    res.save_lazy()

    res = resource.load('test1')
    res.remove()
    ModelMeta.save_all_lazy()
    assert len(change.staged_log()) == 1
    assert res.to_be_removed()
    change.discard_all()
    assert len(change.staged_log()) == 0
    assert not resource.load('test1').to_be_removed()
def test_discard_removed():
    """discard_all must undo a removal staged via the low-level db_obj."""
    created = create_resource('test1')
    created.db_obj.inputs['a'] = '9'
    created.db_obj.save_lazy()

    loaded = resource.load('test1')
    loaded.remove()
    ModelMeta.save_all_lazy()
    assert len(change.staged_log()) == 1
    assert loaded.to_be_removed()
    change.discard_all()
    assert len(change.staged_log()) == 0
    assert not resource.load('test1').to_be_removed()
def test_only_relevant_child_updated():
    """Changing one input must restage only the child wired to that input."""
    res1, res2, res3 = [create_resource(name, inputs={'a': '', 'b': ''})
                        for name in ('t1', 't2', 't3')]
    res1.update({'a': '9', 'b': '10'})
    res1.connect(res2, {'a': 'a'})
    res1.connect(res3, {'b': 'b'})
    ModelMeta.save_all_lazy()
    # currently childs added as a side effect of staged log, thus we need
    # to run it before commiting changes
    staged = {entry.resource for entry in change.staged_log()}
    assert staged == {'t1', 't2', 't3'}
    change.commit_all()

    res1.update({'a': '12'})
    ModelMeta.save_all_lazy()
    # t3 not updated because "a" connected only to t2
    staged = {entry.resource for entry in change.staged_log()}
    assert staged == {'t1', 't2'}
def test_revert_update_connected():
    """Reverting runs after a disconnect restores the propagated value."""
    for name, value in (('test1', '9'), ('test2', ''), ('test3', '')):
        obj = create_resource(name)
        obj.inputs['a'] = value
        obj.save_lazy()
    res1 = resource.load('test1')
    res2 = resource.load('test2')
    res3 = resource.load('test3')
    res1.connect(res2)
    res2.connect(res3)
    ModelMeta.save_all_lazy()

    staged = [change.create_run(r.name) for r in (res1, res2, res3)]
    assert len(staged) == 3
    for entry in staged:
        assert entry.action == 'run'
        operations.commit_log_item(entry)

    res1.disconnect(res2)
    to_revert = []
    for entry in [change.create_run(r.name) for r in (res2, res3)]:
        assert entry.action == 'run'
        to_revert.append(entry.uid)
        operations.commit_log_item(entry)

    change.revert_uids(sorted(to_revert, reverse=True))
    ModelMeta.save_all_lazy()
    for entry in [change.create_run(r.name) for r in (res2, res3)]:
        assert entry.diff == [['change', 'a', ['', '9']]]
def setup_riak():
    """Build a riak cluster from the nodes template and wire its events.

    Exits with status 1 when any created resource fails validation.
    """
    ModelMeta.remove_all()
    nodes = template.nodes_from('templates/riak_nodes.yaml')
    # One riak_node per template node; '{num}' placeholders are filled
    # per-node by on_each.
    riak_services = nodes.on_each(
        'resources/riak_node',
        args={
            'riak_self_name': 'riak{num}',
            'riak_hostname': 'riak_server{num}.solar',
            'riak_name': 'riak{num}@riak_server{num}.solar',
        }
    )
    # Every node except the first joins the first node's riak.
    slave_riak_services = riak_services.tail()
    riak_services.take(0).connect_list(
        slave_riak_services,
        mapping={
            'riak_name': 'join_to',
        }
    )
    hosts_files = nodes.on_each('resources/hosts_file')
    # Feed each node's ip/hostname into every hosts file; events=False so
    # this data wiring adds no run-ordering edges by itself.
    riak_services.connect_list_to_each(
        hosts_files,
        mapping={
            'ip': 'hosts:ip',
            'riak_hostname': 'hosts:name',
        },
        events=False
    )
    errors = resource.validate_resources()
    for r, error in errors:
        click.echo('ERROR: %s: %s' % (r.name, error))
    if errors:
        click.echo("ERRORS")
        sys.exit(1)
    # Event wiring: hosts files must run before riak; slaves re-join after
    # any slave run/leave; the first node commits cluster changes.
    hosts_files.add_deps('run/success', riak_services, 'run')
    slave_riak_services.add_reacts('run/success', slave_riak_services, 'join')
    slave_riak_services.add_reacts('leave/success', slave_riak_services, 'join')
    slave_riak_services.add_react('run/success', riak_services.take(0), 'commit')
def test_revert_removal():
    # Create a resource plus a matching committed record so there is a
    # removal diff to commit and later revert.
    res = DBResource.from_dict('test1',
                               {'name': 'test1',
                                'base_path': 'x',
                                'state': RESOURCE_STATE.created.name,
                                'meta_inputs': {'a': {'value': None,
                                                      'schema': 'str'}}})
    res.inputs['a'] = '9'
    res.save_lazy()
    commited = CommitedResource.from_dict('test1',
                                          {'inputs': {'a': '9'},
                                           'state': 'operational'})
    commited.save_lazy()
    resource_obj = resource.load(res.name)
    resource_obj.remove()
    ModelMeta.save_all_lazy()

    changes = change.stage_changes()
    assert len(changes) == 1
    assert changes[0].diff == [['remove', '', [['a', '9']]]]
    operations.move_to_commited(changes[0].log_action)
    # Drop the in-memory cache so subsequent loads hit the DB.
    clear_cache()
    assert DBResource._c.obj_cache == {}
    # assert DBResource.bucket.get('test1').siblings == []

    # revert() re-reads resource metadata from the repository; stub both
    # lookups so no repository files are needed on disk.
    with mock.patch.object(repository.Repository, 'read_meta') as mread:
        mread.return_value = {
            'input': {'a': {'schema': 'str!'}},
            'id': 'mocked'
        }
        with mock.patch.object(repository.Repository, 'get_path') as mpath:
            mpath.return_value = 'x'
            change.revert(changes[0].uid)
    ModelMeta.save_all_lazy()
    # assert len(DBResource.bucket.get('test1').siblings) == 1

    # The reverted resource must come back with its input restored.
    resource_obj = resource.load('test1')
    assert resource_obj.args == {
        'a': '9',
        'location_id': '',
        'transports_id': ''
    }
def setup_riak():
    """Create riak cluster resources and wire run/join/commit events."""
    ModelMeta.remove_all()
    nodes = template.nodes_from('templates/riak_nodes.yaml')
    riak_args = {
        'riak_self_name': 'riak{num}',
        'riak_hostname': 'riak_server{num}.solar',
        'riak_name': 'riak{num}@riak_server{num}.solar',
    }
    riak_services = nodes.on_each('resources/riak_node', args=riak_args)
    slaves = riak_services.tail()
    head = riak_services.take(0)
    head.connect_list(slaves, mapping={'riak_name': 'join_to'})
    hosts_files = nodes.on_each('resources/hosts_file')
    riak_services.connect_list_to_each(
        hosts_files,
        mapping={'ip': 'hosts:ip', 'riak_hostname': 'hosts:name'},
        events=False)

    errors = resource.validate_resources()
    for r, error in errors:
        click.echo('ERROR: %s: %s' % (r.name, error))
    if errors:
        click.echo("ERRORS")
        sys.exit(1)

    hosts_files.add_deps('run/success', riak_services, 'run')
    slaves.add_reacts('run/success', slaves, 'join')
    slaves.add_reacts('leave/success', slaves, 'join')
    slaves.add_react('run/success', head, 'commit')
def run():
    """Create a node wired to ssh/solar-agent transports and a hosts file."""
    ModelMeta.remove_all()
    node = cr.create('node', 'resources/ro_node', {
        'name': 'first' + str(time.time()),
        'ip': '10.0.0.3',
        'node_id': 'node1',
    })[0]

    transports = cr.create('transports_node1', 'resources/transports')[0]
    transports_for_solar_agent = cr.create('transports_for_solar_agent',
                                           'resources/transports')[0]
    ssh_transport = cr.create('ssh_transport', 'resources/transport_ssh', {
        'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key',
        'ssh_user': '******',
    })[0]
    solar_agent_transport = cr.create(
        'solar_agent_transport', 'resources/transport_solar_agent', {
            'solar_agent_user': '******',
            'solar_agent_password': '******',
        })[0]

    transports_for_solar_agent.connect(solar_agent_transport, {})
    ssh_transport.connect(transports_for_solar_agent, {
        'ssh_key': 'transports:key',
        'ssh_user': '******',
        'ssh_port': 'transports:port',
        'name': 'transports:name',
    })
    # set transports_id
    transports.connect(node, {})
    # it uses reverse mappings
    ssh_transport.connect(transports, {
        'ssh_key': 'transports:key',
        'ssh_user': '******',
        'ssh_port': 'transports:port',
        'name': 'transports:name',
    })
    solar_agent_transport.connect(transports, {
        'solar_agent_user': '******',
        'solar_agent_port': 'transports:port',
        'solar_agent_password': '******',
        'name': 'transports:name',
    })

    hosts = cr.create('hosts_file', 'resources/hosts_file', {})[0]
    node.connect(hosts, {
        'ip': 'hosts:ip',
        'name': 'hosts:name',
    })
def test_revert_create():
    """Revert of a committed create stages a removal that empties the DB."""
    res = create_resource('test1')
    res.db_obj.inputs['a'] = '9'

    logitem = change.create_run(res)
    assert logitem.diff == [['add', '', [['a', '9']]]]
    uid = logitem.uid
    operations.commit_log_item(logitem)
    assert CommitedResource.get('test1').inputs == {'a': '9'}

    change.revert(uid)
    ModelMeta.save_all_lazy()
    pending = change.staged_log()
    assert len(pending) == 1
    for entry in pending:
        operations.commit_log_item(entry)
    assert resource.load_all() == []
def next(self, ctxt, plan_uid):
    """Schedule the next ready tasks of *plan_uid* under a per-plan lock."""
    lock = Lock(plan_uid, str(get_current_ident()), retries=20,
                waiter=Waiter(1))
    with lock:
        log.debug('Received *next* event for %s', plan_uid)
        plan = graph.get_graph(plan_uid)
        # FIXME get_graph should raise DBNotFound if graph is not
        # created
        if len(plan) == 0:
            raise ValueError('Plan {} is empty'.format(plan_uid))
        scheduled = self._next(plan)
        for task in scheduled:
            self._do_scheduling(task)
        log.debug('Scheduled tasks %r', scheduled)
        ModelMeta.save_all_lazy()
        return scheduled
def test_revert_create():
    """Reverting a committed create leaves no resources after re-commit."""
    res = create_resource('test1')
    res.inputs['a'] = '9'
    res.save_lazy()

    logitem = change.create_run(res.name)
    assert logitem.diff == [['add', '', [['a', '9']]]]
    uid = logitem.uid
    operations.commit_log_item(logitem)
    assert CommitedResource.get('test1').inputs == {'a': '9'}

    change.revert(uid)
    ModelMeta.save_all_lazy()
    pending = change.staged_log()
    assert len(pending) == 1
    for entry in pending:
        operations.commit_log_item(entry)
    assert resource.load_all() == []
def deploy():
    """Provision a two-node ceph-mon setup with fuel library, keys and apt."""
    ModelMeta.remove_all()
    resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 2})
    first_node, second_node = [
        x for x in resources if x.name.startswith('node')
    ]
    first_transp = next(x for x in resources if x.name.startswith('transport'))

    library = vr.create('library1', 'resources/fuel_library', {})[0]
    first_node.connect(library)

    keys = vr.create('ceph_key', 'resources/ceph_keys', {})[0]
    first_node.connect(keys)

    remote_file = vr.create('ceph_key2', 'resources/remote_file',
                            {'dest': '/var/lib/astute/'})[0]
    second_node.connect(remote_file)
    keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'})
    first_transp.connect(remote_file, {'transports': 'remote'})

    ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon', {
        'storage': STORAGE,
        'keystone': KEYSTONE,
        'network_scheme': NETWORK_SCHEMA,
        'ceph_monitor_nodes': NETWORK_METADATA,
        'ceph_primary_monitor_node': NETWORK_METADATA,
        'role': 'controller',
    })[0]
    managed_apt = vr.create('managed_apt1', 'templates/mos_repos.yaml',
                            {'node': first_node.name, 'index': 0})[-1]

    keys.connect(ceph_mon, {})
    first_node.connect(ceph_mon,
                       {'ip': ['ip', 'public_vip', 'management_vip']})
    library.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
    managed_apt.connect(ceph_mon, {})
def test_stage_and_process_partially():
    """Orchestration graphs built per-tag must contain only that tag's tasks."""
    tag_a = ['a']
    tag_b = ['b']
    both = tag_a + tag_b
    range_a = range(1, 4)
    range_b = range(4, 6)
    with_tag_a = [create_resource(str(n), tags=tag_a) for n in range_a]
    with_tag_b = [create_resource(str(n), tags=tag_b) for n in range_b]
    ModelMeta.save_all_lazy()

    assert len(stage_resources(tag_a, 'restart')) == len(with_tag_a)
    assert len(stage_resources(tag_b, 'restart')) == len(with_tag_b)

    expected_a = set(['%s.restart' % n for n in range_a])
    expected_b = set(['%s.restart' % n for n in range_b])
    assert set(change.send_to_orchestration(tag_a).nodes()) == expected_a
    assert set(change.send_to_orchestration(tag_b).nodes()) == expected_b
    combined = change.send_to_orchestration(both)
    assert set(combined.nodes()) == expected_a | expected_b
def test_discard_removed():
    """A discarded removal leaves the committed resource untouched."""
    spec = {'name': 'test1',
            'base_path': 'x',
            'state': RESOURCE_STATE.created.name,
            'meta_inputs': {'a': {'value': None, 'schema': 'str'}}}
    res1 = DBResource.from_dict('test1', spec)
    res1.inputs['a'] = '9'
    res1.save_lazy()
    ModelMeta.save_all_lazy()
    for entry in change.stage_changes():
        operations.move_to_commited(entry.log_action)

    res1 = resource.load('test1')
    res1.remove()
    assert len(change.stage_changes()) == 1
    assert res1.to_be_removed()
    change.discard_all()
    assert len(change.stage_changes()) == 0
    assert not resource.load('test1').to_be_removed()
def test_discard_connection():
    """Discarding a staged connection restores the child's original value."""
    for name, value in (('test1', '9'), ('test2', '0')):
        obj = create_resource(name)
        obj.inputs['a'] = value
        obj.save_lazy()
    for entry in [change.create_run(n) for n in ('test1', 'test2')]:
        operations.commit_log_item(entry)

    res1 = resource.load('test1')
    res2 = resource.load('test2')
    res1.connect(res2, {'a': 'a'})
    ModelMeta.save_all_lazy()
    assert len(change.staged_log()) == 1
    assert res2.args == {'a': '9'}
    change.discard_all()
    assert res2.args == {'a': '0'}
    assert len(change.staged_log()) == 0
def deploy():
    """Set up rabbitmq service, vhost and user on node1, then run them."""
    ModelMeta.remove_all()
    signals.Connections.clear()

    node1 = resources_compiled.RoNodeResource('node1', None, {})
    node1.ip = '10.0.0.3'
    node1.ssh_key = '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key'
    node1.ssh_user = '******'

    rabbitmq_service1 = resources_compiled.RabbitmqServiceResource(
        'rabbitmq_service1', None,
        {'management_port': 15672,
         'port': 5672,
         'container_name': 'rabbitmq_service1',
         'image': 'rabbitmq:3-management'})
    openstack_vhost = resource.create('openstack_vhost',
                                      'resources/rabbitmq_vhost/',
                                      {'vhost_name': 'openstack'})[0]
    openstack_rabbitmq_user = resource.create('openstack_rabbitmq_user',
                                              'resources/rabbitmq_user/',
                                              {'user_name': 'openstack',
                                               'password': '******'})[0]

    ####
    # connections
    ####

    # rabbitmq
    signals.connect(node1, rabbitmq_service1)
    signals.connect(rabbitmq_service1, openstack_vhost)
    signals.connect(rabbitmq_service1, openstack_rabbitmq_user)
    signals.connect(openstack_vhost, openstack_rabbitmq_user,
                    {'vhost_name': 'vhost_name'})

    errors = vr.validate_resources()
    if errors:
        for r, error in errors:
            print('ERROR: %s: %s' % (r.name, error))
        sys.exit(1)

    # run
    actions.resource_action(rabbitmq_service1, 'run')
    actions.resource_action(openstack_vhost, 'run')
    actions.resource_action(openstack_rabbitmq_user, 'run')

    time.sleep(10)
def test_revert_removal():
    """Reverting a committed removal restores the resource's inputs."""
    res = create_resource('test1')
    res.inputs['a'] = '9'
    res.save_lazy()
    commited = CommitedResource.from_dict(
        'test1', {'inputs': {'a': '9'}, 'state': 'operational'})
    commited.save_lazy()

    resource_obj = resource.load(res.name)
    resource_obj.remove()
    ModelMeta.save_all_lazy()

    log_item = change.create_remove(resource_obj.name)
    log_item.save()
    uid = log_item.uid
    assert log_item.diff == [['remove', '', [['a', '9']]]]
    operations.commit_log_item(log_item)
    ModelMeta.save_all_lazy()

    # revert() re-reads resource metadata; stub the repository so no
    # files are needed on disk.
    with mock.patch.object(repository.Repository, 'read_meta') as mread:
        mread.return_value = {
            'input': {'a': {'schema': 'str!'}},
            'id': 'mocked'
        }
        with mock.patch.object(repository.Repository, 'get_path') as mpath:
            mpath.return_value = 'x'
            change.revert(uid)
    ModelMeta.save_all_lazy()

    restored = resource.load('test1')
    assert restored.args == {
        'a': '9',
        'location_id': '',
        'transports_id': ''
    }
def test_revert_removal():
    # Create a resource plus a matching committed record so a removal diff
    # exists to commit and later revert.
    res = DBResource.from_dict('test1',
                               {'name': 'test1',
                                'base_path': 'x',
                                'state': RESOURCE_STATE.created.name,
                                'meta_inputs': {'a': {'value': None,
                                                      'schema': 'str'}}})
    res.inputs['a'] = '9'
    res.save_lazy()
    commited = CommitedResource.from_dict('test1',
                                          {'inputs': {'a': '9'},
                                           'state': 'operational'})
    commited.save_lazy()
    resource_obj = resource.load(res.name)
    resource_obj.remove()
    ModelMeta.save_all_lazy()

    changes = change.stage_changes()
    assert len(changes) == 1
    assert changes[0].diff == [['remove', '', [['a', '9']]]]
    operations.move_to_commited(changes[0].log_action)
    # Fresh session so the cache/sibling assertions below observe DB state,
    # not in-memory leftovers of the previous transaction.
    ModelMeta.session_start()
    assert DBResource._c.obj_cache == {}
    assert DBResource.bucket.get('test1').siblings == []

    # revert() re-reads resource metadata; stub it so no repository files
    # are needed on disk.
    with mock.patch.object(resource, 'read_meta') as mread:
        mread.return_value = {
            'input': {'a': {'schema': 'str!'}},
            'id': 'mocked'
        }
        change.revert(changes[0].uid)
    ModelMeta.save_all_lazy()
    # Exactly one sibling means the resource object was recreated cleanly.
    assert len(DBResource.bucket.get('test1').siblings) == 1

    resource_obj = resource.load('test1')
    assert resource_obj.args == {
        'a': '9',
        'location_id': '',
        'transports_id': ''
    }
def pytest_runtest_setup(item):
    # Open a fresh DB session before every test.
    ModelMeta.session_start()
def pytest_runtest_call(item):
    # Restart the DB session right before the test body runs so the test
    # observes a clean transaction (setup-phase writes are flushed out).
    ModelMeta.session_end()
    ModelMeta.session_start()