def create_virtual_resource(vr_name, template):
    """Create every resource described in *template* under virtual
    resource *vr_name*, then wire up connections declared via the
    ``emitter::input`` syntax in non-virtual resources' values.

    :return: list of all created resource objects
    """
    created_resources = []
    pending_connections = []
    cwd = os.getcwd()
    for spec in template["resources"]:
        res_name = spec["id"]
        base_path = os.path.join(cwd, spec["from"])
        values = spec["values"]
        created_resources += create(res_name, base_path, values, vr_name)
        if is_virtual(base_path):
            continue
        # A value of the form "emitter::input" is a connection request,
        # not a literal argument.
        for input_name, value in values.items():
            if isinstance(value, basestring) and "::" in value:
                emitter_name, emitter_input = value.split("::")
                pending_connections.append(
                    (emitter_name, res_name, {emitter_input: input_name}))
    # Resolve names against the DB only once, after everything exists.
    db = load_all()
    for emitter_name, receiver_name, mapping in pending_connections:
        signals.connect(db[emitter_name], db[receiver_name], mapping)
    return created_resources
def show(**kwargs):
    """Print resources, optionally filtered by tag/name, as JSON or
    (optionally colored) text; long text output goes through a pager."""
    matched = []
    for res in sresource.load_all().values():
        if kwargs['tag'] and kwargs['tag'] not in res.tags:
            continue
        if kwargs['name'] and res.name != kwargs['name']:
            continue
        matched.append(res)
    if kwargs['json']:
        output = json.dumps([r.to_dict() for r in matched], indent=2)
        echo = click.echo
    else:
        if kwargs['color']:
            def formatter(r):
                return r.color_repr()
        else:
            def formatter(r):
                return unicode(r)
        output = '\n'.join(formatter(r) for r in matched)
        echo = click.echo_via_pager
    if output:
        echo(output)
def prepare_nodes(nodes_count):
    """Create the SDN network template for *nodes_count* nodes and pair
    each existing node resource with its SDN counterpart and a librarian.

    :return: dict mapping resource name -> resource for all nodes,
        SDN nodes and librarians
    """
    nodes = resource.load_all(startswith='node')
    resources = cr.create('nodes_network', 'templates/nodes_network',
                          {"count": nodes_count})
    nodes_sdn = resources.like('node')
    r = {}
    for node, node_sdn in zip(nodes, nodes_sdn):
        r[node.name] = node
        r[node_sdn.name] = node_sdn
        # LIBRARIAN
        librarian = cr.create('librarian_{}'.format(node.name),
                              'resources/librarian', {})[0]
        r[librarian.name] = librarian
        node.connect(librarian, {})
        # NETWORKING
        # TODO(bogdando) node's IPs should be populated as br-mgmt IPs,
        # but now are hardcoded in templates
        # BUG FIX: this connect/connect_with_events/add_dep triple was
        # duplicated verbatim (copy-paste); run it once per node.
        signals.connect(node, node_sdn)
        node_sdn.connect_with_events(librarian, {'module': 'modules'}, {})
        evapi.add_dep(librarian.name, node_sdn.name,
                      actions=('run', 'update'))
    return r
def create_controller(node):
    """Set up every OpenStack controller service on *node*.

    :param node: name of the node resource to install services on
    :return: dict of resource name -> resource, including all resources
        created by the setup_* helpers
    """
    r = {r.name: r for r in resource.load_all()}
    librarian_node = 'librarian_{}'.format(node)
    # BUG FIX: the body previously hardcoded 'node0'/'librarian_node0'
    # for every service after keystone, silently ignoring the `node`
    # argument; use the parameter throughout (matches the corrected
    # variant of this function elsewhere in the project).
    r.update(setup_base(r[node], r[librarian_node]))
    r.update(setup_keystone(r[node], r[librarian_node],
                            r['mariadb_service'],
                            r['openstack_rabbitmq_user']))
    r.update(setup_openrc(r[node], r['keystone_puppet'], r['admin_user']))
    r.update(setup_neutron(r[node], r[librarian_node],
                           r['rabbitmq_service1'],
                           r['openstack_rabbitmq_user'],
                           r['openstack_vhost']))
    r.update(setup_neutron_api(r[node], r['mariadb_service'],
                               r['admin_user'], r['keystone_puppet'],
                               r['services_tenant'], r['neutron_puppet']))
    r.update(setup_neutron_agent(r[node], r['neutron_server_puppet']))
    r.update(setup_cinder(r[node], r[librarian_node],
                          r['rabbitmq_service1'], r['mariadb_service'],
                          r['keystone_puppet'], r['admin_user'],
                          r['openstack_vhost'],
                          r['openstack_rabbitmq_user'],
                          r['services_tenant']))
    r.update(setup_cinder_api(r[node], r['cinder_puppet']))
    r.update(setup_cinder_scheduler(r[node], r['cinder_puppet']))
    r.update(setup_cinder_volume(r[node], r['cinder_puppet']))
    r.update(setup_nova(r[node], r[librarian_node], r['mariadb_service'],
                        r['rabbitmq_service1'], r['admin_user'],
                        r['openstack_vhost'], r['services_tenant'],
                        r['keystone_puppet'],
                        r['openstack_rabbitmq_user']))
    r.update(setup_nova_api(r[node], r['nova_puppet'],
                            r['neutron_agents_metadata']))
    r.update(setup_nova_conductor(r[node], r['nova_puppet'],
                                  r['nova_api_puppet']))
    r.update(setup_nova_scheduler(r[node], r['nova_puppet'],
                                  r['nova_api_puppet']))
    r.update(setup_glance_api(r[node], r[librarian_node],
                              r['mariadb_service'], r['admin_user'],
                              r['keystone_puppet'], r['services_tenant'],
                              r['cinder_glance_puppet']))
    r.update(setup_glance_registry(r[node], r['glance_api_puppet']))
    return r
def send_to_orchestration():
    """Build an orchestration plan from the diff between staged and
    committed resource state and hand it to the graph module."""
    dg = nx.MultiDiGraph()
    staged = {r.name: r.args_show() for r in resource.load_all().values()}
    commited = data.CD()
    events = {}
    changed_nodes = []
    for res_uid in staged.keys():
        old_data = commited.get(res_uid, {})
        new_data = staged.get(res_uid, {})
        if create_diff(new_data, old_data):
            events[res_uid] = evapi.all_events(res_uid)
            changed_nodes.append(res_uid)
            action = guess_action(old_data, new_data)
            evapi.StateChange(res_uid, action).insert(changed_nodes, dg)
    evapi.build_edges(changed_nodes, dg, events)
    # what it should be?
    dg.graph['name'] = 'system_log'
    return graph.create_plan_from_graph(dg)
def create_controller(node):
    """Set up every OpenStack controller service on *node*.

    :param node: name of the node resource to install services on
    :return: dict of resource name -> resource, including all resources
        created by the setup_* helpers
    """
    r = {r.name: r for r in resource.load_all()}
    # Compute the librarian key once instead of re-formatting it for
    # every setup_* call (same string value, so behavior is unchanged).
    librarian_node = 'librarian_{}'.format(node)
    r.update(setup_base(r[node], r[librarian_node]))
    r.update(setup_keystone(r[node], r[librarian_node],
                            r['mariadb_service'],
                            r['openstack_rabbitmq_user']))
    r.update(setup_openrc(r[node], r['keystone_puppet'], r['admin_user']))
    r.update(setup_neutron(r[node], r[librarian_node],
                           r['rabbitmq_service1'],
                           r['openstack_rabbitmq_user'],
                           r['openstack_vhost']))
    r.update(setup_neutron_api(r[node], r['mariadb_service'],
                               r['admin_user'], r['keystone_puppet'],
                               r['services_tenant'], r['neutron_puppet']))
    r.update(setup_neutron_agent(r[node], r['neutron_server_puppet']))
    r.update(setup_cinder(r[node], r[librarian_node],
                          r['rabbitmq_service1'], r['mariadb_service'],
                          r['keystone_puppet'], r['admin_user'],
                          r['openstack_vhost'],
                          r['openstack_rabbitmq_user'],
                          r['services_tenant']))
    r.update(setup_cinder_api(r[node], r['cinder_puppet']))
    r.update(setup_cinder_scheduler(r[node], r['cinder_puppet']))
    r.update(setup_cinder_volume(r[node], r['cinder_puppet']))
    r.update(setup_nova(r[node], r[librarian_node], r['mariadb_service'],
                        r['rabbitmq_service1'], r['admin_user'],
                        r['openstack_vhost'], r['services_tenant'],
                        r['keystone_puppet'],
                        r['openstack_rabbitmq_user']))
    r.update(setup_nova_api(r[node], r['nova_puppet'],
                            r['neutron_agents_metadata']))
    r.update(setup_nova_conductor(r[node], r['nova_puppet'],
                                  r['nova_api_puppet']))
    r.update(setup_nova_scheduler(r[node], r['nova_puppet'],
                                  r['nova_api_puppet']))
    r.update(setup_glance_api(r[node], r[librarian_node],
                              r['mariadb_service'], r['admin_user'],
                              r['keystone_puppet'], r['services_tenant'],
                              r['cinder_glance_puppet']))
    r.update(setup_glance_registry(r[node], r['glance_api_puppet']))
    return r
def find_missing_connections():
    """Find resources whose input values are duplicated and they are not
    connected between each other (i.e. the values are hardcoded, not
    coming from connection).

    NOTE: this we could have 2 inputs of the same value living in 2
    "circles". This is not covered, we find only inputs whose value is
    hardcoded.

    :return: [(resource_name1, input_name1, resource_name2, input_name2)]
    """
    resources = load_all()
    unsourced = find_inputs_without_source()
    pairs = set()
    # Walk each unordered pair exactly once; the original reverse-pair
    # membership check is then unnecessary.
    for idx, (res1, inp1) in enumerate(unsourced):
        val1 = resources[res1].args[inp1]
        for res2, inp2 in unsourced[idx + 1:]:
            if res1 == res2:
                continue
            if val1 == resources[res2].args[inp2]:
                pairs.add((res1, inp1, res2, inp2))
    return list(pairs)
def create_compute(node):
    """Set up neutron and nova compute services on *node*.

    :param node: name of the node resource to install services on
    :return: dict of resource name -> resource, including the resources
        created by the setup_* helpers
    """
    r = {r.name: r for r in resource.load_all()}
    librarian_node = 'librarian_{}'.format(node)
    # BUG FIX: results were previously collected into a dead local `res`
    # dict while `r` was returned, so the newly created resources were
    # discarded; merge them into `r` as create_controller does.
    r.update(setup_neutron_compute(r[node], r[librarian_node],
                                   r['neutron_puppet'],
                                   r['neutron_server_puppet']))
    r.update(setup_nova_compute(r[node], r[librarian_node],
                                r['nova_puppet'], r['nova_api_puppet'],
                                r['neutron_server_puppet'],
                                r['neutron_keystone_service_endpoint'],
                                r['glance_api_puppet']))
    return r
def stage_changes():
    """Reset the staging log and restage all pending resource changes."""
    log = data.SL()
    log.clean()
    conn_graph = signals.detailed_connection_graph()
    commited = data.CD()
    staged = dict((r.name, r.args_show())
                  for r in resource.load_all().values())
    return _stage_changes(staged, conn_graph, commited, log)
def test_load_all(self):
    """load_all returns every resource; `startswith` filters by prefix."""
    meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
""")
    for res_name in ('sample1', 'sample2', 'x_sample1'):
        self.create_resource(res_name, meta_dir, {'value': 1})
    assert len(resource.load_all()) == 3
    assert len(resource.load_all(startswith='sample')) == 2
    assert len(resource.load_all(startswith='x_sample')) == 1
    assert len(resource.load_all(startswith='nothing')) == 0
def validate_resources():
    """Validate every Resource in the DB.

    :return: [(resource, errors)] for resources that failed validation
    """
    candidates = (r for r in load_all().values() if isinstance(r, Resource))
    failed = []
    for res in candidates:
        errors = validation.validate_resource(res)
        if errors:
            failed.append((res, errors))
    return failed
def test_all():
    """Run test() on every resource, merging all non-empty results."""
    merged = {}
    for res in resource.load_all():
        outcome = test(res)
        if outcome:
            merged.update(outcome)
    return merged
def undeploy():
    """Remove the rabbitmq resources and wipe the model metadata.

    The model is wiped both before and after the resource actions,
    mirroring the original pre/post cleanup.
    """
    ModelMeta.remove_all()
    by_name = {res.name: res for res in resource.load_all()}
    # Removal order matters: user, then vhost, then the service itself.
    for res_name in ('openstack_rabbitmq_user', 'openstack_vhost',
                     'rabbitmq_service1'):
        actions.resource_action(by_name[res_name], 'remove')
    ModelMeta.remove_all()
def undeploy(): resources = resource.load_all() resources = {r.name: r for r in resources} for name in reversed(resources_to_run): try: actions.resource_action(resources[name], 'remove') except errors.SolarError as e: print 'WARNING: %s' % str(e) ModelMeta.remove_all()
def test_load_all(self):
    """load_all returns every resource; `startswith` filters by prefix."""
    meta_dir = self.make_resource_meta(
        """
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: int
    value: 0
"""
    )
    for res_name in ("sample1", "sample2", "x_sample1"):
        self.create_resource(res_name, meta_dir, {"value": 1})
    assert len(resource.load_all()) == 3
    assert len(resource.load_all(startswith="sample")) == 2
    assert len(resource.load_all(startswith="x_sample")) == 1
    assert len(resource.load_all(startswith="nothing")) == 0
def update(name, args):
    """Update resource *name* with arguments parsed from *args*: each
    entry is either a JSON object or a `key=value` pair."""
    parsed = {}
    for raw in args:
        try:
            parsed.update(json.loads(raw))
        except ValueError:
            # Not JSON: fall back to key=value form.
            key, value = raw.split('=')
            parsed[key] = value
    click.echo('Updating resource {} with args {}'.format(name, parsed))
    # NOTE: avoids the original's shadowing of the builtin `all`.
    sresource.load_all()[name].update(parsed)
def remove(name, tag, f):
    """Remove resources selected by name, tag, or all; `f` forces
    immediate removal from the database."""
    if name:
        targets = [sresource.load(name)]
    elif tag:
        targets = sresource.load_by_tags(set(tag))
    else:
        targets = sresource.load_all()
    for res in targets:
        res.remove(force=f)
        if f:
            msg = 'Resource %s removed from database' % res.name
        else:
            msg = ('Resource %s will be removed after commiting changes.'
                   % res.name)
        click.echo(msg)
def deploy(): setup_resources() # run resources = resource.load_all() resources = {r.name: r for r in resources} for name in resources_to_run: try: actions.resource_action(resources[name], 'run') except errors.SolarError as e: print 'WARNING: %s' % str(e) raise time.sleep(10)
def find_inputs_without_source():
    """Find resources and inputs values of which are hardcoded.

    :return: [(resource_name, input_name)]
    """
    # Start from every (resource, input) pair, then subtract every pair
    # that is the receiving end of some connection.
    unsourced = set()
    for res in load_all().values():
        for input_name in res.args:
            unsourced.add((res.name, input_name))
    clients = signals.Connections.read_clients()
    for dest_dict in clients.values():
        for destinations in dest_dict.values():
            for receiver_name, receiver_input in destinations:
                unsourced.discard((receiver_name, receiver_input))
    return list(unsourced)
def show(name, tag, json, color):
    """Print resources selected by name, tag, or all, as JSON or
    (optionally colored) text; long text output goes through a pager.

    :param name: show only the resource with this name
    :param tag: show resources carrying any of these tags
    :param json: emit JSON instead of text
    :param color: use colored text representation
    """
    # BUG FIX: the `json` parameter shadows the stdlib json module, so
    # `json.dumps` raised AttributeError whenever the flag was truthy.
    # Save the flag, then rebind the local name to the module. The CLI
    # signature (and thus the option name) is unchanged.
    as_json = json
    import json
    if name:
        resources = [sresource.load(name)]
    elif tag:
        resources = sresource.load_by_tags(set(tag))
    else:
        resources = sresource.load_all()
    echo = click.echo_via_pager
    if as_json:
        output = json.dumps([r.to_dict() for r in resources], indent=2)
        echo = click.echo
    else:
        if color:
            formatter = lambda r: r.color_repr()
        else:
            formatter = lambda r: unicode(r)
        output = '\n'.join(formatter(r) for r in resources)
    if output:
        echo(output)
def detailed_connection_graph(start_with=None, end_with=None, details=False):
    """Build a networkx DiGraph of resources and their inputs.

    Resource nodes get `res_props` styling; input nodes get `inp_props`.
    Edges run resource -> its inputs, and emitter input -> receiver input
    for every connection.

    :param start_with: unused in this body — TODO confirm intent
    :param end_with: unused in this body — TODO confirm intent
    :param details: keep the full ``input:tag`` label on input nodes;
        otherwise only the part before the first ':' is used
    :return: networkx.DiGraph
    """
    from solar.core.resource import load_all
    if details:
        def format_for_edge(resource, input):
            return '"{}/{}"'.format(resource, input)
    else:
        def format_for_edge(resource, input):
            # Drop the tag suffix (everything after the first ':').
            input = input.split(':', 1)[0]
            return '"{}/{}"'.format(resource, input)
    res_props = {'color': 'yellowgreen', 'style': 'filled'}
    inp_props = {'color': 'lightskyblue', 'style': 'filled, rounded'}
    graph = networkx.DiGraph()
    resources = load_all()
    for resource in resources:
        res_node = '{}'.format(resource.name)
        for name in resource.db_obj.meta_inputs:
            resource_input = format_for_edge(resource.name, name)
            graph.add_edge(resource.name, resource_input)
            # NOTE(review): assigns the node-attribute dict wholesale;
            # relies on this networkx version's `graph.node` mapping.
            graph.node[resource_input] = inp_props
        conns = resource.connections
        for (emitter_resource, emitter_input, receiver_resource, receiver_input) in conns:  # NOQA
            e = format_for_edge(emitter_resource, emitter_input)
            r = format_for_edge(receiver_resource, receiver_input)
            graph.add_edge(emitter_resource, e)
            graph.add_edge(receiver_resource, r)
            graph.add_edge(e, r)
            graph.node[e] = inp_props
            graph.node[r] = inp_props
        graph.node[res_node] = res_props
    return graph
def stage_changes():
    """Rebuild the staging log by diffing each resource's staged args and
    connections against its committed state."""
    log = data.SL()
    log.clean()
    for res_obj in resource.load_all():
        commited = res_obj.load_commited()
        base_path = res_obj.base_path
        # A resource marked for removal stages empty args/connections.
        if res_obj.to_be_removed():
            staged_args, staged_connections = {}, []
        else:
            staged_args = res_obj.args
            staged_connections = res_obj.connections
        # A committed-removed resource contributes empty committed state.
        if commited.state == RESOURCE_STATE.removed.name:
            commited_args, commited_connections = {}, []
        else:
            commited_args = commited.inputs
            commited_connections = commited.connections
        inputs_diff = create_diff(staged_args, commited_args)
        connections_diff = create_sorted_diff(
            staged_connections, commited_connections)
        # if new connection created it will be reflected in inputs
        # but using inputs to reverse connections is not possible
        if inputs_diff:
            log.append(create_logitem(
                res_obj.name,
                guess_action(commited_args, staged_args),
                inputs_diff,
                connections_diff,
                base_path=base_path))
    return log
def show():
    """Print the emitter connections of every resource."""
    for res in sresource.load_all():
        show_emitter_connections(res)
def create_controller(node):
    """Set up every OpenStack controller service on *node*.

    :param node: name of the node resource to install services on
    :return: dict of resource name -> resource, including all resources
        created by the setup_* helpers
    """
    r = {r.name: r for r in resource.load_all()}
    librarian_node = "librarian_{}".format(node)
    # BUG FIX: the body previously hardcoded "node0"/"librarian_node0"
    # for every service after keystone, silently ignoring the `node`
    # argument; use the parameter throughout (matches the corrected
    # variant of this function elsewhere in the project).
    r.update(setup_base(r[node], r[librarian_node]))
    r.update(setup_keystone(r[node], r[librarian_node],
                            r["mariadb_service"],
                            r["openstack_rabbitmq_user"]))
    r.update(setup_openrc(r[node], r["keystone_puppet"], r["admin_user"]))
    r.update(
        setup_neutron(
            r[node],
            r[librarian_node],
            r["rabbitmq_service1"],
            r["openstack_rabbitmq_user"],
            r["openstack_vhost"],
        )
    )
    r.update(
        setup_neutron_api(
            r[node],
            r["mariadb_service"],
            r["admin_user"],
            r["keystone_puppet"],
            r["services_tenant"],
            r["neutron_puppet"],
        )
    )
    r.update(setup_neutron_agent(r[node], r["neutron_server_puppet"]))
    r.update(
        setup_cinder(
            r[node],
            r[librarian_node],
            r["rabbitmq_service1"],
            r["mariadb_service"],
            r["keystone_puppet"],
            r["admin_user"],
            r["openstack_vhost"],
            r["openstack_rabbitmq_user"],
            r["services_tenant"],
        )
    )
    r.update(setup_cinder_api(r[node], r["cinder_puppet"]))
    r.update(setup_cinder_scheduler(r[node], r["cinder_puppet"]))
    r.update(setup_cinder_volume(r[node], r["cinder_puppet"]))
    r.update(
        setup_nova(
            r[node],
            r[librarian_node],
            r["mariadb_service"],
            r["rabbitmq_service1"],
            r["admin_user"],
            r["openstack_vhost"],
            r["services_tenant"],
            r["keystone_puppet"],
            r["openstack_rabbitmq_user"],
        )
    )
    r.update(setup_nova_api(r[node], r["nova_puppet"],
                            r["neutron_agents_metadata"]))
    r.update(setup_nova_conductor(r[node], r["nova_puppet"],
                                  r["nova_api_puppet"]))
    r.update(setup_nova_scheduler(r[node], r["nova_puppet"],
                                  r["nova_api_puppet"]))
    r.update(
        setup_glance_api(
            r[node],
            r[librarian_node],
            r["mariadb_service"],
            r["admin_user"],
            r["keystone_puppet"],
            r["services_tenant"],
            r["cinder_glance_puppet"],
        )
    )
    r.update(setup_glance_registry(r[node], r["glance_api_puppet"]))
    return r