def _is_node_busy_and_reserve_it(node_id, api, controller_requestor='gcc'):
    settings = get_current_registry().settings
    seconds_block_is_busy = int(settings.get('chef.seconds_block_is_busy'))
    time_to_exp = datetime.timedelta(seconds=seconds_block_is_busy)

    node = ChefNode(node_id, api)
    current_use_node = node.attributes.get(USE_NODE, {})
    current_use_node_control = current_use_node.get('control', None)
    current_use_node_exp_date = current_use_node.get('exp_date', None)
    if current_use_node_exp_date:
        current_use_node_exp_date = json.loads(
            current_use_node_exp_date, object_hook=json_util.object_hook)
        current_use_node_exp_date = current_use_node_exp_date.astimezone(
            pytz.utc).replace(tzinfo=None)
        # exp_date is stored as naive UTC, so compare against utcnow()
        now = datetime.datetime.utcnow()
        if now - current_use_node_exp_date > time_to_exp:
            current_use_node_control = None
    if current_use_node_control == controller_requestor:
        return (node, False)
    elif current_use_node_control is None:
        exp_date = datetime.datetime.utcnow() + time_to_exp
        node.attributes.set_dotted(USE_NODE, {
            'control': controller_requestor,
            'exp_date': json.dumps(exp_date, default=json_util.default)})
        node.save()

        node2 = ChefNode(node.name, api)  # second check
        current_use_node2 = node2.attributes.get(USE_NODE, {})
        current_use_control2 = current_use_node2.get('control', None)
        if current_use_control2 == controller_requestor:
            return (node2, False)
    return (node, True)
def build_computes(computes):
    # Run computes
    print "Making the compute nodes..."
    for compute in computes:
        compute_node = Node(compute)
        compute_node['in_use'] = "compute"
        compute_node.run_list = ["role[qa-single-compute]"]
        compute_node.save()

        print "Updating server...this may take some time"
        update_node(compute_node)

        if compute_node['platform_family'] == 'rhel':
            print "Platform is RHEL family, disabling iptables"
            disable_iptables(compute_node)

        # Run chef-client twice
        print "Running chef-client on compute node: %s, this may take some time..." % compute
        run1 = run_chef_client(compute_node)
        if run1['success']:
            print "First chef-client run successful...starting second run..."
            run2 = run_chef_client(compute_node)
            if run2['success']:
                print "Second chef-client run successful..."
            else:
                print "Error running chef-client for compute %s" % compute
                print run2
                sys.exit(1)
        else:
            print "Error running chef-client for compute %s" % compute
            print run1
            sys.exit(1)
def handle(event, _context):
    """Lambda Handler"""
    log_event(event)
    with ChefAPI(CHEF_SERVER_URL, get_pem(), USERNAME):
        instance_id = get_instance_id(event)
        try:
            search = Search('node', 'ec2_instance_id:' + instance_id)
        except ChefServerNotFoundError as err:
            LOGGER.error(err)
            return False

        if len(search) != 0:
            for instance in search:
                node = Node(instance.object.name)
                client = Client(instance.object.name)
                try:
                    node.delete()
                    LOGGER.info('===Node Delete: SUCCESS===')
                    client.delete()
                    LOGGER.info('===Client Delete: SUCCESS===')
                    return True
                except ChefServerNotFoundError as err:
                    LOGGER.error(err)
                    return False
        else:
            LOGGER.info('=Instance does not appear to be Chef Server managed.=')
            return True
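# Local smoke-test sketch for the handler above. This assumes the function is
# wired to CloudWatch EC2 instance state-change notifications and that
# get_instance_id() reads detail['instance-id'] from the event; both the event
# shape consumed here and the instance id value are illustrative assumptions,
# not confirmed by the source.
if __name__ == '__main__':
    fake_event = {
        'detail-type': 'EC2 Instance State-change Notification',
        'detail': {'instance-id': 'i-0123456789abcdef0', 'state': 'terminated'},
    }
    handle(fake_event, None)  # _context is unused by the handler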
def post(self):
    node_id = self.request.POST.get('node_id')
    if node_id is None:
        return {'ok': False, 'message': 'Missing node ID'}

    settings = get_current_registry().settings
    api = get_chef_api(settings, self.request.user)

    # create chef client
    chef_client = ChefClient(node_id, api)
    if chef_client.exists:
        return {'ok': False, 'message': 'This client already exists'}
    chef_client = ChefClient.create(node_id, api)

    # Prepare the API for this client
    chef_url = settings.get('chef.url')
    chef_version = settings.get('chef.version')
    chef_ssl_verify = settings.get('chef.ssl.verify')
    # bool('False') is True, so parse the setting string explicitly
    if chef_ssl_verify in ('False', 'True'):
        chef_ssl_verify = (chef_ssl_verify == 'True')
    api = ChefAPI(chef_url, str(chef_client.private_key), node_id,
                  chef_version, ssl_verify=chef_ssl_verify)

    # create chef node
    chef_node = ChefNode(node_id, api)
    if chef_node.exists:
        return {'ok': False, 'message': 'This node already exists'}
    chef_node.save()

    return {'ok': True,
            'message': 'Node and client have been added',
            'client_private_key': chef_client.private_key}
def chef_instance(self, deployment, name, flavor="2GBP"):
    """Builds an instance with desired specs and inits it with chef

    :param deployment: deployment to add to
    :type deployment: ChefDeployment
    :param name: name for instance
    :type name: string
    :param flavor: desired flavor for node
    :type flavor: string
    :rtype: ChefNode
    """
    image = deployment.os_name
    server, password = self.build_instance(name=name, image=image,
                                           flavor=flavor)
    run_list = ",".join(util.config[str(self)]['run_list'])
    run_list_arg = ""
    if run_list:
        run_list_arg = "-r {0}".format(run_list)
    command = 'knife bootstrap {0} -u root -P {1} -N {2} {3}'.format(
        server.accessIPv4, password, name, run_list_arg)
    run_cmd(command)
    node = Node(name, api=deployment.environment.local_api)
    node.chef_environment = deployment.environment.name
    node['in_use'] = "provisioning"
    node['ipaddress'] = server.accessIPv4
    node['password'] = password
    node['uuid'] = server.id
    node['current_user'] = "******"
    node.save()
    return node
def create_nodes(cluster, facet):
    """Initialize Chef nodes"""
    instances = create_instances(cluster, facet)
    for nodename, ipaddress in instances:
        node = Node(nodename)
        if node.exists:
            node_ipaddress = node.get('ipaddress')
            if ipaddress is None and node_ipaddress:
                ipaddress = node_ipaddress
            elif node_ipaddress and node_ipaddress != ipaddress:
                raise Exception('The remote IP address is different: %s'
                                % node_ipaddress)
        if ipaddress is None:
            raise Exception('Can not determine the IP address for %s'
                            % nodename)
        node['ipaddress'] = ipaddress

        # update environment and run_list
        node.chef_environment = cluster.environment
        run_list = list(cluster.run_list)
        run_list.extend(facet.run_list)
        # tagging the cluster
        run_list.append(u'role[%s_cluster]' % cluster.name)
        run_list.append(u'role[%s_%s]' % (cluster.name, facet.name))
        for role in run_list:
            if role not in node.run_list:
                node.run_list.append(role)
        facet.nodes[ipaddress] = node
def post(self):
    node_id = self.request.POST.get('node_id')
    if node_id is None:
        return {'ok': False, 'message': 'Missing node ID'}

    settings = get_current_registry().settings
    api = get_chef_api(settings, self.request.user)

    # create chef client
    chef_client = ChefClient(node_id, api)
    if chef_client.exists:
        return {'ok': False, 'message': 'This client already exists'}
    chef_client = ChefClient.create(node_id, api)

    # Prepare the API for this client
    chef_url = settings.get('chef.url')
    chef_version = settings.get('chef.version')
    chef_ssl_verify = settings.get('chef.ssl.verify')
    # bool('False') is True, so parse the setting string explicitly
    if chef_ssl_verify in ('False', 'True'):
        chef_ssl_verify = (chef_ssl_verify == 'True')
    api = ChefAPI(chef_url, chef_client.private_key.encode(), node_id,
                  chef_version, ssl_verify=chef_ssl_verify)

    # create chef node
    chef_node = ChefNode(node_id, api)
    if chef_node.exists:
        return {'ok': False, 'message': 'This node already exists'}
    chef_node.save()

    return {'ok': True,
            'message': 'Node and client have been added',
            'client_private_key': chef_client.private_key}
def build_dir_server(dir_server):
    # We don't support 389 yet, so exit if it is not openldap
    if results.dir_version != 'openldap':
        print "%s as a directory service is not yet supported...exiting" % results.dir_version
        sys.exit(1)

    # Build directory service node
    dir_node = Node(dir_server)
    ip = dir_node['ipaddress']
    root_pass = razor.get_active_model_pass(
        dir_node['razor_metadata'].to_dict()['razor_active_model_uuid'])['password']
    dir_node['in_use'] = 'directory-server'
    dir_node.run_list = ["role[qa-%s-%s]" % (results.dir_version, results.os)]
    dir_node.save()

    print "Updating server...this may take some time"
    update_node(dir_node)

    # if redhat platform, disable iptables
    if dir_node['platform_family'] == 'rhel':
        print "Platform is RHEL family, disabling iptables"
        disable_iptables(dir_node)

    # Run chef-client twice
    print "Running chef-client for directory service node...this may take some time..."
    run1 = run_chef_client(dir_node)
    if run1['success']:
        print "First chef-client run successful...starting second run..."
        run2 = run_chef_client(dir_node)
        if run2['success']:
            print "Second chef-client run successful..."
        else:
            print "Error running chef-client for directory node %s" % dir_node
            print run2
            sys.exit(1)
    else:
        print "Error running chef-client for directory node %s" % dir_node
        print run1
        sys.exit(1)

    # Directory service is set up, need to import config
    if run1['success'] and run2['success']:
        if results.dir_version == 'openldap':
            scp_run = run_remote_scp_cmd(ip, 'root', root_pass,
                                         '/var/lib/jenkins/source_files/ldif/*.ldif')
            if scp_run['success']:
                ssh_run = run_remote_ssh_cmd(
                    ip, 'root', root_pass,
                    'ldapadd -x -D "cn=admin,dc=dev,dc=rcbops,dc=me" -f base.ldif -w@privatecloud')
        elif results.dir_version == '389':
            # Once we support 389, code here to import needed config files
            print "389 is not yet supported..."
            sys.exit(1)
        else:
            print "%s is not supported...exiting" % results.dir_version
            sys.exit(1)

        if scp_run['success'] and ssh_run['success']:
            print "Directory Service: %s successfully set up..." % results.dir_version
        else:
            print "Failed to set-up Directory Service: %s..." % results.dir_version
            sys.exit(1)
def test_create(self):
    name = self.random()
    node = Node.create(name, run_list=['recipe[foo]'])
    self.register(node)
    self.assertEqual(node.run_list, ['recipe[foo]'])

    node2 = Node(name)
    self.assertTrue(node2.exists)
    self.assertEqual(node2.run_list, ['recipe[foo]'])
def computer_deleted(self, user, obj, computers=None):
    node_chef_id = obj.get('node_chef_id', None)
    if node_chef_id:
        api = get_chef_api(self.app.conf, user)
        node = Node(node_chef_id, api)
        node.delete()
        client = Client(node_chef_id, api=api)
        client.delete()
    self.log_action('deleted', 'Computer', obj)
def add_run_list_item(self, items):
    """Adds list of items to run_list"""
    util.logger.debug("run_list:{0} add:{1}".format(self.run_list, items))
    self.run_list.extend(items)
    cnode = ChefNode(self.name, api=self.environment.local_api)
    cnode.run_list = self.run_list
    self.save(cnode)
def build(self):
    """Builds the node"""
    # clear run_list
    self.run_list = []
    node = ChefNode(self.name, self.environment.local_api)
    node.run_list = []
    node.save()
    super(Chef, self).build()
def remove_run_list_item(self, item):
    """Removes an item from run_list"""
    util.logger.debug("run_list:{0} remove:{1}".format(self.run_list, item))
    self.run_list.remove(item)
    cnode = ChefNode(self.name, api=self.environment.local_api)
    cnode.run_list = self.run_list
    self.save(cnode)
def clear_pool(chef_nodes, environment):
    for n in chef_nodes:
        name = n['name']
        node = Node(name)
        if node.chef_environment == environment:
            if "recipe[network-interfaces]" not in node.run_list:
                erase_node(name)
            else:
                node.chef_environment = "_default"
                node.save()
def hosts(env=None, name=None):
    api = autoconfigure()
    if name:
        nodes = (node for node in Node.list() if name in Node(node).name)
    else:
        nodes = (node for node in Node.list()
                 if Node(node).chef_environment == env)
    # use a context manager so the file is closed, and avoid shadowing file()
    with open("hosts", "w") as hosts_file:
        hosts_file.write("[hosts]\n")
        for n in nodes:
            hosts_file.write("{0}\n".format(Node(n)['ipaddress']))
def handle(event, _context):
    """Lambda Handler"""
    log_event(event)
    node_name = None
    node_ip = None

    # Remove from one of the chef servers
    for URL in CHEF_SERVER_URLS:
        with ChefAPI(URL, CHEF_PEM, CHEF_USERNAME):
            instance_id = get_instance_id(event)
            try:
                search = Search('node', 'ec2_instance_id:' + instance_id)
            except ChefServerNotFoundError as err:
                LOGGER.error(err)
                return False

            if len(search) != 0:
                for instance in search:
                    node_name = instance.object.name
                    node = Node(node_name)
                    node_ip = node['ipaddress']
                    client = Client(node_name)
                    try:
                        node.delete()
                        client.delete()
                        LOGGER.info(
                            '=====SUCCESSFULLY REMOVED INSTANCE FROM CHEF SERVER===== {}'
                            .format(URL))
                        break
                    except ChefServerNotFoundError as err:
                        LOGGER.error(err)
                        return False
            else:
                LOGGER.info(
                    '===Instance does not appear to be Chef Server managed.=== {}'
                    .format(URL))

    # Remove from Spacewalk
    spacewalk_cleanup(node_ip)

    # Remove from DNS
    dns_cleanup(node_name)

    # Remove from AD
    active_directory_cleanup(node_name)

    # Remove from Solarwinds
    solarwinds_cleanup(node_ip, node_name)

    # Remove from Chef Automate
    chef_automate_cleanup(node_name)
def destroy_node(self, node):
    """Destroys chef node from openstack

    :param node: node to destroy
    :type node: ChefNode
    """
    cnode = Node(node.name, node.environment.local_api)
    if cnode.exists:
        self.compute_client.servers.get(node['uuid']).delete()
        cnode.delete()
    client = Client(node.name, node.environment.local_api)
    if client.exists:
        client.delete()
def opencenter_endpoint(chef, name='test', os='ubuntu'):
    validate_environment(chef, name=name, os=os)
    env = env_format % (name, os)
    query = "in_use:\"server\" AND chef_environment:%s" % env
    server = next(Node(node['name']) for node in Search('node').query(query))
    ep_url = "https://%s:8443" % server['ipaddress']
    return OpenCenterEndpoint(ep_url, user="******", password="******")
def _clean_node(self, hostid, config, target_system, **kwargs):
    """clean node"""
    from chef import Node
    try:
        node = Node(self._get_node_name(config['hostname'],
                                        config['clusterid'],
                                        target_system),
                    api=self.api_)
        node.delete()
        logging.debug('node is removed for host %s '
                      'config %s target_system %s',
                      hostid, config, target_system)
    except Exception as error:
        logging.debug('no node to delete for host %s '
                      'config %s target_system %s',
                      hostid, config, target_system)
class MockSearchTestCase(ChefTestCase):
    @mockSearch({('node', '*:*'): [Node('fake_1', skip_load=True).to_dict()]})
    def test_single_node(self, MockSearch):
        import chef.search
        s = chef.search.Search('node')
        self.assertEqual(len(s), 1)
        self.assertIn('fake_1', s)
def test_create_crosslink(self):
    node = Node.create(self.random())
    self.register(node)
    node.normal['foo'] = 'bar'
    self.assertEqual(node['foo'], 'bar')
    node.attributes['foo'] = 'baz'
    self.assertEqual(node.normal['foo'], 'baz')
def destroy_node(self, node):
    """Destroys a node provisioned by razor

    :param node: Node to destroy
    :type node: ChefNode
    """
    cnode = Node(node.name, node.environment.local_api)
    in_use = node['in_use']
    if in_use == "provisioning" or in_use == 0:
        # Return to pool if the node is clean
        cnode['in_use'] = 0
        cnode['archive'] = {}
        cnode.chef_environment = "_default"
        cnode.save()
    else:
        # Remove active model if the node is dirty
        active_model = cnode['razor_metadata']['razor_active_model_uuid']
        try:
            if node.feature_in('controller'):
                # rabbit can cause the node to not actually reboot
                kill = ("for i in `ps -U rabbitmq | tail -n +2 | "
                        "awk '{print $1}' `; do kill -9 $i; done")
                node.run_cmd(kill)
            node.run_cmd("shutdown -r now")
            self.api.remove_active_model(active_model)
            Client(node.name).delete()
            cnode.delete()
            sleep(15)
        except:
            util.logger.error("Node unreachable. "
                              "Manual restart required:{0}".format(str(node)))
def delete(self):
    node_id = self.request.GET.get('node_id')
    if node_id is None:
        return {'ok': False, 'message': 'Missing node ID'}

    settings = get_current_registry().settings
    api = get_chef_api(settings, self.request.user)

    chef_node = ChefNode(node_id, api)
    if not chef_node.exists:
        return {'ok': False, 'message': 'This node does not exist'}
    chef_node.delete()

    chef_client = ChefClient(node_id, api)
    if not chef_client.exists:
        return {'ok': False, 'message': 'This client does not exist'}
    chef_client.delete()

    return {'ok': True, 'message': 'Node and client have been deleted'}
def _is_node_busy_and_reserve_it(node_id, api, controller_requestor='gcc'):
    '''
    Check if the node is busy; otherwise try to take it by writing the
    controller and an expiration date into the USE_NODE field.
    '''
    settings = get_current_registry().settings
    seconds_block_is_busy = int(settings.get('chef.seconds_block_is_busy'))
    time_to_exp = datetime.timedelta(seconds=seconds_block_is_busy)

    time_get = time.time()
    node = ChefNode(node_id, api)
    time_get = time.time() - time_get

    current_use_node = node.attributes.get(USE_NODE, {})
    current_use_node_control = current_use_node.get('control', None)
    current_use_node_exp_date = current_use_node.get('exp_date', None)
    if current_use_node_exp_date:
        current_use_node_exp_date = json.loads(
            current_use_node_exp_date, object_hook=json_util.object_hook)
        current_use_node_exp_date = current_use_node_exp_date.astimezone(
            pytz.utc).replace(tzinfo=None)
        # exp_date is stored as naive UTC, so compare against utcnow()
        now = datetime.datetime.utcnow()
        if now - current_use_node_exp_date > time_to_exp:
            current_use_node_control = None
    if current_use_node_control == controller_requestor:
        return (node, False)
    elif current_use_node_control is None:
        exp_date = datetime.datetime.utcnow() + time_to_exp
        node.attributes.set_dotted(USE_NODE, {
            'control': controller_requestor,
            'exp_date': json.dumps(exp_date, default=json_util.default)})
        node.save()

        # Sleep proportionally to the node fetch time before re-checking,
        # so competing controllers can see each other's writes
        smart_lock_sleep_parameter = settings.get('chef.smart_lock_sleep_factor', 3)
        seconds_sleep_is_busy = time_get * int(smart_lock_sleep_parameter)
        time.sleep(seconds_sleep_is_busy)

        node2 = ChefNode(node.name, api)  # second check
        current_use_node2 = node2.attributes.get(USE_NODE, {})
        current_use_control2 = current_use_node2.get('control', None)
        if current_use_control2 == controller_requestor:
            return (node2, False)
    return (node, True)
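# Illustrative caller for the reservation helper above; a minimal sketch, not
# part of the source. The retry count and one-second pause are assumptions.
# save_node_and_free() appears later in this collection and clears the
# USE_NODE lock by writing an empty dict back to the node.
def with_reserved_node(node_id, api, requestor='gcc', attempts=3):
    for _ in range(attempts):
        node, is_busy = _is_node_busy_and_reserve_it(node_id, api, requestor)
        if not is_busy:
            try:
                # ... mutate node.attributes here while holding the lock ...
                return True
            finally:
                save_node_and_free(node)
        time.sleep(1)  # illustrative pause between attempts
    return False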
def openstack_endpoints(opencenter_endpoint):
    ep = opencenter_endpoint
    infrastructure_nodes = ep.nodes.filter('name = "Infrastructure"')
    for node_id in infrastructure_nodes.keys():
        ha = infrastructure_nodes[node_id].facts["ha_infra"]
        endpoint = None
        if ha:
            endpoint = infrastructure_nodes[node_id].facts["nova_api_vip"]
        else:
            name = next(node.name for node in ep.nodes
                        if "nova-controller" in node.facts["backends"])
            endpoint = Node(name)['ipaddress']
        return endpoint
def command(self):
    api = _get_chef_api(self.settings.get('chef.url'),
                        toChefUsername(self.options.chef_username),
                        self.options.chef_pem,
                        self.settings.get('chef.ssl.verify'),
                        self.settings.get('chef.version'))

    ou_name = 'ou_0'
    ou = self.create_root_ou(ou_name)

    for node_id in ChefNode.list():
        try:
            node_mongo_id = register_or_updated_node(api, node_id, ou,
                                                     self.db.nodes)
        except DuplicateKeyError:
            node_mongo_id = update_node(api, node_id, ou, self.db.nodes)
        if not node_mongo_id:
            print "%s does not exist" % node_id
def build_controller(controller, ha=False, ha_num=0):
    controller_node = Node(controller)

    # Check for ha
    if ha:
        print "Making %s the ha-controller%s node" % (controller, ha_num)
        controller_node['in_use'] = "ha-controller%s" % ha_num
        controller_node.run_list = ["role[qa-ha-controller%s]" % ha_num]
    else:
        print "Making %s the controller node" % controller
        controller_node['in_use'] = "controller"
        controller_node.run_list = ["role[qa-single-controller]"]

    # save node
    controller_node.save()

    print "Updating server...this may take some time"
    update_node(controller_node)

    if controller_node['platform_family'] == 'rhel':
        print "Platform is RHEL family, disabling iptables"
        disable_iptables(controller_node)

    # Run chef-client twice
    print "Running chef-client for controller node...this may take some time..."
    run1 = run_chef_client(controller_node)
    if run1['success']:
        print "First chef-client run successful...starting second run..."
        run2 = run_chef_client(controller_node)
        if run2['success']:
            print "Second chef-client run successful..."
        else:
            print "Error running chef-client for controller %s" % controller
            print run2
            sys.exit(1)
    else:
        print "Error running chef-client for controller %s" % controller
        print run1
        sys.exit(1)
def put(self):
    node_id = self.request.POST.get('node_id')
    username = self.request.POST.get('gcc_username')
    if not node_id:
        return {'ok': False, 'message': 'Please set a node id (node_id)'}
    if not username:
        return {'ok': False, 'message': 'Please set an admin username (gcc_username)'}

    self.request.user = self.request.db.adminusers.find_one({'username': username})
    if not self.request.user:
        return {'ok': False, 'message': 'The admin user %s does not exist' % username}

    settings = get_current_registry().settings
    api = get_chef_api(settings, self.request.user)
    node = Node(node_id, api)
    job_status = node.attributes.get('job_status')

    reserve_node = False
    if job_status:
        node = reserve_node_or_raise(node_id, api,
                                     'gcc-chef-status-%s' % random.random(),
                                     attempts=3)
        reserve_node = True
        chef_client_error = False
        for job_id, job_status in job_status.to_dict().items():
            job = self.collection.find_one({'_id': ObjectId(job_id)})
            if not job:
                continue
            if job_status['status'] == 0:
                self.collection.update(
                    {'_id': job['_id']},
                    {'$set': {'status': 'finished',
                              'last_update': datetime.datetime.utcnow()}})
            else:
                chef_client_error = True
                self.collection.update(
                    {'_id': job['_id']},
                    {'$set': {'status': 'errors',
                              'message': job_status.get('message', 'Error'),
                              'last_update': datetime.datetime.utcnow()}})
        self.request.db.nodes.update(
            {'node_chef_id': node_id},
            {'$set': {'error_last_chef_client': chef_client_error}})
        invalidate_jobs(self.request)
        node.attributes.set_dotted('job_status', {})

    users_old = self.get_attr(node, USERS_OLD)
    users = self.get_attr(node, USERS_OHAI)
    if not users_old or users_old != users:
        if not reserve_node:
            node = reserve_node_or_raise(node_id, api,
                                         'gcc-chef-status-%s' % random.random(),
                                         attempts=3)
        return self.check_users(node)
    if job_status:
        save_node_and_free(node)
    return {'ok': True}
def command(self):
    api = _get_chef_api(self.settings.get('chef.url'),
                        toChefUsername(self.options.chef_username),
                        self.options.chef_pem)

    ou_name = 'ou_0'
    ou = self.create_root_ou(ou_name)

    for node_id in ChefNode.list():
        try:
            node_mongo_id = register_or_updated_node(api, node_id, ou,
                                                     self.db.nodes)
        except DuplicateKeyError:
            node_mongo_id = update_node(api, node_id, ou, self.db.nodes)
        if not node_mongo_id:
            print "%s does not exist" % node_id
def fqdn_for_node(self, node):
    self.node = node
    api = autoconfigure()
    n = Node(node)
    if n:
        try:
            self.fqdn = n.attributes['ec2']['public_hostname']
        except KeyError:
            if 'fqdn' in n:
                self.fqdn = n['fqdn']
            else:
                return None
    else:
        return None
    return self.fqdn
def get_node_information(self, node_name):
    """Get node attributes (metadata) using Node.attributes of PyChef.

    Store the values of the attributes selected by the user for each node.
    """
    chefUniqueId = self.organization + "_" + node_name
    node_details = Node(node_name)
    node_information = {}
    node_information['chefUniqueId'] = chefUniqueId
    node_information['chef_environment'] = node_details.chef_environment
    for attribute in self.config:
        attribute_value = self.get_attribute_value(attribute, node_details)
        if attribute_value:
            attribute = self.adjust_attribute_name(attribute)
            node_information[attribute] = attribute_value
    self.nodes_metadata.append(node_information)
def __setitem__(self, item, value):
    """Node can set chef attributes"""
    lnode = CNode(self.name, api=self.environment.local_api)
    lnode[item] = value
    lnode.save()
    if self.environment.remote_api:
        rnode = CNode(self.name, api=self.environment.remote_api)
        rnode[item] = value
        rnode.save()
def command(self):
    api = _get_chef_api(self.settings.get('chef.url'),
                        toChefUsername(self.options.chef_username),
                        self.options.chef_pem,
                        self.settings.get('chef.version'))

    print('INFO: Update IPv4 address START!')
    db = self.pyramid.db
    computers = db.nodes.find({'type': 'computer'})
    for comp in computers:
        node_id = comp.get('node_chef_id', None)
        node = Node(node_id, api)
        ipaddress = node.attributes.get('ipaddress')
        print('INFO: Update node: %s, set IP: %s' % (node_id, ipaddress))
        db.nodes.update({'node_chef_id': node_id},
                        {'$set': {'ipaddress': ipaddress}})
    print('INFO: Update IPv4 address END!')
def get_node_attrs_from_chef(self):
    try:
        env_subnets_dict = {}
        node_attribute_dict = {}
        for organization in self.awshelper_obj.get_organizations():
            node_attribute_dict = defaultdict(dict)
            node_list = Node.list(self.api)
            for environment in self.awshelper_obj.get_environments(organization):
                for region in self.awshelper_obj.get_regions():
                    vpc_list = self.awshelper_obj.get_vpc_in_region(region)
                    if vpc_list:
                        for vpc in vpc_list:
                            env_subnets_dict = self.awshelper_obj.get_env_subnets(
                                organization, region, vpc)
            for node in node_list:
                node_obj = Node(node, api=self.api)
                node_split = self.ah_obj.split_string(node, ["."])
                if node_split is None or len(node_split) <= 1:
                    continue
                node_subnet = node_split[1]
                for key_tuple, environment in env_subnets_dict.iteritems():
                    if node_subnet in key_tuple:
                        environment = env_subnets_dict[key_tuple]
                attribute_list = node_obj.attributes
                if 'ec2' in attribute_list and \
                        'instance_id' in node_obj.attributes.get_dotted('ec2'):
                    instance_id = node_obj.attributes.get_dotted('ec2.instance_id')
                    node_attribute_dict[instance_id]['node'] = node
                    if 'os' in attribute_list:
                        node_attribute_dict[instance_id]['os'] = node_obj['os']
                    if 'os_version' in attribute_list:
                        node_attribute_dict[instance_id]['os_version'] = node_obj['os_version']
                    if 'platform' in attribute_list:
                        node_attribute_dict[instance_id]['platform'] = node_obj['platform']
                    if 'platform_version' in attribute_list:
                        node_attribute_dict[instance_id]['platform_version'] = node_obj['platform_version']
                    if 'uptime' in attribute_list:
                        node_attribute_dict[instance_id]['uptime'] = node_obj['uptime']
                    if 'idletime' in attribute_list:
                        node_attribute_dict[instance_id]['idletime'] = node_obj['idletime']
        return dict(node_attribute_dict)
    except Exception as exp_object:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        self.ah_obj.print_exception("chef_helper.py",
                                    "get_node_attrs_from_chef()",
                                    exp_object, exc_type, exc_obj, exc_tb)
        return {}
def destroy(self):
    cnode = CNode(self.name)
    if self['in_use'] == "provisioned":
        # Return to pool if the node is clean
        cnode.chef_environment = "_default"
        cnode.save()
    else:
        # Remove active model if the node is dirty
        active_model = cnode['razor_metadata']['razor_active_model_uuid']
        self.razor.remove_active_model(active_model)
        self.run_cmd("reboot 0")
        CClient(self.name).delete()
        cnode.delete()
        sleep(15)
def get_machine_name(self):
    """Get the human readable machine name using the machine type
    and searching Knife for similar servers
    """
    # This must be called to get the correct Knife setup for Node(),
    # even if we don't use the resulting API object directly
    api = ChefAPI.from_config_file(self.KNIFE_CONFIG)

    if self.VPC:
        base_name = "yip_%s%%s" % self.machine_type
    else:
        base_name = "yipit_%s%%s" % self.machine_type

    index = 1
    while True:
        name = base_name % index
        node = Node(name)
        if node.exists:
            index += 1
        else:
            break
    return name
def lambda_handler(event, context):
    ec2c = boto3.client('ec2')
    api = chef.autoconfigure()
    chef_nodes = {}
    aws_is_running = []
    nodes_to_remove = []

    # Array of running ec2 instances - ip addresses
    for region in ec2c.describe_regions()['Regions']:
        ec2 = boto3.resource('ec2', region_name=region['RegionName'])
        for instance in ec2.instances.filter(
                Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]):
            #print('Debug EC2 Instances: ', region['RegionName'], instance.private_ip_address)
            aws_is_running.append(instance.private_ip_address)

    # Dictionary of ip addresses (key) and Node objects (value) from Chef
    for name, nodeobj in Node.list().iteritems():
        for key, value in nodeobj.attributes.iteritems():
            if key == 'ipaddress':
                #print('Debug Chef Nodes: ', value, nodeobj)
                chef_nodes.update({value: nodeobj})

    # Calculating nodes to remove
    for key, node in chef_nodes.iteritems():
        if key not in aws_is_running:
            nodes_to_remove.append(node)

    # Removing nodes in Chef that are no longer in AWS
    for node in nodes_to_remove:
        #print('Debug Nodes to Remove:', node['ipaddress'])
        node.delete()

    # So that the removed nodes are logged
    print 'Removed', [x['ipaddress'] for x in nodes_to_remove]
    return 'Removed', [x['ipaddress'] for x in nodes_to_remove]
    for compute in computes:
        compute_ip = rpcsqa.set_node_in_use(compute, "agent")
        rpcsqa.remove_chef(compute)
        rpcsqa.install_opencenter(compute, results.repo, 'agent', oc_server_ip)

    # Print Cluster Info
    print "************************************************************"
    print "2 VMs, 1 controller ( VM Host ), %i Agents" % len(computes)
    print "OpenCenter Server (VM) with IP: %s on Host: %s" % (oc_server_ip, controller)
    print "Chef Server (VM) with IP: %s on Host: %s" % (chef_server_ip, controller)
    print "Controller Node: %s with IP: %s" % (controller, controller_ip)
    for agent in computes:
        node = Node(agent)
        print "Agent Node: %s with IP: %s" % (agent, node['ipaddress'])
    print "************************************************************"
else:
    # Pick an opencenter server, and the rest for agents
    server = opencenter_list[0]
    dashboard = []
    clients = []
    if len(opencenter_list) > 1:
        dashboard = opencenter_list[1]
    if len(opencenter_list) > 2:
        agents = opencenter_list[2:]

    # Remove chef client...install opencenter server
    print "Making %s the server node" % server
def command(self):
    # Initialization
    logger.info("MIGRATION SCRIPT FOR FILES LIST POLICY")
    logger.info("######################################")

    # Disabling InsecureRequestWarning Unverified HTTPS request
    requests.packages.urllib3.disable_warnings()

    sanitized = False
    computers = set()
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))

    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    self.db = self.pyramid.db

    # Get local_file (File list) policy
    logger.info('Getting policy schema (local_file_res) ...')
    policy = self.db.policies.find_one({'slug': 'local_file_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.debug('policyId = %s' % str(policyId))

    # Searching nodes with the File List policy
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching for nodes with applied policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropriate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.debug("node = %s" % str(node))
        logger.info('-----------------------------------------------')
        logger.info('Node name = %s, mongo_id = %s'
                    % (node['name'], str(node['_id'])))
        logger.info('Instance of the policy on the node: %s' % str(instance))

        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warn('Validation error on instance: instance = %s'
                            % str(instance))
                logger.warn('Validation error on instance: message error = %s'
                            % str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True

        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info('Sanitized instance of the policy on the node AFTER '
                        'calling the validate method: %s' % str(instance))

            # Update mongo
            logger.info('Updating instance in database (mongo) ...')
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {field: instance}})

            logger.info('Recalculating policies in the node.')

            # Affected nodes
            if node['type'] == 'ou':
                result = list(self.db.nodes.find(
                    {'path': get_filter_nodes_belonging_ou(node['_id']),
                     'type': 'computer'},
                    {'_id': 1}))
                logger.info('OU computers = %s' % str(result))
            elif node['type'] == 'group':
                result = list(self.db.nodes.find(
                    {'_id': {'$in': node['members']}, 'type': 'computer'},
                    {'_id': 1}))
                logger.info('GROUP computers = %s' % str(result))
            elif node['type'] == 'computer':
                result = [node]
                logger.info('COMPUTER computers = %s' % str(result))
            [computers.add(str(n['_id'])) for n in result]

    for computer in computers:
        logger.info('Applying policies to COMPUTER. For more information, '
                    'see "gecosccui-celery.log" file')
        computer = self.db.nodes.find_one({'_id': ObjectId(computer)})
        apply_policies_to_computer(self.db.nodes, computer, self.auth_user,
                                   api=self.api, initialize=False,
                                   use_celery=True)

    # Removing unused attributes (copy_files, delete_files) in chef nodes
    attrs = ["%s.copy_files" % (policy['path']),
             "%s.delete_files" % (policy['path'])]
    logger.info('Removing unused attributes %s in chef nodes ...' % attrs)

    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        for attr in attrs:
            try:
                if node.attributes.has_dotted(attr):
                    logger.warn("Remove %s attribute!" % attr)
                    delete_dotted(node.attributes, attr)
                    node.save()
            except:
                logger.warn("Problem deleting attribute %s value from node: %s"
                            % (attr, node_id))
                logger.warn("You may be trying to delete a default attribute "
                            "instead of a normal attribute: %s" % (node_id))

    logger.info('Finished.')
def setUp(self):
    super(NodeTestCase, self).setUp()
    self.node = Node('test_1')
def command(self):
    # Initialization
    sanitized = False
    computers = set()
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))

    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    self.db = self.pyramid.db

    # Get local_admin_users_res (Local Administrators) policy
    logger.info('Getting Local Administrators (local_admin_users_res) policy ...')
    policy = self.db.policies.find_one({'slug': 'local_admin_users_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.info('Id.policy = %s' % str(policyId))

    # Searching nodes with the Local Administrators policy
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching nodes with the Local Administrators policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropriate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.info('Node name = %s, _id = %s'
                    % (node['name'], str(node['_id'])))
        logger.info('Instance before validate method: %s' % str(instance))

        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warning('Validation error on instance = %s'
                               % str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True

        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info('Sanitized instance: %s' % str(instance))

            # Update mongo
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {field: instance}})

            # Affected nodes
            if node['type'] == 'ou':
                result = list(self.db.nodes.find(
                    {'path': get_filter_nodes_belonging_ou(node['_id']),
                     'type': 'computer'},
                    {'_id': 1}))
                logger.info('OU computers = %s' % str(result))
            elif node['type'] == 'group':
                result = list(self.db.nodes.find(
                    {'_id': {'$in': node['members']}, 'type': 'computer'},
                    {'_id': 1}))
                logger.info('GROUP computers = %s' % str(result))
            elif node['type'] == 'computer':
                result = [node]
                logger.info('COMPUTER computers = %s' % str(result))
            [computers.add(str(n['_id'])) for n in result]

    # Removing unused local_admin_remove_list attribute in chef nodes
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        attr_dotted = policy['path'] + '.local_admin_remove_list'
        logger.info('Attribute dotted path: %s' % (attr_dotted))
        if node.attributes.has_dotted(attr_dotted):
            logger.info("Remove 'local_admin_remove_list' attribute!")
            try:
                logger.info("node.attributes = %s" % str(
                    node.attributes['gecos_ws_mgmt']['misc_mgmt']
                        ['local_admin_users_res'].to_dict()))
                delete_dotted(node.attributes, attr_dotted)
                node.save()
            except:
                logger.warn("Problem deleting local_admin_remove_list value "
                            "from node: %s" % (node_id))
                logger.warn("You may be trying to delete a default attribute "
                            "instead of a normal attribute: %s" % (node_id))

    for computer in computers:
        logger.info('computer = %s' % str(computer))
        computer = self.db.nodes.find_one({'_id': ObjectId(computer)})
        apply_policies_to_computer(self.db.nodes, computer, self.auth_user,
                                   api=self.api, initialize=False,
                                   use_celery=False)

    logger.info('Finished.')
def command(self):
    db = self.pyramid.db
    ou = db.nodes.find_one({'_id': ObjectId(self.options.ou_id)})
    if not ou:
        print 'Error: OU does not exist'
        return

    comp = db.nodes.find_one({'_id': ObjectId(self.options.comp_id)})
    if not comp:
        print 'Error: computer does not exist'
        return

    node_id = comp.get('node_chef_id', None)
    if not node_id:
        print 'Error: this computer has no node_chef_id'
        return

    policies = comp.get('policies', None)
    if policies != {}:
        print 'Error: this computer should not have any policies'
        return

    admin = db.adminusers.find_one({'username': self.options.gcc_username})
    if not admin:
        print 'Error: this admin does not exist'
        return
    elif not admin.get('is_superuser', None):
        print 'You need a super admin'
        return

    number_nodes = int(self.options.number)
    api = get_chef_api(self.settings, admin)
    node = ChefNode(node_id, api)

    for i in range(number_nodes):
        new_node_name = '%s-%s' % (self.options.prefix, i)
        new_node = ChefNode(new_node_name, api)
        for attr in node.to_dict().keys():
            if hasattr(node, attr) and attr != 'name':
                if attr == 'automatic':
                    automatic_dict = node.automatic.to_dict()
                    automatic_dict['ohai_gecos']['pclabel'] = new_node_name
                    user1 = 'user.name-%s-1' % new_node_name
                    user2 = 'user.name-%s-2' % new_node_name
                    automatic_dict['ohai_gecos']['users'] = [
                        {'username': user1, 'home': '/home/%s' % user1,
                         'gid': 1000, 'sudo': False, 'uid': 1000},
                        {'username': user2, 'home': '/home/%s' % user2,
                         'gid': 1000, 'sudo': False, 'uid': 1001}]
                    automatic = NodeAttributes(automatic_dict)
                    setattr(new_node, attr, automatic)
                elif attr == 'normal':
                    node.normal.set_dotted('ohai_gecos', {})
                else:
                    setattr(new_node, attr, getattr(node, attr))
        new_node.save()
        print 'Created %s at chef' % new_node_name

        res = requests.post('%s/register/computer/' % self.options.gcc_url,
                            {'ou_id': self.options.ou_id,
                             'node_id': new_node_name},
                            auth=(self.options.gcc_username,
                                  self.options.gcc_password))
        if res.ok and res.json()['ok']:
            print 'Created %s at gcc' % new_node_name
        elif res.ok and not res.json()['ok']:
            print 'Error %s at gcc' % new_node_name
            print '\t %s' % res.json()['message']
        else:
            print 'Unknown error %s at gcc' % new_node_name

        res = requests.put('%s/chef/status/' % self.options.gcc_url,
                           {'node_id': new_node_name,
                            'gcc_username': self.options.gcc_username})
        if res.ok and res.json()['ok']:
            print 'Chef client %s' % new_node_name
        elif res.ok and not res.json()['ok']:
            print 'Error %s at chef client' % new_node_name
            print '\t %s' % res.json()['message']
        else:
            print 'Unknown error %s at chef client' % new_node_name

    waiting_to_celery(db)
def command(self):
    from gecoscc.api.chef_status import USERS_OLD, USERS_OHAI

    # Initialization
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    self.db = self.pyramid.db

    # Check administrator user
    auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    # Iterating over all nodes
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        try:
            if node.attributes.get_dotted(USERS_OLD):
                delete_dotted(node.attributes, USERS_OLD)
                node.save()
        except KeyError:
            logger.warn("Attribute not found: %s" % (USERS_OLD))
        except:
            logger.warn("Problem deleting users_old attribute from node: %s"
                        % (node_id))

        # Updating users list
        computer = self.db.nodes.find_one({'node_chef_id': node_id,
                                           'type': 'computer'})
        if not computer:
            logger.error('This node does not exist (mongodb)')
            continue

        chef_node_usernames = set(
            [d['username'] for d in node.attributes.get_dotted(USERS_OHAI)])
        gcc_node_usernames = set(
            [d['name'] for d in self.db.nodes.find(
                {'type': 'user', 'computers': {'$in': [computer['_id']]}},
                {'_id': 0, 'name': 1})])

        users_recalculate_policies = []
        users_remove_policies = []

        # Users added/removed?
        if set.symmetric_difference(chef_node_usernames, gcc_node_usernames):
            logger.info("Users added/removed found.")

            # Add users, or link a user to the computer if it already exists
            addusers = set.difference(chef_node_usernames, gcc_node_usernames)
            for add in addusers:
                logger.info("Added user: %s" % (add))
                user = self.db.nodes.find_one(
                    {'name': add, 'type': 'user',
                     'path': get_filter_in_domain(computer)})
                if not user:
                    user_model = User()
                    user = user_model.serialize(
                        {'name': add,
                         'path': computer.get('path', ''),
                         'type': 'user',
                         'lock': computer.get('lock', ''),
                         'source': computer.get('source', '')})
                    user = update_computers_of_user(self.db, user, self.api)
                    del user['_id']
                    user_id = self.db.nodes.insert(user)
                    user = self.db.nodes.find_one({'_id': user_id})
                    users_recalculate_policies.append(user)
                else:
                    computers = user.get('computers', [])
                    if computer['_id'] not in computers:
                        computers.append(computer['_id'])
                        self.db.nodes.update(
                            {'_id': user['_id']},
                            {'$set': {'computers': computers}})
                        users_recalculate_policies.append(user)
                        add_computer_to_user(computer['_id'], user['_id'])

            # Removed users
            delusers = set.difference(gcc_node_usernames, chef_node_usernames)
            for delete in delusers:
                logger.info("Deleted user: %s" % (delete))
                user = self.db.nodes.find_one(
                    {'name': delete, 'type': 'user',
                     'path': get_filter_in_domain(computer)})
                computers = user['computers'] if user else []
                if computer['_id'] in computers:
                    users_remove_policies.append(deepcopy(user))
                    computers.remove(computer['_id'])
                    self.db.nodes.update({'_id': user['_id']},
                                         {'$set': {'computers': computers}})

        for user in users_recalculate_policies:
            apply_policies_to_user(self.db.nodes, user, auth_user)

        for user in users_remove_policies:
            remove_policies_of_computer(user, computer, auth_user)
def get_all_nodes(self):
    return Node.list().names
def command(self):
    # Initialization
    logger.info("MIGRATION SCRIPT FOR USER_APPS_AUTOSTART POLICY")
    logger.info("###############################################")

    # Disabling InsecureRequestWarning Unverified HTTPS request
    requests.packages.urllib3.disable_warnings()

    sanitized = False
    ous = []
    groups = []
    users = []
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))

    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    self.db = self.pyramid.db

    # Get user_apps_autostart (Users) policy
    logger.info('Getting policy schema (user_apps_autostart_res) ...')
    policy = self.db.policies.find_one({'slug': 'user_apps_autostart_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.debug('policyId = %s' % str(policyId))

    # Searching nodes with the policy applied
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching for nodes with applied policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropriate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.info("node = %s" % str(node))
        logger.info('-----------------------------------------------')
        logger.info('Node name = %s, mongo_id = %s'
                    % (node['name'], str(node['_id'])))
        logger.info('Instance of the policy on the node: %s' % str(instance))

        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warn('Validation error on instance = %s'
                            % str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True

        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info('Sanitized instance of the policy on the node AFTER '
                        'calling the validate method: %s' % str(instance))

            # Update mongo
            logger.info('Updating instance in database (mongo) ...')
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {field: instance}})

            logger.info('Recalculating policies in the node.')

            # Affected nodes
            if node['type'] == 'ou':
                ous.append(node)
            elif node['type'] == 'group':
                groups.append(node)
            elif node['type'] == 'user':
                users.append(node)

    # We only go through the highest level OUs.
    # Therefore, we eliminate intermediate OUs and
    # then do not recalculate the policies
    # for the same node several times.
    # (iterate over a copy: removing from a list while iterating it skips elements)
    for ou in list(ous):
        parents = [ObjectId(oid) for oid in ou['path'].split(',')
                   if oid != 'root']
        if any(o['_id'] in parents for o in ous):
            ous.remove(ou)

    # Users that are not under an OU or GROUP that have the migrated policy
    for user in list(users):
        parents = [ObjectId(oid) for oid in user['path'].split(',')
                   if oid != 'root']
        if any(o['_id'] in parents for o in ous):
            users.remove(user)
        elif any(user['_id'] in group['members'] for group in groups):
            users.remove(user)

    # Recalculating policies for OU
    for ou in ous:
        old = deepcopy(ou)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'ou', ou, old)

    # Recalculating policies for GROUP
    for group in groups:
        old = deepcopy(group)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'group', group, old)

    # Recalculating policies for USER
    for user in users:
        old = deepcopy(user)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'user', user, old)

    # Removing unused desktops_to_remove attribute in chef nodes
    logger.info('Removing unused desktops_to_remove attribute in chef nodes ...')
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        field_chef = '%s.users' % policy['path']
        users = node.attributes.get_dotted(field_chef) \
            if node.attributes.has_dotted(field_chef) else []
        for user in users:
            logger.debug("user = %s" % (user))
            attr_delete_path = '%s.%s.desktops_to_remove' % (field_chef, user)
            logger.debug('Attribute dotted path: %s' % (attr_delete_path))
            if node.attributes.has_dotted(attr_delete_path):
                logger.warn("Remove 'desktops_to_remove' attribute for user %s!"
                            % (user))
                try:
                    delete_dotted(node.attributes, attr_delete_path)
                    node.save()
                except:
                    logger.warn("Problem deleting desktops_to_remove value "
                                "from node: %s" % (node_id))
                    logger.warn("You may be trying to delete a default "
                                "attribute instead of a normal attribute: %s"
                                % (node_id))

    logger.info('Finished.')
#!/usr/bin/env python
#
# requires
#  - pip install PyChef
#  - access to knife.rb file
#  - access to client.pem file
#
# DON'T RUN THIS - THESE ARE JUST EXAMPLES

#------------------------------
# update node info
from chef import autoconfigure, Node

api = autoconfigure()
n = Node('web1')
print n['fqdn']
n['myapp']['version'] = '1.0'
n.save()
#------------------------------

#------------------------------
# get client list
from chef import autoconfigure

api = autoconfigure()
print api.api_request('GET', '/clients')
#------------------------------
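#------------------------------
# search for nodes - an illustrative sketch added here, using the same
# Search API seen elsewhere in this collection; the 'role:webserver'
# query is just an example value
from chef import autoconfigure, Search

api = autoconfigure()
for row in Search('node', 'role:webserver'):
    print row.object.name
#------------------------------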
def save_node_and_free(node, api=None, refresh=False):
    if refresh and api:
        node = ChefNode(node.name, api)
    node.attributes.set_dotted(USE_NODE, {})
    node.save()
def command(self):
    # Initialization
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))

    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        return

    self.db = self.pyramid.db
    ous = []
    groups = []
    computers = []

    # Get packages policy ID
    packages_policy = self.db.policies.find_one({"slug": "package_res"})
    if packages_policy is None:
        logger.error('Can\'t detect "package_res" policy!')
        return
    if 'pkgs_to_remove' in packages_policy["schema"]["properties"]:
        logger.error("The 'package_res' policy in the system is deprecated, "
                     "please update to the new package policy!")
        return
    logger.info('Packages policy ID: %s' % (str(packages_policy['_id'])))

    # Get all nodes with old package policy data
    logger.info('Getting all nodes with old package policy data...')
    path_to_find = "policies.%s.pkgs_to_remove" % (str(packages_policy['_id']))
    old_policy_nodes = self.db.nodes.find(
        {'$query': {path_to_find: {'$exists': True}},
         '$orderby': {"path": 1}})

    updated_nodes = []
    for node in old_policy_nodes:
        logger.info('Updating node %s ...' % (str(node['_id'])))
        updated_nodes.append(str(node['_id']))
        logger.debug('Packages to add: %s' % (str(
            node['policies'][str(packages_policy['_id'])]['package_list'])))
        logger.debug('Packages to remove: %s' % (str(
            node['policies'][str(packages_policy['_id'])]['pkgs_to_remove'])))

        # Join the lists
        package_list = []
        for package_name in node['policies'][str(packages_policy['_id'])]['package_list']:
            package_list.append({'name': package_name, 'version': 'current',
                                 'action': 'add'})
        for package_name in node['policies'][str(packages_policy['_id'])]['pkgs_to_remove']:
            package_list.append({'name': package_name, 'version': 'current',
                                 'action': 'remove'})

        if 'pkgs_to_remove' in node['policies'][str(packages_policy['_id'])]:
            del node['policies'][str(packages_policy['_id'])]['pkgs_to_remove']
        node['policies'][str(packages_policy['_id'])]['package_list'] = package_list

        # Update policies
        self.db.nodes.update({'_id': node['_id']},
                             {'$set': {'policies': node['policies']}})
        logger.debug('Joined list: %s' % (str(
            node['policies'][str(packages_policy['_id'])]['package_list'])))

        if node['type'] == 'ou':
            ous.append(node)
        elif node['type'] == 'group':
            groups.append(node)
        elif node['type'] == 'computer':
            computers.append(node)

    logger.info('%s nodes were updated!' % (len(updated_nodes)))

    # We only go through the highest level OUs.
    # Therefore, we eliminate intermediate OUs and
    # then do not recalculate the policies
    # for the same node several times.
    # (iterate over a copy: removing from a list while iterating it skips elements)
    for ou in list(ous):
        parents = [ObjectId(oid) for oid in ou['path'].split(',')
                   if oid != 'root']
        if any(o['_id'] in parents for o in ous):
            ous.remove(ou)

    # Computers that are not under an OU or GROUP that have the migrated policy
    for computer in list(computers):
        parents = [ObjectId(oid) for oid in computer['path'].split(',')
                   if oid != 'root']
        if any(o['_id'] in parents for o in ous):
            computers.remove(computer)
        elif any(computer['_id'] in group['members'] for group in groups):
            computers.remove(computer)

    # Recalculating policies for OU
    for ou in ous:
        old = deepcopy(ou)
        del old["policies"][str(packages_policy['_id'])]
        object_changed(self.auth_user, 'ou', ou, old)

    # Recalculating policies for GROUP
    for group in groups:
        old = deepcopy(group)
        del old["policies"][str(packages_policy['_id'])]
        object_changed(self.auth_user, 'group', group, old)

    # Recalculating policies for COMPUTER
    for computer in computers:
        old = deepcopy(computer)
        del old["policies"][str(packages_policy['_id'])]
        object_changed(self.auth_user, 'computer', computer, old)

    # Recalculate policies for Chef nodes
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        if ("gecos_ws_mgmt" in node.attributes
                and "software_mgmt" in node.attributes["gecos_ws_mgmt"]
                and "package_res" in node.attributes["gecos_ws_mgmt"]["software_mgmt"]):
            if "pkgs_to_remove" in node.attributes["gecos_ws_mgmt"]["software_mgmt"]["package_res"]:
                logger.debug("Chef node %s contains a pkgs_to_remove value!"
                             % (node_id))
                # Remove pkgs_to_remove from the chef node
                logger.info("Remove 'pkgs_to_remove' attribute!")
                try:
                    del node.attributes["gecos_ws_mgmt"]["software_mgmt"]["package_res"]["pkgs_to_remove"]
                    node.save()
                except:
                    logger.warn("Problem deleting pkgs_to_remove value from node: %s"
                                % (node_id))

            if "package_list" not in node.attributes["gecos_ws_mgmt"]["software_mgmt"]["package_res"]:
                logger.error("Chef node %s doesn't contain a package_list value!"
                             % (node_id))
                continue

            package_list = node.attributes["gecos_ws_mgmt"]["software_mgmt"]["package_res"]["package_list"]
            for element in package_list:
                if 'action' not in element:
                    logger.debug("Chef node %s doesn't have an action value in "
                                 "package_res! (package_list: %s)"
                                 % (node_id, str(package_list)))
                    break

    # Final check
    bad_nodes = Search('node', "pkgs_to_remove:*", rows=1000, start=0,
                       api=self.api)
    for node in bad_nodes:
        logger.warn('Detected bad node: %s' % (node.object.name))
        gecos_node = self.db.nodes.find_one({"node_chef_id": node.object.name})
        if gecos_node is None:
            logger.warn('Can\'t find node in MongoDB for: %s'
                        % (node.object.name))
        else:
            logger.warn("For an unknown reason a computer called %s wasn't updated!"
                        % (gecos_node['name']))

    logger.info('END ;)')