def destroy_node(self, node):
    """
    Destroys a node provisioned by razor

    :param node: Node to destroy
    :type node: ChefNode
    """
    cnode = Node(node.name, node.environment.local_api)
    in_use = node['in_use']
    if in_use == "provisioning" or in_use == 0:
        # Return to pool if the node is clean
        cnode['in_use'] = 0
        cnode['archive'] = {}
        cnode.chef_environment = "_default"
        cnode.save()
    else:
        # Remove active model if the node is dirty
        active_model = cnode['razor_metadata']['razor_active_model_uuid']
        try:
            if node.feature_in('controller'):
                # rabbit can cause the node to not actually reboot
                kill = ("for i in `ps -U rabbitmq | tail -n +2 | "
                        "awk '{print $1}' `; do kill -9 $i; done")
                node.run_cmd(kill)
            node.run_cmd("shutdown -r now")
            self.api.remove_active_model(active_model)
            Client(node.name).delete()
            cnode.delete()
            sleep(15)
        except Exception:
            # Narrowed from a bare ``except`` so SystemExit and
            # KeyboardInterrupt are not swallowed during teardown.
            util.logger.error("Node unreachable. "
                              "Manual restart required:{0}".
                              format(str(node)))
def build_computes(computes): # Run computes print "Making the compute nodes..." for compute in computes: compute_node = Node(compute) compute_node['in_use'] = "compute" compute_node.run_list = ["role[qa-single-compute]"] compute_node.save() print "Updating server...this may take some time" update_node(compute_node) if compute_node['platform_family'] == 'rhel': print "Platform is RHEL family, disabling iptables" disable_iptables(compute_node) # Run chef client twice print "Running chef-client on compute node: %s, this may take some time..." % compute run1 = run_chef_client(compute_node) if run1['success']: print "First chef-client run successful...starting second run..." run2 = run_chef_client(compute_node) if run2['success']: print "Second chef-client run successful..." else: print "Error running chef-client for compute %s" % compute print run2 sys.exit(1) else: print "Error running chef-client for compute %s" % compute print run1 sys.exit(1)
def _is_node_busy_and_reserve_it(node_id, api, controller_requestor='gcc'):
    """
    Check whether *node_id* is reserved by another controller and, when it
    is free (or the reservation expired), try to reserve it for
    *controller_requestor* by writing into the USE_NODE attribute.

    :returns: tuple ``(node, is_busy)``
    """
    settings = get_current_registry().settings
    seconds_block_is_busy = int(settings.get('chef.seconds_block_is_busy'))
    time_to_exp = datetime.timedelta(seconds=seconds_block_is_busy)

    node = ChefNode(node_id, api)
    current_use_node = node.attributes.get(USE_NODE, {})
    current_use_node_control = current_use_node.get('control', None)
    current_use_node_exp_date = current_use_node.get('exp_date', None)
    if current_use_node_exp_date:
        current_use_node_exp_date = json.loads(
            current_use_node_exp_date, object_hook=json_util.object_hook)
        current_use_node_exp_date = current_use_node_exp_date.astimezone(
            pytz.utc).replace(tzinfo=None)
        # exp_date is written below with utcnow(), so compare against naive
        # UTC "now" as well. The previous datetime.now() (local time) made
        # reservations expire too early or too late on non-UTC hosts.
        now = datetime.datetime.utcnow()
        if now - current_use_node_exp_date > time_to_exp:
            current_use_node_control = None
    if current_use_node_control == controller_requestor:
        return (node, False)
    elif current_use_node_control is None:
        exp_date = datetime.datetime.utcnow() + time_to_exp
        node.attributes.set_dotted(USE_NODE, {
            'control': controller_requestor,
            'exp_date': json.dumps(exp_date, default=json_util.default)})
        node.save()
        node2 = ChefNode(node.name, api)  # second check
        current_use_node2 = node2.attributes.get(USE_NODE, {})
        current_use_control2 = current_use_node2.get('control', None)
        if current_use_control2 == controller_requestor:
            return (node2, False)
    return (node, True)
def _is_node_busy_and_reserve_it(node_id, api, controller_requestor='gcc'):
    """
    Check whether *node_id* is reserved by another controller and, when it
    is free (or the reservation expired), try to reserve it for
    *controller_requestor* by writing into the USE_NODE attribute.

    :returns: tuple ``(node, is_busy)``
    """
    settings = get_current_registry().settings
    seconds_block_is_busy = int(settings.get('chef.seconds_block_is_busy'))
    time_to_exp = datetime.timedelta(seconds=seconds_block_is_busy)

    node = ChefNode(node_id, api)
    current_use_node = node.attributes.get(USE_NODE, {})
    current_use_node_control = current_use_node.get('control', None)
    current_use_node_exp_date = current_use_node.get('exp_date', None)
    if current_use_node_exp_date:
        current_use_node_exp_date = json.loads(
            current_use_node_exp_date, object_hook=json_util.object_hook)
        current_use_node_exp_date = current_use_node_exp_date.astimezone(
            pytz.utc).replace(tzinfo=None)
        # exp_date is written below with utcnow(), so compare against naive
        # UTC "now" as well. The previous datetime.now() (local time) made
        # reservations expire too early or too late on non-UTC hosts.
        now = datetime.datetime.utcnow()
        if now - current_use_node_exp_date > time_to_exp:
            current_use_node_control = None
    if current_use_node_control == controller_requestor:
        return (node, False)
    elif current_use_node_control is None:
        exp_date = datetime.datetime.utcnow() + time_to_exp
        node.attributes.set_dotted(USE_NODE, {
            'control': controller_requestor,
            'exp_date': json.dumps(exp_date, default=json_util.default)})
        node.save()
        node2 = ChefNode(node.name, api)  # second check
        current_use_node2 = node2.attributes.get(USE_NODE, {})
        current_use_control2 = current_use_node2.get('control', None)
        if current_use_control2 == controller_requestor:
            return (node2, False)
    return (node, True)
def chef_instance(self, deployment, name, flavor="2GBP"):
    """
    Builds an instance with desired specs and inits it with chef

    :param deployment: deployment to add to
    :type deployment: ChefDeployment
    :param name: name for instance
    :type name: string
    :param flavor: desired flavor for node
    :type flavor: string
    :rtype: ChefNode
    """
    image = deployment.os_name
    server, password = self.build_instance(name=name, image=image,
                                           flavor=flavor)
    # Bootstrap the server with the configured run list, when there is one
    run_list = ",".join(util.config[str(self)]['run_list'])
    run_list_arg = "-r {0}".format(run_list) if run_list else ""
    command = 'knife bootstrap {0} -u root -P {1} -N {2} {3}'.format(
        server.accessIPv4, password, name, run_list_arg)
    run_cmd(command)

    # Record provisioning details on the freshly registered chef node
    node = Node(name, api=deployment.environment.local_api)
    node.chef_environment = deployment.environment.name
    node['in_use'] = "provisioning"
    node['ipaddress'] = server.accessIPv4
    node['password'] = password
    node['uuid'] = server.id
    node['current_user'] = "******"
    node.save()
    return node
def post(self):
    """
    Register a new chef client and chef node for the posted ``node_id``.

    :returns: dict with an ``ok`` flag, a ``message`` and, on success,
        the new client's private key.
    """
    node_id = self.request.POST.get('node_id')
    if node_id is None:
        return {'ok': False, 'message': 'Missing node ID'}
    settings = get_current_registry().settings
    api = get_chef_api(settings, self.request.user)
    # create chef client
    chef_client = ChefClient(node_id, api)
    if chef_client.exists:
        return {'ok': False, 'message': 'This client already exists'}
    chef_client = ChefClient.create(node_id, api)
    # Prepare the API for this client
    chef_url = settings.get('chef.url')
    chef_version = settings.get('chef.version')
    chef_ssl_verify = settings.get('chef.ssl.verify')
    # bool('False') is True (any non-empty string is truthy), so parse the
    # setting by comparing against the literal 'True' instead.
    if chef_ssl_verify in ('False', 'True'):
        chef_ssl_verify = chef_ssl_verify == 'True'
    # NOTE(review): the parsed chef_ssl_verify is still not passed to
    # ChefAPI below (ssl_verify stays hard-coded False) — confirm intent.
    api = ChefAPI(chef_url, str(chef_client.private_key), node_id,
                  chef_version, ssl_verify=False)
    # create chef node
    chef_node = ChefNode(node_id, api)
    if chef_node.exists:
        return {'ok': False, 'message': 'This node already exists'}
    chef_node.save()
    return {'ok': True,
            'message': 'Node and client have been added',
            'client_private_key': chef_client.private_key}
def post(self):
    """
    Register a new chef client and chef node for the posted ``node_id``.

    :returns: dict with an ``ok`` flag, a ``message`` and, on success,
        the new client's private key.
    """
    node_id = self.request.POST.get('node_id')
    if node_id is None:
        return {'ok': False, 'message': 'Missing node ID'}
    settings = get_current_registry().settings
    api = get_chef_api(settings, self.request.user)
    # create chef client
    chef_client = ChefClient(node_id, api)
    if chef_client.exists:
        return {'ok': False, 'message': 'This client already exists'}
    chef_client = ChefClient.create(node_id, api)
    # Prepare the API for this client
    chef_url = settings.get('chef.url')
    chef_version = settings.get('chef.version')
    chef_ssl_verify = settings.get('chef.ssl.verify')
    # bool('False') is True (any non-empty string is truthy), so parse the
    # setting by comparing against the literal 'True' instead.
    if chef_ssl_verify in ('False', 'True'):
        chef_ssl_verify = chef_ssl_verify == 'True'
    # NOTE(review): the parsed chef_ssl_verify is still not passed to
    # ChefAPI below (ssl_verify stays hard-coded False) — confirm intent.
    api = ChefAPI(chef_url, chef_client.private_key.encode(), node_id,
                  chef_version, ssl_verify=False)
    # create chef node
    chef_node = ChefNode(node_id, api)
    if chef_node.exists:
        return {'ok': False, 'message': 'This node already exists'}
    chef_node.save()
    return {'ok': True,
            'message': 'Node and client have been added',
            'client_private_key': chef_client.private_key}
def destroy_node(self, node):
    """
    Destroys a node provisioned by razor

    :param node: Node to destroy
    :type node: ChefNode
    """
    cnode = Node(node.name, node.environment.local_api)
    in_use = node['in_use']
    if in_use == "provisioning" or in_use == 0:
        # Return to pool if the node is clean
        cnode['in_use'] = 0
        cnode['archive'] = {}
        cnode.chef_environment = "_default"
        cnode.save()
    else:
        # Remove active model if the node is dirty
        active_model = cnode['razor_metadata']['razor_active_model_uuid']
        try:
            if node.feature_in('controller'):
                # rabbit can cause the node to not actually reboot
                kill = ("for i in `ps -U rabbitmq | tail -n +2 | "
                        "awk '{print $1}' `; do kill -9 $i; done")
                node.run_cmd(kill)
            node.run_cmd("shutdown -r now")
            self.api.remove_active_model(active_model)
            Client(node.name).delete()
            cnode.delete()
            sleep(15)
        except Exception:
            # Narrowed from a bare ``except`` so SystemExit and
            # KeyboardInterrupt are not swallowed during teardown.
            util.logger.error("Node unreachable. "
                              "Manual restart required:{0}".format(
                                  str(node)))
def build_dir_server(dir_server): # We dont support 389 yet, so exit if it is not ldap if results.dir_version != 'openldap': print "%s as a directory service is not yet supported...exiting" % results.dir_version sys.exit(1) # Build directory service node dir_node = Node(dir_server) ip = dir_node['ipaddress'] root_pass = razor.get_active_model_pass(dir_node['razor_metadata'].to_dict()['razor_active_model_uuid'])['password'] dir_node['in_use'] = 'directory-server' dir_node.run_list = ["role[qa-%s-%s]" % (results.dir_version, results.os)] dir_node.save() print "Updating server...this may take some time" update_node(dir_node) # if redhat platform, disable iptables if dir_node['platform_family'] == 'rhel': print "Platform is RHEL family, disabling iptables" disable_iptables(dir_node) # Run chef-client twice print "Running chef-client for directory service node...this may take some time..." run1 = run_chef_client(dir_node) if run1['success']: print "First chef-client run successful...starting second run..." run2 = run_chef_client(dir_node) if run2['success']: print "Second chef-client run successful..." else: print "Error running chef-client for directory node %s" % dir_node print run2 sys.exit(1) else: print "Error running chef-client for directory node %s" % dir_node print run1 sys.exit(1) # Directory service is set up, need to import config if run1['success'] and run2['success']: if results.dir_version == 'openldap': scp_run = run_remote_scp_cmd(ip, 'root', root_pass, '/var/lib/jenkins/source_files/ldif/*.ldif') if scp_run['success']: ssh_run = run_remote_ssh_cmd(ip, 'root', root_pass, 'ldapadd -x -D \"cn=admin,dc=dev,dc=rcbops,dc=me\" -f base.ldif -w@privatecloud') elif results.dir_version == '389': # Once we support 389, code here to import needed config files print "389 is not yet supported..." 
sys.exit(1) else: print "%s is not supported...exiting" % results.dir_version sys.exit(1) if scp_run['success'] and ssh_run['success']: print "Directory Service: %s successfully set up..." % results.dir_version else: print "Failed to set-up Directory Service: %s..." % results.dir_version sys.exit(1)
def build(self):
    """ Builds the node """
    # Start from an empty run_list, both locally and on the chef server,
    # before delegating the actual build to the parent class.
    self.run_list = []
    chef_node = ChefNode(self.name, self.environment.local_api)
    chef_node.run_list = []
    chef_node.save()
    super(Chef, self).build()
def clear_pool(chef_nodes, environment):
    """Return every chef node in *environment* to the default pool.

    Nodes still carrying the network-interfaces recipe are erased
    entirely; all others are moved back to the ``_default`` environment.
    """
    for entry in chef_nodes:
        name = entry['name']
        node = Node(name)
        if node.chef_environment != environment:
            continue
        if "recipe[network-interfaces]" not in node.run_list:
            erase_node(name)
        else:
            node.chef_environment = "_default"
            node.save()
def __setitem__(self, item, value):
    """ Node can set chef attributes """
    # Write the attribute to the local chef node first and then, when a
    # remote API is configured, mirror the same change there.
    apis = [self.environment.local_api]
    if self.environment.remote_api:
        apis.append(self.environment.remote_api)
    for api in apis:
        backing_node = CNode(self.name, api=api)
        backing_node[item] = value
        backing_node.save()
def destroy(self):
    """Release this node: clean nodes return to the pool, dirty nodes are
    rebooted via razor and removed from chef entirely."""
    cnode = CNode(self.name)
    if self['in_use'] != "provisioned":
        # Remove active model if the node is dirty
        active_model = cnode['razor_metadata']['razor_active_model_uuid']
        self.razor.remove_active_model(active_model)
        self.run_cmd("reboot 0")
        CClient(self.name).delete()
        cnode.delete()
        sleep(15)
    else:
        # Return to pool if the node is clean
        cnode.chef_environment = "_default"
        cnode.save()
def _is_node_busy_and_reserve_it(node_id, api, controller_requestor='gcc'):
    '''
    Check if the node is busy, else try to get it and write in control
    and expiration date in the field USE_NODE.
    '''
    settings = get_current_registry().settings
    seconds_block_is_busy = int(settings.get('chef.seconds_block_is_busy'))
    time_to_exp = datetime.timedelta(seconds=seconds_block_is_busy)

    # Time the node fetch; used below to scale the back-off sleep before
    # the double-check read.
    time_get = time.time()
    node = ChefNode(node_id, api)
    time_get = time.time() - time_get

    current_use_node = node.attributes.get(USE_NODE, {})
    current_use_node_control = current_use_node.get('control', None)
    current_use_node_exp_date = current_use_node.get('exp_date', None)
    if current_use_node_exp_date:
        current_use_node_exp_date = json.loads(
            current_use_node_exp_date, object_hook=json_util.object_hook)
        current_use_node_exp_date = current_use_node_exp_date.astimezone(
            pytz.utc).replace(tzinfo=None)
        # exp_date is written below with utcnow(), so compare against naive
        # UTC "now" as well. The previous datetime.now() (local time) made
        # reservations expire too early or too late on non-UTC hosts.
        now = datetime.datetime.utcnow()
        if now - current_use_node_exp_date > time_to_exp:
            current_use_node_control = None
    if current_use_node_control == controller_requestor:
        return (node, False)
    elif current_use_node_control is None:
        exp_date = datetime.datetime.utcnow() + time_to_exp
        node.attributes.set_dotted(USE_NODE, {
            'control': controller_requestor,
            'exp_date': json.dumps(exp_date, default=json_util.default)})
        node.save()
        # Sleep proportionally to server latency before re-reading, to let
        # a competing writer's save become visible.
        smart_lock_sleep_parameter = settings.get('chef.smart_lock_sleep_factor', 3)
        seconds_sleep_is_busy = time_get * int(smart_lock_sleep_parameter)
        time.sleep(seconds_sleep_is_busy)
        node2 = ChefNode(node.name, api)  # second check
        current_use_node2 = node2.attributes.get(USE_NODE, {})
        current_use_control2 = current_use_node2.get('control', None)
        if current_use_control2 == controller_requestor:
            return (node2, False)
    return (node, True)
def build_controller(controller, ha=False, ha_num=0): controller_node = Node(controller) # Check for ha if ha: print "Making %s the ha-controller%s node" % (controller, ha_num) controller_node['in_use'] = "ha-controller%s" % ha_num controller_node.run_list = ["role[qa-ha-controller%s]" % ha_num] else: print "Making %s the controller node" % controller controller_node['in_use'] = "controller" controller_node.run_list = ["role[qa-single-controller]"] # save node controller_node.save() print "Updating server...this may take some time" update_node(controller_node) if controller_node['platform_family'] == 'rhel': print "Platform is RHEL family, disabling iptables" disable_iptables(controller_node) # Run chef-client twice print "Running chef-client for controller node...this may take some time..." run1 = run_chef_client(controller_node) if run1['success']: print "First chef-client run successful...starting second run..." run2 = run_chef_client(controller_node) if run2['success']: print "Second chef-client run successful..." else: print "Error running chef-client for controller %s" % controller print run2 sys.exit(1) else: print "Error running chef-client for controller %s" % controller print run1 sys.exit(1)
def check_user_data(self, user):
    """
    Mirror a GCC user's email / first name / last name into the
    ``gecos_info`` attribute of every chef node the user is related to.

    :param user: mongo ``user`` document
    :raises ValueError: if *user* is not of type ``user``
    """
    if user['type'] != 'user':
        raise ValueError('user must be an user')

    if ((not 'email' in user or user['email'] == '') and
            (not 'first_name' in user or user['first_name'] == '') and
            (not 'last_name' in user or user['last_name'] == '')):
        # Nothing to do
        return

    computers = self.db.nodes.find_one(
        {"_id": ObjectId(user['_id'])})['computers']
    for computer_id in computers:
        computer = self.db.nodes.find_one({"_id": ObjectId(computer_id)})
        if "node_chef_id" not in computer:
            logger.error("No Chef ID in '%s' computer!" % (computer['name']))
            continue

        # Check Chef node
        node = ChefNode(computer['node_chef_id'], self.api)
        logger.info("Computer: %s Chef ID: %s" % (computer['name'],
                                                  computer['node_chef_id']))
        if not node.exists:
            logger.error("No Chef node with ID %s!" % (computer['node_chef_id']))
            continue

        # Make sure the gecos_info.users.<username> container exists
        if not node.normal.has_dotted('gecos_info'):
            node.normal.set_dotted('gecos_info', {})
        if not node.normal.has_dotted('gecos_info.users'):
            node.normal.set_dotted('gecos_info.users', {})
        username = get_username_chef_format(user)
        if not node.normal.has_dotted('gecos_info.users.%s' % (username)):
            node.normal.set_dotted('gecos_info.users.%s' % (username), {})

        # Sync each field only when it is missing or stale on the node.
        # (Collapses the three previously copy-pasted blocks for
        # email/firstName/lastName into one data-driven loop.)
        updated = False
        for mongo_key, chef_key in (('email', 'email'),
                                    ('first_name', 'firstName'),
                                    ('last_name', 'lastName')):
            dotted = 'gecos_info.users.%s.%s' % (username, chef_key)
            if (not node.normal.has_dotted(dotted) or
                    node.normal.get_dotted(dotted) != user[mongo_key]):
                node.normal.set_dotted(dotted, user[mongo_key])
                updated = True

        if updated:
            logger.info("Updating user %s data in computer: %s Chef ID: %s"
                        % (user['name'], computer['name'],
                           computer['node_chef_id']))
            node.save()
def command(self):
    """
    For every chef node: drop the obsolete ``users_old`` attribute and
    reconcile the chef (ohai) user list with the users stored in MongoDB —
    creating/linking new users, unlinking removed ones, and recalculating
    policies for everyone affected.
    """
    from gecoscc.api.chef_status import USERS_OLD, USERS_OHAI

    # Initialization
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    self.db = self.pyramid.db

    # Check administrator user
    auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    # Walk every chef node
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        try:
            if node.attributes.get_dotted(USERS_OLD):
                delete_dotted(node.attributes, USERS_OLD)
                node.save()
        except KeyError:
            logger.warn("Not found attribute: %s" % (USERS_OLD))
        except Exception:
            # Narrowed from a bare ``except`` so Ctrl-C still aborts.
            logger.warn("Problem deleting users_old attribute from node: %s" % (node_id))

        # Updating users list
        computer = self.db.nodes.find_one({'node_chef_id': node_id,
                                           'type': 'computer'})
        if not computer:
            logger.error('This node does not exist (mongodb)')
            continue

        chef_node_usernames = set(
            [d['username'] for d in node.attributes.get_dotted(USERS_OHAI)])
        gcc_node_usernames = set(
            [d['name'] for d in self.db.nodes.find(
                {'type': 'user', 'computers': {'$in': [computer['_id']]}},
                {'_id': 0, 'name': 1})])

        users_recalculate_policies = []
        users_remove_policies = []

        # Users added/removed ?
        if set.symmetric_difference(chef_node_usernames, gcc_node_usernames):
            logger.info("Users added/removed found.")

            # Add users or vinculate user to computer if already exists
            addusers = set.difference(chef_node_usernames, gcc_node_usernames)
            for add in addusers:
                logger.info("Added user: %s" % (add))
                user = self.db.nodes.find_one(
                    {'name': add, 'type': 'user',
                     'path': get_filter_in_domain(computer)})
                if not user:
                    user_model = User()
                    user = user_model.serialize(
                        {'name': add,
                         'path': computer.get('path', ''),
                         'type': 'user',
                         'lock': computer.get('lock', ''),
                         'source': computer.get('source', '')})
                    user = update_computers_of_user(self.db, user, self.api)
                    del user['_id']
                    user_id = self.db.nodes.insert(user)
                    user = self.db.nodes.find_one({'_id': user_id})
                    users_recalculate_policies.append(user)
                else:
                    computers = user.get('computers', [])
                    if computer['_id'] not in computers:
                        computers.append(computer['_id'])
                        self.db.nodes.update(
                            {'_id': user['_id']},
                            {'$set': {'computers': computers}})
                        users_recalculate_policies.append(user)
                        add_computer_to_user(computer['_id'], user['_id'])

            # Removed users
            delusers = set.difference(gcc_node_usernames, chef_node_usernames)
            for delete in delusers:
                logger.info("Deleted user: %s" % (delete))
                user = self.db.nodes.find_one(
                    {'name': delete, 'type': 'user',
                     'path': get_filter_in_domain(computer)})
                computers = user['computers'] if user else []
                if computer['_id'] in computers:
                    users_remove_policies.append(deepcopy(user))
                    computers.remove(computer['_id'])
                    self.db.nodes.update(
                        {'_id': user['_id']},
                        {'$set': {'computers': computers}})

        for user in users_recalculate_policies:
            apply_policies_to_user(self.db.nodes, user, auth_user)
        for user in users_remove_policies:
            remove_policies_of_computer(user, computer, auth_user)
#!/usr/bin/env python # # requires # - pip install PyChef # - access to knife.rb file # - access to client.pem file # # DON'T RUN THIS - THESE ARE JUST EXAMPLES #------------------------------ # update node info from chef import autoconfigure, Node api = autoconfigure() n = Node('web1') print n['fqdn'] n['myapp']['version'] = '1.0' n.save() #------------------------------ #------------------------------ # get client list from chef import autoconfigure api = autoconfigure() print api.api_request('GET', '/clients') #------------------------------
def command(self):
    """
    Migrate the deprecated ``pkgs_to_remove`` field of the package policy
    into the unified ``package_list`` format — in MongoDB and on the chef
    nodes — then recalculate the affected policies.
    """
    # Initialization
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        return
    self.db = self.pyramid.db

    ous = []
    groups = []
    computers = []

    # Get packages policy ID
    packages_policy = self.db.policies.find_one({"slug": "package_res"})
    if packages_policy is None:
        logger.error('Can\'t detect "package_res" policy!')
        return
    if 'pkgs_to_remove' in packages_policy["schema"]["properties"]:
        logger.error(
            "The 'package_res' policy in the system is deprecated, please update to new package policy!"
        )
        return
    logger.info('Packages policy ID: %s' % (str(packages_policy['_id'])))
    policy_id = str(packages_policy['_id'])

    # Get all nodes with old package policy data
    logger.info('Getting all nodes with old package policy data...')
    path_to_find = "policies.%s.pkgs_to_remove" % policy_id
    old_policy_nodes = self.db.nodes.find({
        '$query': {path_to_find: {'$exists': True}},
        '$orderby': {"path": 1}
    })

    updated_nodes = []
    for node in old_policy_nodes:
        logger.info('Updating node %s ...' % (str(node['_id'])))
        updated_nodes.append(str(node['_id']))
        policy_data = node['policies'][policy_id]
        logger.debug('Packages to add: %s' % (str(policy_data['package_list'])))
        logger.debug('Packages to remove: %s' % (str(policy_data['pkgs_to_remove'])))

        # Join the add/remove lists into the unified package_list format
        package_list = []
        for package_name in policy_data['package_list']:
            package_list.append({'name': package_name,
                                 'version': 'current',
                                 'action': 'add'})
        for package_name in policy_data['pkgs_to_remove']:
            package_list.append({'name': package_name,
                                 'version': 'current',
                                 'action': 'remove'})
        if 'pkgs_to_remove' in policy_data:
            del policy_data['pkgs_to_remove']
        policy_data['package_list'] = package_list

        # Update policies
        self.db.nodes.update({'_id': node['_id']},
                             {'$set': {'policies': node['policies']}})
        logger.debug('Joined list: %s' % (str(policy_data['package_list'])))

        if node['type'] == 'ou':
            ous.append(node)
        elif node['type'] == 'group':
            groups.append(node)
        elif node['type'] == 'computer':
            computers.append(node)
    logger.info('%s nodes were updated!' % (len(updated_nodes)))

    # We only go through the highest level OUs. Therefore, we eliminate
    # intermediate OUs so we do not recalculate the policies for the same
    # node several times.
    # BUG FIX: the original called list.remove() while iterating the same
    # list, which silently skips elements; filter into a new list instead.
    top_level_ous = []
    for ou in ous:
        parents = [ObjectId(oid) for oid in ou['path'].split(',')
                   if oid != 'root']
        if not any(o['_id'] in parents for o in ous):
            top_level_ous.append(ou)
    ous = top_level_ous

    # Computers that are not under an OU or GROUP that have the migrated
    # policy (same in-place-removal bug fixed here as well).
    standalone_computers = []
    for computer in computers:
        parents = [ObjectId(oid) for oid in computer['path'].split(',')
                   if oid != 'root']
        if any(o['_id'] in parents for o in ous):
            continue
        if any(computer['_id'] in group['members'] for group in groups):
            continue
        standalone_computers.append(computer)
    computers = standalone_computers

    # Recalculating policies for OU
    for ou in ous:
        old = deepcopy(ou)
        del old["policies"][policy_id]
        object_changed(self.auth_user, 'ou', ou, old)
    # Recalculating policies for GROUP
    for group in groups:
        old = deepcopy(group)
        del old["policies"][policy_id]
        object_changed(self.auth_user, 'group', group, old)
    # Recalculating policies for COMPUTER
    for computer in computers:
        old = deepcopy(computer)
        del old["policies"][policy_id]
        object_changed(self.auth_user, 'computer', computer, old)

    # Recalculate policies for Chef nodes
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        if ("gecos_ws_mgmt" in node.attributes) and (
                "software_mgmt" in node.attributes["gecos_ws_mgmt"]) and (
                "package_res" in
                node.attributes["gecos_ws_mgmt"]["software_mgmt"]):
            package_res = node.attributes["gecos_ws_mgmt"]["software_mgmt"]["package_res"]
            if "pkgs_to_remove" in package_res:
                logger.debug(
                    "Chef node %s contains a pkgs_to_remove value!"
                    % (node_id))
                # Remove pkgs_to_remove from the chef node
                logger.info("Remove 'pkgs_to_remove' attribute!")
                try:
                    del node.attributes["gecos_ws_mgmt"]["software_mgmt"][
                        "package_res"]["pkgs_to_remove"]
                    node.save()
                except Exception:
                    # Narrowed from a bare ``except``.
                    logger.warn(
                        "Problem deleting pkgs_to_remove value from node: %s"
                        % (node_id))
            if not "package_list" in node.attributes["gecos_ws_mgmt"][
                    "software_mgmt"]["package_res"]:
                logger.error(
                    "Chef node %s doesn\'t contains a package_list value!"
                    % (node_id))
                continue
            package_list = node.attributes["gecos_ws_mgmt"][
                "software_mgmt"]["package_res"]["package_list"]
            for element in package_list:
                if not 'action' in element:
                    logger.debug(
                        'Chef node: %s doesn\'t have an action value in package_res! (package_list:%s)'
                        % (node_id, str(package_list)))
                    break

    # Final check
    bad_nodes = Search('node', "pkgs_to_remove:*", rows=1000, start=0,
                       api=self.api)
    for node in bad_nodes:
        logger.warn('Detected bad node: %s' % (node.object.name))
        gecos_node = self.db.nodes.find_one(
            {"node_chef_id": node.object.name})
        if gecos_node is None:
            logger.warn('Can\'t find node in MongoDB for: %s'
                        % (node.object.name))
        else:
            logger.warn(
                'For an unknown reason a computer called %s wasn\'t updated!'
                % (gecos_node['name']))
    logger.info('END ;)')
def command(self):
    """
    Sanitize every instance of the ``local_file_res`` (files list) policy
    against its JSON schema, reapply policies to the affected computers
    and strip the obsolete copy_files/delete_files attributes from the
    chef nodes.
    """
    # Initialization
    logger.info("MIGRATION SCRIPT FOR FILES LIST POLICY")
    logger.info("######################################")

    # Disabling InsecureRequestWarning Unverified HTTPS request
    requests.packages.urllib3.disable_warnings()

    sanitized = False
    computers = set()
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)
    self.db = self.pyramid.db

    # Get local_file (File list) policy
    logger.info('Getting policy schema (local_file_res) ...')
    policy = self.db.policies.find_one({'slug': 'local_file_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.debug('policyId = %s' % str(policyId))

    # Searching nodes with the File List policy
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching for nodes with applied policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropiate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.debug("node = %s" % str(node))
        logger.info('-----------------------------------------------')
        logger.info('Node name = %s, mongo_id = %s'
                    % (node['name'], str(node['_id'])))
        logger.info('Instance of the policy on the node: %s' % str(instance))

        # Re-validate until the instance passes, sanitizing on each failure
        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warn('Validation error on instance: instance = %s'
                            % str(instance))
                logger.warn('Validation error on instance: message error = %s'
                            % str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True

        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info('Sanitized instance of the policy on the node AFTER calling the validate method: %s' % str(instance))
            # Update mongo
            logger.info('Updating instance in database (mongo) ...')
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {field: instance}})
            logger.info('Recalculating policies in the node.')

            # Affected nodes
            if node['type'] == 'ou':
                result = list(self.db.nodes.find(
                    {'path': get_filter_nodes_belonging_ou(node['_id']),
                     'type': 'computer'}, {'_id': 1}))
                logger.info('OU computers = %s' % str(result))
            elif node['type'] == 'group':
                result = list(self.db.nodes.find(
                    {'_id': {'$in': node['members']}, 'type': 'computer'},
                    {'_id': 1}))
                logger.info('GROUP computers = %s' % str(result))
            elif node['type'] == 'computer':
                result = [node]
                logger.info('COMPUTER computers = %s' % str(result))
            [computers.add(str(n['_id'])) for n in result]

    for computer in computers:
        logger.info('Applying policies to COMPUTER. For more information, see "gecosccui-celery.log" file')
        computer = self.db.nodes.find_one({'_id': ObjectId(computer)})
        apply_policies_to_computer(self.db.nodes, computer, self.auth_user,
                                   api=self.api, initialize=False,
                                   use_celery=True)

    # Removing unused attributes (copy_files, delete_files) in chef nodes
    logger.info('\n')
    attrs = ["%s.copy_files" % (policy['path']),
             "%s.delete_files" % (policy['path'])]
    logger.info('Removing unused attributes %s in chef nodes ...' % attrs)
    logger.info('\n')
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        for attr in attrs:
            try:
                if node.attributes.has_dotted(attr):
                    logger.warn("Remove %s attribute!" % attr)
                    delete_dotted(node.attributes, attr)
                    node.save()
            except Exception:
                # Narrowed from a bare ``except``: keep the best-effort
                # semantics but let SystemExit/KeyboardInterrupt propagate.
                logger.warn("Problem deleting attribute %s value from node: %s" % (attr, node_id))
                logger.warn("You may be trying to delete a default attribute instead normal attribute: %s" % (node_id))
    logger.info('Finished.')
def command(self):
    """
    Sanitize every instance of the ``local_file_res`` (files list) policy
    against its JSON schema, reapply policies to the affected computers
    and strip the obsolete copy_files/delete_files attributes from the
    chef nodes.
    """
    # Initialization
    logger.info("MIGRATION SCRIPT FOR FILES LIST POLICY")
    logger.info("######################################")

    # Disabling InsecureRequestWarning Unverified HTTPS request
    requests.packages.urllib3.disable_warnings()

    sanitized = False
    computers = set()
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)
    self.db = self.pyramid.db

    # Get local_file (File list) policy
    logger.info('Getting policy schema (local_file_res) ...')
    policy = self.db.policies.find_one({'slug': 'local_file_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.debug('policyId = %s' % str(policyId))

    # Searching nodes with the File List policy
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching for nodes with applied policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropiate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.debug("node = %s" % str(node))
        logger.info('-----------------------------------------------')
        logger.info('Node name = %s, mongo_id = %s'
                    % (node['name'], str(node['_id'])))
        logger.info('Instance of the policy on the node: %s' % str(instance))

        # Re-validate until the instance passes, sanitizing on each failure
        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warn('Validation error on instance: instance = %s'
                            % str(instance))
                logger.warn('Validation error on instance: message error = %s'
                            % str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True

        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info('Sanitized instance of the policy on the node AFTER calling the validate method: %s' % str(instance))
            # Update mongo
            logger.info('Updating instance in database (mongo) ...')
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {field: instance}})
            logger.info('Recalculating policies in the node.')

            # Affected nodes
            if node['type'] == 'ou':
                result = list(self.db.nodes.find(
                    {'path': get_filter_nodes_belonging_ou(node['_id']),
                     'type': 'computer'}, {'_id': 1}))
                logger.info('OU computers = %s' % str(result))
            elif node['type'] == 'group':
                result = list(self.db.nodes.find(
                    {'_id': {'$in': node['members']}, 'type': 'computer'},
                    {'_id': 1}))
                logger.info('GROUP computers = %s' % str(result))
            elif node['type'] == 'computer':
                result = [node]
                logger.info('COMPUTER computers = %s' % str(result))
            [computers.add(str(n['_id'])) for n in result]

    for computer in computers:
        logger.info('Applying policies to COMPUTER. For more information, see "gecosccui-celery.log" file')
        computer = self.db.nodes.find_one({'_id': ObjectId(computer)})
        apply_policies_to_computer(self.db.nodes, computer, self.auth_user,
                                   api=self.api, initialize=False,
                                   use_celery=True)

    # Removing unused attributes (copy_files, delete_files) in chef nodes
    logger.info('\n')
    attrs = ["%s.copy_files" % (policy['path']),
             "%s.delete_files" % (policy['path'])]
    logger.info('Removing unused attributes %s in chef nodes ...' % attrs)
    logger.info('\n')
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        for attr in attrs:
            try:
                if node.attributes.has_dotted(attr):
                    logger.warn("Remove %s attribute!" % attr)
                    delete_dotted(node.attributes, attr)
                    node.save()
            except Exception:
                # Narrowed from a bare ``except``: keep the best-effort
                # semantics but let SystemExit/KeyboardInterrupt propagate.
                logger.warn("Problem deleting attribute %s value from node: %s" % (attr, node_id))
                logger.warn("You may be trying to delete a default attribute instead normal attribute: %s" % (node_id))
    logger.info('Finished.')
def command(self): db = self.pyramid.db ou = db.nodes.find_one({'_id': ObjectId(self.options.ou_id)}) if not ou: print 'Error OU does not exists' return comp = db.nodes.find_one({'_id': ObjectId(self.options.comp_id)}) if not comp: print 'Error computer does not exists' return node_id = comp.get('node_chef_id', None) if not comp: print 'Error this computer has not node_chef_id' return policies = comp.get('policies', None) if policies != {}: print 'Error this computer should not have any policies' return admin = db.adminusers.find_one({'username': self.options.gcc_username}) if not admin: print 'Error this admin does not exists' return elif not admin.get('is_superuser', None): print 'You need a super admin' return number_nodes = int(self.options.number) api = get_chef_api(self.settings, admin) node = ChefNode(node_id, api) for i in range(number_nodes): new_node_name = '%s-%s' % (self.options.prefix, i) new_node = ChefNode(new_node_name, api) for attr in node.to_dict().keys(): if hasattr(node, attr) and attr != 'name': if attr == 'automatic': automatic_dict = node.automatic.to_dict() automatic_dict['ohai_gecos']['pclabel'] = new_node_name user1 = 'user.name-%s-1' % new_node_name user2 = 'user.name-%s-2' % new_node_name automatic_dict['ohai_gecos']['users'] = [{'username': user1, 'home': '/home/%s' % user1, 'gid': 1000, 'sudo': False, 'uid': 1000}, {'username': user2, 'home': '/home/%s' % user2, 'gid': 1000, 'sudo': False, 'uid': 1001}] automatic = NodeAttributes(automatic_dict) setattr(new_node, attr, automatic) elif attr == 'normal': node.normal.set_dotted('ohai_gecos', {}) else: setattr(new_node, attr, getattr(node, attr)) new_node.save() print 'Created %s at chef' % new_node_name res = requests.post('%s/register/computer/' % self.options.gcc_url, {'ou_id': self.options.ou_id, 'node_id': new_node_name}, auth=(self.options.gcc_username, self.options.gcc_password)) if res.ok and res.json()['ok']: print 'Created %s at gcc' % new_node_name elif res.ok and not 
res.json()['ok']: print 'Error %s at gcc' % new_node_name print '\t %s' % res.json()['message'] else: print 'Unknow error %s at gcc' % new_node_name res = requests.put('%s/chef/status/' % self.options.gcc_url, {'node_id': new_node_name, 'gcc_username': self.options.gcc_username}) if res.ok and res.json()['ok']: print 'Chef client %s' % new_node_name elif res.ok and not res.json()['ok']: print 'Error %s at chef client' % new_node_name print '\t %s' % res.json()['message'] else: print 'Unknow error %s at chef client' % new_node_name waiting_to_celery(db)
def command(self): from gecoscc.api.chef_status import USERS_OLD, USERS_OHAI # Initialization self.api = _get_chef_api(self.settings.get('chef.url'), toChefUsername(self.options.chef_username), self.options.chef_pem, False, self.settings.get('chef.version')) self.db = self.pyramid.db # Check administrator user auth_user = self.db.adminusers.find_one( {'username': self.options.chef_username}) if auth_user is None: logger.error('The administrator user must exist in MongoDB') sys.exit(1) # Recorriendo todos los nodos for node_id in ChefNode.list(): node = ChefNode(node_id, self.api) logger.info('Checking node: %s' % (node_id)) try: if node.attributes.get_dotted(USERS_OLD): delete_dotted(node.attributes, USERS_OLD) node.save() except KeyError: logger.warn("Not found attribute: %s" % (USERS_OLD)) except: logger.warn( "Problem deleting users_old attribute from node: %s" % (node_id)) # Updating users list computer = self.db.nodes.find_one({ 'node_chef_id': node_id, 'type': 'computer' }) if not computer: logger.error('This node does not exist (mongodb)') continue chef_node_usernames = set([ d['username'] for d in node.attributes.get_dotted(USERS_OHAI) ]) gcc_node_usernames = set([ d['name'] for d in self.db.nodes.find( { 'type': 'user', 'computers': { '$in': [computer['_id']] } }, { '_id': 0, 'name': 1 }) ]) users_recalculate_policies = [] users_remove_policies = [] # Users added/removed ? 
if set.symmetric_difference(chef_node_usernames, gcc_node_usernames): logger.info("Users added/removed found.") # Add users or vinculate user to computer if already exists addusers = set.difference(chef_node_usernames, gcc_node_usernames) for add in addusers: logger.info("Added user: %s" % (add)) user = self.db.nodes.find_one({ 'name': add, 'type': 'user', 'path': get_filter_in_domain(computer) }) if not user: user_model = User() user = user_model.serialize({ 'name': add, 'path': computer.get('path', ''), 'type': 'user', 'lock': computer.get('lock', ''), 'source': computer.get('source', '') }) user = update_computers_of_user( self.db, user, self.api) del user['_id'] user_id = self.db.nodes.insert(user) user = self.db.nodes.find_one({'_id': user_id}) users_recalculate_policies.append(user) else: computers = user.get('computers', []) if computer['_id'] not in computers: computers.append(computer['_id']) self.db.nodes.update( {'_id': user['_id']}, {'$set': { 'computers': computers }}) users_recalculate_policies.append(user) add_computer_to_user(computer['_id'], user['_id']) # Removed users delusers = set.difference(gcc_node_usernames, chef_node_usernames) for delete in delusers: logger.info("Deleted user: %s" % (delete)) user = self.db.nodes.find_one({ 'name': delete, 'type': 'user', 'path': get_filter_in_domain(computer) }) computers = user['computers'] if user else [] if computer['_id'] in computers: users_remove_policies.append(deepcopy(user)) computers.remove(computer['_id']) self.db.nodes.update( {'_id': user['_id']}, {'$set': { 'computers': computers }}) for user in users_recalculate_policies: apply_policies_to_user(self.db.nodes, user, auth_user) for user in users_remove_policies: remove_policies_of_computer(user, computer, auth_user)
def command(self):
    """Consistency check for the GECOS CC database and Chef server:
    validates policies, node paths, Chef<->MongoDB cross references and
    per-node default/path attributes, fixing what it safely can and
    logging everything else.
    """
    # Initialization
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    self.db = self.pyramid.db
    # Map emitter policy slugs to the type of object they reference.
    self.referenced_data_type = {}
    self.referenced_data_type['storage_can_view'] = 'storage'
    self.referenced_data_type['repository_can_view'] = 'repository'
    self.referenced_data_type['printer_can_view'] = 'printer'

    # Get gecos_ws_mgmt cookbook default data structure
    default_data_dotted_keys = {}
    default_data = self.get_default_data(default_data_dotted_keys)
    if default_data is None:
        logger.error("Can't find default data!")
        return

    # Get all the policies structures
    logger.info('Getting all the policies structures from database...')
    dbpolicies = self.db.policies.find()
    self.policiesdata = {}
    self.slug_check = {}
    for policy in dbpolicies:
        logger.debug('Adding to dictionary: %s => %s' %
                     (policy['_id'], json.dumps(policy['schema'])))
        self.policiesdata[str(policy['_id'])] = policy
        # Check policy slug field (must be unique)
        if policy['slug'] in self.slug_check:
            logger.error("There are more than one policy with '%s' slug!" %
                         (policy['slug']))
        else:
            slug = policy['slug']
            # The slug of the emitter policies is different from the others
            if slug == 'printer_can_view':
                slug = 'printers_res'
            elif slug == 'storage_can_view':
                slug = 'user_shared_folders_res'
            elif slug == 'repository_can_view':
                slug = 'software_sources_res'
            self.slug_check[slug] = policy
        # Check policy serialization
        try:
            logger.debug('Serialized policy: %s' %
                         (json.dumps(Policy().serialize(policy))))
        except Exception as err:
            logger.error(
                'Policy %s with slug %s can\'t be serialized: %s' %
                (policy['_id'], policy['slug'], str(err)))
            logger.warn(
                'Possible cause: New fields in models (Colander) but the import_policies command has not yet been executed to update schema.'
            )

    if self.options.clean_inheritance:
        logger.info('Cleaning inheritance field...')
        # Remove the 'inheritance' field from every node that has one.
        self.db.nodes.update({"inheritance": {
            '$exists': True
        }}, {'$unset': {
            "inheritance": {
                '$exist': True
            }
        }}, multi=True)

    if self.options.clean_variables:
        logger.info('Cleaning variables data from Chef nodes')
        # Drop the cached 'gecos_info' data from every Chef node.
        for node_id in ChefNode.list():
            node = ChefNode(node_id, self.api)
            if node.normal.has_dotted('gecos_info'):
                del node.normal['gecos_info']
                node.save()

    logger.info('Checking tree...')
    # Look for the root of the nodes tree
    root_nodes = self.db.nodes.find({"path": "root"})
    for root in root_nodes:
        self.check_node_and_subnodes(root)

    logger.info(
        'Checking nodes that are outside the tree (missing OUs in the PATH)...'
    )
    # Check node path: every OU id in a node's path must exist.
    nodes = self.db.nodes.find({})
    for node in nodes:
        if not 'path' in node:
            logger.error('Node with ID: %s has no "path" attribute!' %
                         (str(node['_id'])))
            continue
        if not 'name' in node:
            logger.error('Node with ID: %s has no "name" attribute!' %
                         (str(node['_id'])))
            continue
        if not 'type' in node:
            logger.error('Node with ID: %s has no "type" attribute!' %
                         (str(node['_id'])))
            continue
        for ou_id in node['path'].split(','):
            if ou_id == 'root':
                continue
            ou = self.db.nodes.find_one({"_id": ObjectId(ou_id)})
            if not ou:
                logger.error(
                    'Can\'t find OU %s that belongs to node path (node ID: %s NAME: %s)'
                    % (str(ou_id), str(node['_id']), node['name']))
                continue

    logger.info('Checking chef node references...')
    # Check the references to Chef nodes
    computers = self.db.nodes.find({"type": "computer"})
    for computer in computers:
        if "node_chef_id" in computer:
            # Check Chef node
            computer_node = ChefNode(computer['node_chef_id'], self.api)
            logger.info("Computer: %s Chef ID: %s" %
                        (computer['name'], computer['node_chef_id']))
            if not computer_node.exists:
                logger.error("No Chef node with ID %s!" %
                             (computer['node_chef_id']))
        else:
            logger.error("No Chef ID in '%s' computer!" %
                         (computer['name']))

    logger.info(
        'Checking MongoDB computer references and deprecated policies...')
    # Check the references to computer nodes
    for node_id in ChefNode.list():
        found = False
        computers = self.db.nodes.find({"node_chef_id": node_id})
        node_path = None
        for computer in computers:
            found = True
            node_path = computer['path']

        computer_node = ChefNode(node_id, self.api)
        if not found:
            # Orphan Chef node: no computer in MongoDB references it.
            pclabel = "(No OHAI-GECOS data in the node)"
            try:
                pclabel = "(pclabel = %s)" % (
                    computer_node.attributes.get_dotted(
                        'ohai_gecos.pclabel'))
            except KeyError:
                pass
            logger.error("No computer node for Chef ID: '%s' %s!" %
                         (node_id, pclabel))
            logger.warn(
                "Possible cause: The node has been deleted in Gecos Control Center but not in Chef server, either because it was in use at that time or for another unknown reason."
            )

        # Check default data for chef node
        if not computer_node.default.to_dict(
        ) or not computer_node.attributes.has_dotted('gecos_ws_mgmt'):
            logger.info(
                "FIXED: For an unknown reason Chef node: %s has no default attributes."
                % (node_id))
            computer_node.default = default_data
            computer_node.save()

        # Check "updated_by" field
        attributes = computer_node.normal.to_dict()
        updated, updated_attributes = self.check_updated_by_field(
            node_id, None, attributes)
        if updated:
            computer_node.normal = updated_attributes
            computer_node.save()

        updated, updated_attributes = self.check_chef_node_policies(
            node_id, None, attributes)
        if updated:
            computer_node.normal = updated_attributes
            computer_node.save()

        if node_path is not None:
            # Check "gecos_path_ids" field
            if not computer_node.attributes.has_dotted(
                    'gecos_path_ids'
            ) or computer_node.attributes.get_dotted(
                    'gecos_path_ids') != node_path:
                logger.info(
                    "FIXED: gecos_path_ids attribute in node: %s." %
                    (node_id))
                computer_node.attributes.set_dotted(
                    'gecos_path_ids', node_path)
                computer_node.save()

            # Check "gecos_path_names" field
            node_path_names = 'root'
            for elm in node_path.split(','):
                if elm == 'root':
                    continue
                # NOTE(review): if an OU id in the path is missing from
                # MongoDB, ou is None and the next line raises — the
                # path check above only logged it.  Confirm intended.
                ou = self.db.nodes.find_one({'_id': ObjectId(elm)})
                node_path_names += ',' + ou['name']

            if not computer_node.attributes.has_dotted(
                    'gecos_path_names'
            ) or computer_node.attributes.get_dotted(
                    'gecos_path_names') != node_path_names:
                logger.info(
                    "FIXED: gecos_path_names attribute in node: %s." %
                    (node_id))
                computer_node.attributes.set_dotted(
                    'gecos_path_names', node_path_names)
                computer_node.save()

    logger.info('END ;)')
def command(self):
    """Validate and sanitize every 'local_admin_users_res' policy
    instance stored in MongoDB against its JSON schema, remove the
    deprecated 'local_admin_remove_list' attribute from Chef nodes, and
    re-apply policies to the affected computers.
    """
    # Initialization
    sanitized = False
    computers = set()
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    # BUGFIX: bind self.db before using it — the original queried
    # self.db.adminusers first and only assigned self.db afterwards.
    self.db = self.pyramid.db
    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    # Get local_admin_users_res (Local Administrators) policy
    logger.info(
        'Getting Local Administrators (local_admin_users_res) policy ...')
    policy = self.db.policies.find_one({'slug': 'local_admin_users_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.info('Id.policy = %s' % str(policyId))

    # Searching nodes with the Local Administrators policy
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching nodes with the Local Administrators policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropiate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.info('Node name = %s, _id = %s' %
                    (node['name'], str(node['_id'])))
        logger.info('Instance before validate method: %s' % str(instance))
        # Re-validate until the instance passes the schema; sanitize()
        # repairs one reported error per iteration.
        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warning('Validation error on instance = %s' %
                               str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True
        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info('Sanitized instance: %s' % str(instance))
            # Update mongo
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {
                                     field: instance
                                 }})
            # Affected nodes
            # ROBUSTNESS: default to no computers so an unexpected node
            # type cannot leave 'result' unbound.
            result = []
            if node['type'] == 'ou':
                result = list(
                    self.db.nodes.find(
                        {
                            'path': get_filter_nodes_belonging_ou(
                                node['_id']),
                            'type': 'computer'
                        }, {'_id': 1}))
                logger.info('OU computers = %s' % str(result))
            elif node['type'] == 'group':
                result = list(
                    self.db.nodes.find(
                        {
                            '_id': {
                                '$in': node['members']
                            },
                            'type': 'computer'
                        }, {'_id': 1}))
                logger.info('GROUP computers = %s' % str(result))
            elif node['type'] == 'computer':
                result = [node]
                logger.info('COMPUTER computers = %s' % str(result))
            # IDIOM: plain loop instead of a side-effect comprehension.
            for n in result:
                computers.add(str(n['_id']))

    # Removing unused local_admin_remove_list attribute in chef nodes
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        attr_dotted = policy['path'] + '.local_admin_remove_list'
        logger.info('Atttribute dotted path: %s' % (attr_dotted))
        if node.attributes.has_dotted(attr_dotted):
            logger.info("Remove 'local_admin_remove_list' attribute!")
            try:
                logger.info(
                    "node.attributes = %s" %
                    str(node.attributes['gecos_ws_mgmt']['misc_mgmt']
                        ['local_admin_users_res'].to_dict()))
                delete_dotted(node.attributes, attr_dotted)
                node.save()
            except:
                logger.warn(
                    "Problem deleting local_admin_remove_list value from node: %s"
                    % (node_id))
                logger.warn(
                    "You may be trying to delete a default attribute instead normal attribute: %s"
                    % (node_id))

    # Re-apply policies to every affected computer (synchronously).
    for computer in computers:
        logger.info('computer = %s' % str(computer))
        computer = self.db.nodes.find_one({'_id': ObjectId(computer)})
        apply_policies_to_computer(self.db.nodes,
                                   computer,
                                   self.auth_user,
                                   api=self.api,
                                   initialize=False,
                                   use_celery=False)
    logger.info('Finished.')
def command(self):
    """Migrate the old 'package_res' policy format: merge the deprecated
    'pkgs_to_remove' list into 'package_list' entries with an explicit
    action, recalculate policies for the affected OUs/groups/computers,
    and clean the leftover attribute from the Chef nodes.
    """
    # Initialization
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    # BUGFIX: bind self.db before using it — the original queried
    # self.db.adminusers first and only assigned self.db afterwards.
    self.db = self.pyramid.db
    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        return

    ous = []
    groups = []
    computers = []

    # Get packages policy ID
    packages_policy = self.db.policies.find_one({"slug": "package_res"})
    if packages_policy is None:
        logger.error('Can\'t detect "package_res" policy!')
        return
    # If the schema still contains pkgs_to_remove, the new policy has
    # not been imported yet — migrating now would be premature.
    if 'pkgs_to_remove' in packages_policy["schema"]["properties"]:
        logger.error(
            "The 'package_res' policy in the system is deprecated, please update to new package policy!"
        )
        return
    logger.info('Packages policy ID: %s' % (str(packages_policy['_id'])))

    # Get all nodes with old package policy data
    logger.info('Getting all nodes with old package policy data...')
    path_to_find = "policies.%s.pkgs_to_remove" % (
        str(packages_policy['_id']))
    old_policy_nodes = self.db.nodes.find({
        '$query': {path_to_find: {'$exists': True}},
        '$orderby': {"path": 1}
    })

    updated_nodes = []
    for node in old_policy_nodes:
        logger.info('Updating node %s ...' % (str(node['_id'])))
        updated_nodes.append(str(node['_id']))
        logger.debug('Packages to add: %s' % (str(
            node['policies'][str(packages_policy['_id'])]['package_list'])))
        logger.debug('Packages to remove: %s' % (str(
            node['policies'][str(packages_policy['_id'])]['pkgs_to_remove'])))

        # Join the lists: every entry gets an explicit add/remove action.
        package_list = []
        for package_name in node['policies'][str(
                packages_policy['_id'])]['package_list']:
            package_list.append({'name': package_name,
                                 'version': 'current',
                                 'action': 'add'})
        for package_name in node['policies'][str(
                packages_policy['_id'])]['pkgs_to_remove']:
            package_list.append({'name': package_name,
                                 'version': 'current',
                                 'action': 'remove'})
        if 'pkgs_to_remove' in node['policies'][str(
                packages_policy['_id'])]:
            del node['policies'][str(
                packages_policy['_id'])]['pkgs_to_remove']
        node['policies'][str(
            packages_policy['_id'])]['package_list'] = package_list

        # Update policies
        self.db.nodes.update({'_id': node['_id']},
                             {'$set': {'policies': node['policies']}})
        logger.debug('Joined list: %s' % (str(
            node['policies'][str(packages_policy['_id'])]['package_list'])))

        if node['type'] == 'ou':
            ous.append(node)
        elif node['type'] == 'group':
            groups.append(node)
        elif node['type'] == 'computer':
            computers.append(node)
    logger.info('%s nodes were updated!' % (len(updated_nodes)))

    # We only go through the highest level OUs.
    # Therefore, we eliminate intermediate OUs and
    # then do not recalculate the policies
    # for the same node several times.
    # BUGFIX: iterate over a copy — removing from the list being
    # iterated skips the element after each removal.
    for ou in list(ous):
        parents = [
            ObjectId(oid) for oid in ou['path'].split(',') if oid != 'root'
        ]
        if any(o['_id'] in parents for o in ous):
            ous.remove(ou)

    # Users that are not under an OU or GROUP that have the migrated policy
    # BUGFIX: same copy-iteration fix as above.
    for computer in list(computers):
        parents = [
            ObjectId(oid) for oid in computer['path'].split(',')
            if oid != 'root'
        ]
        if any(o['_id'] in parents for o in ous):
            computers.remove(computer)
        elif any(computer['_id'] in group['members'] for group in groups):
            computers.remove(computer)

    # Recalculating policies for OU
    for ou in ous:
        old = deepcopy(ou)
        del old["policies"][str(packages_policy['_id'])]
        object_changed(self.auth_user, 'ou', ou, old)

    # Recalculating policies for GROUP
    for group in groups:
        old = deepcopy(group)
        del old["policies"][str(packages_policy['_id'])]
        object_changed(self.auth_user, 'group', group, old)

    # Recalculating policies for USER
    for computer in computers:
        old = deepcopy(computer)
        del old["policies"][str(packages_policy['_id'])]
        object_changed(self.auth_user, 'computer', computer, old)

    # Recalculate policies for Chef nodes
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        if ("gecos_ws_mgmt" in node.attributes) and (
                "software_mgmt" in node.attributes["gecos_ws_mgmt"]) and (
                    "package_res"
                    in node.attributes["gecos_ws_mgmt"]["software_mgmt"]):
            if "pkgs_to_remove" in node.attributes["gecos_ws_mgmt"][
                    "software_mgmt"]["package_res"]:
                logger.debug(
                    "Chef node %s contains a pkgs_to_remove value!" %
                    (node_id))
                # Remove pkgs_to_remove from the Chef node.
                logger.info("Remove 'pkgs_to_remove' attribute!")
                try:
                    del node.attributes["gecos_ws_mgmt"]["software_mgmt"][
                        "package_res"]["pkgs_to_remove"]
                    node.save()
                except:
                    logger.warn(
                        "Problem deleting pkgs_to_remove value from node: %s"
                        % (node_id))

            if not "package_list" in node.attributes["gecos_ws_mgmt"][
                    "software_mgmt"]["package_res"]:
                logger.error(
                    "Chef node %s doesn\'t contains a package_list value!" %
                    (node_id))
                continue

            package_list = node.attributes["gecos_ws_mgmt"][
                "software_mgmt"]["package_res"]["package_list"]
            for element in package_list:
                if not 'action' in element:
                    logger.debug(
                        'Chef node: %s doesn\'t have an action value in package_res! (package_list:%s)'
                        % (node_id, str(package_list)))
                    break

    # Final check: search Chef for any node still exposing pkgs_to_remove.
    bad_nodes = Search('node', "pkgs_to_remove:*",
                       rows=1000, start=0, api=self.api)
    for node in bad_nodes:
        logger.warn('Detected bad node: %s' % (node.object.name))
        gecos_node = self.db.nodes.find_one(
            {"node_chef_id": node.object.name})
        if gecos_node is None:
            logger.warn('Can\'t find node in MongoDB for: %s' %
                        (node.object.name))
        else:
            logger.warn(
                'For an unknown reason a computer called %s wasn\'t updated!'
                % (gecos_node['name']))
    logger.info('END ;)')
def save_node_and_free(node, api=None, refresh=False): if refresh and api: node = ChefNode(node.name, api) node.attributes.set_dotted(USE_NODE, {}) node.save()
def command(self):
    """Migrate the 'user_apps_autostart_res' policy: validate/sanitize
    every stored instance against its JSON schema, recalculate policies
    for the affected OUs/groups/users, and remove the deprecated
    'desktops_to_remove' attribute from the Chef nodes.
    """
    # Initialization
    logger.info("MIGRATION SCRIPT FOR USER_APPS_AUTOSTART POLICY")
    logger.info("###############################################")

    # Disabling InsecureRequestWarning Unverified HTTPS request
    requests.packages.urllib3.disable_warnings()

    sanitized = False
    ous = []
    groups = []
    users = []

    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    # BUGFIX: bind self.db before using it — the original queried
    # self.db.adminusers first and only assigned self.db afterwards.
    self.db = self.pyramid.db
    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    # Get local_users (Users) policy
    logger.info('Getting policy schema (user_apps_autostart_res) ...')
    policy = self.db.policies.find_one({'slug': 'user_apps_autostart_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.debug('policyId = %s' % str(policyId))

    # Searching nodes with the policy applied
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching for nodes with applied policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropiate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.info("node = %s" % str(node))
        logger.info('-----------------------------------------------')
        logger.info('Node name = %s, mongo_id = %s' %
                    (node['name'], str(node['_id'])))
        logger.info('Instance of the policy on the node: %s' %
                    str(instance))
        # Re-validate until the instance passes the schema; sanitize()
        # repairs one reported error per iteration.
        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warn('Validation error on instance = %s' %
                            str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True
        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info(
                'Sanitized instance of the policy on the node AFTER calling the validate method: %s'
                % str(instance))
            # Update mongo
            logger.info('Updating instance in database (mongo) ...')
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {field: instance}})
            logger.info('Recalculating policies in the node.')
            # Affected nodes
            if node['type'] == 'ou':
                ous.append(node)
            elif node['type'] == 'group':
                groups.append(node)
            elif node['type'] == 'user':
                users.append(node)

    # We only go through the highest level OUs.
    # Therefore, we eliminate intermediate OUs and
    # then do not recalculate the policies
    # for the same node several times.
    # BUGFIX: iterate over a copy — removing from the list being
    # iterated skips the element after each removal.
    for ou in list(ous):
        parents = [
            ObjectId(oid) for oid in ou['path'].split(',') if oid != 'root'
        ]
        if any(o['_id'] in parents for o in ous):
            ous.remove(ou)

    # Users that are not under an OU or GROUP that have the migrated policy
    # BUGFIX: same copy-iteration fix as above.
    for user in list(users):
        parents = [
            ObjectId(oid) for oid in user['path'].split(',')
            if oid != 'root'
        ]
        if any(o['_id'] in parents for o in ous):
            users.remove(user)
        elif any(user['_id'] in group['members'] for group in groups):
            users.remove(user)

    # Recalculating policies for OU
    for ou in ous:
        old = deepcopy(ou)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'ou', ou, old)

    # Recalculating policies for GROUP
    for group in groups:
        old = deepcopy(group)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'group', group, old)

    # Recalculating policies for USER
    for user in users:
        old = deepcopy(user)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'user', user, old)

    # Removing unused desktops_to_remove attribute in chef nodes
    logger.info('\n')
    logger.info(
        'Removing unused desktops_to_remove attribute in chef nodes ...')
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        field_chef = '%s.users' % policy['path']
        # CLARITY: renamed from "users" so the user-node list above is
        # not clobbered by this per-node username collection.
        node_users = node.attributes.get_dotted(
            field_chef) if node.attributes.has_dotted(field_chef) else []
        for user in node_users:
            logger.debug("user = %s" % (user))
            attr_delete_path = '%s.%s.desktops_to_remove' % (field_chef,
                                                             user)
            logger.debug('Atttribute dotted path: %s' % (attr_delete_path))
            if node.attributes.has_dotted(attr_delete_path):
                logger.warn(
                    "Remove 'desktops_to_remove' attribute! for user %s" %
                    (user))
                try:
                    delete_dotted(node.attributes, attr_delete_path)
                    node.save()
                except:
                    logger.warn(
                        "Problem deleting desktops_to_remove value from node: %s"
                        % (node_id))
                    logger.warn(
                        "You may be trying to delete a default attribute instead normal attribute: %s"
                        % (node_id))
    logger.info('Finished.')
def command(self):
    """Migrate the 'user_apps_autostart_res' policy: validate/sanitize
    every stored instance against its JSON schema, recalculate policies
    for the affected OUs/groups/users, and remove the deprecated
    'desktops_to_remove' attribute from the Chef nodes.
    """
    # Initialization
    logger.info("MIGRATION SCRIPT FOR USER_APPS_AUTOSTART POLICY")
    logger.info("###############################################")

    # Disabling InsecureRequestWarning Unverified HTTPS request
    requests.packages.urllib3.disable_warnings()

    sanitized = False
    ous = []
    groups = []
    users = []

    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    # BUGFIX: bind self.db before using it — the original queried
    # self.db.adminusers first and only assigned self.db afterwards.
    self.db = self.pyramid.db
    self.auth_user = self.db.adminusers.find_one(
        {'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    # Get local_users (Users) policy
    logger.info('Getting policy schema (user_apps_autostart_res) ...')
    policy = self.db.policies.find_one({'slug': 'user_apps_autostart_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s' % str(schema))
    logger.debug('policyId = %s' % str(policyId))

    # Searching nodes with the policy applied
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching for nodes with applied policy...')
    field = 'policies.' + str(policyId)
    filters = {field: {'$exists': True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropiate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.info("node = %s" % str(node))
        logger.info('-----------------------------------------------')
        logger.info('Node name = %s, mongo_id = %s' %
                    (node['name'], str(node['_id'])))
        logger.info('Instance of the policy on the node: %s' %
                    str(instance))
        # Re-validate until the instance passes the schema; sanitize()
        # repairs one reported error per iteration.
        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warn('Validation error on instance = %s' %
                            str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True
        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info(
                'Sanitized instance of the policy on the node AFTER calling the validate method: %s'
                % str(instance))
            # Update mongo
            logger.info('Updating instance in database (mongo) ...')
            self.db.nodes.update({'_id': node['_id']},
                                 {'$set': {
                                     field: instance
                                 }})
            logger.info('Recalculating policies in the node.')
            # Affected nodes
            if node['type'] == 'ou':
                ous.append(node)
            elif node['type'] == 'group':
                groups.append(node)
            elif node['type'] == 'user':
                users.append(node)

    # We only go through the highest level OUs.
    # Therefore, we eliminate intermediate OUs and
    # then do not recalculate the policies
    # for the same node several times.
    # BUGFIX: iterate over a copy — removing from the list being
    # iterated skips the element after each removal.
    for ou in list(ous):
        parents = [
            ObjectId(oid) for oid in ou['path'].split(',') if oid != 'root'
        ]
        if any(o['_id'] in parents for o in ous):
            ous.remove(ou)

    # Users that are not under an OU or GROUP that have the migrated policy
    # BUGFIX: same copy-iteration fix as above.
    for user in list(users):
        parents = [
            ObjectId(oid) for oid in user['path'].split(',')
            if oid != 'root'
        ]
        if any(o['_id'] in parents for o in ous):
            users.remove(user)
        elif any(user['_id'] in group['members'] for group in groups):
            users.remove(user)

    # Recalculating policies for OU
    for ou in ous:
        old = deepcopy(ou)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'ou', ou, old)

    # Recalculating policies for GROUP
    for group in groups:
        old = deepcopy(group)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'group', group, old)

    # Recalculating policies for USER
    for user in users:
        old = deepcopy(user)
        del old["policies"][str(policyId)]
        object_changed(self.auth_user, 'user', user, old)

    # Removing unused desktops_to_remove attribute in chef nodes
    logger.info('\n')
    logger.info(
        'Removing unused desktops_to_remove attribute in chef nodes ...')
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s' % (node_id))
        field_chef = '%s.users' % policy['path']
        # CLARITY: renamed from "users" so the user-node list above is
        # not clobbered by this per-node username collection.
        node_users = node.attributes.get_dotted(
            field_chef) if node.attributes.has_dotted(field_chef) else []
        for user in node_users:
            logger.debug("user = %s" % (user))
            attr_delete_path = '%s.%s.desktops_to_remove' % (field_chef,
                                                             user)
            logger.debug('Atttribute dotted path: %s' % (attr_delete_path))
            if node.attributes.has_dotted(attr_delete_path):
                logger.warn(
                    "Remove 'desktops_to_remove' attribute! for user %s" %
                    (user))
                try:
                    delete_dotted(node.attributes, attr_delete_path)
                    node.save()
                except:
                    logger.warn(
                        "Problem deleting desktops_to_remove value from node: %s"
                        % (node_id))
                    logger.warn(
                        "You may be trying to delete a default attribute instead normal attribute: %s"
                        % (node_id))
    logger.info('Finished.')
def command(self):
    """Consistency check of the GECOS node tree and its Chef counterparts.

    Walks the MongoDB ``nodes`` collection and the Chef server looking for
    structural problems: duplicated policy slugs, broken OU paths, computers
    without a Chef node (and vice versa), and Chef nodes missing their
    default attribute data (which are repaired in place).

    Side effects: may update Chef nodes (``default`` / ``normal`` attributes)
    and, with ``--clean-inheritance``, unsets the ``inheritance`` field on
    every MongoDB node.
    """
    # Initialization: Chef API client and MongoDB handle.
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))
    self.db = self.pyramid.db

    # Map of policy fields that reference other object types.
    self.referenced_data_type = {}
    self.referenced_data_type['storage_can_view'] = 'storage'
    self.referenced_data_type['repository_can_view'] = 'repository'
    self.referenced_data_type['printer_can_view'] = 'printer'

    # Get gecos_ws_mgmt cookbook default data structure.
    default_data_dotted_keys = {}
    default_data = self.get_default_data(default_data_dotted_keys)
    if default_data is None:
        logger.error("Can't find default data!")
        return

    # Get all the policies structures and sanity-check them.
    logger.info('Getting all the policies structures from database...')
    dbpolicies = self.db.policies.find()
    self.policiesdata = {}
    self.slug_check = {}
    for policy in dbpolicies:
        logger.debug('Addig to dictionary: %s => %s'%(policy['_id'], json.dumps(policy['schema'])))
        self.policiesdata[str(policy['_id'])] = policy

        # Check policy slug field (must be unique).
        if policy['slug'] in self.slug_check:
            logger.error("There are more than one policy with '%s' slug!"%(policy['slug']))
        else:
            self.slug_check[policy['slug']] = policy

        # Check policy serialization (Colander model must accept the stored data).
        try:
            logger.debug('Serialized policy: %s'%(json.dumps(Policy().serialize(policy))))
        except Exception as err:
            logger.error('Policy %s with slug %s can\'t be serialized: %s'%(policy['_id'], policy['slug'], str(err)))
            logger.warn('Possible cause: New fields in models (Colander) but the import_policies command has not yet been executed to update schema.')

    if self.options.clean_inheritance:
        # NOTE: the value of an $unset operand is irrelevant to MongoDB,
        # so the '$exist' literal here is harmless.
        logger.info('Cleaning inheritance field...')
        self.db.nodes.update({"inheritance": { '$exists': True }},
                             {'$unset': { "inheritance": {'$exist': True }}},
                             multi=True)

    logger.info('Checking tree...')
    # Look for the root of the nodes tree and recurse from there.
    root_nodes = self.db.nodes.find({"path" : "root"})
    for root in root_nodes:
        self.check_node_and_subnodes(root)

    logger.info('Checking nodes that are outside the tree (missing OUs in the PATH)...')
    # Check that every OU id in each node's path actually exists.
    nodes = self.db.nodes.find({})
    for node in nodes:
        if not 'path' in node:
            logger.error('Node with ID: %s has no "path" attribute!'%(str(node['_id'])))
            continue
        if not 'name' in node:
            logger.error('Node with ID: %s has no "name" attribute!'%(str(node['_id'])))
            continue
        if not 'type' in node:
            logger.error('Node with ID: %s has no "type" attribute!'%(str(node['_id'])))
            continue

        for ou_id in node['path'].split(','):
            if ou_id == 'root':
                continue
            ou = self.db.nodes.find_one({ "_id" : ObjectId(ou_id) })
            if not ou:
                logger.error('Can\'t find OU %s that belongs to node path (node ID: %s NAME: %s)'%(str(ou_id), str(node['_id']), node['name']))
                continue

    logger.info('Checking chef node references...')
    # Check the references from MongoDB computers to Chef nodes.
    computers = self.db.nodes.find({"type" : "computer"})
    for computer in computers:
        if "node_chef_id" in computer:
            # Check Chef node existence.
            computer_node = ChefNode(computer['node_chef_id'], self.api)
            logger.info("Computer: %s Chef ID: %s"%(computer['name'], computer['node_chef_id']))
            if not computer_node.exists:
                logger.error("No Chef node with ID %s!"%(computer['node_chef_id']))
        else:
            logger.error("No Chef ID in '%s' computer!"%(computer['name']))

    logger.info('Checking MongoDB computer references...')
    # Check the references from Chef nodes back to MongoDB computers.
    for node_id in ChefNode.list():
        # BUGFIX: fetch the Chef node once per iteration, BEFORE the inner
        # loop. Previously it was only assigned inside the loop over matching
        # computers, so when no computer matched, the code below either
        # raised NameError (first iteration) or operated on the node from
        # the PREVIOUS iteration, repairing/saving the wrong Chef node.
        computer_node = ChefNode(node_id, self.api)

        found = False
        computers = self.db.nodes.find({"node_chef_id" : node_id})
        for computer in computers:
            found = True

        if not found:
            pclabel = "(No OHAI-GECOS data in the node)"
            try:
                pclabel = "(pclabel = %s)"%( computer_node.attributes.get_dotted('ohai_gecos.pclabel') )
            except KeyError:
                pass

            logger.error("No computer node for Chef ID: '%s' %s!"%(node_id, pclabel))
            logger.warn("Possible cause: The node has been deleted in Gecos Control Center but not in Chef server, either because it was in use at that time or for another unknown reason.")

        # Check default data for chef node; repair it when missing.
        if not computer_node.default.to_dict() or not computer_node.attributes.has_dotted('gecos_ws_mgmt'):
            logger.info("FIXED: For an unknown reason Chef node: %s has no default attributes."%(node_id))
            computer_node.default = default_data
            computer_node.save()

        # Check "updated_by" field in the node's normal attributes.
        atrributes = computer_node.normal.to_dict()
        updated, updated_attributes = self.check_updated_by_field(node_id, None, atrributes)
        if updated:
            computer_node.normal = atrributes
            computer_node.save()

    logger.info('END ;)')
def command(self):
    """Sanitize the Local Administrators (``local_admin_users_res``) policy.

    Validates every MongoDB node carrying that policy against its JSON
    schema, repairs (sanitizes) invalid instances in place, removes the
    obsolete ``local_admin_remove_list`` attribute from Chef nodes, and
    finally re-applies policies to every affected computer.

    Side effects: updates MongoDB ``nodes`` documents, saves Chef nodes,
    and exits the process when the administrator user is missing.
    """
    # Initialization
    sanitized = False
    computers = set()
    self.api = _get_chef_api(self.settings.get('chef.url'),
                             toChefUsername(self.options.chef_username),
                             self.options.chef_pem, False,
                             self.settings.get('chef.version'))

    # BUGFIX: self.db must be assigned BEFORE it is used for the
    # adminusers lookup below (the original queried self.db first, which
    # is unset at this point — the sibling command assigns it up front).
    self.db = self.pyramid.db

    self.auth_user = self.db.adminusers.find_one({'username': self.options.chef_username})
    if self.auth_user is None:
        logger.error('The administrator user must exist in MongoDB')
        sys.exit(1)

    # Get local_admin_users_res (Local Administrators) policy
    logger.info('Getting Local Administrators (local_admin_users_res) policy ...')
    policy = self.db.policies.find_one({'slug':'local_admin_users_res'})
    schema = policy['schema']
    policyId = policy['_id']
    logger.info('schema = %s'%str(schema))
    logger.info('Id.policy = %s'%str(policyId))

    # Searching nodes with the Local Administrators policy
    # Query Fields of an Embedded Document (Mongo documentation)
    # Example:
    # db.nodes.find({"policies.58c8122a0dfd425b0894d5b6":{$exists:true}})
    logger.info('Searching nodes with the Local Administrators policy...')
    field = 'policies.' + str(policyId)
    filters = {field:{'$exists':True}}
    nodes = self.db.nodes.find(filters)

    # Validating data and, where appropiate, fixing
    for node in nodes:
        instance = node['policies'][unicode(policyId)]
        logger.info('Node name = %s, _id = %s'%(node['name'],str(node['_id'])))
        logger.info('Instance before validate method: %s'%str(instance))

        # Repeat validation until the instance passes, sanitizing on
        # each failure (sanitize is expected to make progress).
        while True:
            try:
                validate(instance, schema)
                break
            except ValidationError as e:
                logger.warning('Validation error on instance = %s'%str(e.message))
                # Sanitize instance
                self.sanitize(e, instance)
                sanitized = True

        if sanitized:
            # Setting false sanitized for next iteration
            sanitized = False
            logger.info('Sanitized instance: %s'%str(instance))
            # Update mongo
            self.db.nodes.update({'_id': node['_id']},{'$set':{field:instance}})

        # Collect affected computers depending on the node type.
        # Default to an empty result so an unexpected node type cannot
        # leave 'result' unbound (or stale from a previous iteration).
        result = []
        if node['type'] == 'ou':
            result = list(self.db.nodes.find({'path': get_filter_nodes_belonging_ou(node['_id']),'type': 'computer'},{'_id':1}))
            logger.info('OU computers = %s'%str(result))
        elif node['type'] == 'group':
            result = list(self.db.nodes.find({'_id':{'$in':node['members']},'type':'computer'},{'_id':1}))
            logger.info('GROUP computers = %s'%str(result))
        elif node['type'] == 'computer':
            result = [node]
            logger.info('COMPUTER computers = %s'%str(result))

        for n in result:
            computers.add(str(n['_id']))

    # Removing unused local_admin_remove_list attribute in chef nodes
    for node_id in ChefNode.list():
        node = ChefNode(node_id, self.api)
        logger.info('Checking node: %s'%(node_id))
        attr_dotted = policy['path'] + '.local_admin_remove_list'
        logger.info('Atttribute dotted path: %s'%(attr_dotted))
        if node.attributes.has_dotted(attr_dotted):
            logger.info("Remove 'local_admin_remove_list' attribute!")
            try:
                logger.info("node.attributes = %s" % str(node.attributes['gecos_ws_mgmt']['misc_mgmt']['local_admin_users_res'].to_dict()))
                delete_dotted(node.attributes, attr_dotted)
                node.save()
            except Exception:
                # Deletion may fail when the dotted path points at a
                # default (read-only) attribute instead of a normal one.
                logger.warn("Problem deleting local_admin_remove_list value from node: %s"%(node_id))
                logger.warn("You may be trying to delete a default attribute instead normal attribute: %s"%(node_id))

    # Re-apply policies to every affected computer.
    for computer in computers:
        logger.info('computer = %s'%str(computer))
        computer = self.db.nodes.find_one({'_id': ObjectId(computer)})
        apply_policies_to_computer(self.db.nodes, computer, self.auth_user,
                                   api=self.api, initialize=False,
                                   use_celery=False)

    logger.info('Finished.')