def POST(self, **data):
    """Create (or force-update) an interface on a host.

    Expects ``host`` plus the interface fields in the request data.
    ``force=True`` replaces an existing interface of the same name
    instead of rejecting the request with 409 Conflict.
    """
    # handle force
    force = data.get("force", False) == "True"
    if "force" in data:
        del data["force"]

    # NOTE(review): a missing "host" key raises KeyError here — confirm
    # upstream validation guarantees it is always present.
    _host_name = data.pop("host")

    # make sure post data passed in is ready to pass to mongo engine
    result, data = self.model.prep_data(data)

    # Check if there were data validation errors
    if result:
        result = ["Data validation failed: %s" % ", ".join(result)]
        cherrypy.response.status = "400 Bad Request"
    else:
        # Does this host already carry an interface with this name?
        _host = Host.objects(name=_host_name,
                             interfaces__name=data["name"]).first()
        if _host and not force:
            result.append("%s %s already exists." %
                          (self.name.capitalize(), data["name"]))
            cherrypy.response.status = "409 Conflict"
        else:
            try:
                interface = self.model(**data)
                if force and _host:
                    # Replace the matched embedded interface in place
                    # (positional $ operator via set__interfaces__S).
                    updated = Host.objects(
                        name=_host_name,
                        interfaces__name=data["name"]).update_one(
                        set__interfaces__S=interface)
                    if updated:
                        cherrypy.response.status = "201 Resource Created"
                        result.append("Updated %s %s" %
                                      (self.name, data["name"]))
                    else:
                        cherrypy.response.status = "400 Bad Request"
                        result.append("Host %s not found." % _host_name)
                else:
                    # Append a brand new interface to the host's list.
                    updated = Host.objects(name=_host_name).update_one(
                        push__interfaces=interface)
                    if updated:
                        cherrypy.response.status = "201 Resource Created"
                        result.append("Created %s %s" %
                                      (self.name, data["name"]))
                    else:
                        cherrypy.response.status = "400 Bad Request"
                        result.append("Host %s not found." % _host_name)
            except Exception as e:
                # TODO: make sure when this is thrown the output
                # points back to here and gives the end user
                # enough information to fix the issue
                cherrypy.response.status = "500 Internal Server Error"
                result.append("Error: %s" % e)
    return json.dumps({"result": result})
def DELETE(self, **data):
    """Remove a named interface from a host.

    Expects ``host`` and ``name`` in the request data. Responds
    204 on success, 500 on failure; a host/interface that does not
    exist yields an empty result list with the default status.
    """
    messages = []
    target = Host.objects(name=data["host"],
                          interfaces__name=data["name"]).first()
    if target is not None:
        try:
            Host.objects(
                name=data["host"], interfaces__name=data["name"]
            ).update_one(pull__interfaces__name=data["name"])
            cherrypy.response.status = "204 No Content"
            messages.append("Removed %s." % data["name"])
        except Exception as e:
            cherrypy.response.status = "500 Internal Server Error"
            messages.append("Error: %s" % e)
    return json.dumps({"result": messages})
def post_network_test(_cloud):
    """Run fping connectivity checks across all hosts of a cloud.

    Connects via SSH to the first host of the cloud, fpings every host
    by name, then repeats per interface against the interface-specific
    subnets derived from INTERFACES.

    :param _cloud: cloud object whose hosts should be tested.
    :return: True when every fping run succeeds, False otherwise.
    """
    _hosts = Host.objects(cloud=_cloud)
    if not _hosts:
        # An empty cloud previously raised IndexError on _hosts[0].
        logger.error("No hosts found for cloud: %s." % _cloud)
        return False
    test_host = _hosts[0]
    try:
        ssh_helper = SSHHelper(test_host.name)
    except Exception:
        logger.exception("Could not establish connection with host: %s." %
                         test_host.name)
        return False

    try:
        # run_cmd returns a list on success; anything else is a failure.
        host_list = " ".join(host.name for host in _hosts)
        if not isinstance(ssh_helper.run_cmd("fping -u %s" % host_list), list):
            return False

        for interface in test_host.interfaces:
            new_ips = []
            # Hosts that also define this interface name; resolve their IPs.
            host_ips = [
                socket.gethostbyname(host.name)
                for host in _hosts
                if interface.name in
                [_interface.name for _interface in host.interfaces]
            ]
            for ip in host_ips:
                for value in INTERFACES[interface.name]:
                    # Rewrite the first two octets with the subnet prefix
                    # configured for this interface.
                    ip_apart = ip.split(".")
                    octets = value.split(".")
                    ip_apart[0] = octets[0]
                    ip_apart[1] = octets[1]
                    new_ips.append(".".join(ip_apart))
            if not isinstance(
                    ssh_helper.run_cmd("fping -u %s" % " ".join(new_ips)),
                    list):
                return False
    finally:
        # Always release the SSH session, even on early failure returns
        # (previously leaked when an fping check failed).
        ssh_helper.disconnect()
    return True
def GET(self, **data):
    """Report hosts whose active schedule points at a different cloud
    than the one they are currently assigned to ("moves").

    :param data: optional ``date`` in ``%Y-%m-%dt%H:%M:%S`` format
        (note the lowercase ``t`` separator); defaults to now.
    :return: JSON ``{"result": [...]}`` of host/new/current entries,
        or a 400 payload on any failure.
    """
    date = datetime.datetime.now()
    if "date" in data:
        date = datetime.datetime.strptime(data["date"], "%Y-%m-%dt%H:%M:%S")
    # NOTE(review): when self.name != "moves" this method returns None —
    # presumably unreachable for this endpoint; confirm.
    if self.name == "moves":
        try:
            _hosts = Host.objects()
            result = []
            for _host in _hosts:
                # Default to the host's default cloud when no schedule is
                # active at the requested date.
                _scheduled_cloud = _host.default_cloud.name
                _host_defined_cloud = _host.cloud.name
                _current_schedule = self.model.current_schedule(
                    host=_host, date=date).first()
                try:
                    if _current_schedule:
                        _scheduled_cloud = _current_schedule.cloud.name
                    if _scheduled_cloud != _host_defined_cloud:
                        result.append({
                            "host": _host.name,
                            "new": _scheduled_cloud,
                            "current": _host_defined_cloud,
                        })
                except DoesNotExist:
                    # Host references a cloud that no longer exists; skip.
                    continue
            return json.dumps({"result": result})
        except Exception as ex:
            logger.debug(ex)
            logger.info("400 Bad Request")
            cherrypy.response.status = "400 Bad Request"
            return json.dumps({"result": ["400 Bad Request"]})
def GET(self, **data):
    """Query schedules, optionally filtered by date, host and cloud.

    An unknown host short-circuits with an error payload; an unknown
    cloud is silently ignored. When the resource is
    ``current_schedule``, only the schedule(s) active at the filter
    date are returned.
    """
    filters = {}
    if "date" in data:
        filters["date"] = datetime.datetime.strptime(
            data["date"], "%Y-%m-%dt%H:%M:%S")
    if "host" in data:
        host_obj = Host.objects(name=data["host"]).first()
        if not host_obj:
            return json.dumps({
                "result":
                ["Couldn't find host %s on Quads DB." % data["host"]]
            })
        filters["host"] = host_obj
    if "cloud" in data:
        cloud_obj = Cloud.objects(name=data["cloud"]).first()
        if cloud_obj:
            filters["cloud"] = cloud_obj
    if self.name != "current_schedule":
        return self.model.objects(**filters).to_json()
    current = self.model.current_schedule(**filters)
    if not current:
        return json.dumps({"result": ["No results."]})
    return current.to_json()
def process_scheduled(_logger, month, now):
    """Log one monthly utilization row.

    Reports, for the month ``month`` months before ``now`` (0 = the
    month of ``now``): how many cloud-history records fall inside the
    month, how many hosts predate it, and the integer percentage of
    host-days that were scheduled.
    """
    target = month_delta_past(now, month) if month > 0 else now
    start = first_day_month(target)
    end = last_day_month(target)
    start_id = date_to_object_id(start)
    end_id = date_to_object_id(end)
    # Cloud history entries created within the month window.
    scheduled = CloudHistory.objects(__raw__={
        "_id": {
            "$lt": end_id,
            "$gt": start_id,
        },
    }).order_by("-_id").count()
    # Hosts that already existed when the month started.
    hosts = Host.objects(__raw__={
        "_id": {
            "$lt": start_id,
        },
    }).count()
    dates = list(date_span(start, end))
    days = len(dates)
    scheduled_count = sum(
        Schedule.current_schedule(date=d).count() for d in dates)
    utilization = 0
    if hosts and days:
        utilization = scheduled_count * 100 // (days * hosts)
    f_month = f"{start.month:02}"
    _logger.info(f"{start.year}-{f_month:<3}| "
                 f"{scheduled:>9}| "
                 f"{hosts:>8}| "
                 f"{utilization:>10}%| ")
def __init__(self, cloud):
    """Collect the unvalidated hosts of *cloud* that currently have an
    active schedule."""
    self.cloud = cloud
    self.report = ""
    candidates = Host.objects(cloud=self.cloud, validated=False)
    self.hosts = [
        candidate for candidate in candidates
        if Schedule.current_schedule(host=candidate)
    ]
def generator(_host_file, _days, _month, _year, _gentime):
    """Render the monthly host/cloud allocation table with utilization.

    :param _host_file: optional CSV file listing hosts; when absent all
        hosts from the database are used, sorted by name.
        NOTE(review): the CSV path yields row lists, which lack the
        ``.name`` attribute used below — confirm callers never pass it.
    :param _days: number of day columns to render.
    :param _month: month number.
    :param _year: year number.
    :param _gentime: generation timestamp passed through to the template.
    :return: rendered ``simple_table`` template content.
    """
    if _host_file:
        with open(_host_file, 'r') as f:
            reader = csv.reader(f)
            hosts = list(reader)
    else:
        hosts = sorted(Host.objects(), key=lambda x: x.name)
    lines = []
    __days = []
    non_allocated_count = 0
    for i, host in enumerate(hosts):
        line = {"hostname": host.name}
        __days = []
        for j in range(1, _days + 1):
            cell_date = "%s-%.2d-%.2d 01:00" % (_year, _month, j)
            cell_time = datetime.strptime(cell_date, '%Y-%m-%d %H:%M')
            schedule = Schedule.current_schedule(host=host,
                                                 date=cell_time).first()
            if schedule:
                # Cloud names are "cloudNN"; the suffix selects the color.
                chosen_color = schedule.cloud.name[5:]
            else:
                non_allocated_count += 1
                chosen_color = "01"
            _day = {
                "day": j,
                "chosen_color": chosen_color,
                "color": conf["visual_colors"]["cloud%s" % chosen_color],
                "cell_date": cell_date,
                "cell_time": cell_time
            }
            if schedule:
                # Owner/ticket/description come from the history record
                # that immediately predates this schedule.
                cloud = CloudHistory.objects(__raw__={
                    "_id": {
                        "$lt": schedule.id
                    },
                    "name": schedule.cloud.name
                }).order_by("-_id").first()
                _day["display_description"] = cloud.description
                _day["display_owner"] = cloud.owner
                _day["display_ticket"] = cloud.ticket
            __days.append(_day)
        line["days"] = __days
        lines.append(line)
    # Guard the percentage against an empty host set (or zero days);
    # previously this raised ZeroDivisionError.
    utilization = 0
    if hosts and _days:
        utilization = 100 - (non_allocated_count * 100 //
                             (_days * len(hosts)))
    with open(os.path.join(TEMPLATES_PATH, "simple_table")) as _file:
        template = Template(_file.read())
    content = template.render(
        gentime=_gentime,
        _days=_days,
        lines=lines,
        utilization=utilization,
    )
    return content
def verify(cloud):
    """Log the VLAN membership reported by the switch for each interface
    of the given cloud's host(s).

    With ``args.all`` every host of the cloud is inspected; otherwise
    only the first one.
    """
    _cloud_obj = Cloud.objects(name=cloud).first()
    if not _cloud_obj:
        # Check before dereferencing: the previous version logged
        # _cloud_obj.qinq first and raised AttributeError for unknown
        # clouds.
        logger.error(f"Cloud not found.")
        return
    logger.info(f"Cloud qinq: {_cloud_obj.qinq}")
    if args.all:
        hosts = Host.objects(cloud=_cloud_obj)
    else:
        hosts = [Host.objects(cloud=_cloud_obj).first()]
    for host in hosts:
        if args.all:
            logger.info(f"{host.name}:")
        if host and host.interfaces:
            interfaces = sorted(host.interfaces, key=lambda k: k["name"])
            for i, interface in enumerate(interfaces):
                ssh_helper = SSHHelper(interface.switch_ip,
                                       Config["junos_username"])
                try:
                    # Parse the VLAN name out of the "vlans ... members"
                    # configuration line for this port.
                    _, vlan_member_out = ssh_helper.run_cmd(
                        "show configuration vlans | display set | match %s.0"
                        % interface.switch_port)
                    vlan_member = vlan_member_out[0].split()[2][4:].strip(",")
                except IndexError:
                    logger.warning(
                        "Could not determine the previous VLAN member for %s, switch %s, switch port %s "
                        % (
                            interface.name,
                            interface.switch_ip,
                            interface.switch_port,
                        ))
                    vlan_member = 0
                ssh_helper.disconnect()
                logger.info(
                    f"Interface em{i+1} appears to be a member of VLAN {vlan_member}",
                )
        else:
            logger.error(
                f"The cloud has no hosts or the host has no interfaces defined"
            )
def __init__(self, cloud, _args, _loop=None):
    """Collect the unvalidated hosts of *cloud* that currently have an
    active schedule.

    :param cloud: cloud object to validate.
    :param _args: CLI/runtime arguments kept on the instance.
    :param _loop: optional event loop; the running loop is used when
        none is given.
    """
    self.cloud = cloud
    self.report = ""
    self.args = _args
    candidates = Host.objects(cloud=self.cloud, validated=False)
    self.hosts = [
        candidate for candidate in candidates
        if Schedule.current_schedule(host=candidate)
    ]
    self.loop = _loop if _loop else get_running_loop()
def main(_loop):
    """Populate interface data for every host, logging (but not
    propagating) per-host failures."""
    populator = Populator(_loop)
    for host in Host.objects():
        try:
            _loop.run_until_complete(populator.populate(host))
        except Exception as ex:
            logger.debug(ex)
            logger.info(" Failed to populate interfaces for %s" % host.name)
def GET(self, **data):
    """Return the interfaces of the requested host as a JSON list.

    :param data: must contain ``host``, the host name to look up.
    :return: JSON ``{"result": [...]}`` of serialized interfaces, or an
        error message string in ``result``.
    """
    if "host" not in data:
        return json.dumps({"result": "No host provided"})
    host = Host.objects(name=data["host"]).first()
    if not host:
        # Previously this fell through to "No host provided", which
        # misreported a lookup failure as a missing parameter.
        return json.dumps({"result": "Host %s not found" % data["host"]})
    result = [i.to_json() for i in host.interfaces]
    return json.dumps({"result": result})
def main():
    """Generate the wiki ``main.md`` rack inventory page from Foreman
    host data and the QUADS database.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    foreman = Foreman(
        Config["foreman_api_url"],
        Config["foreman_username"],
        Config["foreman_password"],
        loop=loop,
    )
    all_hosts = loop.run_until_complete(foreman.get_all_hosts())
    # Hosts matching any excluded substring are skipped entirely.
    blacklist = re.compile("|".join(
        [re.escape(word) for word in Config["exclude_hosts"].split("|")]))
    hosts = {}
    for host, properties in all_hosts.items():
        if not blacklist.search(host):
            if properties.get("sp_name", False):
                # Swap in the service-processor (out-of-band) addresses,
                # keeping the OS-facing ones under host_ip / host_mac.
                properties["host_ip"] = properties["ip"]
                properties["host_mac"] = properties["mac"]
                properties["ip"] = properties.get("sp_ip")
                properties["mac"] = properties.get("sp_mac")
                consolidate_ipmi_data(host, "macaddr",
                                      properties["host_mac"])
                consolidate_ipmi_data(host, "oobmacaddr",
                                      properties.get("sp_mac"))
                # Optional per-host service tag stored on disk.
                svctag_file = os.path.join(Config["data_dir"], "ipmi",
                                           host, "svctag")
                svctag = ""
                if os.path.exists(svctag_file):
                    with open(svctag_file) as _file:
                        svctag = _file.read()
                properties["svctag"] = svctag.strip()
                hosts[host] = properties
    _full_path = os.path.join(Config["wp_wiki_git_repo_path"], "main.md")
    if not os.path.exists(Config["wp_wiki_git_repo_path"]):
        pathlib.Path(Config["wp_wiki_git_repo_path"]).mkdir(parents=True,
                                                            exist_ok=True)
    with open(_full_path, "w") as _f:
        _f.seek(0)
        for rack in Config["racks"].split():
            if rack_has_hosts(rack, hosts):
                _f.write(render_header(rack))
                for host, properties in hosts.items():
                    # Rack membership is inferred from the hostname.
                    if rack in host:
                        host_obj = Host.objects(name=host).first()
                        if host_obj and not host_obj.retired:
                            _f.write(render_row(host_obj, properties))
                _f.write("\n")
        _f.truncate()
def DELETE(self, **data):
    """Delete the schedule identified by ``host`` and ``index``.

    Responds 204 on success and 404 when either the host or the
    schedule cannot be found.
    """
    result = []
    _host = Host.objects(name=data["host"]).first()
    if _host:
        schedule = self.model.objects(host=_host,
                                      index=data["index"]).first()
        if schedule:
            schedule.delete()
            cherrypy.response.status = "204 No Content"
            result = ["deleted %s " % self.name]
        else:
            cherrypy.response.status = "404 Not Found"
            result = ["%s Not Found" % self.name]
    else:
        # Previously an unknown host returned 200 with an empty result;
        # report it as 404 like a missing schedule.
        cherrypy.response.status = "404 Not Found"
        result = ["Host Not Found"]
    return json.dumps({"result": result})
def main() -> None:
    """Run lshw for every active cloud01 host whose XML dump is missing
    or empty.

    :return: None
    """
    cloud = Cloud.objects(name="cloud01").first()
    for host in Host.objects(cloud=cloud, retired=False, broken=False):
        out_path = os.path.join(LSHW_OUTPUT_DIR, f"{host.name}.xml")
        # (Re)generate when the file is absent or zero-length.
        if not os.path.exists(out_path) or os.path.getsize(out_path) < 1:
            run_lshw(host.name, out_path)
def print_unmanaged(hosts):
    """Build the markdown section listing systems present in the
    management inventory but absent from the QUADS database.

    *hosts* maps management hostnames (assumed to carry a five-char
    prefix such as "mgmt-", which is stripped) to their properties.
    """
    header_cells = ["**SystemHostname**", "**OutOfBand**"]
    lines = [
        "\n",
        '### <a name="unmanaged"></a>Unmanaged systems ###\n',
        "\n",
        "| %s |\n" % " | ".join(header_cells),
        "| %s |\n" % " | ".join(["---"] * len(header_cells)),
    ]
    for mgmt_name in hosts:
        real_host = mgmt_name[5:]
        if Host.objects(name=real_host).first():
            continue
        short_host = real_host.split(".")[0]
        lines.append(
            "| %s | <a href=http://%s/ target=_blank>console</a> |\n" %
            (short_host, mgmt_name))
    return lines
def generator(_host_file, _days, _month, _year, _gentime):
    """Render the monthly host/cloud allocation table.

    :param _host_file: optional CSV file listing hosts; when absent all
        hosts from the database are used.
        NOTE(review): the CSV path yields row lists, which lack the
        ``.name`` attribute used below — confirm callers never pass it.
    :param _days: number of day columns to render.
    :param _month: month number.
    :param _year: year number.
    :param _gentime: generation timestamp passed through to the template.
    :return: rendered ``simple_table`` template content.
    """
    if _host_file:
        with open(_host_file, 'r') as f:
            reader = csv.reader(f)
            data = list(reader)
    else:
        data = Host.objects()
    lines = []
    __days = []
    for i, host in enumerate(data):
        line = {"hostname": host.name}
        __days = []
        for j in range(1, _days + 1):
            cell_date = "%s-%.2d-%.2d 01:00" % (_year, _month, j)
            cell_time = datetime.strptime(cell_date, '%Y-%m-%d %H:%M')
            schedule = Schedule.current_schedule(host=host,
                                                 date=cell_time).first()
            if schedule:
                # Cloud names are "cloudNN"; the suffix selects the color.
                chosen_color = schedule.cloud.name[5:]
            else:
                chosen_color = "01"
            _day = {
                "day": j,
                "chosen_color": chosen_color,
                "color": conf["visual_colors"]["cloud%s" % chosen_color],
                "cell_date": cell_date,
                "cell_time": cell_time
            }
            if schedule:
                # Tooltip/display metadata comes straight from the cloud.
                _day["display_description"] = schedule.cloud.description
                _day["display_owner"] = schedule.cloud.owner
                _day["display_ticket"] = schedule.cloud.ticket
            __days.append(_day)
        line["days"] = __days
        lines.append(line)
    with open(os.path.join(TEMPLATES_PATH, "simple_table")) as _file:
        template = Template(_file.read())
    content = template.render(
        gentime=_gentime,
        _days=_days,
        lines=lines,
    )
    return content
def available(search):
    """List hosts free in the searched window, excluding hosts Foreman
    reports as broken.

    :param search: validated search form carrying ``model`` (list of
        model names), ``start`` and ``end`` dates.
    :return: JSON array of ``{"name", "model"}`` dicts.
    """
    models = search.data['model']
    if models:
        # OR together one Q-filter per requested model.
        query = None
        for model in models:
            if query:
                query = query | Q(model=model.upper())
            else:
                query = Q(model=model.upper())
        hosts = Host.objects.filter(query)
    else:
        hosts = Host.objects().all()
    # A fresh event loop per call drives the async Foreman client.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    foreman = Foreman(
        conf["foreman_api_url"],
        conf["foreman_username"],
        conf["foreman_password"],
        loop=loop,
    )
    broken_hosts = loop.run_until_complete(foreman.get_broken_hosts())
    available_hosts = []
    # Window boundaries are pinned to midnight of the given days.
    start = datetime.combine(search.data['start'], time.min)
    end = datetime.combine(search.data['end'], time.min)
    if hosts:
        for host in hosts:
            if Schedule.is_host_available(
                    host=host["name"], start=start,
                    end=end) and not broken_hosts.get(host["name"], False):
                host_dict = {"name": host.name, "model": host.model}
                available_hosts.append(host_dict)
    return jsonify(available_hosts)
def available(search):
    """List hosts free between the searched dates.

    Optional model filters narrow the candidate set; window boundaries
    are pinned to 22:00 on the given start/end days. Each entry reports
    the host name, its current cloud, its model, and whether it has an
    active schedule right now.
    """
    model_filters = search.data["model"]
    if model_filters:
        # OR together one Q-filter per requested model.
        query = None
        for model_name in model_filters:
            term = Q(model=model_name.upper())
            query = term if query is None else query | term
        candidates = Host.objects.filter(query)
    else:
        candidates = Host.objects().all()

    window_start = datetime.combine(search.data["start"], time(hour=22))
    window_end = datetime.combine(search.data["end"], time(hour=22))

    results = []
    for host in candidates or []:
        if not Schedule.is_host_available(
                host=host["name"], start=window_start, end=window_end):
            continue
        results.append({
            "name": host.name,
            "cloud": host.cloud.name,
            "model": host.model,
            "current": bool(Schedule.current_schedule(host=host)),
        })
    return jsonify(results)
def POST(self, **data):
    """Create or update a schedule entry.

    Without ``index`` a new schedule is created (201); with ``index``
    the matching schedule is updated in place. Host availability over
    the requested window is enforced in both paths, and scheduling on
    a broken host is rejected as a validation error.
    """
    # make sure post data passed in is ready to pass to mongo engine
    result, data = Schedule.prep_data(data)
    _start = None
    _end = None
    if "start" in data:
        _start = datetime.datetime.strptime(data["start"], "%Y-%m-%d %H:%M")
    if "end" in data:
        _end = datetime.datetime.strptime(data["end"], "%Y-%m-%d %H:%M")
    _host = data["host"]
    _host_obj = Host.objects(name=_host).first()
    # Scheduling onto a broken host counts as a validation failure.
    broken_hosts = Host.objects(broken=True)
    if _host_obj in broken_hosts:
        result.append(f"Host {_host_obj.name} is in broken state")
    # Check if there were data validation errors
    if result:
        result = ["Data validation failed: %s" % ", ".join(result)]
        cherrypy.response.status = "400 Bad Request"
        return json.dumps({"result": result})
    cloud_obj = None
    if "cloud" in data:
        cloud_obj = Cloud.objects(name=data["cloud"]).first()
        if not cloud_obj:
            result.append("Provided cloud does not exist")
            cherrypy.response.status = "400 Bad Request"
            return json.dumps({"result": result})
    if "index" in data:
        # Update path: any omitted field falls back to the existing
        # schedule's value.
        data["host"] = _host_obj
        schedule = self.model.objects(index=data["index"],
                                      host=data["host"]).first()
        if schedule:
            if not _start:
                _start = schedule.start
            if not _end:
                _end = schedule.end
            if not cloud_obj:
                cloud_obj = schedule.cloud
            # Exclude this schedule itself from the availability check.
            if Schedule.is_host_available(host=_host,
                                          start=_start,
                                          end=_end,
                                          exclude=schedule.index):
                data["cloud"] = cloud_obj
                # Reset expiration warnings so they fire again for the
                # (possibly) new end date.
                notification_obj = Notification.objects(
                    cloud=cloud_obj, ticket=cloud_obj.ticket).first()
                if notification_obj:
                    notification_obj.update(
                        one_day=False,
                        three_days=False,
                        five_days=False,
                        seven_days=False,
                    )
                schedule.update(**data)
                result.append("Updated %s %s" % (self.name, schedule.index))
            else:
                result.append(
                    "Host is not available during that time frame")
    else:
        # Create path.
        try:
            if Schedule.is_host_available(host=_host,
                                          start=_start,
                                          end=_end):
                # Extending an already-validated cloud: keep the host
                # validated unless the cloud is set to wipe, and clear
                # the "success" notification so it is re-sent.
                if (self.model.current_schedule(cloud=cloud_obj)
                        and cloud_obj.validated):
                    if not cloud_obj.wipe:
                        _host_obj.update(validated=True)
                    notification_obj = Notification.objects(
                        cloud=cloud_obj, ticket=cloud_obj.ticket).first()
                    if notification_obj:
                        notification_obj.update(success=False)
                schedule = Schedule()
                data["cloud"] = cloud_obj
                schedule.insert_schedule(**data)
                cherrypy.response.status = "201 Resource Created"
                result.append("Added schedule for %s on %s" %
                              (data["host"], cloud_obj.name))
            else:
                result.append(
                    "Host is not available during that time frame")
        except Exception as e:
            # TODO: make sure when this is thrown the output
            # points back to here and gives the end user
            # enough information to fix the issue
            cherrypy.response.status = "500 Internal Server Error"
            result.append("Error: %s" % e)
    return json.dumps({"result": result})
def main():
    """Build the wiki ``assignments.md`` page: summary section,
    per-cloud detail tables, unmanaged systems and faulty hosts.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    foreman = Foreman(
        conf["foreman_api_url"],
        conf["foreman_username"],
        conf["foreman_password"],
        loop=loop,
    )
    lines = []
    all_hosts = loop.run_until_complete(foreman.get_all_hosts())
    # Hosts matching any excluded substring are skipped entirely.
    blacklist = re.compile("|".join(
        [re.escape(word) for word in conf["exclude_hosts"].split("|")]))
    broken_hosts = Host.objects(broken=True)
    domain_broken_hosts = [
        host for host in broken_hosts if conf["domain"] in host.name
    ]
    mgmt_hosts = {}
    for host, properties in all_hosts.items():
        if not blacklist.search(host):
            if properties.get("sp_name", False):
                # Record the service-processor view, keyed by sp_name,
                # keeping the OS-facing addresses under host_ip/host_mac.
                properties["host_ip"] = all_hosts.get(host, {"ip": None})["ip"]
                properties["host_mac"] = all_hosts.get(host,
                                                       {"mac": None})["mac"]
                properties["ip"] = properties.get("sp_ip")
                properties["mac"] = properties.get("sp_mac")
                mgmt_hosts[properties.get("sp_name")] = properties
    lines.append("### **SUMMARY**\n")
    _summary = print_summary()
    lines.extend(_summary)
    details_header = ["\n", "### **DETAILS**\n", "\n"]
    lines.extend(details_header)
    # Per-cloud summary comes from the QUADS REST API.
    summary_response = requests.get(os.path.join(API_URL, "summary"))
    _cloud_summary = []
    if summary_response.status_code == 200:
        _cloud_summary = summary_response.json()
    for cloud in [cloud for cloud in _cloud_summary if cloud["count"] > 0]:
        name = cloud["name"]
        owner = cloud["owner"]
        lines.append("### <a name=%s></a>\n" % name.strip())
        lines.append(
            "### **%s : %s (%s) -- %s**\n\n" %
            (name.strip(), cloud["count"], cloud["description"], owner))
        lines.extend(print_header())
        _cloud_obj = Cloud.objects(name=name).first()
        _hosts = sorted(
            Host.objects(cloud=_cloud_obj, retired=False, broken=False),
            key=lambda x: x.name,
        )
        for host in _hosts:
            lines.extend(add_row(host))
        lines.append("\n")
    lines.extend(print_unmanaged(mgmt_hosts))
    lines.extend(print_faulty(domain_broken_hosts))
    _full_path = os.path.join(conf["wp_wiki_git_repo_path"],
                              "assignments.md")
    if not os.path.exists(conf["wp_wiki_git_repo_path"]):
        pathlib.Path(conf["wp_wiki_git_repo_path"]).mkdir(parents=True,
                                                          exist_ok=True)
    with open(_full_path, "w+") as _f:
        _f.seek(0)
        for cloud in lines:
            # Guard against None entries from the helper renderers.
            _line = cloud if cloud else ""
            _f.write(_line)
        _f.truncate()
def main():
    """Generate per-cloud TripleO ``instackenv.json`` files under the
    web path, pruning timestamped copies older than the retention
    window. Only runs when ``openstack_management`` is enabled.
    """
    if conf["openstack_management"]:
        foreman = Foreman(conf["foreman_api_url"], conf["foreman_username"],
                          conf["foreman_password"])
        cloud_list = Cloud.objects()
        if not os.path.exists(conf["json_web_path"]):
            os.makedirs(conf["json_web_path"])
        # Timestamped copies contain ':'; prune ones past retention.
        now = time.time()
        old_jsons = [
            file for file in os.listdir(conf["json_web_path"])
            if ":" in file
        ]
        for file in old_jsons:
            if os.stat(os.path.join(conf["json_web_path"], file)
                       ).st_mtime < now - conf["json_retention_days"] * 86400:
                os.remove(os.path.join(conf["json_web_path"], file))
        for cloud in cloud_list:
            host_list = Host.objects(cloud=cloud).order_by("name")
            # The default IPMI password; a ticketed cloud uses its ticket.
            foreman_password = conf["ipmi_password"]
            if cloud.ticket:
                foreman_password = cloud.ticket
            json_data = defaultdict(list)
            # NOTE(review): the first host (alphabetically) is skipped —
            # presumably reserved as the undercloud/director node; confirm.
            for host in host_list[1:]:
                if conf["foreman_unavailable"]:
                    overcloud = {"result": "true"}
                else:
                    overcloud = foreman.get_host_param(host.name, "overcloud")
                if not overcloud:
                    overcloud = {"result": "true"}
                if "result" in overcloud and strtobool(overcloud["result"]):
                    mac = "00:00:00:00:00:00"
                    # NOTE(review): uses the second interface's MAC when
                    # available — TODO confirm this is the provisioning NIC.
                    if len(host.interfaces) > 1:
                        mac = host.interfaces[1].mac_address
                    json_data['nodes'].append({
                        'pm_password': foreman_password,
                        'pm_type': "pxe_ipmitool",
                        'mac': [mac],
                        'cpu': "2",
                        'memory': "1024",
                        'disk': "20",
                        'arch': "x86_64",
                        'pm_user': conf["ipmi_cloud_username"],
                        'pm_addr': "mgmt-%s" % host.name
                    })
            content = json.dumps(json_data, indent=4, sort_keys=True)
            if not os.path.exists(conf["json_web_path"]):
                pathlib.Path(conf["json_web_path"]).mkdir(parents=True,
                                                          exist_ok=True)
            now = datetime.now()
            # Write a timestamped copy, then publish at the stable name.
            new_json_file = os.path.join(
                conf["json_web_path"],
                "%s_instackenv.json_%s" %
                (cloud.name, now.strftime("%Y-%m-%d_%H:%M:%S")))
            json_file = os.path.join(conf["json_web_path"],
                                     "%s_instackenv.json" % cloud.name)
            with open(new_json_file, "w+") as _json_file:
                _json_file.seek(0)
                _json_file.write(content)
            os.chmod(new_json_file, 0o644)
            copyfile(new_json_file, json_file)
def switch_config(host, old_cloud, new_cloud):
    """Reconfigure switch ports for every interface of *host* when it
    moves from *old_cloud* to *new_cloud*.

    Iterates interfaces sorted by name, reusing a single SSH session
    per switch IP. When the new cloud defines a public VLAN, the last
    NIC is converted to it instead of the per-cloud internal VLAN.

    :return: True on success, False on connection or update failure.
    """
    _host_obj = Host.objects(name=host).first()
    _old_cloud_obj = Cloud.objects(name=old_cloud).first()
    _new_cloud_obj = Cloud.objects(name=new_cloud).first()
    if not _host_obj.interfaces:
        logger.error("Host has no interfaces defined.")
        return False
    logger.debug("Connecting to switch on: %s" %
                 _host_obj.interfaces[0].switch_ip)
    switch_ip = None
    ssh_helper = None
    interfaces = sorted(_host_obj.interfaces, key=lambda k: k["name"])
    for i, interface in enumerate(interfaces):
        last_nic = i == len(_host_obj.interfaces) - 1
        if not switch_ip:
            # First interface: open the initial switch session.
            switch_ip = interface.switch_ip
            try:
                ssh_helper = SSHHelper(switch_ip, Config["junos_username"])
            except SSHHelperException:
                logger.error(f"Failed to connect to switch: {switch_ip}")
                return False
        else:
            if switch_ip != interface.switch_ip:
                # Interface lives on a different switch; reconnect.
                ssh_helper.disconnect()
                switch_ip = interface.switch_ip
                ssh_helper = SSHHelper(switch_ip, Config["junos_username"])
        # Read the port's current VLAN from the running configuration.
        result, old_vlan_out = ssh_helper.run_cmd(
            "show configuration interfaces %s" % interface.switch_port)
        old_vlan = None
        if result and old_vlan_out:
            old_vlan = old_vlan_out[0].split(";")[0].split()[1][7:]
        if not old_vlan:
            if not _new_cloud_obj.vlan and not last_nic:
                logger.warning(
                    "Warning: Could not determine the previous VLAN for %s on %s, switch %s, switchport %s"
                    % (
                        host,
                        interface.name,
                        interface.switch_ip,
                        interface.switch_port,
                    ))
            # Fall back to the VLAN derived from the old cloud.
            old_vlan = get_vlan(_old_cloud_obj, i)
        new_vlan = get_vlan(_new_cloud_obj, i)
        if _new_cloud_obj.vlan and last_nic:
            # Public VLAN handling for the last NIC.
            if int(old_vlan) != int(_new_cloud_obj.vlan.vlan_id):
                logger.info("Setting last interface to public vlan %s." %
                            new_vlan)
                juniper = Juniper(
                    interface.switch_ip,
                    interface.switch_port,
                    old_vlan,
                    _new_cloud_obj.vlan.vlan_id,
                )
                success = juniper.convert_port_public()
                if success:
                    logger.info("Successfully updated switch settings.")
                else:
                    logger.error(
                        "There was something wrong updating switch for %s:%s"
                        % (host, interface.name))
                    return False
        else:
            if int(old_vlan) != int(new_vlan):
                juniper = Juniper(interface.switch_ip,
                                  interface.switch_port, old_vlan, new_vlan)
                success = juniper.set_port()
                if success:
                    logger.info("Successfully updated switch settings.")
                else:
                    logger.error(
                        "There was something wrong updating switch for %s:%s"
                        % (host, interface.name))
                    return False
    if ssh_helper:
        ssh_helper.disconnect()
    return True
async def move_and_rebuild(host, new_cloud, semaphore, rebuild=False,
                           loop=None):
    """Move *host* to *new_cloud*: rotate IPMI credentials, optionally
    reprovision via Badfish/Foreman, and update schedule/host records.

    :param host: hostname to move.
    :param new_cloud: target cloud name.
    :param semaphore: asyncio semaphore handed to the Foreman client.
    :param rebuild: when True and the target is not the host's default
        cloud, the box is rebuilt.
    :param loop: event loop used for the virtual-media unmount path.
    :return: True on success, False on any fatal failure.
    """
    build_start = datetime.now()
    logger.debug("Moving and rebuilding host: %s" % host)
    untouchable_hosts = Config["untouchable_hosts"]
    logger.debug("Untouchable hosts: %s" % untouchable_hosts)
    _host_obj = Host.objects(name=host).first()
    if host in untouchable_hosts:
        logger.error("No way...")
        return False
    _target_cloud = Cloud.objects(name=new_cloud).first()
    # Ticketed clouds get a per-ticket IPMI password.
    ipmi_new_pass = (f"{Config['infra_location']}@{_target_cloud.ticket}"
                     if _target_cloud.ticket else Config["ipmi_password"])
    ipmi_set_pass = [
        "user",
        "set",
        "password",
        str(Config["ipmi_cloud_username_id"]),
        ipmi_new_pass,
    ]
    new_semaphore = asyncio.Semaphore(20)
    await execute_ipmi(host, arguments=ipmi_set_pass,
                       semaphore=new_semaphore)
    # Grant operator (0x4) privilege to the cloud IPMI user.
    ipmi_set_operator = [
        "user", "priv",
        str(Config["ipmi_cloud_username_id"]), "0x4"
    ]
    await execute_ipmi(host,
                       arguments=ipmi_set_operator,
                       semaphore=new_semaphore)
    badfish = None
    if rebuild and _target_cloud.name != _host_obj.default_cloud.name:
        if Config.pdu_management:
            # TODO: pdu management
            pass
        try:
            badfish = await badfish_factory(
                "mgmt-%s" % host,
                Config["ipmi_username"],
                Config["ipmi_password"],
                propagate=True,
            )
        except BadfishException:
            logger.error(
                f"Could not initialize Badfish. Verify ipmi credentials for mgmt-{host}."
            )
            return False
        if is_supported(host):
            try:
                interfaces_path = os.path.join(
                    os.path.dirname(__file__),
                    "../../conf/idrac_interfaces.yml")
                await badfish.change_boot("director", interfaces_path)
                # wait 10 minutes for the boot order job to complete
                await asyncio.sleep(600)
            except BadfishException:
                logger.error(
                    f"Could not set boot order via Badfish for mgmt-{host}.")
                return False
        try:
            await badfish.set_power_state("on")
        except BadfishException:
            logger.error(f"Failed to power on {host}")
            return False
        # Push OS/partition/media defaults and ownership to Foreman.
        foreman_results = []
        params = [
            {
                "name": "operatingsystems",
                "value": Config["foreman_default_os"],
                "identifier": "title",
            },
            {
                "name": "ptables",
                "value": Config["foreman_default_ptable"]
            },
            {
                "name": "media",
                "value": Config["foreman_default_medium"]
            },
        ]
        foreman = Foreman(
            Config["foreman_api_url"],
            Config["foreman_username"],
            Config["foreman_password"],
            semaphore=semaphore,
            loop=loop,
        )
        set_result = await foreman.set_host_parameter(host, "overcloud",
                                                      "true")
        foreman_results.append(set_result)
        put_result = await foreman.put_parameter(host, "build", 1)
        foreman_results.append(put_result)
        put_param_result = await foreman.put_parameters_by_name(host, params)
        foreman_results.append(put_param_result)
        owner_id = await foreman.get_user_id(new_cloud)
        host_id = await foreman.get_host_id(host)
        put_result = await foreman.put_element("hosts", host_id, "owner_id",
                                               owner_id)
        foreman_results.append(put_result)
        for result in foreman_results:
            if isinstance(result, Exception) or not result:
                logger.error(
                    "There was something wrong setting Foreman host parameters."
                )
                return False
        if is_supported(host):
            # Supported (Badfish-managed) path: PXE boot via Badfish.
            try:
                await badfish.boot_to_type(
                    "foreman",
                    os.path.join(os.path.dirname(__file__),
                                 "../../conf/idrac_interfaces.yml"),
                )
                await badfish.reboot_server(graceful=False)
            except BadfishException:
                logger.error(f"Error setting PXE boot via Badfish on {host}.")
                await badfish.reboot_server(graceful=False)
                return False
        else:
            # Fallback path: unmount media, set persistent PXE via IPMI.
            try:
                asyncio.run_coroutine_threadsafe(
                    badfish.unmount_virtual_media(),
                    loop,
                )
            except BadfishException:
                logger.warning(
                    f"Could not unmount virtual media for mgmt-{host}.")
            try:
                ipmi_pxe_persistent = [
                    "chassis",
                    "bootdev",
                    "pxe",
                    "options=persistent",
                ]
                await execute_ipmi(host,
                                   arguments=ipmi_pxe_persistent,
                                   semaphore=new_semaphore)
                await ipmi_reset(host, new_semaphore)
            except Exception as ex:
                logger.debug(ex)
                logger.error(
                    f"There was something wrong setting PXE flag or resetting IPMI on {host}."
                )
    if _target_cloud.name == _host_obj.default_cloud.name:
        # Returning to the default cloud: power the box off.
        if not badfish:
            try:
                badfish = await badfish_factory(
                    "mgmt-%s" % host,
                    Config["ipmi_username"],
                    Config["ipmi_password"],
                    propagate=True,
                )
            except BadfishException:
                logger.error(
                    f"Could not initialize Badfish. Verify ipmi credentials for mgmt-{host}."
                )
                return False
        await badfish.set_power_state("off")
        source_cloud_schedule = Schedule.current_schedule(
            cloud=_host_obj.cloud.name)
        if not source_cloud_schedule:
            # The source cloud has no remaining schedules; free its VLAN.
            _old_cloud_obj = Cloud.objects(name=_host_obj.cloud.name).first()
            _old_cloud_obj.update(vlan=None)
    schedule = Schedule.current_schedule(cloud=_target_cloud,
                                         host=_host_obj).first()
    if schedule:
        schedule.update(build_start=build_start, build_end=datetime.now())
        schedule.save()
    # NOTE(review): "%s" has no argument, so this logs the literal
    # placeholder — probably intended `% host`.
    logger.debug("Updating host: %s")
    _host_obj.update(cloud=_target_cloud,
                     build=False,
                     last_build=datetime.now(),
                     validated=False)
    return True
def make_env_json(filename):
    """Write per-cloud ``<cloud>_<filename>.json`` environment files
    under the web path, pruning timestamped copies past retention.

    :param filename: "instackenv" (only PXE-boot NIC MACs) or
        "ocpinventory" (all NIC MACs).
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    foreman = Foreman(
        conf["foreman_api_url"],
        conf["foreman_username"],
        conf["foreman_password"],
        loop=loop,
    )
    cloud_list = Cloud.objects()
    if not os.path.exists(conf["json_web_path"]):
        os.makedirs(conf["json_web_path"])
    # Timestamped copies contain ':'; prune ones past retention.
    now = time.time()
    old_jsons = [
        file for file in os.listdir(conf["json_web_path"]) if ":" in file
    ]
    for file in old_jsons:
        if (os.stat(os.path.join(conf["json_web_path"], file)).st_mtime <
                now - conf["json_retention_days"] * 86400):
            os.remove(os.path.join(conf["json_web_path"], file))
    for cloud in cloud_list:
        host_list = Host.objects(cloud=cloud).order_by("name")
        # Default IPMI password; a ticketed cloud gets location@ticket.
        foreman_password = conf["ipmi_password"]
        if cloud.ticket:
            foreman_password = f"{conf['infra_location']}@{cloud.ticket}"
        data = defaultdict(list)
        for host in host_list:
            if conf["foreman_unavailable"]:
                overcloud = {"result": "true"}
            else:
                overcloud = loop.run_until_complete(
                    foreman.get_host_param(host.name, "overcloud"))
            if not overcloud:
                overcloud = {"result": "true"}
            # Foreman may return a bool or a truthy string; normalize.
            if type(overcloud["result"]) != bool:
                try:
                    _overcloud_result = strtobool(overcloud["result"])
                except ValueError:
                    print(
                        f"WARN: {host.name} overcloud value is not set correctly."
                    )
                    _overcloud_result = 1
            else:
                _overcloud_result = overcloud["result"]
            if "result" in overcloud and _overcloud_result:
                mac = []
                if filename == "instackenv":
                    # Only NICs flagged for PXE boot.
                    for interface in host.interfaces:
                        if interface.pxe_boot:
                            mac.append(interface.mac_address)
                if filename == "ocpinventory":
                    # All NICs.
                    mac = [
                        interface.mac_address
                        for interface in host.interfaces
                    ]
                data["nodes"].append({
                    "pm_password": foreman_password,
                    "pm_type": "pxe_ipmitool",
                    "mac": mac,
                    "cpu": "2",
                    "memory": "1024",
                    "disk": "20",
                    "arch": "x86_64",
                    "pm_user": conf["ipmi_cloud_username"],
                    "pm_addr": "mgmt-%s" % host.name,
                })
        content = json.dumps(data, indent=4, sort_keys=True)
        if not os.path.exists(conf["json_web_path"]):
            pathlib.Path(conf["json_web_path"]).mkdir(parents=True,
                                                      exist_ok=True)
        now = datetime.now()
        # Write a timestamped copy, then publish at the stable name.
        new_json_file = os.path.join(
            conf["json_web_path"],
            "%s_%s.json_%s" %
            (cloud.name, filename, now.strftime("%Y-%m-%d_%H:%M:%S")),
        )
        json_file = os.path.join(conf["json_web_path"],
                                 "%s_%s.json" % (cloud.name, filename))
        with open(new_json_file, "w+") as _json_file:
            _json_file.seek(0)
            _json_file.write(content)
        os.chmod(new_json_file, 0o644)
        copyfile(new_json_file, json_file)
def verify(_host_name,
           change=False,
           nic1=None,
           nic2=None,
           nic3=None,
           nic4=None,
           nic5=None):
    """Verify (and optionally fix) switch VLAN settings for a host's NICs.

    For each interface em1..em5 that has a requested VLAN, reads the current
    interface VLAN and VLAN membership from the Juniper switch and logs any
    mismatch.  With change=True, mismatched membership is corrected via
    Juniper.set_port().

    :param _host_name: host to verify.
    :param change: when True, reconfigure mismatched switch ports.
    :param nic1..nic5: expected VLAN id for em1..em5 (None = skip that NIC).
    """
    _nics = {"em1": nic1, "em2": nic2, "em3": nic3, "em4": nic4, "em5": nic5}
    _host_obj = Host.objects(name=_host_name).first()
    if not _host_obj:
        logger.error("Hostname not found.")
        return
    logger.info(f"Host: {_host_obj.name}")
    if not _host_obj.interfaces:
        logger.error(f"The host has no interfaces defined")
        return
    interfaces = sorted(_host_obj.interfaces, key=lambda k: k["name"])
    for interface in interfaces:
        vlan = _nics.get(interface.name)
        if not vlan:
            continue
        ssh_helper = SSHHelper(interface.switch_ip, Config["junos_username"])
        # BUG FIX: disconnect in a finally block so the SSH session is not
        # leaked when run_cmd raises anything other than IndexError.
        try:
            try:
                _, old_vlan_out = ssh_helper.run_cmd(
                    "show configuration interfaces %s" %
                    interface.switch_port)
                old_vlan = old_vlan_out[0].split(";")[0].split()[1]
                # Values look like "QinQ_vlNNN"; strip the prefix to get the
                # numeric VLAN id.
                if old_vlan.startswith("QinQ"):
                    old_vlan = old_vlan[7:]
            except IndexError:
                # Unparsable output: treat as "no VLAN configured".
                old_vlan = 0
            try:
                _, vlan_member_out = ssh_helper.run_cmd(
                    "show configuration vlans | display set | match %s.0" %
                    interface.switch_port)
                vlan_member = vlan_member_out[0].split()[2][4:].strip(",")
            except IndexError:
                logger.warning(
                    "Could not determine the previous VLAN member for %s, switch %s, switch port %s "
                    % (
                        interface.name,
                        interface.switch_ip,
                        interface.switch_port,
                    ))
                vlan_member = 0
        finally:
            ssh_helper.disconnect()
        if int(old_vlan) != int(vlan):
            logger.warning("Interface %s not using QinQ_vl%s",
                           interface.switch_port, vlan)
        if int(vlan_member) != int(vlan):
            logger.warning(
                "Interface %s appears to be a member of VLAN %s, should be %s",
                interface.switch_port,
                vlan_member,
                vlan,
            )
            if change:
                logger.info(f"Change requested for {interface.name}")
                juniper = Juniper(
                    interface.switch_ip,
                    interface.switch_port,
                    vlan_member,
                    vlan,
                )
                success = juniper.set_port()
                if success:
                    logger.info("Successfully updated switch settings.")
                else:
                    logger.error(
                        f"There was something wrong updating switch for {interface.name}"
                    )
        else:
            logger.info(
                f"Interface {interface.name} is already configured for vlan{vlan}"
            )
if metric: factor = 1000 return round(num / (factor**3)) for _d, _, _files in os.walk(MD_DIR): for _file in _files: filename = os.path.join(MD_DIR, _file) if os.path.getsize(filename): path, extension = os.path.splitext(filename) if extension == ".json": with open(filename) as _f: data = json.load(_f) children = parse('$..children[*]').find(data) hostname = parse('$.id').find(data)[0].value host_obj = Host.objects(name=hostname).first() if not host_obj: print(f"Host not found: {hostname}") break # interfaces for child in [ child for child in children if child.value["class"] == "network" ]: if child.value.get('vendor'): for host_interface in host_obj.interfaces: if host_interface.mac_address == child.value[ "serial"]: host_interface.vendor = child.value.get( 'vendor') host_interface.logical_name = child.value.get(
def GET(self, **data):
    """Read endpoint dispatcher keyed on ``self.name``.

    Handles "host", "ccuser", "cloud", "available", "summary", "qinq",
    "broken" and "retired" views; anything else falls through to a plain
    ``self.model.objects(**args)`` query.  Returns a JSON string.
    """
    args = {}
    _cloud = None
    _host = None
    if "cloudonly" in data:
        # NOTE(review): every other Cloud lookup here filters by "name";
        # "cloud" as a Cloud field kwarg looks suspicious — confirm against
        # the Cloud model definition.
        _cloud = Cloud.objects(cloud=data["cloudonly"])
        if not _cloud:
            cherrypy.response.status = "404 Not Found"
            return json.dumps(
                {"result": "Cloud %s Not Found" % data["cloudonly"]})
        else:
            return _cloud.to_json()
    if self.name == "host":
        # Host lookup priority: id > name > cloud > all.
        if "id" in data:
            _host = Host.objects(id=data["id"]).first()
        elif "name" in data:
            _host = Host.objects(name=data["name"]).first()
        elif "cloud" in data:
            _cloud = Cloud.objects(name=data["cloud"]).first()
            _host = Host.objects(cloud=_cloud)
        else:
            _host = Host.objects()
        if not _host:
            return json.dumps({"result": ["Nothing to do."]})
        return _host.to_json()
    if self.name == "ccuser":
        # Per-cloud summary including the cc-user list.
        _clouds = self.model.objects().all()
        clouds_summary = []
        for cloud in _clouds:
            count = Schedule.current_schedule(cloud=cloud).count()
            clouds_summary.append({
                "name": cloud.name,
                "count": count,
                "description": cloud.description,
                "owner": cloud.owner,
                "ticket": cloud.ticket,
                "ccuser": cloud.ccuser,
                "provisioned": cloud.provisioned,
            })
        return json.dumps(clouds_summary)
    if self.name == "cloud":
        if "id" in data:
            _cloud = Cloud.objects(id=data["id"]).first()
        elif "name" in data:
            _cloud = Cloud.objects(name=data["name"]).first()
        elif "owner" in data:
            # BUG FIX: was `Cloud.to_json(owner=...).first()`, which invoked
            # the serializer instead of running a query; query with
            # objects() like the other branches.
            _cloud = Cloud.objects(owner=data["owner"]).first()
        if _cloud:
            return _cloud.to_json()
    if self.name == "available":
        # Hosts with no schedule overlapping [start, end]; both default to
        # "now" when not supplied.
        _start = _end = datetime.datetime.now()
        if "start" in data:
            _start = datetime.datetime.strptime(data["start"],
                                                "%Y-%m-%dT%H:%M:%S")
        if "end" in data:
            _end = datetime.datetime.strptime(data["end"],
                                              "%Y-%m-%dT%H:%M:%S")
        available = []
        all_hosts = Host.objects().all()
        for host in all_hosts:
            if Schedule.is_host_available(host=host["name"],
                                          start=_start,
                                          end=_end):
                available.append(host.name)
        return json.dumps(available)
    if self.name == "summary":
        _clouds = Cloud.objects().all()
        clouds_summary = []
        total_count = 0
        for cloud in _clouds:
            if cloud.name == "cloud01":
                # cloud01 is the free pool: count usable hosts directly.
                count = Host.objects(cloud=cloud, retired=False,
                                     broken=False).count()
            else:
                date = datetime.datetime.now()
                if "date" in data:
                    date = datetime.datetime.strptime(
                        data["date"], "%Y-%m-%dT%H:%M:%S")
                count = self.model.current_schedule(cloud=cloud,
                                                    date=date).count()
                total_count += count
            clouds_summary.append({
                "name": cloud.name,
                "count": count,
                "description": cloud.description,
                "owner": cloud.owner,
                "ticket": cloud.ticket,
                "ccuser": cloud.ccuser,
                "provisioned": cloud.provisioned,
                "validated": cloud.validated,
            })
        if "date" in data:
            # For a historical date, cloud01's size is whatever was not
            # scheduled elsewhere at that time.
            host_count = Host.objects(retired=False, broken=False).count()
            for cloud in clouds_summary:
                if cloud["name"] == "cloud01":
                    cloud["count"] = host_count - total_count
        return json.dumps(clouds_summary)
    if self.name == "qinq":
        _clouds = Cloud.objects().all()
        clouds_qinq = []
        for cloud in _clouds:
            _type = "Isolated"
            if cloud.qinq == 1:
                _type = "Combined"
            qinq_value = f"{cloud.qinq} ({_type})"
            clouds_qinq.append({"name": cloud.name, "qinq": qinq_value})
        return json.dumps(clouds_qinq)
    if self.name == "broken":
        _hosts = self.model.objects(broken=True)
        broken = []
        for host in _hosts:
            broken.append(host.name)
        return json.dumps(broken)
    if self.name == "retired":
        hosts = [host.name for host in self.model.objects(retired=True)]
        return json.dumps(hosts)
    objs = self.model.objects(**args)
    if objs:
        return objs.to_json()
    else:
        return json.dumps({"result": ["No results."]})
def print_summary():
    """Build the markdown summary table of active clouds.

    Fetches the per-cloud summary from the QUADS API and renders one markdown
    table row per cloud with owner, ticket link, validation status bar and
    (depending on configuration) OpenStack/OpenShift inventory and Ansible
    facts links.  Returns the list of markdown lines.
    """
    _summary = []
    _headers = [
        "**NAME**",
        "**SUMMARY**",
        "**OWNER**",
        "**REQUEST**",
        '<span id="status">**STATUS**</span>',
    ]
    # Optional columns are driven by feature toggles in conf.
    if conf["openstack_management"]:
        _headers.append("**OSPENV**")
    if conf["openshift_management"]:
        _headers.append("**OCPINV**")
    if conf["gather_ansible_facts"]:
        _headers.append("**HWFACTS**")
    _summary.append("| %s |\n" % " | ".join(_headers))
    _summary.append("| %s |\n" %
                    " | ".join(["---" for _ in range(len(_headers))]))
    _cloud_response = requests.get(os.path.join(API_URL, "summary"))
    _cloud_summary = []
    if _cloud_response.status_code == 200:
        _cloud_summary = _cloud_response.json()
    for cloud in [cloud for cloud in _cloud_summary if cloud["count"] > 0]:
        cloud_name = cloud["name"]
        desc = "%s (%s)" % (cloud["count"], cloud["description"])
        owner = cloud["owner"]
        ticket = cloud["ticket"]
        link = "<a href=%s/%s-%s target=_blank>%s</a>" % (
            conf["ticket_url"],
            conf["ticket_queue"],
            ticket,
            ticket,
        )
        cloud_specific_tag = "%s_%s_%s" % (cloud_name, owner, ticket)
        style_tag_end = "</span>"
        if cloud["validated"] or cloud_name == "cloud01":
            # Validated (or the free pool): green links and a full bar.
            style_tag_start = '<span style="color:green">'
            instack_link = os.path.join(conf["quads_url"], "cloud",
                                        "%s_instackenv.json" % cloud_name)
            instack_text = "download"
            ocpinv_link = os.path.join(conf["quads_url"], "cloud",
                                       "%s_ocpinventory.json" % cloud_name)
            ocpinv_text = "download"
            status = (
                '<span class="progress" style="margin-bottom:0px"><span role="progressbar" aria-valuenow="100" '
                'aria-valuemin="0" aria-valuemax="100" style="width:100%" class="progress-bar">100%</span></span> '
            )
        else:
            # Still validating: progress = hosts moved vs hosts scheduled.
            cloud_obj = Cloud.objects(name=cloud_name).first()
            scheduled_hosts = Schedule.current_schedule(
                cloud=cloud_obj).count()
            moved_hosts = Host.objects(cloud=cloud_obj).count()
            # BUG FIX: guard against ZeroDivisionError when nothing is
            # scheduled for this cloud yet.
            if scheduled_hosts:
                percent = moved_hosts / scheduled_hosts * 100
            else:
                percent = 0
            style_tag_start = '<span style="color:red">'
            instack_link = "#"
            instack_text = "validating"
            ocpinv_link = "#"
            ocpinv_text = "validating"
            if percent < 15:
                classes = [
                    "progress-bar",
                    "progress-bar-striped",
                    "progress-bar-danger",
                    "active",
                ]
                status = (
                    '<span class="progress" style="margin-bottom:0px"><span role="progressbar" '
                    'aria-valuenow="100" aria-valuemin="0" aria-valuemax="100" style="width:100%%" '
                    'class="%s">%.0f%%</span></span>' %
                    (" ".join(classes), percent))
            else:
                classes = [
                    "progress-bar",
                    "progress-bar-striped",
                    "progress-bar-warning",
                    "active",
                ]
                status = (
                    '<span class="progress" style="margin-bottom:0px"><span role="progressbar" '
                    'aria-valuenow="%.0f" aria-valuemin="0" aria-valuemax="100" style="width:%.0f%%" '
                    'class="%s">%.0f%%</span></span>' %
                    (percent, percent, " ".join(classes), percent))
        _data = [
            "[%s%s%s](#%s)" %
            (style_tag_start, cloud_name, style_tag_end, cloud_name),
            desc,
            owner,
            link,
        ]
        if conf["gather_ansible_facts"]:
            factstyle_tag_end = "</span>"
            # Green facts link only when the overview page already exists.
            if os.path.exists(
                    os.path.join(
                        conf["ansible_facts_web_path"],
                        "ansible_facts",
                        "%s_overview.html" % cloud_specific_tag,
                    )):
                factstyle_tag_start = '<span style="color:green">'
                ansible_facts_link = os.path.join(
                    conf["quads_url"],
                    "ansible_facts",
                    "%s_overview.html" % cloud_specific_tag,
                )
            else:
                factstyle_tag_start = '<span style="color:red">'
                ansible_facts_link = os.path.join(conf["quads_url"],
                                                  "underconstruction")
            if cloud_name == "cloud01":
                # The free pool has no inventories or facts pages.
                _data.append("")
                _data.append("")
                _data.append(status)
                _data.append("")
            else:
                _data.append("<a href=%s target=_blank>%s%s%s</a>" %
                             (instack_link, style_tag_start, instack_text,
                              style_tag_end))
                _data.append("<a href=%s target=_blank>%s%s%s</a>" %
                             (ocpinv_link, style_tag_start, ocpinv_text,
                              style_tag_end))
                _data.append(status)
                _data.append("<a href=%s target=_blank>%sinventory%s</a>" %
                             (ansible_facts_link, factstyle_tag_start,
                              factstyle_tag_end))
        else:
            _data.append(status)
            if cloud_name == "cloud01":
                if conf["openstack_management"]:
                    _data.append("")
                if conf["openshift_management"]:
                    _data.append("")
            else:
                if conf["openstack_management"]:
                    _data.append("<a href=%s target=_blank>%s%s%s</a>" %
                                 (instack_link, style_tag_start,
                                  instack_text, style_tag_end))
                if conf["openshift_management"]:
                    _data.append("<a href=%s target=_blank>%s%s%s</a>" %
                                 (ocpinv_link, style_tag_start, ocpinv_text,
                                  style_tag_end))
        _summary.append("| %s |\n" % " | ".join(_data))
    _hosts = Host.objects(broken=False, retired=False)
    _host_count = len(_hosts)
    _schedules = Schedule.current_schedule().count()
    # BUG FIX: avoid ZeroDivisionError when there are no usable hosts.
    if _host_count:
        _daily_percentage = _schedules * 100 // _host_count
    else:
        _daily_percentage = 0
    _summary.append(f"| Total | {_host_count} |\n")
    _summary.append("\n")
    _summary.append(f"Daily Utilization: {_daily_percentage}% \n")
    _summary.append("\n")
    _summary.append("[Unmanaged Hosts](#unmanaged)\n")
    _summary.append("\n")
    _summary.append("[Faulty Hosts](#faulty)\n")
    return _summary
def generator(_host_file, _days, _month, _year, _gentime):
    """Render the monthly allocation heat-map (emoji table) template.

    Builds one row per host with one cell per day of the month, colored by
    the cloud the host is scheduled to (cloud01 = unallocated), and renders
    the "simple_table_emoji" template with utilization figures.

    :param _host_file: optional CSV file of hosts; defaults to all
        non-retired, non-broken hosts from the database.
    :param _days: number of days in the month to render.
    :param _month: month number used for the cell dates.
    :param _year: year used for the cell dates.
    :param _gentime: generation timestamp passed to the template.
    :return: the rendered template content.
    """
    if _host_file:
        with open(_host_file, "r") as f:
            reader = csv.reader(f)
            hosts = list(reader)
    else:
        hosts = sorted(Host.objects(retired=False, broken=False),
                       key=lambda x: x.name)
    lines = []
    non_allocated_count = 0
    # Sample 100 emoji codepoints from two unicode ranges, dropping two
    # glyphs that render poorly; one color per cloud, cloud01 fixed to gray.
    all_samples = []
    all_samples.extend(range(129296, 129510))
    all_samples.extend(range(128000, 128252))
    samples = random.sample(all_samples, 100)
    exclude = [129401, 129484]
    emojis = [emoji for emoji in samples if emoji not in exclude]
    colors = [random_color() for _ in range(100)]
    colors[0] = "#A9A9A9"
    for host in hosts:
        line = {"hostname": host.name}
        __days = []
        for j in range(1, _days + 1):
            cell_date = "%s-%.2d-%.2d 01:00" % (_year, _month, j)
            cell_time = datetime.strptime(cell_date, "%Y-%m-%d %H:%M")
            schedule = Schedule.current_schedule(host=host,
                                                 date=cell_time).first()
            if schedule:
                # Cloud names look like "cloudNN"; the suffix indexes the
                # emoji/color tables.
                chosen_color = schedule.cloud.name[5:]
            else:
                non_allocated_count += 1
                chosen_color = "01"
            _day = {
                "day": j,
                "chosen_color": chosen_color,
                "emoji": "&#%s;" % emojis[int(chosen_color) - 1],
                "color": colors[int(chosen_color) - 1],
                "cell_date": cell_date,
                "cell_time": cell_time,
            }
            if schedule:
                # Pull the cloud metadata as it was when this schedule was
                # created (most recent history entry before the schedule id).
                cloud = (CloudHistory.objects(
                    __raw__={
                        "_id": {
                            "$lt": schedule.id
                        },
                        "name": schedule.cloud.name,
                    }).order_by("-_id").first())
                _day["display_description"] = cloud.description
                _day["display_owner"] = cloud.owner
                _day["display_ticket"] = cloud.ticket
            __days.append(_day)
        line["days"] = __days
        lines.append(line)
    total_hosts = len(hosts)
    total_use = Schedule.current_schedule().count()
    # BUG FIX: guard against an empty host list, which previously raised
    # ZeroDivisionError in both utilization computations.
    if total_hosts:
        utilization = 100 - (non_allocated_count * 100 //
                             (_days * total_hosts))
        utilization_daily = total_use * 100 // total_hosts
    else:
        utilization = 0
        utilization_daily = 0
    with open(os.path.join(TEMPLATES_PATH, "simple_table_emoji")) as _file:
        template = Template(_file.read())
    content = template.render(
        gentime=_gentime,
        _days=_days,
        lines=lines,
        utilization=utilization,
        utilization_daily=utilization_daily,
        total_use=total_use,
        total_hosts=total_hosts,
    )
    return content