Example no. 1
0
async def main(_loop):
    no_extend_label = "CANNOT_EXTEND"
    extend_label = "CAN_EXTEND"

    try:
        jira = Jira(
            Config["jira_url"],
            loop=_loop,
        )
    except JiraException as ex:
        logger.error(ex)
        return 1

    tickets = await jira.get_pending_tickets()
    for ticket in tickets["issues"]:
        ticket_key = ticket.get("key").split("-")[-1]
        fields = ticket.get("fields")
        if fields:
            description = fields.get("description")
            try:
                cloud_field = description.split("\n")[1]
                cloud = cloud_field.split()[-1]
            except IndexError:
                logger.warning(
                    f"Could not retrieve cloud name from ticket {ticket_key}")
                continue

            cloud_obj = Cloud.objects(name=cloud).first()
            schedules = Schedule.current_schedule(cloud=cloud_obj)
            conflict = False
            for schedule in schedules:
                end_date = schedule.end + timedelta(weeks=2)
                available = Schedule.is_host_available(host=schedule.host.name,
                                                       start=schedule.end,
                                                       end=end_date)
                if not available:
                    conflict = True
                    await jira.add_label(ticket_key, no_extend_label)
                    logger.info(f"{cloud} labeled {no_extend_label}")
                    break

            if not conflict:
                await jira.add_label(ticket_key, extend_label)
                logger.info(f"{cloud} labeled {extend_label}")

            parent = fields.get("parent")
            if parent:
                p_ticket_key = parent.get("key").split("-")[-1]
                watchers = await jira.get_watchers(p_ticket_key)
                for watcher in watchers["watchers"]:
                    await jira.add_watcher(ticket_key, watcher["key"])
    return 0
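A minimal sketch, not part of the original example, of how this coroutine might be driven from a script entry point; it mirrors the new_event_loop/set_event_loop pattern used elsewhere in these examples:

import asyncio
import sys

if __name__ == "__main__":
    # Create a dedicated event loop and hand it to main(), which expects one.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    sys.exit(loop.run_until_complete(main(loop)))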
Example no. 2
0
def process_scheduled(_logger, month, now):
    _date = now
    if month > 0:
        _date = month_delta_past(now, month)
    start = first_day_month(_date)
    start_id = date_to_object_id(start)
    end = last_day_month(_date)
    end_id = date_to_object_id(end)
    scheduled = CloudHistory.objects(__raw__={
        "_id": {
            "$lt": end_id,
            "$gt": start_id,
        },
    }).order_by("-_id").count()
    hosts = Host.objects(__raw__={
        "_id": {
            "$lt": start_id,
        },
    }).count()
    days = 0
    scheduled_count = 0
    utilization = 0
    for date in date_span(start, end):
        days += 1
        scheduled_count += Schedule.current_schedule(date=date).count()
    if hosts and days:
        utilization = scheduled_count * 100 // (days * hosts)
    f_month = f"{start.month:02}"
    _logger.info(f"{start.year}-{f_month:<3}| "
                 f"{scheduled:>9}| "
                 f"{hosts:>8}| "
                 f"{utilization:>10}%| ")
Example no. 3
0
def env_allocation_time_exceeded(_cloud):
    now = datetime.now()
    schedule = Schedule.objects(cloud=_cloud, start__lt=now).first()
    time_delta = now - schedule.start
    if time_delta.seconds > TOLERANCE:
        return True
    return False
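One caveat, offered as an observation rather than a change to the example: timedelta.seconds holds only the sub-day remainder, so for allocations longer than a day the comparison above ignores whole days; total_seconds() covers the full span.

from datetime import timedelta

delta = timedelta(days=2, seconds=30)
assert delta.seconds == 30                      # sub-day remainder only
assert delta.total_seconds() == 2 * 86400 + 30  # full elapsed time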
Example no. 4
0
 def __init__(self, cloud):
     self.cloud = cloud
     self.report = ""
     self.hosts = Host.objects(cloud=self.cloud, validated=False)
     self.hosts = [
         host for host in self.hosts if Schedule.current_schedule(host=host)
     ]
Example no. 5
0
def add_row(host):
    lines = []
    short_host = host.name.split(".")[0]

    _schedule_obj = Schedule.current_schedule(host=host).first()

    if not _schedule_obj:
        _date_start = "∞"
        _date_end = "∞"
        total_time = "∞"
        total_time_left = "∞"
    else:
        _date_now = datetime.now()
        _date_start = _schedule_obj.start
        _date_end = _schedule_obj.end
        total_sec_left = (_date_end - _date_now).total_seconds()
        total_days = (_date_end - _date_start).days
        total_days_left = total_sec_left // 86400
        total_hours_left = ((total_sec_left / 86400) - total_days_left) * 24
        total_time = "%0d day(s)" % total_days
        total_time_left = "%0d day(s)" % total_days_left
        if total_hours_left > 1:
            total_time_left = "%s, %0d hour(s)" % (total_time_left, total_hours_left)
        _date_start = _date_start.strftime("%Y-%m-%d")
        _date_end = _date_end.strftime("%Y-%m-%d")
    _columns = [
        short_host,
        "<a href=http://mgmt-%s/ target=_blank>console</a>" % host.name,
        _date_start,
        _date_end,
        total_time,
        total_time_left,
    ]
    lines.append("| %s |\n" % " | ".join(_columns))
    return lines
Example no. 6
0
def generator(_host_file, _days, _month, _year, _gentime):
    if _host_file:
        with open(_host_file, 'r') as f:
            reader = csv.reader(f)
            hosts = list(reader)
    else:
        hosts = sorted(Host.objects(), key=lambda x: x.name)

    lines = []
    __days = []
    non_allocated_count = 0
    for i, host in enumerate(hosts):
        line = {"hostname": host.name}
        __days = []
        for j in range(1, _days + 1):
            cell_date = "%s-%.2d-%.2d 01:00" % (_year, _month, j)
            cell_time = datetime.strptime(cell_date, '%Y-%m-%d %H:%M')
            schedule = Schedule.current_schedule(host=host,
                                                 date=cell_time).first()
            if schedule:
                chosen_color = schedule.cloud.name[5:]
            else:
                non_allocated_count += 1
                chosen_color = "01"
            _day = {
                "day": j,
                "chosen_color": chosen_color,
                "color": conf["visual_colors"]["cloud%s" % chosen_color],
                "cell_date": cell_date,
                "cell_time": cell_time
            }

            if schedule:
                cloud = CloudHistory.objects(__raw__={
                    "_id": {
                        "$lt": schedule.id
                    },
                    "name": schedule.cloud.name
                }).order_by("-_id").first()
                _day["display_description"] = cloud.description
                _day["display_owner"] = cloud.owner
                _day["display_ticket"] = cloud.ticket
            __days.append(_day)

        line["days"] = __days
        lines.append(line)

    utilization = 100 - (non_allocated_count * 100 // (_days * len(hosts)))
    with open(os.path.join(TEMPLATES_PATH, "simple_table")) as _file:
        template = Template(_file.read())
    content = template.render(
        gentime=_gentime,
        _days=_days,
        lines=lines,
        utilization=utilization,
    )

    return content
Example no. 7
0
 def __init__(self, cloud, _args, _loop=None):
     self.cloud = cloud
     self.report = ""
     self.args = _args
     self.hosts = Host.objects(cloud=self.cloud, validated=False)
     self.hosts = [
         host for host in self.hosts if Schedule.current_schedule(host=host)
     ]
     self.loop = _loop if _loop else get_running_loop()
Example no. 8
0
def main(_args, _loop):
    clouds = Cloud.objects(validated=False, provisioned=True, name__ne="cloud01")
    for _cloud in clouds:
        _schedule_count = Schedule.current_schedule(cloud=_cloud).count()
        if _schedule_count and _cloud.wipe:
            validator = Validator(_cloud, _args, _loop=_loop)
            try:
                _loop.run_until_complete(validator.validate_env())
            except Exception as ex:
                logger.debug(ex)
                logger.info("Failed validation for %s" % _cloud.name)
Example no. 9
0
 def env_allocation_time_exceeded(self):
     now = datetime.now()
     schedule = Schedule.objects(
         cloud=self.cloud, start__lt=now, end__gt=now
     ).first()
     time_delta = now - schedule.start
     if time_delta.seconds // 60 > Config["validation_grace_period"]:
         return True
     logger.warning(
         "You're still within the configurable validation grace period. Skipping validation for %s."
         % self.cloud.name
     )
     return False
Example no. 10
0
def available(search):
    models = search.data["model"]

    if models:
        query = None
        for model in models:
            if query:
                query = query | Q(model=model.upper())
            else:
                query = Q(model=model.upper())

        hosts = Host.objects.filter(query)
    else:
        hosts = Host.objects().all()

    available_hosts = []
    start = datetime.combine(search.data["start"], time(hour=22))
    end = datetime.combine(search.data["end"], time(hour=22))

    if hosts:
        for host in hosts:
            if Schedule.is_host_available(host=host["name"],
                                          start=start,
                                          end=end):
                current = False
                if Schedule.current_schedule(host=host):
                    current = True
                host_dict = {
                    "name": host.name,
                    "cloud": host.cloud.name,
                    "model": host.model,
                    "current": current
                }
                available_hosts.append(host_dict)

    return jsonify(available_hosts)
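Design note, an assumption rather than project code: the incremental OR-chaining of Q objects above can be collapsed with functools.reduce. Plain sets stand in for Q filters here so the sketch runs on its own; with mongoengine it would read reduce(or_, (Q(model=m.upper()) for m in models)).

from functools import reduce
from operator import or_

filters = [{"r640"}, {"r740"}, {"r930"}]   # stand-ins for Q(model=...) objects
combined = reduce(or_, filters)
assert combined == {"r640", "r740", "r930"}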
Example no. 11
0
def generator(_host_file, _days, _month, _year, _gentime):
    if _host_file:
        with open(_host_file, 'r') as f:
            reader = csv.reader(f)
            data = list(reader)
    else:
        data = Host.objects()

    lines = []
    __days = []
    for i, host in enumerate(data):
        line = {"hostname": host.name}
        __days = []
        for j in range(1, _days + 1):
            cell_date = "%s-%.2d-%.2d 01:00" % (_year, _month, j)
            cell_time = datetime.strptime(cell_date, '%Y-%m-%d %H:%M')
            schedule = Schedule.current_schedule(host=host,
                                                 date=cell_time).first()
            if schedule:
                chosen_color = schedule.cloud.name[5:]
            else:
                chosen_color = "01"
            _day = {
                "day": j,
                "chosen_color": chosen_color,
                "color": conf["visual_colors"]["cloud%s" % chosen_color],
                "cell_date": cell_date,
                "cell_time": cell_time
            }

            if schedule:
                _day["display_description"] = schedule.cloud.description
                _day["display_owner"] = schedule.cloud.owner
                _day["display_ticket"] = schedule.cloud.ticket
            __days.append(_day)

        line["days"] = __days
        lines.append(line)

    with open(os.path.join(TEMPLATES_PATH, "simple_table")) as _file:
        template = Template(_file.read())
    content = template.render(
        gentime=_gentime,
        _days=_days,
        lines=lines,
    )

    return content
Example no. 12
0
def report_detailed(_logger, _start, _end):
    start = _start.replace(hour=21, minute=59, second=0)
    start_defer = start - timedelta(weeks=1)
    start_defer_id = date_to_object_id(start_defer)
    end = _end.replace(hour=22, minute=1, second=0)
    end_id = date_to_object_id(end)
    cloud_history = CloudHistory.objects(__raw__={
        "_id": {
            "$lt": end_id,
            "$gt": start_defer_id,
        },
    }).order_by("-_id")

    headers = [
        "Owner",
        "Ticket",
        "Cloud",
        "Description",
        "Systems",
        "Scheduled",
        "Duration",
    ]
    _logger.info(f"{headers[0]:<9}| "
                 f"{headers[1]:>9}| "
                 f"{headers[2]:>8}| "
                 f"{headers[3]:>10}| "
                 f"{headers[4]:>5}| "
                 f"{headers[5]:>10}| "
                 f"{headers[6]:>5}| ")

    for cloud in cloud_history:
        cloud_ref = Cloud.objects(name=cloud.name).first()
        schedule = Schedule.objects(
            Q(end__lt=end) & Q(start__gt=start)
            & Q(cloud=cloud_ref)).order_by("-_id")
        if schedule:
            delta = schedule[0].end - schedule[0].start
            description = cloud.description[:len(headers[3])]
            _logger.info(f"{cloud.owner:<9}| "
                         f"{cloud.ticket:>9}| "
                         f"{cloud.name:>8}| "
                         f"{description:>11}| "
                         f"{schedule.count():>7}| "
                         f"{str(schedule[0].start)[:10]:>9}| "
                         f"{delta.days:>8}| ")
Example no. 13
0
def available(search):
    models = search.data['model']

    if models:
        query = None
        for model in models:
            if query:
                query = query | Q(model=model.upper())
            else:
                query = Q(model=model.upper())

        hosts = Host.objects.filter(query)
    else:
        hosts = Host.objects().all()

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    foreman = Foreman(
        conf["foreman_api_url"],
        conf["foreman_username"],
        conf["foreman_password"],
        loop=loop,
    )
    broken_hosts = loop.run_until_complete(foreman.get_broken_hosts())

    available_hosts = []
    start = datetime.combine(search.data['start'], time.min)
    end = datetime.combine(search.data['end'], time.min)

    if hosts:
        for host in hosts:
            if Schedule.is_host_available(
                    host=host["name"], start=start,
                    end=end) and not broken_hosts.get(host["name"], False):
                host_dict = {"name": host.name, "model": host.model}
                available_hosts.append(host_dict)

    return jsonify(available_hosts)
Example no. 14
0
def render_vlans(markdown):
    lines = []
    vlans = Vlan.objects().all()
    for vlan in vlans:
        cloud_obj = Cloud.objects(vlan=vlan).first()
        vlan_id = vlan.vlan_id
        ip_range = vlan.ip_range
        netmask = vlan.netmask
        gateway = vlan.gateway
        ip_free = vlan.ip_free
        cloud_current_count = Schedule.current_schedule(
            cloud=cloud_obj).count()
        if cloud_obj and cloud_current_count > 0:
            owner = cloud_obj.owner
            ticket = cloud_obj.ticket
            cloud_name = cloud_obj.name
        else:
            owner = "nobody"
            ticket = ""
            cloud_name = ""

        columns = [
            vlan_id,
            ip_range.strip(","),
            netmask,
            gateway,
            ip_free,
            owner,
            ticket,
            cloud_name,
        ]

        lines.append(columns)

    for line in sorted(lines, key=lambda _line: _line[1]):
        entry = "| %s |\n" % " | ".join([str(col) for col in line])
        markdown.write(entry)
Example no. 15
0
    def GET(self, **data):
        args = {}
        _cloud = None
        _host = None
        if "cloudonly" in data:
            _cloud = Cloud.objects(cloud=data["cloudonly"])
            if not _cloud:
                cherrypy.response.status = "404 Not Found"
                return json.dumps(
                    {"result": "Cloud %s Not Found" % data["cloudonly"]})
            else:
                return _cloud.to_json()

        if self.name == "host":
            if "id" in data:
                _host = Host.objects(id=data["id"]).first()
            elif "name" in data:
                _host = Host.objects(name=data["name"]).first()
            elif "cloud" in data:
                _cloud = Cloud.objects(name=data["cloud"]).first()
                _host = Host.objects(cloud=_cloud)
            else:
                _host = Host.objects()
            if not _host:
                return json.dumps({"result": ["Nothing to do."]})
            return _host.to_json()

        if self.name == "ccuser":
            _clouds = self.model.objects().all()
            clouds_summary = []
            for cloud in _clouds:
                count = Schedule.current_schedule(cloud=cloud).count()
                clouds_summary.append({
                    "name": cloud.name,
                    "count": count,
                    "description": cloud.description,
                    "owner": cloud.owner,
                    "ticket": cloud.ticket,
                    "ccuser": cloud.ccuser,
                    "provisioned": cloud.provisioned,
                })

            return json.dumps(clouds_summary)

        if self.name == "cloud":
            if "id" in data:
                _cloud = Cloud.objects(id=data["id"]).first()
            elif "name" in data:
                _cloud = Cloud.objects(name=data["name"]).first()
            elif "owner" in data:
                _cloud = Cloud.objects(owner=data["owner"]).first()
            if _cloud:
                return _cloud.to_json()

        if self.name == "available":

            _start = _end = datetime.datetime.now()
            if "start" in data:
                _start = datetime.datetime.strptime(data["start"],
                                                    "%Y-%m-%dT%H:%M:%S")

            if "end" in data:
                _end = datetime.datetime.strptime(data["end"],
                                                  "%Y-%m-%dT%H:%M:%S")

            available = []
            all_hosts = Host.objects().all()

            for host in all_hosts:
                if Schedule.is_host_available(host=host["name"],
                                              start=_start,
                                              end=_end):
                    available.append(host.name)
            return json.dumps(available)

        if self.name == "summary":
            _clouds = Cloud.objects().all()
            clouds_summary = []
            total_count = 0
            for cloud in _clouds:
                if cloud.name == "cloud01":
                    count = Host.objects(cloud=cloud,
                                         retired=False,
                                         broken=False).count()
                else:
                    date = datetime.datetime.now()
                    if "date" in data:
                        date = datetime.datetime.strptime(
                            data["date"], "%Y-%m-%dT%H:%M:%S")
                    count = self.model.current_schedule(cloud=cloud,
                                                        date=date).count()
                    total_count += count
                clouds_summary.append({
                    "name": cloud.name,
                    "count": count,
                    "description": cloud.description,
                    "owner": cloud.owner,
                    "ticket": cloud.ticket,
                    "ccuser": cloud.ccuser,
                    "provisioned": cloud.provisioned,
                    "validated": cloud.validated,
                })
            if "date" in data:
                host_count = Host.objects(retired=False, broken=False).count()
                for cloud in clouds_summary:
                    if cloud["name"] == "cloud01":
                        cloud["count"] = host_count - total_count

            return json.dumps(clouds_summary)

        if self.name == "qinq":
            _clouds = Cloud.objects().all()
            clouds_qinq = []
            for cloud in _clouds:
                _type = "Isolated"
                if cloud.qinq == 1:
                    _type = "Combined"
                qinq_value = f"{cloud.qinq} ({_type})"
                clouds_qinq.append({"name": cloud.name, "qinq": qinq_value})

            return json.dumps(clouds_qinq)

        if self.name == "broken":
            _hosts = self.model.objects(broken=True)
            broken = []
            for host in _hosts:
                broken.append(host.name)

            return json.dumps(broken)

        if self.name == "retired":
            hosts = [host.name for host in self.model.objects(retired=True)]
            return json.dumps(hosts)

        objs = self.model.objects(**args)
        if objs:
            return objs.to_json()
        else:
            return json.dumps({"result": ["No results."]})
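Illustrative only, inferred from the strptime patterns above: the start and end parameters for the "available" branch are expected in an ISO-like form without a timezone.

import datetime

# Parses cleanly with the "%Y-%m-%dT%H:%M:%S" format used by the handler.
datetime.datetime.strptime("2021-06-01T22:00:00", "%Y-%m-%dT%H:%M:%S")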
Example no. 16
0
    def POST(self, **data):
        # make sure post data passed in is ready to pass to mongo engine
        result, data = Schedule.prep_data(data)

        _start = None
        _end = None

        if "start" in data:
            _start = datetime.datetime.strptime(data["start"],
                                                "%Y-%m-%d %H:%M")

        if "end" in data:
            _end = datetime.datetime.strptime(data["end"], "%Y-%m-%d %H:%M")

        _host = data["host"]
        _host_obj = Host.objects(name=_host).first()

        broken_hosts = Host.objects(broken=True)
        if _host_obj in broken_hosts:
            result.append(f"Host {_host_obj.name} is in broken state")

        # Check if there were data validation errors
        if result:
            result = ["Data validation failed: %s" % ", ".join(result)]
            cherrypy.response.status = "400 Bad Request"
            return json.dumps({"result": result})

        cloud_obj = None
        if "cloud" in data:
            cloud_obj = Cloud.objects(name=data["cloud"]).first()
            if not cloud_obj:
                result.append("Provided cloud does not exist")
                cherrypy.response.status = "400 Bad Request"
                return json.dumps({"result": result})

        if "index" in data:
            data["host"] = _host_obj
            schedule = self.model.objects(index=data["index"],
                                          host=data["host"]).first()
            if schedule:
                if not _start:
                    _start = schedule.start
                if not _end:
                    _end = schedule.end
                if not cloud_obj:
                    cloud_obj = schedule.cloud
                if Schedule.is_host_available(host=_host,
                                              start=_start,
                                              end=_end,
                                              exclude=schedule.index):
                    data["cloud"] = cloud_obj
                    notification_obj = Notification.objects(
                        cloud=cloud_obj, ticket=cloud_obj.ticket).first()
                    if notification_obj:
                        notification_obj.update(
                            one_day=False,
                            three_days=False,
                            five_days=False,
                            seven_days=False,
                        )
                    schedule.update(**data)
                    result.append("Updated %s %s" %
                                  (self.name, schedule.index))
                else:
                    result.append(
                        "Host is not available during that time frame")
        else:
            try:
                if Schedule.is_host_available(host=_host,
                                              start=_start,
                                              end=_end):

                    if (self.model.current_schedule(cloud=cloud_obj)
                            and cloud_obj.validated):
                        if not cloud_obj.wipe:
                            _host_obj.update(validated=True)
                        notification_obj = Notification.objects(
                            cloud=cloud_obj, ticket=cloud_obj.ticket).first()
                        if notification_obj:
                            notification_obj.update(success=False)

                    schedule = Schedule()
                    data["cloud"] = cloud_obj
                    schedule.insert_schedule(**data)
                    cherrypy.response.status = "201 Resource Created"
                    result.append("Added schedule for %s on %s" %
                                  (data["host"], cloud_obj.name))
                else:
                    result.append(
                        "Host is not available during that time frame")

            except Exception as e:
                # TODO: make sure when this is thrown the output
                #       points back to here and gives the end user
                #       enough information to fix the issue
                cherrypy.response.status = "500 Internal Server Error"
                result.append("Error: %s" % e)
        return json.dumps({"result": result})
Example no. 17
0
    def POST(self, **data):
        # handle force

        force = data.get("force", False) == "True"
        if "force" in data:
            del data["force"]

        # make sure post data passed in is ready to pass to mongo engine
        result, obj_data = self.model.prep_data(data)

        # Check if there were data validation errors
        if result:
            result = ["Data validation failed: %s" % ", ".join(result)]
            cherrypy.response.status = "400 Bad Request"
        else:
            # check if object already exists
            obj_name = data["name"]
            obj = self._get_obj(obj_name)
            if obj and not force:
                result.append("%s %s already exists." %
                              (self.name.capitalize(), obj_name))
                cherrypy.response.status = "409 Conflict"
            else:
                # Create/update Operation
                try:
                    # if force and found object do an update
                    if force and obj:
                        schedule_count = 0
                        if self.name == "cloud":
                            if obj.last_redefined:
                                cloud_reservation_lock = int(
                                    conf["cloud_reservation_lock"])
                                lock_release = obj.last_redefined + datetime.timedelta(
                                    hours=cloud_reservation_lock)
                                if lock_release > datetime.datetime.now():
                                    time_left = lock_release - datetime.datetime.now(
                                    )
                                    hours = time_left.total_seconds() // 3600
                                    minutes = (time_left.total_seconds() %
                                               3600) // 60
                                    cloud_string = "%s still has %dhr %dmin remaining on a pre-schedule reservation lock" % (
                                        obj.name,
                                        hours,
                                        minutes,
                                    )
                                    result.append(cloud_string)
                                    cherrypy.response.status = "400 Bad Request"
                                    return json.dumps({"result": result})

                            schedule_count = Schedule.objects(
                                cloud=obj,
                                start__gte=datetime.datetime.now()).count()
                            notification_obj = Notification.objects(
                                cloud=obj, ticket=data["ticket"]).first()
                            if not notification_obj:
                                Notification(cloud=obj,
                                             ticket=data["ticket"]).save()

                            copy_data = data.copy()
                            history_result, history_data = CloudHistory.prep_data(
                                copy_data)
                            if history_result:
                                result.append("Data validation failed: %s" %
                                              ", ".join(history_result))
                                cherrypy.response.status = "400 Bad Request"
                            else:
                                CloudHistory(**history_data).save()

                            current_schedule = Schedule.current_schedule(
                                cloud=obj).count()
                            if current_schedule:
                                if data.get("wipe", False):
                                    if data["wipe"]:
                                        data.pop("wipe")

                        if schedule_count > 0:
                            result.append(
                                "Can't redefine cloud due to future use.")
                            cherrypy.response.status = "400 Bad Request"
                        else:
                            obj.update(**obj_data)
                            result.append("Updated %s %s" %
                                          (self.name, obj_name))
                    # otherwise create it
                    else:
                        self.model(**obj_data).save()
                        obj = self._get_obj(obj_name)
                        if self.name == "cloud":
                            notification_obj = Notification.objects(
                                cloud=obj, ticket=data["ticket"]).first()
                            if not notification_obj:
                                Notification(cloud=obj,
                                             ticket=data["ticket"]).save()
                        cherrypy.response.status = "201 Resource Created"
                        result.append("Created %s %s" % (self.name, obj_name))
                except Exception as e:
                    # TODO: make sure when this is thrown the output
                    #       points back to here and gives the end user
                    #       enough information to fix the issue
                    cherrypy.response.status = "500 Internal Server Error"
                    result.append("Error: %s" % e)
        return json.dumps({"result": result})
Example no. 18
0
def main():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    future_days = 7

    _all_clouds = Cloud.objects()
    _active_clouds = [
        _cloud for _cloud in _all_clouds
        if Schedule.current_schedule(cloud=_cloud).count() > 0
    ]
    _validated_clouds = [
        _cloud for _cloud in _active_clouds if _cloud.validated
    ]

    if not os.path.exists(os.path.join(conf["data_dir"], "report")):
        Path(os.path.join(conf["data_dir"], "report")).mkdir(parents=True,
                                                             exist_ok=True)

    for cloud in _validated_clouds:
        notification_obj = Notification.objects(cloud=cloud,
                                                ticket=cloud.ticket).first()
        current_hosts = Schedule.current_schedule(cloud=cloud)
        cloud_info = "%s: %s (%s)" % (
            cloud.name,
            current_hosts.count(),
            cloud.description,
        )
        if not notification_obj.initial:
            logger.info("=============== Initial Message")
            loop.run_until_complete(
                create_initial_message(
                    cloud.owner,
                    cloud.name,
                    cloud_info,
                    cloud.ticket,
                    cloud.ccuser,
                ))
            notification_obj.update(initial=True)

        for day in Days:
            future = datetime.now() + timedelta(days=day.value)
            future_date = "%4d-%.2d-%.2d 22:00" % (
                future.year,
                future.month,
                future.day,
            )
            future_hosts = Schedule.current_schedule(cloud=cloud,
                                                     date=future_date)

            diff = set(current_hosts) - set(future_hosts)
            if diff and future > current_hosts[0].end:
                if not notification_obj[
                        day.name.lower()] and conf["email_notify"]:
                    logger.info("=============== Additional Message")
                    host_list = [schedule.host.name for schedule in diff]
                    create_message(
                        cloud,
                        day.value,
                        cloud_info,
                        host_list,
                    )
                    kwargs = {day.name.lower(): True}
                    notification_obj.update(**kwargs)
                    break

    for cloud in _all_clouds:
        notification_obj = Notification.objects(cloud=cloud,
                                                ticket=cloud.ticket).first()
        if cloud.name != "cloud01" and cloud.owner not in ["quads", None]:
            current_hosts = Schedule.current_schedule(cloud=cloud)
            cloud_info = "%s: %s (%s)" % (
                cloud.name,
                current_hosts.count(),
                cloud.description,
            )

            if not notification_obj.pre_initial and conf["email_notify"]:
                logger.info("=============== Future Initial Message")
                create_future_initial_message(
                    cloud,
                    cloud_info,
                )
                notification_obj.update(pre_initial=True)

            for day in range(1, future_days + 1):
                if not notification_obj.pre and cloud.validated:
                    future = datetime.now() + timedelta(days=day)
                    future_date = "%4d-%.2d-%.2d 22:00" % (
                        future.year,
                        future.month,
                        future.day,
                    )
                    future_hosts = Schedule.current_schedule(cloud=cloud,
                                                             date=future_date)

                    if future_hosts.count() > 0:
                        diff = set(current_hosts) - set(future_hosts)
                        host_list = [schedule.host.name for schedule in diff]
                        if diff:
                            logger.info("=============== Additional Message")
                            create_future_message(
                                cloud,
                                day,
                                cloud_info,
                                host_list,
                            )
                            notification_obj.update(pre=True)
                            break
Example no. 19
0
        return


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Validate Quads assignments")
    parser.add_argument(
        "--debug",
        action="store_true",
        default=False,
        help="Show debugging information.",
    )
    args = parser.parse_args()

    level = logging.INFO
    if args.debug:
        level = logging.DEBUG

    logging.basicConfig(level=level, format="%(message)s")

    clouds = Cloud.objects(validated=False, provisioned=True, name__ne="cloud01")
    for _cloud in clouds:
        _schedule_count = Schedule.current_schedule(cloud=_cloud).count()
        if _schedule_count and _cloud.wipe:
            validator = Validator(_cloud)
            try:
                validator.validate_env()
            except Exception as ex:
                logger.debug(ex)
                logger.info("Failed validation for %s" % _cloud.name)
Example no. 20
0
def generator(_host_file, _days, _month, _year, _gentime):
    if _host_file:
        with open(_host_file, "r") as f:
            reader = csv.reader(f)
            hosts = list(reader)
    else:
        hosts = sorted(Host.objects(retired=False, broken=False),
                       key=lambda x: x.name)

    lines = []
    __days = []
    non_allocated_count = 0
    all_samples = []
    all_samples.extend(range(129296, 129510))
    all_samples.extend(range(128000, 128252))
    samples = random.sample(all_samples, 100)
    exclude = [129401, 129484]
    emojis = [emoji for emoji in samples if emoji not in exclude]
    colors = [random_color() for _ in range(100)]
    colors[0] = "#A9A9A9"
    for i, host in enumerate(hosts):
        line = {"hostname": host.name}
        __days = []
        for j in range(1, _days + 1):
            cell_date = "%s-%.2d-%.2d 01:00" % (_year, _month, j)
            cell_time = datetime.strptime(cell_date, "%Y-%m-%d %H:%M")
            schedule = Schedule.current_schedule(host=host,
                                                 date=cell_time).first()
            if schedule:
                chosen_color = schedule.cloud.name[5:]
            else:
                non_allocated_count += 1
                chosen_color = "01"
            _day = {
                "day": j,
                "chosen_color": chosen_color,
                "emoji": "&#%s;" % emojis[int(chosen_color) - 1],
                "color": colors[int(chosen_color) - 1],
                "cell_date": cell_date,
                "cell_time": cell_time,
            }

            if schedule:
                cloud = (CloudHistory.objects(
                    __raw__={
                        "_id": {
                            "$lt": schedule.id
                        },
                        "name": schedule.cloud.name,
                    }).order_by("-_id").first())
                _day["display_description"] = cloud.description
                _day["display_owner"] = cloud.owner
                _day["display_ticket"] = cloud.ticket
            __days.append(_day)

        line["days"] = __days
        lines.append(line)

    total_hosts = len(hosts)
    total_use = Schedule.current_schedule().count()
    utilization = 100 - (non_allocated_count * 100 // (_days * total_hosts))
    utilization_daily = total_use * 100 // total_hosts
    with open(os.path.join(TEMPLATES_PATH, "simple_table_emoji")) as _file:
        template = Template(_file.read())
    content = template.render(
        gentime=_gentime,
        _days=_days,
        lines=lines,
        utilization=utilization,
        utilization_daily=utilization_daily,
        total_use=total_use,
        total_hosts=total_hosts,
    )

    return content
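random_color is used above but not defined in this snippet; a hypothetical stand-in that returns a hex colour string of the kind the template presumably expects:

import random

def random_color():
    # Hypothetical helper: a random "#RRGGBB" string for a cell background.
    return "#%06X" % random.randint(0, 0xFFFFFF)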
Example no. 21
0
def report_available(_logger, _start, _end):
    start = _start.replace(hour=22, minute=0, second=0)
    end = _end.replace(hour=22, minute=0, second=0)
    next_sunday = start + timedelta(days=(6 - start.weekday()))

    hosts = Host.objects()

    _logger.info(f"QUADS report for {start.date()} to {end.date()}:")

    days = 0
    total_allocated_month = 0
    total_hosts = len(hosts)
    for _date in date_span(start, end):
        total_allocated_month += Schedule.current_schedule(date=_date).count()
        days += 1
    utilized = total_allocated_month * 100 // (total_hosts * days)
    _logger.info(f"Percentage Utilized: {utilized}%")

    schedules = Schedule.objects(build_start__ne=None, build_end__ne=None)
    total = timedelta()
    for schedule in schedules:
        total += schedule.build_end - schedule.build_start
    if schedules:
        average_build = total / len(schedules)
        _logger.info(f"Average build delta: {average_build}")

    hosts_summary = {}
    for host in hosts:
        host_type = host.name.split(".")[0].split("-")[-1]
        if not hosts_summary.get(host_type):
            hosts_summary[host_type] = []
        hosts_summary[host_type].append(host)

    headers = [
        "Server Type", "Total", "Free", "Scheduled", "2 weeks", "4 weeks"
    ]
    _logger.info(f"{headers[0]:<12}| "
                 f"{headers[1]:>5}| "
                 f"{headers[2]:>5}| "
                 f"{headers[3]:>9}| "
                 f"{headers[4]:>7}| "
                 f"{headers[5]:>7}")
    for host_type, _hosts in hosts_summary.items():
        scheduled_count = 0
        two_weeks_availability_count = 0
        four_weeks_availability_count = 0
        for host in _hosts:
            schedule = Schedule.current_schedule(host=host)
            if schedule:
                scheduled_count += 1

            two_weeks_availability = Schedule.is_host_available(
                host=host.name,
                start=next_sunday,
                end=next_sunday + timedelta(weeks=2))
            if two_weeks_availability:
                two_weeks_availability_count += 1

            four_weeks_availability = Schedule.is_host_available(
                host=host.name,
                start=next_sunday,
                end=next_sunday + timedelta(weeks=4))
            if four_weeks_availability:
                four_weeks_availability_count += 1

        free = len(_hosts) - scheduled_count
        schedule_percent = scheduled_count * 100 // len(_hosts)
        _logger.info(f"{host_type:<12}| "
                     f"{len(_hosts):>5}| "
                     f"{free:>5}| "
                     f"{schedule_percent:>8}%| "
                     f"{two_weeks_availability_count:>7}| "
                     f"{four_weeks_availability_count:>7}")
Example no. 22
0
async def move_and_rebuild(host,
                           new_cloud,
                           semaphore,
                           rebuild=False,
                           loop=None):
    build_start = datetime.now()
    logger.debug("Moving and rebuilding host: %s" % host)

    untouchable_hosts = Config["untouchable_hosts"]
    logger.debug("Untouchable hosts: %s" % untouchable_hosts)
    _host_obj = Host.objects(name=host).first()

    if host in untouchable_hosts:
        logger.error("No way...")
        return False

    _target_cloud = Cloud.objects(name=new_cloud).first()

    ipmi_new_pass = (f"{Config['infra_location']}@{_target_cloud.ticket}"
                     if _target_cloud.ticket else Config["ipmi_password"])

    ipmi_set_pass = [
        "user",
        "set",
        "password",
        str(Config["ipmi_cloud_username_id"]),
        ipmi_new_pass,
    ]

    new_semaphore = asyncio.Semaphore(20)
    await execute_ipmi(host, arguments=ipmi_set_pass, semaphore=new_semaphore)

    ipmi_set_operator = [
        "user", "priv",
        str(Config["ipmi_cloud_username_id"]), "0x4"
    ]
    await execute_ipmi(host,
                       arguments=ipmi_set_operator,
                       semaphore=new_semaphore)

    badfish = None
    if rebuild and _target_cloud.name != _host_obj.default_cloud.name:
        if Config.pdu_management:
            # TODO: pdu management
            pass

        try:
            badfish = await badfish_factory(
                "mgmt-%s" % host,
                Config["ipmi_username"],
                Config["ipmi_password"],
                propagate=True,
            )
        except BadfishException:
            logger.error(
                f"Could not initialize Badfish. Verify ipmi credentials for mgmt-{host}."
            )
            return False

        if is_supported(host):
            try:
                interfaces_path = os.path.join(
                    os.path.dirname(__file__),
                    "../../conf/idrac_interfaces.yml")
                await badfish.change_boot("director", interfaces_path)

                # wait 10 minutes for the boot order job to complete
                await asyncio.sleep(600)
            except BadfishException:
                logger.error(
                    f"Could not set boot order via Badfish for mgmt-{host}.")
                return False

        try:
            await badfish.set_power_state("on")
        except BadfishException:
            logger.error(f"Failed to power on {host}")
            return False
        foreman_results = []
        params = [
            {
                "name": "operatingsystems",
                "value": Config["foreman_default_os"],
                "identifier": "title",
            },
            {
                "name": "ptables",
                "value": Config["foreman_default_ptable"]
            },
            {
                "name": "media",
                "value": Config["foreman_default_medium"]
            },
        ]

        foreman = Foreman(
            Config["foreman_api_url"],
            Config["foreman_username"],
            Config["foreman_password"],
            semaphore=semaphore,
            loop=loop,
        )

        set_result = await foreman.set_host_parameter(host, "overcloud",
                                                      "true")
        foreman_results.append(set_result)

        put_result = await foreman.put_parameter(host, "build", 1)
        foreman_results.append(put_result)

        put_param_result = await foreman.put_parameters_by_name(host, params)
        foreman_results.append(put_param_result)

        owner_id = await foreman.get_user_id(new_cloud)
        host_id = await foreman.get_host_id(host)
        put_result = await foreman.put_element("hosts", host_id, "owner_id",
                                               owner_id)
        foreman_results.append(put_result)

        for result in foreman_results:
            if isinstance(result, Exception) or not result:
                logger.error(
                    "There was something wrong setting Foreman host parameters."
                )
                return False
        if is_supported(host):
            try:
                await badfish.boot_to_type(
                    "foreman",
                    os.path.join(os.path.dirname(__file__),
                                 "../../conf/idrac_interfaces.yml"),
                )
                await badfish.reboot_server(graceful=False)
            except BadfishException:
                logger.error(f"Error setting PXE boot via Badfish on {host}.")
                await badfish.reboot_server(graceful=False)
                return False
        else:
            try:
                asyncio.run_coroutine_threadsafe(
                    badfish.unmount_virtual_media(),
                    loop,
                )
            except BadfishException:
                logger.warning(
                    f"Could not unmount virtual media for mgmt-{host}.")

            try:
                ipmi_pxe_persistent = [
                    "chassis",
                    "bootdev",
                    "pxe",
                    "options=persistent",
                ]
                await execute_ipmi(host,
                                   arguments=ipmi_pxe_persistent,
                                   semaphore=new_semaphore)
                await ipmi_reset(host, new_semaphore)
            except Exception as ex:
                logger.debug(ex)
                logger.error(
                    f"There was something wrong setting PXE flag or resetting IPMI on {host}."
                )

    if _target_cloud.name == _host_obj.default_cloud.name:
        if not badfish:
            try:
                badfish = await badfish_factory(
                    "mgmt-%s" % host,
                    Config["ipmi_username"],
                    Config["ipmi_password"],
                    propagate=True,
                )
            except BadfishException:
                logger.error(
                    f"Could not initialize Badfish. Verify ipmi credentials for mgmt-{host}."
                )
                return False

        await badfish.set_power_state("off")
        source_cloud_schedule = Schedule.current_schedule(
            cloud=_host_obj.cloud.name)
        if not source_cloud_schedule:
            _old_cloud_obj = Cloud.objects(name=_host_obj.cloud.name).first()
            _old_cloud_obj.update(vlan=None)

    schedule = Schedule.current_schedule(cloud=_target_cloud,
                                         host=_host_obj).first()
    if schedule:
        schedule.update(build_start=build_start, build_end=datetime.now())
        schedule.save()

    logger.debug("Updating host: %s")
    _host_obj.update(cloud=_target_cloud,
                     build=False,
                     last_build=datetime.now(),
                     validated=False)
    return True
Example no. 23
0
def main():
    loop = asyncio.get_event_loop()

    foreman_admin = Foreman(
        conf["foreman_api_url"],
        conf["foreman_username"],
        conf["foreman_password"],
        loop=loop,
    )

    ignore = ["cloud01"]
    foreman_rbac_exclude = conf.get("foreman_rbac_exclude")
    if foreman_rbac_exclude:
        ignore.extend(foreman_rbac_exclude.split("|"))
    clouds = Cloud.objects()
    for cloud in clouds:

        infra_pass = f"{conf['infra_location']}@{cloud.ticket}"
        loop.run_until_complete(
            foreman_admin.update_user_password(cloud.name, infra_pass)
        )

        foreman_cloud_user = Foreman(
            conf["foreman_api_url"],
            cloud.name,
            infra_pass,
            loop=loop,
        )

        if cloud.name not in ignore:
            logger.info(f"Processing {cloud.name}")

            cloud_hosts = loop.run_until_complete(foreman_cloud_user.get_all_hosts())

            user_id = loop.run_until_complete(foreman_admin.get_user_id(cloud.name))
            admin_id = loop.run_until_complete(
                foreman_admin.get_user_id(conf["foreman_username"])
            )

            current_schedule = Schedule.current_schedule(cloud=cloud)
            if current_schedule:

                logger.info(f"  Current Host Permissions:")
                for host, properties in cloud_hosts.items():
                    logger.info(f"    {host}")

                    match = [
                        schedule.host.name
                        for schedule in current_schedule
                        if schedule.host.name == host
                    ]
                    if not match:
                        _host_id = loop.run_until_complete(
                            foreman_admin.get_host_id(host)
                        )
                        loop.run_until_complete(
                            foreman_admin.put_element(
                                "hosts", _host_id, "owner_id", admin_id
                            )
                        )
                        logger.info(f"* Removed permission {host}")

                for schedule in current_schedule:
                    match = [
                        host
                        for host, _ in cloud_hosts.items()
                        if host == schedule.host.name
                    ]
                    if not match:
                        # want to run these separately to avoid ServerDisconnect
                        _host_id = loop.run_until_complete(
                            foreman_admin.get_host_id(schedule.host.name)
                        )
                        loop.run_until_complete(
                            foreman_admin.put_element(
                                "hosts", _host_id, "owner_id", user_id
                            )
                        )
                        logger.info(f"* Added permission {schedule.host.name}")
            else:
                if cloud_hosts:
                    logger.info("  No active schedule, removing pre-existing roles.")
                    for host, properties in cloud_hosts.items():
                        _host_id = loop.run_until_complete(
                            foreman_admin.get_host_id(host)
                        )
                        loop.run_until_complete(
                            foreman_admin.put_element(
                                "hosts", _host_id, "owner_id", admin_id
                            )
                        )
                        logger.info(f"* Removed permission {host}")
                else:
                    logger.info("  No active schedule nor roles assigned.")
Example no. 24
0
def print_summary():
    _summary = []
    _headers = [
        "**NAME**",
        "**SUMMARY**",
        "**OWNER**",
        "**REQUEST**",
        '<span id="status">**STATUS**</span>',
    ]
    if conf["openstack_management"]:
        _headers.append("**OSPENV**")
    if conf["openshift_management"]:
        _headers.append("**OCPINV**")
    if conf["gather_ansible_facts"]:
        _headers.append("**HWFACTS**")

    _summary.append("| %s |\n" % " | ".join(_headers))
    _summary.append("| %s |\n" %
                    " | ".join(["---" for _ in range(len(_headers))]))

    _cloud_response = requests.get(os.path.join(API_URL, "summary"))
    _cloud_summary = []
    if _cloud_response.status_code == 200:
        _cloud_summary = _cloud_response.json()

    for cloud in [cloud for cloud in _cloud_summary if cloud["count"] > 0]:
        cloud_name = cloud["name"]
        desc = "%s (%s)" % (cloud["count"], cloud["description"])
        owner = cloud["owner"]
        ticket = cloud["ticket"]
        link = "<a href=%s/%s-%s target=_blank>%s</a>" % (
            conf["ticket_url"],
            conf["ticket_queue"],
            ticket,
            ticket,
        )
        cloud_specific_tag = "%s_%s_%s" % (cloud_name, owner, ticket)

        style_tag_end = "</span>"
        if cloud["validated"] or cloud_name == "cloud01":
            style_tag_start = '<span style="color:green">'
            instack_link = os.path.join(conf["quads_url"], "cloud",
                                        "%s_instackenv.json" % cloud_name)
            instack_text = "download"
            ocpinv_link = os.path.join(conf["quads_url"], "cloud",
                                       "%s_ocpinventory.json" % cloud_name)
            ocpinv_text = "download"
            status = (
                '<span class="progress" style="margin-bottom:0px"><span role="progressbar" aria-valuenow="100" '
                'aria-valuemin="0" aria-valuemax="100" style="width:100%" class="progress-bar">100%</span></span> '
            )
        else:
            cloud_obj = Cloud.objects(name=cloud_name).first()
            scheduled_hosts = Schedule.current_schedule(
                cloud=cloud_obj).count()
            moved_hosts = Host.objects(cloud=cloud_obj).count()
            percent = moved_hosts / scheduled_hosts * 100
            style_tag_start = '<span style="color:red">'
            instack_link = "#"
            instack_text = "validating"
            ocpinv_link = "#"
            ocpinv_text = "validating"
            if percent < 15:
                classes = [
                    "progress-bar",
                    "progress-bar-striped",
                    "progress-bar-danger",
                    "active",
                ]
                status = (
                    '<span class="progress" style="margin-bottom:0px"><span role="progressbar" '
                    'aria-valuenow="100" aria-valuemin="0" aria-valuemax="100" style="width:100%%" '
                    'class="%s">%.0f%%</span></span>' %
                    (" ".join(classes), percent))
            else:
                classes = [
                    "progress-bar",
                    "progress-bar-striped",
                    "progress-bar-warning",
                    "active",
                ]
                status = (
                    '<span class="progress" style="margin-bottom:0px"><span role="progressbar" '
                    'aria-valuenow="%.0f" aria-valuemin="0" aria-valuemax="100" style="width:%.0f%%" '
                    'class="%s">%.0f%%</span></span>' %
                    (percent, percent, " ".join(classes), percent))

        _data = [
            "[%s%s%s](#%s)" %
            (style_tag_start, cloud_name, style_tag_end, cloud_name),
            desc,
            owner,
            link,
        ]

        if conf["gather_ansible_facts"]:
            factstyle_tag_end = "</span>"
            if os.path.exists(
                    os.path.join(
                        conf["ansible_facts_web_path"],
                        "ansible_facts",
                        "%s_overview.html" % cloud_specific_tag,
                    )):
                factstyle_tag_start = '<span style="color:green">'
                ansible_facts_link = os.path.join(
                    conf["quads_url"],
                    "ansible_facts",
                    "%s_overview.html" % cloud_specific_tag,
                )
            else:
                factstyle_tag_start = '<span style="color:red">'
                ansible_facts_link = os.path.join(conf["quads_url"],
                                                  "underconstruction")
            if cloud_name == "cloud01":
                _data.append("")
                _data.append("")
                _data.append(status)
                _data.append("")
            else:
                _data.append("<a href=%s target=_blank>%s%s%s</a>" %
                             (instack_link, style_tag_start, instack_text,
                              style_tag_end))
                _data.append(
                    "<a href=%s target=_blank>%s%s%s</a>" %
                    (ocpinv_link, style_tag_start, ocpinv_text, style_tag_end))
                _data.append(status)
                _data.append("<a href=%s target=_blank>%sinventory%s</a>" %
                             (ansible_facts_link, factstyle_tag_start,
                              factstyle_tag_end))
        else:
            _data.append(status)
            if cloud_name == "cloud01":
                if conf["openstack_management"]:
                    _data.append("")
                if conf["openshift_management"]:
                    _data.append("")
            else:
                if conf["openstack_management"]:
                    _data.append("<a href=%s target=_blank>%s%s%s</a>" %
                                 (instack_link, style_tag_start, instack_text,
                                  style_tag_end))
                if conf["openshift_management"]:
                    _data.append("<a href=%s target=_blank>%s%s%s</a>" %
                                 (ocpinv_link, style_tag_start, ocpinv_text,
                                  style_tag_end))

        _summary.append("| %s |\n" % " | ".join(_data))

    _hosts = Host.objects(broken=False, retired=False)
    _host_count = len(_hosts)
    _schedules = Schedule.current_schedule().count()
    _daily_percentage = _schedules * 100 // _host_count if _host_count else 0
    _summary.append(f"| Total | {_host_count} |\n")
    _summary.append("\n")
    _summary.append(f"Daily Utilization: {_daily_percentage}% \n")
    _summary.append("\n")
    _summary.append("[Unmanaged Hosts](#unmanaged)\n")
    _summary.append("\n")
    _summary.append("[Faulty Hosts](#faulty)\n")

    return _summary
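
The rows assembled above are plain Markdown table rows, with a Bootstrap-style progress bar embedded as inline HTML in the status cell. A minimal, self-contained sketch of that pattern follows; the helper names are illustrative, not part of the code above.

def progress_bar_cell(percent, classes=("progress-bar",)):
    # Render a Bootstrap-style progress bar as an inline HTML snippet,
    # mirroring the status strings built above.
    return (
        '<span class="progress" style="margin-bottom:0px">'
        '<span role="progressbar" aria-valuenow="%.0f" aria-valuemin="0" '
        'aria-valuemax="100" style="width:%.0f%%" class="%s">%.0f%%</span>'
        "</span>" % (percent, percent, " ".join(classes), percent)
    )


def markdown_row(cells):
    # Join cell strings into a single Markdown table row.
    return "| %s |\n" % " | ".join(cells)


print(markdown_row(["cloud02", "description", "owner", progress_bar_cell(42)]))
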
Example no. 25
0
async def move_and_rebuild(host,
                           new_cloud,
                           semaphore,
                           rebuild=False,
                           loop=None):
    build_start = datetime.now()
    logger.debug("Moving and rebuilding host: %s" % host)

    untouchable_hosts = conf["untouchable_hosts"]
    logger.debug("Untouchable hosts: %s" % untouchable_hosts)
    _host_obj = Host.objects(name=host).first()

    if host in untouchable_hosts:
        logger.error("No way...")
        return False

    _new_cloud_obj = Cloud.objects(name=new_cloud).first()

    ipmi_new_pass = (f"{conf['infra_location']}@{_new_cloud_obj.ticket}"
                     if _new_cloud_obj.ticket else conf["ipmi_password"])

    ipmi_set_pass = [
        "user",
        "set",
        "password",
        str(conf["ipmi_cloud_username_id"]),
        ipmi_new_pass,
    ]

    # Bound concurrent IPMI calls with a dedicated semaphore, independent of
    # the Foreman semaphore passed in by the caller.
    new_semaphore = asyncio.Semaphore(20)
    await execute_ipmi(host, arguments=ipmi_set_pass, semaphore=new_semaphore)

    ipmi_set_operator = [
        "user", "priv",
        str(conf["ipmi_cloud_username_id"]), "0x4"
    ]
    await execute_ipmi(host,
                       arguments=ipmi_set_operator,
                       semaphore=new_semaphore)

    if rebuild and _new_cloud_obj.name != _host_obj.default_cloud.name:
        if "pdu_management" in conf and conf["pdu_management"]:
            # TODO: pdu management
            pass

        if is_supported(host):
            try:
                badfish = await badfish_factory(
                    "mgmt-%s" % host,
                    conf["ipmi_username"],
                    conf["ipmi_password"],
                    propagate=True,
                )
            except BadfishException:
                logger.error(
                    f"Could not initialize Badfish. Verify ipmi credentials for mgmt-{host}."
                )
                return False
            try:
                # Await the boot-order change directly so changed_boot_order
                # holds the coroutine's boolean result (not a Future object)
                # and the reboot below only happens after the change is done.
                changed_boot_order = await badfish.change_boot(
                    "director",
                    os.path.join(os.path.dirname(__file__),
                                 "../../conf/idrac_interfaces.yml"),
                )
                if changed_boot_order:
                    await badfish.reboot_server(graceful=False)
            except BadfishException:
                logger.error(
                    f"Could not set boot order via Badfish for mgmt-{host}.")
                return False

        foreman_results = []
        params = [
            {
                "name": "operatingsystems",
                "value": conf["foreman_default_os"],
                "identifier": "title",
            },
            {
                "name": "ptables",
                "value": conf["foreman_default_ptable"]
            },
            {
                "name": "media",
                "value": conf["foreman_default_medium"]
            },
        ]

        foreman = Foreman(
            conf["foreman_api_url"],
            conf["foreman_username"],
            conf["foreman_password"],
            semaphore=semaphore,
            loop=loop,
        )

        set_result = await foreman.set_host_parameter(host, "overcloud",
                                                      "true")
        foreman_results.append(set_result)

        put_result = await foreman.put_parameter(host, "build", 1)
        foreman_results.append(put_result)

        put_param_result = await foreman.put_parameters_by_name(host, params)
        foreman_results.append(put_param_result)

        owner_id = await foreman.get_user_id(new_cloud)
        host_id = await foreman.get_host_id(host)
        put_result = await foreman.put_element("hosts", host_id, "owner_id",
                                               owner_id)
        foreman_results.append(put_result)

        for result in foreman_results:
            if isinstance(result, Exception) or not result:
                logger.error(
                    "There was something wrong setting Foreman host parameters."
                )
                return False

        # Poll the host until it answers the health check, giving up after
        # RETRIES attempts.
        healthy = False
        for _ in range(RETRIES):
            nc = Netcat(_host_obj.name)
            if nc.health_check():
                healthy = True
                nc.close()
                break
            nc.close()

        if not healthy:
            logger.error("Failed to recover host after changing boot order.")
            return False

        if is_supported(host):
            try:
                await badfish.boot_to_type(
                    "foreman",
                    os.path.join(os.path.dirname(__file__),
                                 "../../conf/idrac_interfaces.yml"),
                )
                await badfish.reboot_server(graceful=False)
            except BadfishException:
                logger.error(f"Error setting PXE boot via Badfish on {host}.")
                await badfish.reboot_server(graceful=False)
                return False
        else:
            try:
                ipmi_pxe_persistent = [
                    "chassis",
                    "bootdev",
                    "pxe",
                    "options",
                    "=",
                    "persistent",
                ]
                await execute_ipmi(host,
                                   arguments=ipmi_pxe_persistent,
                                   semaphore=new_semaphore)
                await ipmi_reset(host, new_semaphore)
            except Exception as ex:
                logger.debug(ex)
                logger.error(
                    f"There was something wrong setting PXE flag or resetting IPMI on {host}."
                )

    schedule = Schedule.current_schedule(cloud=_new_cloud_obj,
                                         host=_host_obj).first()
    if schedule:
        schedule.update(build_start=build_start, build_end=datetime.now())
        schedule.save()

    logger.debug("Updating host: %s")
    _host_obj.update(cloud=_new_cloud_obj,
                     build=False,
                     last_build=datetime.now(),
                     validated=False)
    return True
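
execute_ipmi itself is not shown in this excerpt; a minimal sketch of the semaphore-bounded subprocess pattern it is invoked with might look like the following. The command layout, flags, and credentials here are assumptions for illustration, not the project's actual implementation.

import asyncio


async def run_ipmi(host, arguments, semaphore, username="root", password="secret"):
    # Sketch only: bound concurrent ipmitool invocations with a semaphore,
    # in the spirit of the execute_ipmi(host, arguments=..., semaphore=...)
    # calls above. The "mgmt-" prefix follows the convention used elsewhere
    # in these examples; flags and credentials are illustrative.
    async with semaphore:
        proc = await asyncio.create_subprocess_exec(
            "ipmitool", "-I", "lanplus", "-H", "mgmt-%s" % host,
            "-U", username, "-P", password, *arguments,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, stderr = await proc.communicate()
        return proc.returncode == 0, stdout.decode(), stderr.decode()


async def demo():
    sem = asyncio.Semaphore(20)
    ok, out, err = await run_ipmi(
        "host01.example.com", ["chassis", "power", "status"], sem)
    print(ok, out or err)

# asyncio.run(demo())
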
Example no. 26
0
    async def post_system_test(self):
        password = f"{Config['infra_location']}@{self.cloud.ticket}"
        foreman = Foreman(
            Config["foreman_api_url"],
            self.cloud.name,
            password,
            loop=self.loop,
        )

        valid_creds = await foreman.verify_credentials()
        if not valid_creds:
            logger.error("Unable to query Foreman for cloud: %s" % self.cloud.name)
            logger.error("Verify Foreman password is correct: %s" % password)
            self.report = (
                self.report
                + "Unable to query Foreman for cloud: %s\n" % self.cloud.name
            )
            self.report = (
                self.report + "Verify Foreman password is correct: %s\n" % password
            )
            return False

        build_hosts = await foreman.get_build_hosts()

        pending = []
        schedules = Schedule.current_schedule(cloud=self.cloud)
        if schedules:
            for schedule in schedules:
                if schedule.host and schedule.host.name in build_hosts:
                    pending.append(schedule.host.name)

            if pending:
                logger.info(
                    "The following hosts are marked for build and will now be rebooted:"
                )
                self.report = (
                    self.report + "The following hosts are marked for build:\n"
                )
                for host in pending:
                    logger.info(host)
                    try:
                        nc = Netcat(host)
                        healthy = await nc.health_check()
                    except OSError:
                        healthy = False
                    if not healthy:
                        logger.warning(
                            "Host %s didn't pass the health check. "
                            "Potential provisioning in process. SKIPPING." % host
                        )
                        continue
                    badfish = None
                    try:
                        badfish = await badfish_factory(
                            "mgmt-" + host,
                            str(Config["ipmi_username"]),
                            str(Config["ipmi_password"]),
                        )
                        if is_supported(host):
                            await badfish.boot_to_type(
                                "foreman",
                                os.path.join(
                                    os.path.dirname(__file__),
                                    "../../conf/idrac_interfaces.yml",
                                ),
                            )
                        else:
                            await badfish.set_next_boot_pxe()
                        await badfish.reboot_server()
                    except BadfishException as ex:
                        logger.debug(ex)
                        if badfish:
                            logger.warning(
                                f"There was something wrong trying to boot from Foreman interface for: {host}"
                            )
                            await badfish.reboot_server()
                        else:
                            logger.error(
                                f"Could not initiate Badfish instance for: {host}"
                            )

                    self.report = self.report + "%s\n" % host
                return False

        failed = False
        for host in self.hosts:
            try:
                badfish = await badfish_factory(
                    "mgmt-" + host.name,
                    str(Config["ipmi_cloud_username"]),
                    password,
                )
                await badfish.validate_credentials()
            except BadfishException:
                logger.info(f"Could not verify badfish credentials for: {host.name}")
                failed = True

        return not failed
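
The per-host credential checks above run sequentially; since each check is an independent coroutine, they could also be gathered concurrently. A sketch of that variation, with a placeholder check_host coroutine standing in for badfish_factory plus validate_credentials:

import asyncio


async def check_host(name):
    # Placeholder for badfish_factory("mgmt-" + name, ...) followed by
    # validate_credentials(); returns (host, ok).
    await asyncio.sleep(0.1)
    return name, True


async def check_all(hosts):
    # Run all checks concurrently; treat raised exceptions as failures,
    # mirroring the isinstance(result, Exception) pattern used earlier.
    results = await asyncio.gather(*(check_host(h) for h in hosts),
                                   return_exceptions=True)
    failed = [r for r in results if isinstance(r, Exception) or not r[1]]
    return not failed

# print(asyncio.run(check_all(["host01", "host02"])))
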
Example no. 27
0
def main():
    days = [1, 3, 5, 7]
    future_days = 7

    _all_clouds = Cloud.objects()
    _active_clouds = [
        _cloud for _cloud in _all_clouds
        if Schedule.current_schedule(cloud=_cloud).count() > 0
    ]
    _validated_clouds = [
        _cloud for _cloud in _active_clouds if _cloud.validated
    ]

    # Path.mkdir with exist_ok=True already tolerates an existing directory,
    # so no explicit existence check is needed.
    Path(os.path.join(conf["data_dir"], "report")).mkdir(parents=True,
                                                         exist_ok=True)

    for cloud in _validated_clouds:
        current_hosts = Schedule.current_schedule(cloud=cloud)
        cloud_info = "%s: %s (%s)" % (cloud.name, current_hosts.count(),
                                      cloud.description)
        if not cloud.notified:
            logger.info('=============== Initial Message')
            create_initial_message(
                cloud.owner,
                cloud.name,
                cloud_info,
                cloud.ticket,
                cloud.ccuser,
            )

        for day in days:
            future = datetime.now() + timedelta(days=day)
            future_date = "%4d-%.2d-%.2d 22:00" % (future.year, future.month,
                                                   future.day)
            future_hosts = Schedule.current_schedule(cloud=cloud,
                                                     date=future_date)

            diff = set(current_hosts) - set(future_hosts)
            if diff and future < current_hosts[0].end:
                report_file = "%s-%s-%s-%s" % (cloud.name, cloud.owner, day,
                                               cloud.ticket)
                report_path = os.path.join(conf["data_dir"], "report",
                                           report_file)
                if not os.path.exists(report_path) and conf["email_notify"]:
                    logger.info('=============== Additional Message')
                    host_list = [schedule.host.name for schedule in diff]
                    create_message(
                        cloud.owner,
                        day,
                        cloud.name,
                        cloud_info,
                        cloud.ccuser,
                        host_list,
                        report_path,
                    )
                    break

    for cloud in _all_clouds:
        if cloud.name != "cloud01" and cloud.owner not in ["quads", None]:
            current_hosts = Schedule.current_schedule(cloud=cloud)
            cloud_info = "%s: %s (%s)" % (cloud.name, current_hosts.count(),
                                          cloud.description)

            report_pre_ini_file = "%s-%s-pre-initial-%s" % (
                cloud.name, cloud.owner, cloud.ticket)
            report_pre_ini_path = os.path.join(conf["data_dir"], "report",
                                               report_pre_ini_file)
            if not os.path.exists(
                    report_pre_ini_path) and conf["email_notify"]:
                logger.info('=============== Future Initial Message')
                create_future_initial_message(
                    cloud.owner,
                    cloud_info,
                    cloud.ccuser,
                    report_pre_ini_path,
                )

            report_pre_file = "%s-%s-pre-%s" % (cloud.name, cloud.owner,
                                                cloud.ticket)
            report_pre_path = os.path.join(conf["data_dir"], "report",
                                           report_pre_file)
            if not os.path.exists(report_pre_path) and cloud.validated:
                future = datetime.now() + timedelta(days=future_days)
                future_date = "%4d-%.2d-%.2d 22:00" % (
                    future.year, future.month, future.day)
                future_hosts = Schedule.current_schedule(cloud=cloud,
                                                         date=future_date)

                diff = set(current_hosts) - set(future_hosts)
                host_list = [schedule.host.name for schedule in diff]
                if diff:
                    logger.info('=============== Additional Message')
                    create_future_message(
                        cloud.owner,
                        future_days,
                        cloud.name,
                        cloud_info,
                        cloud.ccuser,
                        host_list,
                        report_pre_path,
                    )