Example #1
    def format_raw(self, shares, indent=""):
        sharedata = {}
        storage_cache = cache_storage_data()

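        # Sum up disk and machine counts per share name; the backing server and
        # mount point are looked up only once per distinct share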
        for dbshare in shares:
            if dbshare.name not in sharedata:
                share_info = find_storage_data(dbshare, storage_cache)

                sharedata[dbshare.name] = {"disks": 0,
                                           "machines": 0,
                                           "server": share_info.server,
                                           "mount": share_info.mount}
            sharedata[dbshare.name]["disks"] += dbshare.disk_count
            sharedata[dbshare.name]["machines"] += dbshare.machine_count

        details = []

        for name in sorted(sharedata.keys()):
            rec = sharedata[name]

            details.append(indent + "NAS Disk Share: %s" % name)
            details.append(indent + "  Server: %s" % rec["server"])
            details.append(indent + "  Mountpoint: %s" % rec["mount"])
            details.append(indent + "  Disk Count: %d" % rec["disks"])
            details.append(indent + "  Machine Count: %d" % rec["machines"])
        return "\n".join(details)
Example #2
    def preload_virt_disk_info(self, session, cache):
        share_info = cache_storage_data()

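        # Pin all shares in the caller's cache and attach their backend storage
        # details; joinedload() pulls in the resource holder in the same query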
        q = session.query(Share)
        q = q.options(joinedload('holder'))
        for res in q:
            res.populate_share_info(share_info)
            cache[res.id] = res

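        # Filesystems are only pinned in the cache; they carry no share info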
        q = session.query(Filesystem)
        q = q.options(joinedload('holder'))
        for res in q:
            cache[res.id] = res
Example #3
    def render(self, session, share, hostname, resourcegroup, cluster, all,
               **arguments):
        q = session.query(Share)
        if share:
            q = q.filter_by(name=share)

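        # disk_count and machine_count are deferred columns; undefer() makes the
        # query load them together with the Share rows instead of lazily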
        q = q.options(undefer(Share.disk_count),
                      undefer(Share.machine_count))

        if hostname or cluster or resourcegroup:
            who = get_resource_holder(session, hostname, cluster, resourcegroup)
            q = q.filter_by(holder=who)

        shares = q.all()

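        # Attach the backend storage details (server, mount point) from the
        # storage cache to every share before it is rendered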
        share_info = cache_storage_data()
        for dbshare in shares:
            dbshare.populate_share_info(share_info)

        return ResourceList(shares)
Example #4
    def render(self, session, logger,
               services, personalities, machines, clusters, hosts,
               locations, resources, switches, all,
               **arguments):
        success = []
        failed = []
        written = 0

        # Caches for keeping preloaded data pinned in memory, since the SQLA
        # session holds a weak reference only
        resource_by_id = {}
        resholder_by_id = {}
        service_instances = None
        racks = None

        # Object caches that are accessed directly
        disks_by_machine = defaultdict(list)
        interfaces_by_machine = defaultdict(list)
        interfaces_by_id = {}

        if all:
            services = True
            personalities = True
            machines = True
            clusters = True
            hosts = True
            locations = True
            resources = True

        with CompileKey(logger=logger):
            logger.client_info("Loading data.")

            # When flushing clusters/hosts, the resource holder is loaded as
            # part of the query that loads those objects. But when flushing
            # resources only, we need the holder and the object it belongs to.
            if resources and not clusters:
                q = session.query(ClusterResource)
                # Using joinedload('cluster') would generate an outer join
                q = q.join(Cluster)
                q = q.options(contains_eager('cluster'))
                for resholder in q:
                    resholder_by_id[resholder.id] = resholder
            if resources and not hosts:
                q = session.query(HostResource)
                # Using joinedload('host') would generate an outer join
                q = q.join(Host)
                q = q.options(contains_eager('host'))
                for resholder in q:
                    resholder_by_id[resholder.id] = resholder

            if hosts or clusters or resources:
                # Load the most common resource types. Using
                # with_polymorphic('*') on Resource would generate a huge query,
                # so do something more targeted. More resource subclasses may be
                # added later if they become common.
                preload_classes = {
                    Filesystem: [],
                    RebootSchedule: [],
                    VirtualMachine: [joinedload('machine'),
                                     joinedload('machine.primary_name'),
                                     joinedload('machine.primary_name.fqdn')],
                    Share: [],
                }

                share_info = cache_storage_data()

                for cls, options in preload_classes.items():
                    q = session.query(cls)

                    # If only hosts or only clusters are needed, don't load
                    # resources of the other kind
                    if hosts and not clusters and not resources:
                        q = q.join(ResourceHolder)
                        q = q.options(contains_eager('holder'))
                        q = q.filter_by(holder_type='host')
                    if clusters and not hosts and not resources:
                        q = q.join(ResourceHolder)
                        q = q.filter_by(holder_type='cluster')
                        q = q.options(contains_eager('holder'))

                    if options:
                        q = q.options(*options)

                    for res in q:
                        resource_by_id[res.id] = res
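                        # Only shares implement populate_share_info(); resource
                        # types without it are simply skipped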
                        try:
                            res.populate_share_info(share_info)
                        except AttributeError:
                            pass

            if hosts or machines:
                # Polymorphic loading cannot be applied to eager-loaded
                # attributes, so load interfaces manually.
                q = session.query(Interface)
                q = q.with_polymorphic('*')
                q = q.options(lazyload("hardware_entity"))
                for iface in q:
                    interfaces_by_machine[iface.hardware_entity_id].append(iface)
                    interfaces_by_id[iface.id] = iface

                if hosts:
                    # subqueryload() and with_polymorphic() do not play nice
                    # together, so do it by hand
                    q = session.query(AddressAssignment)
                    q = q.options(joinedload("network"),
                                  joinedload("dns_records"))
                    q = q.order_by(AddressAssignment._label)
                    addrs_by_iface = defaultdict(list)
                    for addr in q:
                        addrs_by_iface[addr.interface_id].append(addr)
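                    # Pre-populate the assignments collection of every interface
                    # up front, so touching it later does not trigger a lazy load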
                    for interface_id, addrs in addrs_by_iface.items():
                        set_committed_value(interfaces_by_id[interface_id],
                                            "assignments", addrs)

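                    # Interfaces that have no address assignments at all also get
                    # an explicit committed value, so they are not queried lazily
                    # later either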
                    q = session.query(Interface.id)
                    q = q.filter(~Interface.assignments.any())
                    for id in q.all():
                        set_committed_value(interfaces_by_id[id[0]],
                                            "assignments", None)

            if hosts or services:
                q = session.query(ServiceInstance)
                q = q.options(subqueryload("service"))
                service_instances = q.all()

            if machines or clusters:
                # Most machines are in racks...
                q = session.query(Rack)
                q = q.options(subqueryload("dns_maps"),
                              subqueryload("parents"))
                racks = q.all()

            if locations:
                logger.client_info("Flushing locations.")
                for dbloc in session.query(City).all():
                    try:
                        plenary = Plenary.get_plenary(dbloc, logger=logger)
                        written += plenary.write(locked=True)
                    except Exception, e:
                        failed.append("City %s failed: %s" %
                                      dbloc, e)
                        continue

            if services:
                logger.client_info("Flushing services.")
                q = session.query(Service)
                q = q.options(subqueryload("instances"))
                for dbservice in q:
                    try:
                        plenary_info = Plenary.get_plenary(dbservice,
                                                           logger=logger)
                        written += plenary_info.write(locked=True)
                    except Exception, e:
                        failed.append("Service %s failed: %s" %
                                      (dbservice.name, e))
                        continue

                    for dbinst in dbservice.instances:
                        try:
                            plenary_info = Plenary.get_plenary(dbinst,
                                                               logger=logger)
                            written += plenary_info.write(locked=True)
                        except Exception, e:
                            failed.append("Service %s instance %s failed: %s" %
                                          (dbservice.name, dbinst.name, e))
                            continue