Example #1
    def render(self, session, model, vendor, type, cpuname, cpuvendor,
               cpuspeed, cpunum, memory, disktype, diskcontroller, disksize,
               nics, nicmodel, nicvendor, comments, **arguments):
        dbvendor = Vendor.get_unique(session, vendor, compel=True)
        Model.get_unique(session, name=model, vendor=dbvendor, preclude=True)

        # Specifically not allowing new models to be added that are of
        # type aurora_node - that is only meant for the dummy aurora_model.
        allowed_types = [
            "blade", "rackmount", "workstation", "switch", "chassis",
            "virtual_machine", "nic"
        ]
        if type not in allowed_types:
            raise ArgumentError(
                "The model's machine type must be one of: %s." %
                ", ".join(allowed_types))

        dbmodel = Model(name=model,
                        vendor=dbvendor,
                        machine_type=type,
                        comments=comments)
        session.add(dbmodel)
        session.flush()

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session,
                                   name=cpuname,
                                   vendor=cpuvendor,
                                   speed=cpuspeed,
                                   compel=True)
            if nicmodel or nicvendor:
                dbnic = Model.get_unique(session,
                                         machine_type='nic',
                                         name=nicmodel,
                                         vendor=nicvendor,
                                         compel=True)
            else:
                dbnic = Model.default_nic_model(session)
            dbmachine_specs = MachineSpecs(model=dbmodel,
                                           cpu=dbcpu,
                                           cpu_quantity=cpunum,
                                           memory=memory,
                                           disk_type=disktype,
                                           controller_type=diskcontroller,
                                           disk_capacity=disksize,
                                           nic_count=nics,
                                           nic_model=dbnic)
            session.add(dbmachine_specs)
        return
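
Every example in this list leans on the same get_unique() contract: compel=True makes the lookup fail loudly when the object is missing, while preclude=True fails when the object already exists (used just before creating a new row). The sketch below is not the Aquilon implementation; it is a dictionary-backed stand-in, written only to make the two flags concrete.

class NotFoundException(Exception):
    pass

class ArgumentError(Exception):
    pass

def get_unique(table, name, compel=False, preclude=False):
    """Toy lookup with the compel/preclude semantics used above."""
    obj = table.get(name)
    if obj is None and compel:
        # compel: the caller insists the object must already exist.
        raise NotFoundException("%s not found." % name)
    if obj is not None and preclude:
        # preclude: the caller is about to create it, so it must not exist yet.
        raise ArgumentError("%s already exists." % name)
    return obj

vendors = {"hp": object()}
assert get_unique(vendors, "hp", compel=True) is not None
assert get_unique(vendors, "ibm") is None       # missing, but tolerated
try:
    get_unique(vendors, "hp", preclude=True)    # would shadow an existing row
except ArgumentError:
    pass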
Example #2
    def render(self, session, logger, chassis, model, rack, ip, vendor, serial,
               comments, **arguments):
        dbchassis = Chassis.get_unique(session, chassis, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbchassis)

        if vendor and not model:
            model = dbchassis.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       model_type=ChassisType.Chassis,
                                       compel=True)
            dbchassis.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbchassis.location = dblocation

        if serial is not None:
            dbchassis.serial_no = serial

        if ip:
            update_primary_ip(session, logger, dbchassis, ip)

        if comments is not None:
            dbchassis.comments = comments

        session.flush()

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbchassis, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update chassis in DSDB")

        return
Example #3
    def render(self, session, logger, switch, label, model, rack, type, ip,
               interface, mac, vendor, serial, comments, **arguments):
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='switch', compel=True)

        dblocation = get_location(session, rack=rack)

        dbdns_rec, newly_created = grab_address(session, switch, ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        if not label:
            label = dbdns_rec.fqdn.name
            try:
                Switch.check_label(label)
            except ArgumentError:
                raise ArgumentError("Could not deduce a valid hardware label "
                                    "from the switch name.  Please specify "
                                    "--label.")

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        dbswitch = Switch(label=label, switch_type=type, location=dblocation,
                          model=dbmodel, serial_no=serial, comments=comments)
        session.add(dbswitch)
        dbswitch.primary_name = dbdns_rec

        # FIXME: get default name from the model
        iftype = "oa"
        if not interface:
            interface = "xge"
            ifcomments = "Created automatically by add_switch"
        else:
            ifcomments = None
            if interface.lower().startswith("lo"):
                iftype = "loopback"

        dbinterface = get_or_create_interface(session, dbswitch,
                                              name=interface, mac=mac,
                                              interface_type=iftype,
                                              comments=ifcomments)
        dbnetwork = get_net_id_from_ip(session, ip)
        # TODO: should we call check_ip_restrictions() here?
        assign_address(dbinterface, ip, dbnetwork)

        session.flush()

        plenary = PlenarySwitch(dbswitch, logger=logger)
        with plenary.get_write_key() as key:
            plenary.stash()
            try:
                plenary.write(locked=True)
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbswitch, None)
                dsdb_runner.commit_or_rollback("Could not add switch to DSDB")
            except:
                plenary.restore_stash()
                raise

        return
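
When --label is omitted, the command derives the hardware label from the leading component of the DNS name and validates it, asking for an explicit --label if that fails. A self-contained sketch of that step follows; the validation rule and helper names are invented for illustration and merely stand in for Switch.check_label() / Chassis.check_label().

import re

class ArgumentError(Exception):
    pass

def check_label(label):
    # Assumed rule for this sketch only: a lowercase alphanumeric token.
    if not re.match(r'^[a-z][a-z0-9]*$', label):
        raise ArgumentError("Illegal hardware label '%s'." % label)

def deduce_label(dns_name):
    label = dns_name.split('.', 1)[0]       # leftmost DNS component
    try:
        check_label(label)
    except ArgumentError:
        raise ArgumentError("Could not deduce a valid hardware label from "
                            "the name.  Please specify --label.")
    return label

assert deduce_label('np997gd1r.example.com') == 'np997gd1r'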
Example #4
 def render(self, session, machine, model, vendor, machine_type, chassis,
            slot, **arguments):
     q = session.query(Machine)
     if machine:
         # TODO: This command still mixes search/show facilities.
         # For now, give an error if machine name not found, but
         # also allow the command to be used to check if the machine has
         # the requested attributes (via the standard query filters).
         # In the future, this should be clearly separated as 'show machine'
         # and 'search machine'.
         machine = AqStr.normalize(machine)
         Machine.check_label(machine)
         Machine.get_unique(session, machine, compel=True)
         q = q.filter_by(label=machine)
     dblocation = get_location(session, **arguments)
     if dblocation:
         q = q.filter_by(location=dblocation)
     if chassis:
         dbchassis = Chassis.get_unique(session, chassis, compel=True)
         q = q.join('chassis_slot')
         q = q.filter_by(chassis=dbchassis)
         q = q.reset_joinpoint()
     if slot is not None:
         q = q.join('chassis_slot')
         q = q.filter_by(slot_number=slot)
         q = q.reset_joinpoint()
     if model or vendor or machine_type:
         subq = Model.get_matching_query(session,
                                         name=model,
                                         vendor=vendor,
                                         machine_type=machine_type,
                                         compel=True)
         q = q.filter(Machine.model_id.in_(subq))
     return q.order_by(Machine.label).all()
Example #5
def test_create_machines_for_test_interface():
    np = Building.get_unique(sess, 'np')
    assert isinstance(np, Building), 'no building in %s' % func_name()

    hp = Vendor.get_unique(sess, 'hp')
    assert isinstance(hp, Vendor), 'no vendor in %s' % func_name()

    am = Model.get_unique(sess, name='bl45p', vendor=hp)
    assert isinstance(am, Model), 'no model in %s' % func_name()

    cpu = sess.query(Cpu).first()
    assert isinstance(cpu, Cpu), 'no cpu in %s' % func_name()

    for i in xrange(NUM_MACHINES):
        machine = Machine(label='%s%s' % (MACHINE_NAME_PREFIX, i),
                          location=np,
                          model=am,
                          cpu=cpu,
                          cpu_quantity=2,
                          memory=32768)
        create(sess, machine)

    machines = sess.query(Machine).filter(
        Machine.label.like(MACHINE_NAME_PREFIX + '%')).all()

    eq_(len(machines), NUM_MACHINES)
Example #6
 def render(self, session, machine, model, vendor, machine_type, chassis,
            slot, **arguments):
     q = session.query(Machine)
     if machine:
         # TODO: This command still mixes search/show facilities.
         # For now, give an error if machine name not found, but
         # also allow the command to be used to check if the machine has
         # the requested attributes (via the standard query filters).
         # In the future, this should be clearly separated as 'show machine'
         # and 'search machine'.
         machine = AqStr.normalize(machine)
         Machine.check_label(machine)
         Machine.get_unique(session, machine, compel=True)
         q = q.filter_by(label=machine)
     dblocation = get_location(session, **arguments)
     if dblocation:
         q = q.filter_by(location=dblocation)
     if chassis:
         dbchassis = Chassis.get_unique(session, chassis, compel=True)
         q = q.join('chassis_slot')
         q = q.filter_by(chassis=dbchassis)
         q = q.reset_joinpoint()
     if slot is not None:
         q = q.join('chassis_slot')
         q = q.filter_by(slot_number=slot)
         q = q.reset_joinpoint()
     if model or vendor or machine_type:
         subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                         model_type=machine_type,
                                         compel=True)
         q = q.filter(Machine.model_id.in_(subq))
     return q.order_by(Machine.label).all()
Example #7
    def render(self, session, logger, model, vendor, **arguments):
        dbvendor = Vendor.get_unique(session, vendor, compel=True)
        dbmodel = Model.get_unique(session, name=model, vendor=dbvendor,
                                   compel=True)

        if dbmodel.model_type.isNic():
            q = session.query(Interface)
            q = q.filter_by(model=dbmodel)
            if q.first():
                raise ArgumentError("{0} is still in use and cannot be "
                                    "deleted.".format(dbmodel))
            q = session.query(MachineSpecs)
            q = q.filter_by(nic_model=dbmodel)
            if q.first():
                raise ArgumentError("{0} is still referenced by machine models and "
                                    "cannot be deleted.".format(dbmodel))
        else:
            q = session.query(HardwareEntity)
            q = q.filter_by(model=dbmodel)
            if q.first():
                raise ArgumentError("{0} is still in use and cannot be "
                                    "deleted.".format(dbmodel))

        if dbmodel.machine_specs:
            # FIXME: Log some details...
            logger.info("Before deleting model %s %s '%s', "
                        "removing machine specifications." %
                        (dbmodel.model_type, dbvendor.name, dbmodel.name))
            session.delete(dbmodel.machine_specs)
        session.delete(dbmodel)
        return
Example #8
    def render(self, session, logger, model, vendor, **arguments):
        dbvendor = Vendor.get_unique(session, vendor, compel=True)
        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=dbvendor,
                                   compel=True)

        if dbmodel.machine_type == 'nic':
            q = session.query(Interface)
            q = q.filter_by(model=dbmodel)
            if q.first():
                raise ArgumentError("{0} is still in use and cannot be "
                                    "deleted.".format(dbmodel))
            q = session.query(MachineSpecs)
            q = q.filter_by(nic_model=dbmodel)
            if q.first():
                raise ArgumentError(
                    "{0} is still referenced by machine models and "
                    "cannot be deleted.".format(dbmodel))
        else:
            q = session.query(HardwareEntity)
            q = q.filter_by(model=dbmodel)
            if q.first():
                raise ArgumentError("{0} is still in use and cannot be "
                                    "deleted.".format(dbmodel))

        if dbmodel.machine_specs:
            # FIXME: Log some details...
            logger.info("Before deleting model %s %s '%s', "
                        "removing machine specifications." %
                        (dbmodel.machine_type, dbvendor.name, dbmodel.name))
            session.delete(dbmodel.machine_specs)
        session.delete(dbmodel)
        return
Example #9
    def render(self, session, logger, chassis, model, rack, ip, vendor, serial,
               comments, **arguments):
        dbchassis = Chassis.get_unique(session, chassis, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbchassis)

        if vendor and not model:
            model = dbchassis.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       machine_type='chassis', compel=True)
            dbchassis.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbchassis.location = dblocation

        if serial is not None:
            dbchassis.serial_no = serial

        if ip:
            update_primary_ip(session, dbchassis, ip)

        if comments is not None:
            dbchassis.comments = comments

        session.add(dbchassis)
        session.flush()

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbchassis, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update chassis in DSDB")

        return
Example #10
def search_hardware_entity_query(session,
                                 hardware_type=HardwareEntity,
                                 subquery=False,
                                 model=None,
                                 vendor=None,
                                 machine_type=None,
                                 exact_location=False,
                                 mac=None,
                                 pg=None,
                                 serial=None,
                                 interface_model=None,
                                 interface_vendor=None,
                                 **kwargs):
    q = session.query(hardware_type)
    if hardware_type is HardwareEntity:
        q = q.with_polymorphic('*')

    # The ORM deduplicates the result if we query full objects, but not if we
    # query just the label
    q = q.distinct()

    dblocation = get_location(session, **kwargs)
    if dblocation:
        if exact_location:
            q = q.filter_by(location=dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(HardwareEntity.location_id.in_(childids))
    if model or vendor or machine_type:
        subq = Model.get_matching_query(session,
                                        name=model,
                                        vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(HardwareEntity.model_id.in_(subq))
    if mac or pg or interface_vendor or interface_model:
        q = q.join('interfaces')
        if mac:
            q = q.filter_by(mac=mac)
        if pg:
            q = q.filter_by(port_group=pg)
        if interface_model or interface_vendor:
            # HardwareEntity also has a .model relation, so we have to be
            # explicit here
            q = q.join(Interface.model)
            if interface_model:
                q = q.filter_by(name=interface_model)
            if interface_vendor:
                a_vendor = aliased(Vendor)
                q = q.join(a_vendor)
                q = q.filter_by(name=interface_vendor)
        q = q.reset_joinpoint()
    if serial:
        q = q.filter_by(serial_no=serial)
    if not subquery:
        # Oracle does not like "ORDER BY" in a sub-select, so we have to
        # suppress it if we want to use this query as a subquery
        q = q.order_by(HardwareEntity.label)
    return q
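
The closing comment is the key to how this helper is reused by the search commands above: when subquery=True the ORDER BY is suppressed so the whole query can be dropped into an IN (...) filter, the same mechanic the function itself applies with Model.get_matching_query and HardwareEntity.model_id.in_(subq). The sketch below shows only that SQLAlchemy mechanic, using an invented two-table schema and the era's pass-the-query-to-in_() idiom; it is not Aquilon's schema or API.

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Model(Base):
    __tablename__ = 'model'
    id = Column(Integer, primary_key=True)
    name = Column(String(32))
    machine_type = Column(String(32))

class HardwareEntity(Base):
    __tablename__ = 'hardware_entity'
    id = Column(Integer, primary_key=True)
    label = Column(String(32))
    model_id = Column(Integer, ForeignKey('model.id'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add_all([Model(id=1, name='bl45p', machine_type='blade'),
                 Model(id=2, name='vm', machine_type='virtual_machine')])
session.add_all([HardwareEntity(label='np997gd1', model_id=1),
                 HardwareEntity(label='evm1', model_id=2)])
session.flush()

# The inner query selects only the id column and carries no ORDER BY, so it
# can be used directly as the argument of in_().
subq = session.query(Model.id).filter_by(machine_type='blade')
q = session.query(HardwareEntity).filter(HardwareEntity.model_id.in_(subq))
assert [hw.label for hw in q] == ['np997gd1']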
Example #11
    def render(self, session, logger, chassis, label, rack, model, vendor, ip,
               interface, mac, serial, comments, **arguments):
        dbdns_rec, newly_created = grab_address(session,
                                                chassis,
                                                ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        if not label:
            label = dbdns_rec.fqdn.name
            try:
                Chassis.check_label(label)
            except ArgumentError:
                raise ArgumentError("Could not deduce a valid hardware label "
                                    "from the chassis name.  Please specify "
                                    "--label.")

        dblocation = get_location(session, rack=rack)
        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   machine_type='chassis',
                                   compel=True)
        # FIXME: Precreate chassis slots?
        dbchassis = Chassis(label=label,
                            location=dblocation,
                            model=dbmodel,
                            serial_no=serial,
                            comments=comments)
        session.add(dbchassis)
        dbchassis.primary_name = dbdns_rec

        # FIXME: get default name from the model
        if not interface:
            interface = "oa"
            ifcomments = "Created automatically by add_chassis"
        else:
            ifcomments = None
        dbinterface = get_or_create_interface(session,
                                              dbchassis,
                                              name=interface,
                                              mac=mac,
                                              interface_type="oa",
                                              comments=ifcomments)
        if ip:
            dbnetwork = get_net_id_from_ip(session, ip)
            check_ip_restrictions(dbnetwork, ip)
            assign_address(dbinterface, ip, dbnetwork)

        session.flush()

        if ip:
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbchassis, None)
            dsdb_runner.commit_or_rollback("Could not add chassis to DSDB")
        return
Example #12
    def render(self, session, logger, network_device, label, model, type, ip,
               interface, iftype, mac, vendor, serial, comments, **arguments):
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if not dbmodel.model_type.isNetworkDeviceType():
            raise ArgumentError("This command can only be used to "
                                "add network devices.")

        dblocation = get_location(session, compel=True, **arguments)

        dbdns_rec, newly_created = grab_address(session, network_device, ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        if not label:
            label = dbdns_rec.fqdn.name
            try:
                NetworkDevice.check_label(label)
            except ArgumentError:
                raise ArgumentError("Could not deduce a valid hardware label "
                                    "from the network device name.  Please specify "
                                    "--label.")

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        dbnetdev = NetworkDevice(label=label, switch_type=type,
                                 location=dblocation, model=dbmodel,
                                 serial_no=serial, comments=comments)
        session.add(dbnetdev)
        dbnetdev.primary_name = dbdns_rec

        check_netdev_iftype(iftype)

        dbinterface = get_or_create_interface(session, dbnetdev,
                                              name=interface, mac=mac,
                                              interface_type=iftype)
        dbnetwork = get_net_id_from_ip(session, ip)
        # TODO: should we call check_ip_restrictions() here?
        assign_address(dbinterface, ip, dbnetwork, logger=logger)

        session.flush()

        plenary = Plenary.get_plenary(dbnetdev, logger=logger)
        with plenary.get_key():
            plenary.stash()
            try:
                plenary.write(locked=True)
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbnetdev, None)
                dsdb_runner.commit_or_rollback("Could not add network device to DSDB")
            except:
                plenary.restore_stash()
                raise

        return
Example #13
def search_hardware_entity_query(session, hardware_type=HardwareEntity,
                                 subquery=False,
                                 model=None, vendor=None, machine_type=None,
                                 exact_location=False, ip=None,
                                 mac=None, pg=None, serial=None,
                                 interface_model=None, interface_vendor=None,
                                 **kwargs):
    q = session.query(hardware_type)
    if hardware_type is HardwareEntity:
        q = q.with_polymorphic('*')

    # The ORM deduplicates the result if we query full objects, but not if we
    # query just the label
    q = q.distinct()

    dblocation = get_location(session, **kwargs)
    if dblocation:
        if exact_location:
            q = q.filter_by(location=dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(HardwareEntity.location_id.in_(childids))
    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        model_type=machine_type, compel=True)
        q = q.filter(HardwareEntity.model_id.in_(subq))
    if ip or mac or pg or interface_vendor or interface_model:
        q = q.join('interfaces')
        if mac:
            q = q.filter_by(mac=mac)
        if pg:
            q = q.filter_by(port_group=pg)
        if ip:
            q = q.join(AddressAssignment)
            q = q.filter(AddressAssignment.ip == ip)
        if interface_model or interface_vendor:
            # HardwareEntity also has a .model relation, so we have to be
            # explicit here
            q = q.join(Interface.model)
            if interface_model:
                q = q.filter_by(name=interface_model)
            if interface_vendor:
                a_vendor = aliased(Vendor)
                q = q.join(a_vendor)
                q = q.filter_by(name=interface_vendor)
        q = q.reset_joinpoint()
    if serial:
        q = q.filter_by(serial_no=serial)
    if not subquery:
        # Oracle does not like "ORDER BY" in a sub-select, so we have to
        # suppress it if we want to use this query as a subquery
        q = q.order_by(HardwareEntity.label)
    return q
Example #14
    def render(self, session, model, vendor, type, cpuname, cpuvendor, cpuspeed,
               cpunum, memory, disktype, diskcontroller, disksize,
               nics, nicmodel, nicvendor,
               comments, **arguments):
        dbvendor = Vendor.get_unique(session, vendor, compel=True)
        Model.get_unique(session, name=model, vendor=dbvendor, preclude=True)

        # Specifically not allowing new models to be added that are of
        # type aurora_node - that is only meant for the dummy aurora_model.
        if type.isAuroraChassis() or type.isAuroraNode():
            raise ArgumentError("The model's machine type must not be"
                                " an aurora type")

        dbmodel = Model(name=model, vendor=dbvendor, model_type=type,
                        comments=comments)
        session.add(dbmodel)
        session.flush()

        if cpuname or cpuvendor or cpuspeed is not None:
            if not type.isMachineType():
                raise ArgumentError("Machine specfications are only valid"
                                    " for machine types")
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            if nicmodel or nicvendor:
                dbnic = Model.get_unique(session, model_type=NicType.Nic,
                                         name=nicmodel, vendor=nicvendor,
                                         compel=True)
            else:
                dbnic = Model.default_nic_model(session)
            dbmachine_specs = MachineSpecs(model=dbmodel, cpu=dbcpu,
                                           cpu_quantity=cpunum, memory=memory,
                                           disk_type=disktype,
                                           controller_type=diskcontroller,
                                           disk_capacity=disksize,
                                           nic_count=nics, nic_model=dbnic)
            session.add(dbmachine_specs)
        return
Example #15
    def render(self, session, model, vendor, type, cpuname, cpuvendor, cpuspeed,
               cpunum, memory, disktype, diskcontroller, disksize,
               nics, nicmodel, nicvendor,
               comments, **arguments):
        dbvendor = Vendor.get_unique(session, vendor, compel=True)
        Model.get_unique(session, name=model, vendor=dbvendor, preclude=True)

        # Specifically not allowing new models to be added that are of
        # type aurora_node - that is only meant for the dummy aurora_model.
        allowed_types = ["blade", "rackmount", "workstation", "switch",
                         "chassis", "virtual_machine", "nic"]
        if type not in allowed_types:
            raise ArgumentError("The model's machine type must be one of: %s." %
                                ", ".join(allowed_types))

        dbmodel = Model(name=model, vendor=dbvendor, machine_type=type,
                        comments=comments)
        session.add(dbmodel)
        session.flush()

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            if nicmodel or nicvendor:
                dbnic = Model.get_unique(session, machine_type='nic',
                                         name=nicmodel, vendor=nicvendor,
                                         compel=True)
            else:
                dbnic = Model.default_nic_model(session)
            dbmachine_specs = MachineSpecs(model=dbmodel, cpu=dbcpu,
                                           cpu_quantity=cpunum, memory=memory,
                                           disk_type=disktype,
                                           controller_type=diskcontroller,
                                           disk_capacity=disksize,
                                           nic_count=nics, nic_model=dbnic)
            session.add(dbmachine_specs)
        return
Example #16
def test_create_vm():
    #vend = Vendor.get_unique(sess, 'virtual')
    mod = Model.get_unique(sess, name='vm', compel=True)
    proc = Cpu.get_unique(sess, name='virtual_cpu', speed=0, compel=True)
    np = Building.get_unique(sess, 'np', compel=True)

    for i in xrange(NUM_MACHINES):
        vm = Machine(label='%s%s' % (VM_NAME, i), location=np, model=mod,
                     cpu=proc, cpu_quantity=1, memory=4196)
        create(sess, vm)

    machines = sess.query(Machine).filter(Machine.label.like(VM_NAME+'%')).all()

    assert len(machines) == NUM_MACHINES
    print 'created %s machines' % (len(machines))
Example #17
def test_create_machines_for_hosts():
    np = Building.get_unique(sess, name='np', compel=True)
    am = Model.get_unique(sess, name='vm', compel=True)
    a_cpu = Cpu.get_unique(sess, name='aurora_cpu', compel=True)

    for i in xrange(NUM_HOSTS):
        machine = Machine(label='%s%s' % (MACHINE_NAME, i), location=np, model=am,
                          cpu=a_cpu, cpu_quantity=8, memory=32768)
        create(sess, machine)

    machines = sess.query(Machine).filter(
        Machine.label.like(MACHINE_NAME+'%')).all()

    assert len(machines) == NUM_MACHINES
    print 'created %s esx machines' % len(machines)
Example #18
    def render(self, session, logger, chassis, label, rack, model, vendor,
               ip, interface, mac, serial, comments, **arguments):
        dbdns_rec, newly_created = grab_address(session, chassis, ip,
                                                allow_restricted_domain=True,
                                                allow_reserved=True,
                                                preclude=True)
        if not label:
            label = dbdns_rec.fqdn.name
            try:
                Chassis.check_label(label)
            except ArgumentError:
                raise ArgumentError("Could not deduce a valid hardware label "
                                    "from the chassis name.  Please specify "
                                    "--label.")

        dblocation = get_location(session, rack=rack)
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type='chassis', compel=True)
        # FIXME: Precreate chassis slots?
        dbchassis = Chassis(label=label, location=dblocation, model=dbmodel,
                            serial_no=serial, comments=comments)
        session.add(dbchassis)
        dbchassis.primary_name = dbdns_rec

        # FIXME: get default name from the model
        if not interface:
            interface = "oa"
            ifcomments = "Created automatically by add_chassis"
        else:
            ifcomments = None
        dbinterface = get_or_create_interface(session, dbchassis,
                                              name=interface, mac=mac,
                                              interface_type="oa",
                                              comments=ifcomments)
        if ip:
            dbnetwork = get_net_id_from_ip(session, ip)
            check_ip_restrictions(dbnetwork, ip)
            assign_address(dbinterface, ip, dbnetwork)

        session.flush()

        if ip:
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbchassis, None)
            dsdb_runner.commit_or_rollback("Could not add chassis to DSDB")
        return
Example #19
def get_feature_link(session, feature, model, interface_name, personality):
    dblink = None
    dbmodel = None
    feature_type = 'host'
    if interface_name:
        feature_type = 'interface'
    if model:
        feature_type = 'hardware'
        dbmodel = Model.get_unique(session, name=model,
                                   compel=True)

    dbfeature = Feature.get_unique(session, name=feature,
                                   feature_type=feature_type, compel=True)
    dblink = FeatureLink.get_unique(session, feature=dbfeature,
                                    interface_name=interface_name,
                                    model=dbmodel, personality=personality,
                                    compel=True)
    return dblink
Example #20
def test_create_machines_for_hosts():
    np = Building.get_unique(sess, name='np', compel=True)
    am = Model.get_unique(sess, name='vm', compel=True)
    a_cpu = Cpu.get_unique(sess, name='aurora_cpu', compel=True)

    for i in xrange(NUM_HOSTS):
        machine = Machine(label='%s%s' % (MACHINE_NAME, i),
                          location=np,
                          model=am,
                          cpu=a_cpu,
                          cpu_quantity=8,
                          memory=32768)
        create(sess, machine)

    machines = sess.query(Machine).filter(
        Machine.label.like(MACHINE_NAME + '%')).all()

    assert len(machines) == NUM_MACHINES
    print 'created %s esx machines' % len(machines)
Example #21
def add_machine(sess, name, model=MODEL):
    """ Shorthand to create machines created for the purposes of reuse
        among all the various tests that require them """
    mchn = sess.query(Machine).filter_by(label=name).first()
    if mchn:
        return mchn

    model = Model.get_unique(sess, name=model, compel=True)

    proc = sess.query(Cpu).first()
    assert proc, "Can't find a cpu"

    rack = sess.query(Rack).first()
    assert rack, "Can't find a rack"

    mchn = Machine(label=name, model=model, location=rack, cpu=proc)
    add(sess, mchn)
    commit(sess)

    return mchn
Example #22
def add_machine(sess, name, model=MODEL):
    """ Shorthand to create machines created for the purposes of reuse
        among all the various tests that require them """
    mchn = sess.query(Machine).filter_by(label=name).first()
    if mchn:
        return mchn

    model = Model.get_unique(sess, name=model, compel=True)

    proc = sess.query(Cpu).first()
    assert proc, "Can't find a cpu"

    rack = sess.query(Rack).first()
    assert rack, "Can't find a rack"

    mchn = Machine(label=name, model=model, location=rack, cpu=proc)
    add(sess, mchn)
    commit(sess)

    return mchn
Example #23
def test_create_vm():
    #vend = Vendor.get_unique(sess, 'virtual')
    mod = Model.get_unique(sess, name='vm', compel=True)
    proc = Cpu.get_unique(sess, name='virtual_cpu', speed=0, compel=True)
    np = Building.get_unique(sess, 'np', compel=True)

    for i in xrange(NUM_MACHINES):
        vm = Machine(label='%s%s' % (VM_NAME, i),
                     location=np,
                     model=mod,
                     cpu=proc,
                     cpu_quantity=1,
                     memory=4196)
        create(sess, vm)

    machines = sess.query(Machine).filter(Machine.label.like(VM_NAME +
                                                             '%')).all()

    assert len(machines) == NUM_MACHINES
    print 'created %s machines' % (len(machines))
Example #24
def get_feature_link(session, feature, model, interface_name, personality):
    dblink = None
    dbmodel = None
    feature_type = 'host'
    if interface_name:
        feature_type = 'interface'
    if model:
        feature_type = 'hardware'
        dbmodel = Model.get_unique(session, name=model, compel=True)

    dbfeature = Feature.get_unique(session,
                                   name=feature,
                                   feature_type=feature_type,
                                   compel=True)
    dblink = FeatureLink.get_unique(session,
                                    feature=dbfeature,
                                    interface_name=interface_name,
                                    model=dbmodel,
                                    personality=personality,
                                    compel=True)
    return dblink
Example #25
def test_create_machines_for_test_host():
    np = Building.get_unique(sess, 'np')
    assert isinstance(np, Building), 'no building in %s' % func_name()

    hp = Vendor.get_unique(sess, 'hp')
    assert isinstance(hp, Vendor), 'no vendor in %s' % func_name()

    am = Model.get_unique(sess, name='bl45p', vendor=hp)
    assert isinstance(am, Model), 'no model in %s' % func_name()

    cpu = sess.query(Cpu).first()
    assert isinstance(cpu, Cpu), 'no cpu in %s' % func_name()

    for i in xrange(NUM_MACHINES):
        machine = Machine(label='%s%s' % (MACHINE_NAME_PREFIX, i), location=np,
                          model=am, cpu=cpu, cpu_quantity=2, memory=32768)
        create(sess, machine)

    machines = sess.query(Machine).filter(
        Machine.label.like(MACHINE_NAME_PREFIX+'%')).all()

    eq_(len(machines), NUM_MACHINES)
Example #26
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. As I can't
            # rule out that such a case will have some uses in the
            # future, the restriction is here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)

            if dbmodel.model_type.isNic():
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session, name=feature,
                                       feature_type=feature_type, compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException("Changing feature bindings for "
                                             "more than just a personality "
                                             "requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        for dbpersonality in q:
            plenaries.append(Plenary.get_plenary(dbpersonality))

        written = plenaries.write()
        logger.client_info("Flushed %d/%d templates." %
                           (written, len(plenaries.plenaries)))
        return
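
The command derives the feature type from which arguments were supplied: it defaults to "host", a named interface switches it to "interface", and a model overrides both, counting NIC models as "interface" and everything else as "hardware". The compressed restatement below uses an invented helper name and a plain string for the model type; it only mirrors the precedence, not the real lookups.

def deduce_feature_type(interface_name=None, model_type=None):
    # Order matters, as in the command above: --model wins over a named
    # interface, and NIC models still count as "interface" features.
    feature_type = 'host'
    if interface_name:
        feature_type = 'interface'
    if model_type is not None:
        feature_type = 'interface' if model_type == 'nic' else 'hardware'
    return feature_type

assert deduce_feature_type() == 'host'
assert deduce_feature_type(interface_name='eth0') == 'interface'
assert deduce_feature_type(model_type='blade') == 'hardware'
assert deduce_feature_type(model_type='nic') == 'interface'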
Example #27
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. As I can't
            # rule out that such a case will have some uses in the
            # future, the restriction is here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       compel=True)

            if dbmodel.machine_type == "nic":
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session,
                                       name=feature,
                                       feature_type=feature_type,
                                       compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException(
                    "Changing feature bindings for more "
                    "than just a personality requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

        idx = 0
        written = 0
        successful = []
        failed = []

        with CompileKey(logger=logger):
            personalities = q.all()

            for personality in personalities:
                idx += 1
                if idx % 1000 == 0:  # pragma: no cover
                    logger.client_info("Processing personality %d of %d..." %
                                       (idx, cnt))

                if not personality.archetype.is_compileable:  # pragma: no cover
                    continue

                try:
                    plenary_personality = PlenaryPersonality(personality)
                    written += plenary_personality.write(locked=True)
                    successful.append(plenary_personality)
                except IncompleteError:
                    pass
                except Exception, err:  # pragma: no cover
                    failed.append("{0} failed: {1}".format(personality, err))

            if failed:  # pragma: no cover
                for plenary in successful:
                    plenary.restore_stash()
                raise PartialError([], failed)
Example #28
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        plenaries = PlenaryCollection(logger=logger)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        if clearchassis:
            del dbmachine.chassis_slot[:]

        remove_plenaries = PlenaryCollection(logger=logger)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dbchassis.location):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}.".format(dblocation,
                                                        dbslot.chassis, dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            if machine_plenary_will_move(old=dbmachine.location,
                                         new=dblocation):
                remove_plenaries.append(Plenary.get_plenary(dbmachine))
            dbmachine.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if dbmodel.machine_type not in ['blade', 'rackmount',
                                            'workstation', 'aurora_node',
                                            'virtual_machine']:
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.machine_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.machine_type != dbmachine.model.machine_type and \
               'virtual_machine' in [dbmodel.machine_type,
                                     dbmachine.model.machine_type]:
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.machine_type,
                                     dbmodel.machine_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, dbmachine, ip)

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            # TODO: do we want to allow moving machines between the cluster and
            # metacluster level?
            if new_holder.__class__ != old_holder.__class__:
                raise ArgumentError("Cannot move a VM between a cluster and a "
                                    "stand-alone host.")

            if cluster:
                if new_holder.metacluster != old_holder.metacluster \
                   and not allow_metacluster_change:
                    raise ArgumentError("Current {0:l} does not match "
                                        "new {1:l}."
                                        .format(old_holder.metacluster,
                                                new_holder.metacluster))

            remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if not isinstance(dbdisk, VirtualDisk):
                    continue
                old_share = dbdisk.share
                if isinstance(old_share.holder, BundleResource):
                    resourcegroup = old_share.holder.name
                else:
                    resourcegroup = None
                new_share = find_share(new_holder, resourcegroup, old_share.name,
                                       error=ArgumentError)

                # If the shares are registered at the metacluster level and both
                # clusters are in the same metacluster, then there will be no
                # real change here
                if new_share != old_share:
                    old_share.disks.remove(dbdisk)
                    new_share.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                dbmachine.location = new_holder.location

            session.flush()
            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.append(Plenary.get_plenary(dbmachine))
        if remove_plenaries.plenaries and dbmachine.host:
            plenaries.append(Plenary.get_plenary(dbmachine.host))

        key = CompileKey.merge([plenaries.get_write_key(),
                                remove_plenaries.get_remove_key()])
        try:
            lock_queue.acquire(key)
            remove_plenaries.stash()
            plenaries.write(locked=True)
            remove_plenaries.remove(locked=True)

            if dbmachine.host:
                # XXX: May need to reconfigure.
                pass

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
        except:
            plenaries.restore_stash()
            remove_plenaries.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
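
The try/except/finally block above follows a fixed shape: stash the current plenary state, write the new templates and push the DSDB update, restore the stash if anything fails, and always release the compile key. Reduced to plain file operations, with every name invented (the real PlenaryCollection/CompileKey machinery does far more), the shape looks like this:

import os
import shutil
import tempfile

def write_with_rollback(path, new_contents):
    backup = None
    if os.path.exists(path):
        backup = path + '.stash'
        shutil.copy2(path, backup)            # "stash" the current state
    try:
        with open(path, 'w') as f:
            f.write(new_contents)
        # ... external side effects (the DSDB update in the command above)
        # would happen here, inside the same try block ...
    except Exception:
        if backup:
            shutil.copy2(backup, path)        # "restore_stash" on any failure
        elif os.path.exists(path):
            os.unlink(path)
        raise
    finally:
        # the lock (CompileKey) release belongs in finally, as above
        if backup and os.path.exists(backup):
            os.unlink(backup)

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, 'plenary.tpl')
write_with_rollback(target, 'template "machine";\n')
write_with_rollback(target, 'template "machine";  # updated\n')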
Example #29
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments,
               **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       machine_type='switch',
                                       compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError(
                    "The switch has aliases and it cannot be "
                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr,
                                              now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #30
0
def get_or_create_interface(session, dbhw_ent, name=None, mac=None,
                            model=None, vendor=None,
                            interface_type='public', bootable=None,
                            preclude=False, port_group=None, comments=None):
    """
    Look up an existing interface or create a new one.

    If either the name or the MAC address is given and a matching interface
    exists, then that interface is returned and the other parameters are not
    checked.

    If neither the name nor the MAC address is given, but there is just one
    existing interface matching the specified interface_type/bootable/port_group
    combination, then that interface is returned. If there are multiple matches,
    an exception is raised.

    If no interfaces were found, and enough parameters are provided, then a new
    interface is created. For this purpose, at least the name and in some cases
    the MAC address must be specified.

    Setting preclude to True enforces the creation of a new interface. An error
    is raised if a conflicting interface already exists.
    """

    dbinterface = None
    if mac:
        # Look for the MAC globally. If it is present, check that it belongs to
        # the right hardware, and that it does not conflict with the name (if
        # any).
        q = session.query(Interface)
        q = q.filter_by(mac=mac)
        dbinterface = q.first()
        if dbinterface and (dbinterface.hardware_entity != dbhw_ent or
                            (name and dbinterface.name != name)):
            raise ArgumentError("MAC address %s is already in use: %s." %
                                (mac, describe_interface(session,
                                                         dbinterface)))

    if name and not dbinterface:
        # Special logic to allow "add_interface" to succeed if there is an
        # auto-created interface already
        if preclude and len(dbhw_ent.interfaces) == 1:
            dbinterface = dbhw_ent.interfaces[0]
            if dbinterface.mac is None and \
               dbinterface.interface_type == interface_type and \
               dbinterface.comments is not None and \
               dbinterface.comments.startswith("Created automatically"):
                dbinterface.name = name
                dbinterface.mac = mac
                if hasattr(dbinterface, "bootable") and bootable is not None:
                    dbinterface.bootable = bootable
                dbinterface.comments = comments
                if hasattr(dbinterface, "port_group"):
                    dbinterface.port_group = port_group
                session.flush()
                return dbinterface

        dbinterface = None
        for iface in dbhw_ent.interfaces:
            if iface.name == name:
                dbinterface = iface
                break

    if not name and not mac:
        # Time for guessing. If neither the name nor the MAC was given, then
        # there is no guarantee of uniqueness, but we can still return something
        # useful if e.g. there is just one management interface.
        interfaces = []
        for iface in dbhw_ent.interfaces:
            if iface.interface_type != interface_type:
                continue
            if bootable is not None and (not hasattr(iface, "bootable") or
                                         iface.bootable != bootable):
                continue
            if port_group is not None and (not hasattr(iface, "port_group") or
                                           iface.port_group != port_group):
                continue
            interfaces.append(iface)
        if len(interfaces) > 1:
            type_msg = _type_msg(interface_type, bootable)
            raise ArgumentError("{0} has multiple {1} interfaces, please "
                                "specify which one to "
                                "use.".format(dbhw_ent, type_msg))
        elif interfaces:
            dbinterface = interfaces[0]
        else:
            dbinterface = None

    if dbinterface:
        if preclude:
            raise ArgumentError("{0} already exists.".format(dbinterface))
        return dbinterface

    # No suitable interface was found, try to create a new one

    if not name:
        # Not enough information to create it
        type_msg = _type_msg(interface_type, bootable)
        raise ArgumentError("{0} has no {1} interfaces.".format(dbhw_ent,
                                                                type_msg))

    cls = Interface.polymorphic_subclass(interface_type,
                                         "Invalid interface type")
    extra_args = {}
    default_route = False
    if bootable is not None:
        extra_args["bootable"] = bootable
        default_route = bootable
    if port_group is not None:
        extra_args["port_group"] = port_group

    if not model and not vendor:
        dbmodel = dbhw_ent.model.nic_model
    elif not cls.model_allowed:
        raise ArgumentError("Model/vendor can not be set for a %s." %
                            cls._get_class_label(tolower=True))
    else:
        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   machine_type="nic", compel=True)

    # VLAN interfaces need some special handling
    if interface_type == 'vlan':
        result = _vlan_re.match(name)
        if not result:
            raise ArgumentError("Invalid VLAN interface name '%s'." % name)
        parent_name = result.groups()[0]
        vlan_id = int(result.groups()[1])

        dbparent = None
        for iface in dbhw_ent.interfaces:
            if iface.name == parent_name:
                dbparent = iface
                break
        if not dbparent:
            raise ArgumentError("Parent interface %s for VLAN interface %s "
                                "does not exist, please create it first." %
                                (parent_name, name))

        extra_args["parent"] = dbparent
        extra_args["vlan_id"] = vlan_id

    for key in extra_args.keys():
        if key not in cls.extra_fields:
            raise InternalError("Parameter %s is not valid for %s "
                                "interfaces." % (key, interface_type))

    try:
        dbinterface = cls(name=name, mac=mac, comments=comments, model=dbmodel,
                          default_route=default_route, **extra_args)
    except ValueError, err:
        raise ArgumentError(err)
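
A minimal usage sketch of get_or_create_interface(), following the docstring
above; it assumes an open AQDB session and an existing machine, and the
machine label, interface name and MAC addresses are made up for illustration:

    dbmachine = Machine.get_unique(session, "evm1", compel=True)

    # Lookup by MAC: if this MAC already exists on the machine, the matching
    # interface is returned and the remaining keyword arguments are ignored.
    dbiface = get_or_create_interface(session, dbmachine,
                                      mac="00:50:56:01:23:45",
                                      interface_type="public")

    # Force creation of a new interface; an ArgumentError is raised if a
    # conflicting interface already exists.
    dbiface = get_or_create_interface(session, dbmachine, name="eth1",
                                      mac="00:50:56:01:23:46",
                                      interface_type="public",
                                      bootable=False, preclude=True)
    session.flush()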
Example #31
0
    def render(self, session, logger, interface, machine, mac, model, vendor,
               boot, pg, autopg, comments, master, clear_master,
               default_route, rename_to, **arguments):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.

        If the machine has a host, dsdb may need to be updated.

        The boot flag can *only* be set to true.  This is mostly a technical
        restriction, as at this point in the command interface it is
        difficult to tell whether the flag was left unset or explicitly set
        to false.  However, it also vastly simplifies the dsdb logic - we
        never have to worry about a user trying to remove the boot flag
        from a host in dsdb.

        """

        audit_results = []

        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if arguments.get("hostname", None):
            # Hack to set an intial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == "aurora" and dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip, dbhw_ent.primary_name.network)

        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(
                dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(
                session, logger, dbinterface.hardware_entity)
            audit_results.append(("pg", dbinterface.port_group))

        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has " "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session, hardware_entity=dbhw_ent, name=master, compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError(
                    "Enslaving {0:l} would create a circle, " "which is not allowed.".format(dbinterface)
                )
            dbinterface.master = dbmaster

        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None

        if comments:
            dbinterface.comments = comments
        if boot:
            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                    i.default_route = True
                else:
                    i.bootable = False
                    i.default_route = False
        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope " "that's ok.".format(dbhw_ent))

        # Set the MAC address last so that the interface can be made bootable
        # *before* a MAC address is added. This way the validation that takes
        # place in the interface class does not have to worry about the
        # relative order of setting bootable=True and the MAC address.
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by " "{1:l}.".format(mac, other))
            dbinterface.mac = mac

        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError("Model/vendor can not be set for a {0:lc}.".format(dbinterface))

            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       machine_type="nic", compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()
        session.refresh(dbhw_ent)

        plenary_info = PlenaryMachineInfo(dbhw_ent, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)

            if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbhw_ent, oldinfo)
                dsdb_runner.commit_or_rollback()
        except AquilonError, err:
            plenary_info.restore_stash()
            raise ArgumentError(err)
Example #32
0
    def render(self, session, logger, machine, model, vendor, serial,
               chassis, slot, clearchassis, multislot,
               vmhost, cluster, allow_metacluster_change,
               cpuname, cpuvendor, cpuspeed, cpucount, memory, ip, uri,
               **arguments):
        dbmachine = Machine.get_unique(session, machine, compel=True)
        oldinfo = DSDBRunner.snapshot_hw(dbmachine)

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if dbmachine.vm_container:
            plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        if dbmachine.host:
            # Using PlenaryHostData directly, to avoid warnings if the host has
            # not been configured yet
            plenaries.append(PlenaryHostData.get_plenary(dbmachine.host))

        if clearchassis:
            del dbmachine.chassis_slot[:]

        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            dbmachine.location = dbchassis.location
            if slot is None:
                raise ArgumentError("Option --chassis requires --slot "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)
        elif slot is not None:
            dbchassis = None
            for dbslot in dbmachine.chassis_slot:
                if dbchassis and dbslot.chassis != dbchassis:
                    raise ArgumentError("Machine in multiple chassis, please "
                                        "use --chassis argument.")
                dbchassis = dbslot.chassis
            if not dbchassis:
                raise ArgumentError("Option --slot requires --chassis "
                                    "information.")
            self.adjust_slot(session, logger,
                             dbmachine, dbchassis, slot, multislot)

        dblocation = get_location(session, **arguments)
        if dblocation:
            loc_clear_chassis = False
            for dbslot in dbmachine.chassis_slot:
                dbcl = dbslot.chassis.location
                if dbcl != dblocation:
                    if chassis or slot is not None:
                        raise ArgumentError("{0} conflicts with chassis {1!s} "
                                            "location {2}."
                                            .format(dblocation, dbslot.chassis,
                                                    dbcl))
                    else:
                        loc_clear_chassis = True
            if loc_clear_chassis:
                del dbmachine.chassis_slot[:]
            dbmachine.location = dblocation

            if dbmachine.host:
                for vm in dbmachine.host.virtual_machines:
                    plenaries.append(Plenary.get_plenary(vm))
                    vm.location = dblocation

        if model or vendor:
            # If overriding model, should probably overwrite default
            # machine specs as well.
            if not model:
                model = dbmachine.model.name
            if not vendor:
                vendor = dbmachine.model.vendor.name
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       compel=True)
            if not dbmodel.model_type.isMachineType():
                raise ArgumentError("The update_machine command cannot update "
                                    "machines of type %s." %
                                    dbmodel.model_type)
            # We probably could do this by forcing either cluster or
            # location data to be available as appropriate, but really?
            # Failing seems reasonable.
            if dbmodel.model_type != dbmachine.model.model_type and \
               (dbmodel.model_type.isVirtualMachineType() or
                dbmachine.model.model_type.isVirtualMachineType()):
                raise ArgumentError("Cannot change machine from %s to %s." %
                                    (dbmachine.model.model_type,
                                     dbmodel.model_type))

            old_nic_model = dbmachine.model.nic_model
            new_nic_model = dbmodel.nic_model
            if old_nic_model != new_nic_model:
                for iface in dbmachine.interfaces:
                    if iface.model == old_nic_model:
                        iface.model = new_nic_model

            dbmachine.model = dbmodel

        if cpuname or cpuvendor or cpuspeed is not None:
            dbcpu = Cpu.get_unique(session, name=cpuname, vendor=cpuvendor,
                                   speed=cpuspeed, compel=True)
            dbmachine.cpu = dbcpu

        if cpucount is not None:
            dbmachine.cpu_quantity = cpucount
        if memory is not None:
            dbmachine.memory = memory
        if serial:
            dbmachine.serial_no = serial

        if ip:
            update_primary_ip(session, logger, dbmachine, ip)

        if uri and not dbmachine.model.model_type.isVirtualAppliance():
            raise ArgumentError("URI can be specified only for virtual "
                                "appliances and the model's type is %s" %
                                dbmachine.model.model_type)

        if uri:
            dbmachine.uri = uri

        # FIXME: For now, if a machine has its interface(s) in a portgroup
        # this command will need to be followed by an update_interface to
        # re-evaluate the portgroup for overflow.
        # It would be better to have --pg and --autopg options to let it
        # happen at this point.
        if cluster or vmhost:
            if not dbmachine.vm_container:
                raise ArgumentError("Cannot convert a physical machine to "
                                    "virtual.")

            old_holder = dbmachine.vm_container.holder.holder_object
            resholder = get_resource_holder(session, hostname=vmhost,
                                            cluster=cluster, compel=False)
            new_holder = resholder.holder_object

            if self.get_metacluster(new_holder) != self.get_metacluster(old_holder) \
               and not allow_metacluster_change:
                raise ArgumentError("Current {0:l} does not match "
                                    "new {1:l}."
                                    .format(self.get_metacluster(old_holder),
                                            self.get_metacluster(new_holder)))

            plenaries.append(Plenary.get_plenary(old_holder))
            plenaries.append(Plenary.get_plenary(new_holder))

            dbmachine.vm_container.holder = resholder

            for dbdisk in dbmachine.disks:
                if isinstance(dbdisk, VirtualNasDisk):
                    old_share = dbdisk.share
                    if isinstance(old_share.holder, BundleResource):
                        resourcegroup = old_share.holder.resourcegroup.name
                    else:
                        resourcegroup = None

                    new_share = find_resource(Share, new_holder,
                                              resourcegroup, old_share.name,
                                              error=ArgumentError)

                    # If the shares are registered at the metacluster level and both
                    # clusters are in the same metacluster, then there will be no
                    # real change here
                    if new_share != old_share:
                        old_share.disks.remove(dbdisk)
                        new_share.disks.append(dbdisk)

                if isinstance(dbdisk, VirtualLocalDisk):
                    old_filesystem = dbdisk.filesystem

                    new_filesystem = find_resource(Filesystem, new_holder, None,
                                                   old_filesystem.name,
                                                   error=ArgumentError)

                    if new_filesystem != old_filesystem:
                        old_filesystem.disks.remove(dbdisk)
                        new_filesystem.disks.append(dbdisk)

            if isinstance(new_holder, Cluster):
                dbmachine.location = new_holder.location_constraint
            else:
                # vmhost
                dbmachine.location = new_holder.hardware_entity.location

        session.flush()

        # Check if the changed parameters still meet cluster capacity
        # requirements
        if dbmachine.cluster:
            dbmachine.cluster.validate()
            if allow_metacluster_change:
                dbmachine.cluster.metacluster.validate()
        if dbmachine.host and dbmachine.host.cluster:
            dbmachine.host.cluster.validate()

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.

        with plenaries.get_key():
            plenaries.stash()
            try:
                plenaries.write(locked=True)

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbmachine, oldinfo)
                dsdb_runner.commit_or_rollback("Could not update machine in DSDB")
            except:
                plenaries.restore_stash()
                raise

        return
Example #33
0
def get_or_create_interface(session,
                            dbhw_ent,
                            name=None,
                            mac=None,
                            model=None,
                            vendor=None,
                            interface_type='public',
                            bootable=None,
                            preclude=False,
                            port_group=None,
                            comments=None):
    """
    Look up an existing interface or create a new one.

    If either the name or the MAC address is given and a matching interface
    exists, then that interface is returned and the other parameters are not
    checked.

    If neither the name nor the MAC address is given, but there is just one
    existing interface matching the specified interface_type/bootable/port_group
    combination, then that interface is returned. If there are multiple matches,
    an exception is raised.

    If no interfaces were found, and enough parameters are provided, then a new
    interface is created. For this purpose, at least the name and in some cases
    the MAC address must be specified.

    Setting preclude to True enforces the creation of a new interface. An error
    is raised if a conflicting interface already exists.
    """

    dbinterface = None
    if mac:
        # Look for the MAC globally. If it is present, check that it belongs to
        # the right hardware, and that it does not conflict with the name (if
        # any).
        q = session.query(Interface)
        q = q.filter_by(mac=mac)
        dbinterface = q.first()
        if dbinterface and (dbinterface.hardware_entity != dbhw_ent or
                            (name and dbinterface.name != name)):
            raise ArgumentError(
                "MAC address %s is already in use: %s." %
                (mac, describe_interface(session, dbinterface)))

    if name and not dbinterface:
        # Special logic to allow "add_interface" to succeed if there is an
        # auto-created interface already
        if preclude and len(dbhw_ent.interfaces) == 1:
            dbinterface = dbhw_ent.interfaces[0]
            if dbinterface.mac is None and \
               dbinterface.interface_type == interface_type and \
               dbinterface.comments is not None and \
               dbinterface.comments.startswith("Created automatically"):
                dbinterface.name = name
                dbinterface.mac = mac
                if hasattr(dbinterface, "bootable") and bootable is not None:
                    dbinterface.bootable = bootable
                dbinterface.comments = comments
                if hasattr(dbinterface, "port_group"):
                    dbinterface.port_group = port_group
                session.flush()
                return dbinterface

        dbinterface = None
        for iface in dbhw_ent.interfaces:
            if iface.name == name:
                dbinterface = iface
                break

    if not name and not mac:
        # Time for guessing. If neither the name nor the MAC was given, then
        # there is no guarantee of uniqueness, but we can still return something
        # useful if e.g. there is just one management interface.
        interfaces = []
        for iface in dbhw_ent.interfaces:
            if iface.interface_type != interface_type:
                continue
            if bootable is not None and (not hasattr(iface, "bootable")
                                         or iface.bootable != bootable):
                continue
            if port_group is not None and (not hasattr(iface, "port_group")
                                           or iface.port_group != port_group):
                continue
            interfaces.append(iface)
        if len(interfaces) > 1:
            type_msg = _type_msg(interface_type, bootable)
            raise ArgumentError("{0} has multiple {1} interfaces, please "
                                "specify which one to "
                                "use.".format(dbhw_ent, type_msg))
        elif interfaces:
            dbinterface = interfaces[0]
        else:
            dbinterface = None

    if dbinterface:
        if preclude:
            raise ArgumentError("{0} already exists.".format(dbinterface))
        return dbinterface

    # No suitable interface was found, try to create a new one

    if not name:
        # Not enough information to create it
        type_msg = _type_msg(interface_type, bootable)
        raise ArgumentError("{0} has no {1} interfaces.".format(
            dbhw_ent, type_msg))

    cls = Interface.polymorphic_subclass(interface_type,
                                         "Invalid interface type")
    extra_args = {}
    default_route = False
    if bootable is not None:
        extra_args["bootable"] = bootable
        default_route = bootable
    if port_group is not None:
        extra_args["port_group"] = port_group

    if not model and not vendor:
        dbmodel = dbhw_ent.model.nic_model
    elif not cls.model_allowed:
        raise ArgumentError("Model/vendor can not be set for a %s." %
                            cls._get_class_label(tolower=True))
    else:
        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   machine_type="nic",
                                   compel=True)

    # VLAN interfaces need some special handling
    if interface_type == 'vlan':
        result = _vlan_re.match(name)
        if not result:
            raise ArgumentError("Invalid VLAN interface name '%s'." % name)
        parent_name = result.groups()[0]
        vlan_id = int(result.groups()[1])

        dbparent = None
        for iface in dbhw_ent.interfaces:
            if iface.name == parent_name:
                dbparent = iface
                break
        if not dbparent:
            raise ArgumentError("Parent interface %s for VLAN interface %s "
                                "does not exist, please create it first." %
                                (parent_name, name))

        extra_args["parent"] = dbparent
        extra_args["vlan_id"] = vlan_id

    for key in extra_args.keys():
        if key not in cls.extra_fields:
            raise InternalError("Parameter %s is not valid for %s "
                                "interfaces." % (key, interface_type))

    try:
        dbinterface = cls(name=name,
                          mac=mac,
                          comments=comments,
                          model=dbmodel,
                          default_route=default_route,
                          **extra_args)
    except ValueError, err:
        raise ArgumentError(err)
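
The VLAN branch above derives the parent interface name and the VLAN ID from
the interface name via _vlan_re, whose pattern is not shown in this excerpt.
A small hypothetical sketch, assuming the conventional "<parent>.<vlan_id>"
naming (e.g. "eth0.110"), which is consistent with how the two match groups
are used above:

    import re

    # Hypothetical stand-in for _vlan_re; the real pattern lives elsewhere.
    vlan_re = re.compile(r'^([a-z]+\d+)\.(\d+)$')

    result = vlan_re.match("eth0.110")
    parent_name = result.groups()[0]   # "eth0"
    vlan_id = int(result.groups()[1])  # 110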
Example #34
0
    def render(self, session, logger, feature, archetype, personality, model,
               vendor, interface, justification, user, **arguments):

        # Binding a feature to a named interface makes sense in the scope of a
        # personality, but not for a whole archetype.
        if interface and not personality:
            raise ArgumentError("Binding to a named interface needs "
                                "a personality.")

        q = session.query(Personality)
        dbarchetype = None

        feature_type = "host"

        justification_required = True

        # Warning: order matters here!
        params = {}
        if personality:
            justification_required = False
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=archetype,
                                                   compel=True)
            params["personality"] = dbpersonality
            if interface:
                params["interface_name"] = interface
                feature_type = "interface"
            dbarchetype = dbpersonality.archetype
            q = q.filter_by(archetype=dbarchetype)
            q = q.filter_by(name=personality)
        elif archetype:
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
            params["archetype"] = dbarchetype
            q = q.filter_by(archetype=dbarchetype)
        else:
            # It's highly unlikely that a feature template would work for
            # _any_ archetype, so disallow this case for now. Since I can't
            # rule out that such a case may have some uses in the future,
            # the restriction lives here and not in the model.
            raise ArgumentError("Please specify either an archetype or "
                                "a personality when binding a feature.")

        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                        compel=True)

            if dbmodel.machine_type == "nic":
                feature_type = "interface"
            else:
                feature_type = "hardware"

            params["model"] = dbmodel

        if dbarchetype and not dbarchetype.is_compileable:
            raise UnimplementedError("Binding features to non-compilable "
                                     "archetypes is not implemented.")

        if not feature_type:  # pragma: no cover
            raise InternalError("Feature type is not known.")

        dbfeature = Feature.get_unique(session, name=feature,
                                       feature_type=feature_type, compel=True)

        cnt = q.count()
        # TODO: should the limit be configurable?
        if justification_required and cnt > 0:
            if not justification:
                raise AuthorizationException(
                      "Changing feature bindings for more "
                      "than just a personality requires --justification.")
            validate_justification(user, justification)

        self.do_link(session, logger, dbfeature, params)
        session.flush()

        idx = 0
        written = 0
        successful = []
        failed = []

        with CompileKey(logger=logger):
            personalities = q.all()

            for personality in personalities:
                idx += 1
                if idx % 1000 == 0:  # pragma: no cover
                    logger.client_info("Processing personality %d of %d..." %
                                       (idx, cnt))

                if not personality.archetype.is_compileable:  # pragma: no cover
                    continue

                try:
                    plenary_personality = PlenaryPersonality(personality)
                    written += plenary_personality.write(locked=True)
                    successful.append(plenary_personality)
                except IncompleteError:
                    pass
                except Exception, err:  # pragma: no cover
                    failed.append("{0} failed: {1}".format(personality, err))

            if failed:  # pragma: no cover
                for plenary in successful:
                    plenary.restore_stash()
                raise PartialError([], failed)
Example #35
0
    def render(self, session, logger, model, vendor, newmodel, newvendor,
               comments, leave_existing, **arguments):
        for (arg, value) in arguments.items():
            # Cleaning the strings isn't strictly necessary but allows
            # for simple equality checks below and removes the need to
            # call refresh().
            if arg in ['newmodel', 'newvendor', 'machine_type',
                       'cpuname', 'cpuvendor', 'disktype', 'diskcontroller',
                       'nicmodel', 'nicvendor']:
                if value is not None:
                    arguments[arg] = value.lower().strip()

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if leave_existing and (newmodel or newvendor):
            raise ArgumentError("Cannot update model name or vendor without "
                                "updating any existing machines.")

        fix_existing = not leave_existing
        dbmachines = set()

        # The sub-branching here is a little difficult to read...
        # Basically, there are three different checks to handle
        # setting a new vendor, a new name, or both.
        if newvendor:
            dbnewvendor = Vendor.get_unique(session, newvendor, compel=True)
            if newmodel:
                Model.get_unique(session, name=newmodel, vendor=dbnewvendor,
                                 preclude=True)
            else:
                Model.get_unique(session, name=dbmodel.name,
                                 vendor=dbnewvendor, preclude=True)
            dbmodel.vendor = dbnewvendor
        if newmodel:
            if not newvendor:
                Model.get_unique(session, name=newmodel, vendor=dbmodel.vendor,
                                 preclude=True)
            dbmodel.name = newmodel
        if newvendor or newmodel:
            q = session.query(Machine).filter_by(model=dbmodel)
            dbmachines.update(q.all())

        # For now, can't update machine_type.  There are too many spots
        # that special case things like aurora_node or virtual_machine to
        # know that the transition is safe.  If there is enough need we
        # can always add those transitions later.
        if arguments['machine_type'] is not None:
            raise UnimplementedError("Cannot (yet) change a model's "
                                     "machine type.")

        if comments:
            dbmodel.comments = comments
            # The comments also do not affect the templates.

        cpu_args = ['cpuname', 'cpuvendor', 'cpuspeed']
        cpu_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in cpu_args])
        cpu_values = [v for v in cpu_info.values() if v is not None]
        nic_args = ['nicmodel', 'nicvendor']
        nic_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in nic_args])
        nic_values = [v for v in nic_info.values() if v is not None]
        spec_args = ['cpunum', 'memory', 'disktype', 'diskcontroller',
                     'disksize', 'nics']
        specs = dict([(self.argument_lookup[arg], arguments[arg])
                      for arg in spec_args])
        spec_values = [v for v in specs.values() if v is not None]

        if not dbmodel.machine_specs:
            if cpu_values or nic_values or spec_values:
                if not cpu_values or len(spec_values) < len(spec_args):
                    raise ArgumentError("Missing required parameters to store "
                                        "machine specs for the model.  Please "
                                        "give all CPU, disk, RAM, and NIC "
                                        "count information.")
                dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
                if nic_values:
                    dbnic = Model.get_unique(session, compel=True,
                                             machine_type='nic', **nic_info)
                else:
                    dbnic = Model.default_nic_model(session)
                dbmachine_specs = MachineSpecs(model=dbmodel, cpu=dbcpu,
                                               nic_model=dbnic, **specs)
                session.add(dbmachine_specs)

        # Anything below that updates specs should have been verified above.

        if cpu_values:
            dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
            self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                      attr='cpu', value=dbcpu,
                                      fix_existing=fix_existing)

        for arg in ['memory', 'cpunum']:
            if arguments[arg] is not None:
                self.update_machine_specs(model=dbmodel, dbmachines=dbmachines,
                                          attr=self.argument_lookup[arg],
                                          value=arguments[arg],
                                          fix_existing=fix_existing)

        if arguments['disktype']:
            if fix_existing:
                raise ArgumentError("Please specify --leave_existing to "
                                    "change the model disktype.  This cannot "
                                    "be converted automatically.")
            dbmodel.machine_specs.disk_type = arguments['disktype']

        for arg in ['diskcontroller', 'disksize']:
            if arguments[arg] is not None:
                self.update_disk_specs(model=dbmodel, dbmachines=dbmachines,
                                       attr=self.argument_lookup[arg],
                                       value=arguments[arg],
                                       fix_existing=fix_existing)

        if nic_values:
            dbnic = Model.get_unique(session, compel=True, **nic_info)
            self.update_interface_specs(model=dbmodel, dbmachines=dbmachines,
                                        value=dbnic, fix_existing=fix_existing)

        if arguments['nics'] is not None:
            dbmodel.machine_specs.nic_count = arguments['nics']

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        for dbmachine in dbmachines:
            plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))
        plenaries.write()

        return
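
The dict comprehensions above key on self.argument_lookup, which is defined
elsewhere in the command class and not shown in this excerpt. A hypothetical
sketch of the kind of option-name to attribute-name mapping it would hold,
consistent with the spec attributes touched later in this command
(disk_type, nic_count):

    # Hypothetical illustration only; the real argument_lookup is part of
    # the command class and may differ.
    argument_lookup = {
        'cpuname': 'name', 'cpuvendor': 'vendor', 'cpuspeed': 'speed',
        'nicmodel': 'name', 'nicvendor': 'vendor',
        'cpunum': 'cpu_quantity', 'memory': 'memory',
        'disktype': 'disk_type', 'diskcontroller': 'controller_type',
        'disksize': 'disk_capacity', 'nics': 'nic_count',
    }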
Example #36
0
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[
                                      subqueryload('parents'),
                                      joinedload('parents.dns_maps')
                                  ],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation,
                                                  dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   compel=True)

        if dbmodel.machine_type not in [
                'blade', 'rackmount', 'workstation', 'aurora_node',
                'virtual_machine'
        ]:
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %(type)s.  Try 'add %(type)s'." %
                                {"type": dbmodel.machine_type})

        if cluster:
            if dbmodel.machine_type != 'virtual_machine':
                raise ArgumentError("Only virtual machines can have a cluster "
                                    "attribute.")
            dbcluster = Cluster.get_unique(session,
                                           cluster,
                                           compel=ArgumentError)
            # This test could be either archetype or cluster_type
            if dbcluster.personality.archetype.name != 'esx_cluster':
                raise ArgumentError("Can only add virtual machines to "
                                    "clusters with archetype esx_cluster.")
            # TODO implement the same to vmhosts.
            if dbcluster.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")
            if dblocation and dbcluster.location_constraint != dblocation:
                raise ArgumentError("Cannot override cluster location {0} "
                                    "with location {1}.".format(
                                        dbcluster.location_constraint,
                                        dblocation))
            dblocation = dbcluster.location_constraint
        elif dbmodel.machine_type == 'virtual_machine':
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(
                chassis=dbchassis, slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if cluster:
            if not dbcluster.resholder:
                dbcluster.resholder = ClusterResource(cluster=dbcluster)
            dbvm = VirtualMachine(machine=dbmachine,
                                  name=dbmachine.label,
                                  holder=dbcluster.resholder)
            dbcluster.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if cluster:
            plenaries.append(Plenary.get_plenary(dbcluster))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
Example #37
0
    def render(self, session, logger, switch, model, rack, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments,
               **arguments):
        dbswitch = Switch.get_unique(session, switch, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbswitch)

        if discover:
            discover_switch(session, logger, self.config, dbswitch, False)

        if vendor and not model:
            model = dbswitch.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       machine_type='switch', compel=True)
            dbswitch.model = dbmodel

        dblocation = get_location(session, rack=rack)
        if dblocation:
            dbswitch.location = dblocation

        if serial is not None:
            dbswitch.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            Switch.check_type(type)
            dbswitch.switch_type = type

        if ip:
            update_primary_ip(session, dbswitch, ip)

        if comments is not None:
            dbswitch.comments = comments

        remove_plenary = None
        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbswitch.primary_name and dbswitch.primary_name.fqdn.aliases:
                raise ArgumentError("The switch has aliases and it cannot be "
                                    "renamed. Please remove all aliases first.")
            remove_plenary = Plenary.get_plenary(dbswitch, logger=logger)
            rename_hardware(session, dbswitch, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(switch=dbswitch).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbswitch, port, macaddr,
                                              now)

        session.flush()

        switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

        key = switch_plenary.get_write_key()
        if remove_plenary:
            key = CompileKey.merge([key, remove_plenary.get_remove_key()])
        try:
            lock_queue.acquire(key)
            if remove_plenary:
                remove_plenary.stash()
                remove_plenary.remove(locked=True)
            switch_plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update switch in DSDB")
        except:
            if remove_plenary:
                remove_plenary.restore_stash()
            switch_plenary.restore_stash()
            raise
        finally:
            lock_queue.release(key)

        return
Example #38
0
class CommandAddAuroraHost(CommandAddHost):

    required_parameters = ["hostname"]

    # Look for node name like <building><rack_id>c<chassis_id>n<node_num>
    nodename_re = re.compile(r'^\s*([a-zA-Z]+)(\d+)c(\d+)n(\d+)\s*$')

    def render(self, session, logger, hostname, osname, osversion, **kwargs):
        # Pull relevant info out of dsdb...
        dsdb_runner = DSDBRunner(logger=logger)
        try:
            fields = dsdb_runner.show_host(hostname)
        except ProcessException, e:
            raise ArgumentError("Could not find %s in DSDB: %s" %
                                (hostname, e))

        fqdn = fields["fqdn"]
        dsdb_lookup = fields["dsdb_lookup"]
        if fields["node"]:
            machine = fields["node"]
        elif fields["primary_name"]:
            machine = fields["primary_name"]
        else:
            machine = dsdb_lookup

        # Create a machine
        dbmodel = Model.get_unique(session, name="aurora_model",
                                   vendor="aurora_vendor", compel=True)
        dbmachine = Machine.get_unique(session, machine)
        dbslot = None
        if not dbmachine:
            m = self.nodename_re.search(machine)
            if m:
                (building, rid, cid, nodenum) = m.groups()
                dbbuilding = session.query(Building).filter_by(
                        name=building).first()
                if not dbbuilding:
                    raise ArgumentError("Failed to find building %s for "
                                        "node %s, please add an Aurora "
                                        "machine manually and follow with "
                                        "add_host." % (building, machine))
                rack = building + rid
                dbrack = session.query(Rack).filter_by(name=rack).first()
                if not dbrack:
                    try:
                        rack_fields = dsdb_runner.show_rack(rack)
                        dbrack = Rack(name=rack, fullname=rack,
                                      parent=dbbuilding,
                                      rack_row=rack_fields["rack_row"],
                                      rack_column=rack_fields["rack_col"])
                        session.add(dbrack)
                    except (ProcessException, ValueError), e:
                        logger.client_info("Rack %s not defined in DSDB." % rack)
                dblocation = dbrack or dbbuilding
                chassis = rack + "c" + cid
                dbdns_domain = session.query(DnsDomain).filter_by(
                        name="ms.com").first()
                dbchassis = Chassis.get_unique(session, chassis)
                if not dbchassis:
                    dbchassis_model = Model.get_unique(session,
                                                       name="aurora_chassis_model",
                                                       vendor="aurora_vendor",
                                                       compel=True)
                    dbchassis = Chassis(label=chassis, location=dblocation,
                                        model=dbchassis_model)
                    session.add(dbchassis)
                    dbfqdn = Fqdn.get_or_create(session, name=chassis,
                                                dns_domain=dbdns_domain,
                                                preclude=True)
                    dbdns_rec = ReservedName(fqdn=dbfqdn)
                    session.add(dbdns_rec)
                    dbchassis.primary_name = dbdns_rec
                dbslot = session.query(ChassisSlot).filter_by(
                        chassis=dbchassis, slot_number=nodenum).first()
                # Note: Could be even more persnickety here and check that
                # the slot is currently empty.  Seems like overkill.
                if not dbslot:
                    dbslot = ChassisSlot(chassis=dbchassis,
                                         slot_number=nodenum)
                    session.add(dbslot)
            else:
                dbnet_env = NetworkEnvironment.get_unique_or_default(session)

                try:
                    host_ip = gethostbyname(hostname)
                except gaierror, e:
                    raise ArgumentError("Error when looking up host: %d, %s" %
                                        (e.errno, e.strerror))

                dbnetwork = get_net_id_from_ip(session, IPv4Address(host_ip), dbnet_env)
                dblocation = dbnetwork.location.building
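
The nodename_re pattern above encodes the Aurora node naming convention,
<building><rack_id>c<chassis_id>n<node_num>. A small standalone sketch of how
the match groups are pulled apart (the node name used here is made up):

    import re

    nodename_re = re.compile(r'^\s*([a-zA-Z]+)(\d+)c(\d+)n(\d+)\s*$')

    m = nodename_re.search("np3c1n4")
    (building, rid, cid, nodenum) = m.groups()   # ("np", "3", "1", "4")
    rack = building + rid                        # "np3"
    chassis = rack + "c" + cid                   # "np3c1"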
Example #39
0
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               vmhost, uri, comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[subqueryload('parents'),
                                                 joinedload('parents.dns_maps')],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation, dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if not dbmodel.model_type.isMachineType():
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %s." % str(dbmodel.model_type))

        vmholder = None

        if cluster or vmhost:
            if cluster and vmhost:
                raise ArgumentError("Cluster and vmhost cannot be specified "
                                    "together.")
            if not dbmodel.model_type.isVirtualMachineType():
                raise ArgumentError("{0} is not a virtual machine."
                                    .format(dbmodel))

            # TODO: do we need VMs inside resource groups?
            vmholder = get_resource_holder(session, hostname=vmhost,
                                           cluster=cluster, resgroup=None,
                                           compel=False)

            if vmholder.holder_object.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")

            if cluster:
                container_loc = vmholder.holder_object.location_constraint
            else:
                container_loc = vmholder.holder_object.hardware_entity.location

            if dblocation and dblocation != container_loc:
                raise ArgumentError("Cannot override container location {0} "
                                    "with location {1}.".format(container_loc,
                                                                dblocation))
            dblocation = container_loc
        elif dbmodel.model_type.isVirtualMachineType():
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster or a host.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if uri and not dbmodel.model_type.isVirtualAppliance():
            raise ArgumentError("URI can be specified only for virtual "
                                "appliances and the model's type is %s." %
                                dbmodel.model_type)

        dbmachine.uri = uri

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                          slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if vmholder:
            dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                                  holder=vmholder)
            if hasattr(vmholder.holder_object, "validate") and \
               callable(vmholder.holder_object.validate):
                vmholder.holder_object.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if vmholder:
            plenaries.append(Plenary.get_plenary(vmholder.holder_object))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
Example #40
0
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster,
               guest_on_cluster, guest_on_share, member_cluster_share,
               domain, sandbox, branch, sandbox_owner,
               dns_domain, shortname, mac, ip, networkip, network_environment,
               exact_location, server_of_service, server_of_instance, grn,
               eon_id, fullinfo, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine,
                   (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(contains_eager('machine'),
                      contains_eager('machine.primary_name', alias=PriDns),
                      contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
                      contains_eager('machine.primary_name.fqdn.dns_domain',
                                     alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option("serial", "Please use search machine --serial instead.",
                logger=logger, **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment, Network, from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.", logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)

                q = q.outerjoin((ARecAlias,
                                 and_(ARecAlias.ip == AddressAssignment.ip,
                                      ARecAlias.network_id == AddressAssignment.network_id)),
                                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(or_(ARecFqdn.name == shortname,
                                     PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                     PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session, server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException("No shares found with name {0}."
                                        .format(guest_on_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if member_cluster_share:
            #v2
            v2shares = session.query(Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                raise NotFoundException("No shares found with name {0}."
                                        .format(guest_on_share))

            NasAlias = aliased(VirtualDisk)

            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(
                NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                     PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                             HostGrnMap.eon_id == dbgrn.eon_id,
                             Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())
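
Most of the optional filters above narrow a single Host query in place; the model/vendor filter does it indirectly, by turning the model lookup into a subquery and restricting the outer query with IN. The following is a minimal sketch of that subquery-IN pattern using illustrative Widget/Category tables and modern SQLAlchemy (1.4+); the broker code itself uses the older Query API.

from sqlalchemy import (Column, ForeignKey, Integer, String, create_engine,
                        select)
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Category(Base):
    __tablename__ = "category"
    id = Column(Integer, primary_key=True)
    vendor = Column(String, nullable=False)

class Widget(Base):
    __tablename__ = "widget"
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    category_id = Column(Integer, ForeignKey("category.id"))

def search_widgets(session, vendor=None):
    q = session.query(Widget)
    if vendor:
        # Resolve the vendor filter to a subquery of matching category ids
        # and restrict the outer query with IN, mirroring
        # q.filter(Machine.model_id.in_(subq)) above.
        subq = select(Category.id).where(Category.vendor == vendor)
        q = q.filter(Widget.category_id.in_(subq))
    return [w.name for w in q]

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Category(id=1, vendor="acme"))
    session.add(Widget(name="w1", category_id=1))
    session.add(Widget(name="w2", category_id=None))
    print(", ".join(search_widgets(session, vendor="acme")))   # -> w1
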
Example #41
0
    def render(self, session, logger, interface, machine, mac, model, vendor,
               boot, pg, autopg, comments, master, clear_master, default_route,
               rename_to, **arguments):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.

        If the machine has a host, dsdb may need to be updated.

        The boot flag can *only* be set to true.  This is mostly technical,
        as at this point in the interface it is difficult to tell if the
        flag was unset or set to false.  However, it also vastly simplifies
        the dsdb logic - we never have to worry about a user trying to
        remove the boot flag from a host in dsdb.

        """

        audit_results = []

        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session,
                                           hardware_entity=dbhw_ent,
                                           name=interface,
                                           compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if arguments.get('hostname', None):
            # Hack to set an initial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == 'aurora' and \
               dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip,
                               dbhw_ent.primary_name.network)

        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(
                dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(
                session, logger, dbinterface.hardware_entity)
            audit_results.append(('pg', dbinterface.port_group))

        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has "
                                    "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session,
                                            hardware_entity=dbhw_ent,
                                            name=master,
                                            compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError(
                    "Enslaving {0:l} would create a circle, "
                    "which is not allowed.".format(dbinterface))
            dbinterface.master = dbmaster

        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None

        if comments:
            dbinterface.comments = comments
        if boot:
            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                    i.default_route = True
                else:
                    i.bootable = False
                    i.default_route = False
        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope "
                                   "that's ok.".format(dbhw_ent))

        # Set the mac address last so that the interface can be updated to be
        # bootable *before* the mac address is added. This way the validation
        # that takes place in the interface class doesn't have to worry about
        # the order of updates to bootable=True and the mac address.
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by "
                                    "{1:l}.".format(mac, other))
            dbinterface.mac = mac

        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError(
                    "Model/vendor can not be set for a {0:lc}.".format(
                        dbinterface))

            dbmodel = Model.get_unique(session,
                                       name=model,
                                       vendor=vendor,
                                       machine_type='nic',
                                       compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()
        session.refresh(dbhw_ent)

        plenary_info = PlenaryMachineInfo(dbhw_ent, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            plenary_info.write(locked=True)

            if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbhw_ent, oldinfo)
                dsdb_runner.commit_or_rollback()
        except AquilonError, err:
            plenary_info.restore_stash()
            raise ArgumentError(err)
Example #42
0
    def render(self, session, logger, model, vendor, newmodel, newvendor,
               comments, leave_existing, **arguments):
        for (arg, value) in arguments.items():
            # Cleaning the strings isn't strictly necessary but allows
            # for simple equality checks below and removes the need to
            # call refresh().
            if arg in [
                    'newmodel', 'newvendor', 'machine_type', 'cpuname',
                    'cpuvendor', 'disktype', 'diskcontroller', 'nicmodel',
                    'nicvendor'
            ]:
                if value is not None:
                    arguments[arg] = value.lower().strip()

        dbmodel = Model.get_unique(session,
                                   name=model,
                                   vendor=vendor,
                                   compel=True)

        if leave_existing and (newmodel or newvendor):
            raise ArgumentError("Cannot update model name or vendor without "
                                "updating any existing machines.")

        fix_existing = not leave_existing
        dbmachines = set()

        # The sub-branching here is a little difficult to read...
        # Basically, there are three different checks to handle
        # setting a new vendor, a new name, or both.
        if newvendor:
            dbnewvendor = Vendor.get_unique(session, newvendor, compel=True)
            if newmodel:
                Model.get_unique(session,
                                 name=newmodel,
                                 vendor=dbnewvendor,
                                 preclude=True)
            else:
                Model.get_unique(session,
                                 name=dbmodel.name,
                                 vendor=dbnewvendor,
                                 preclude=True)
            dbmodel.vendor = dbnewvendor
        if newmodel:
            if not newvendor:
                Model.get_unique(session,
                                 name=newmodel,
                                 vendor=dbmodel.vendor,
                                 preclude=True)
            dbmodel.name = newmodel
        if newvendor or newmodel:
            q = session.query(Machine).filter_by(model=dbmodel)
            dbmachines.update(q.all())

        # For now, can't update machine_type.  There are too many spots
        # that special case things like aurora_node or virtual_machine to
        # know that the transition is safe.  If there is enough need we
        # can always add those transitions later.
        if arguments['machine_type'] is not None:
            raise UnimplementedError("Cannot (yet) change a model's "
                                     "machine type.")

        if comments:
            dbmodel.comments = comments
            # The comments also do not affect the templates.

        cpu_args = ['cpuname', 'cpuvendor', 'cpuspeed']
        cpu_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in cpu_args])
        cpu_values = [v for v in cpu_info.values() if v is not None]
        nic_args = ['nicmodel', 'nicvendor']
        nic_info = dict([(self.argument_lookup[arg], arguments[arg])
                         for arg in nic_args])
        nic_values = [v for v in nic_info.values() if v is not None]
        spec_args = [
            'cpunum', 'memory', 'disktype', 'diskcontroller', 'disksize',
            'nics'
        ]
        specs = dict([(self.argument_lookup[arg], arguments[arg])
                      for arg in spec_args])
        spec_values = [v for v in specs.values() if v is not None]

        if not dbmodel.machine_specs:
            if cpu_values or nic_values or spec_values:
                if not cpu_values or len(spec_values) < len(spec_args):
                    raise ArgumentError("Missing required parameters to store "
                                        "machine specs for the model.  Please "
                                        "give all CPU, disk, RAM, and NIC "
                                        "count information.")
                dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
                if nic_values:
                    dbnic = Model.get_unique(session,
                                             compel=True,
                                             machine_type='nic',
                                             **nic_info)
                else:
                    dbnic = Model.default_nic_model(session)
                dbmachine_specs = MachineSpecs(model=dbmodel,
                                               cpu=dbcpu,
                                               nic_model=dbnic,
                                               **specs)
                session.add(dbmachine_specs)

        # Anything below that updates specs should have been verified above.

        if cpu_values:
            dbcpu = Cpu.get_unique(session, compel=True, **cpu_info)
            self.update_machine_specs(model=dbmodel,
                                      dbmachines=dbmachines,
                                      attr='cpu',
                                      value=dbcpu,
                                      fix_existing=fix_existing)

        for arg in ['memory', 'cpunum']:
            if arguments[arg] is not None:
                self.update_machine_specs(model=dbmodel,
                                          dbmachines=dbmachines,
                                          attr=self.argument_lookup[arg],
                                          value=arguments[arg],
                                          fix_existing=fix_existing)

        if arguments['disktype']:
            if fix_existing:
                raise ArgumentError("Please specify --leave_existing to "
                                    "change the model disktype.  This cannot "
                                    "be converted automatically.")
            dbmodel.machine_specs.disk_type = arguments['disktype']

        for arg in ['diskcontroller', 'disksize']:
            if arguments[arg] is not None:
                self.update_disk_specs(model=dbmodel,
                                       dbmachines=dbmachines,
                                       attr=self.argument_lookup[arg],
                                       value=arguments[arg],
                                       fix_existing=fix_existing)

        if nic_values:
            dbnic = Model.get_unique(session, compel=True, **nic_info)
            self.update_interface_specs(model=dbmodel,
                                        dbmachines=dbmachines,
                                        value=dbnic,
                                        fix_existing=fix_existing)

        if arguments['nics'] is not None:
            dbmodel.machine_specs.nic_count = arguments['nics']

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        for dbmachine in dbmachines:
            plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))
        plenaries.write()

        return
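
update_model above folds its many optional parameters into per-topic dictionaries (CPU, NIC, disk/memory specs) via argument_lookup and then inspects which values were actually supplied. Below is a small pure-Python sketch of that grouping step; the option names and the lookup table here are illustrative, not the command's real definitions.

argument_lookup = {'cpuname': 'name', 'cpuvendor': 'vendor',
                   'cpuspeed': 'speed', 'cpunum': 'cpu_quantity',
                   'memory': 'memory'}

def collect(arguments, arg_names):
    # Map command options to model attribute names, keeping None for
    # options the caller did not supply.
    return dict((argument_lookup[arg], arguments.get(arg))
                for arg in arg_names)

arguments = {'cpuname': 'xeon', 'cpuvendor': 'intel', 'cpuspeed': None,
             'cpunum': 2, 'memory': None}

cpu_info = collect(arguments, ['cpuname', 'cpuvendor', 'cpuspeed'])
cpu_values = [v for v in cpu_info.values() if v is not None]

# Only touch the CPU spec if at least one CPU-related option was given,
# just as the command only calls Cpu.get_unique() when cpu_values is truthy.
if cpu_values:
    print("would update CPU spec with %r" % cpu_info)
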
Example #43
0
    def render(self, session, logger, network_device, model, type, ip, vendor,
               serial, rename_to, discovered_macs, clear, discover, comments,
               **arguments):
        dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbnetdev)
        plenary = Plenary.get_plenary(dbnetdev, logger=logger)

        if discover:
            discover_network_device(session, logger, self.config,
                                    dbnetdev, False)

        if vendor and not model:
            model = dbnetdev.model.name
        if model:
            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       model_type=NetworkDeviceType.Switch,
                                       compel=True)
            dbnetdev.model = dbmodel

        dblocation = get_location(session, **arguments)
        if dblocation:
            dbnetdev.location = dblocation

        if serial is not None:
            dbnetdev.serial_no = serial

        # FIXME: What do the error messages for an invalid enum (switch_type)
        # look like?
        if type:
            NetworkDevice.check_type(type)
            dbnetdev.switch_type = type

        if ip:
            update_primary_ip(session, logger, dbnetdev, ip)

        if comments is not None:
            dbnetdev.comments = comments

        if rename_to:
            # Handling alias renaming would not be difficult in AQDB, but the
            # DSDB synchronization would be painful, so don't do that for now.
            # In theory we should check all configured IP addresses for aliases,
            # but this is the most common case
            if dbnetdev.primary_name and dbnetdev.primary_name.fqdn.aliases:
                raise ArgumentError("The network device has aliases and it cannot be "
                                    "renamed. Please remove all aliases first.")
            rename_hardware(session, dbnetdev, rename_to)

        if clear:
            session.query(ObservedMac).filter_by(network_device=dbnetdev).delete()

        if discovered_macs:
            now = datetime.now()
            for (macaddr, port) in discovered_macs:
                update_or_create_observed_mac(session, dbnetdev, port,
                                              macaddr, now)

        session.flush()

        with plenary.get_key():
            plenary.stash()
            try:
                plenary.write(locked=True)

                dsdb_runner = DSDBRunner(logger=logger)
                dsdb_runner.update_host(dbnetdev, oldinfo)
                dsdb_runner.commit_or_rollback("Could not update network device in DSDB")
            except:
                plenary.restore_stash()
                raise

        return
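
The "with plenary.get_key()" block above follows a stash / write / restore-on-error discipline: snapshot the template, write the new version, and roll back if the external DSDB update fails. Below is a minimal file-based sketch of that pattern; FilePlenary is an illustrative stand-in, not the real Plenary class.

import os
import tempfile

class FilePlenary(object):
    # Illustrative stand-in for the broker's Plenary objects.
    def __init__(self, path):
        self.path = path
        self._stash = None

    def stash(self):
        # Remember the current contents so a failed update can be undone.
        if os.path.exists(self.path):
            with open(self.path) as f:
                self._stash = f.read()

    def write(self, content):
        with open(self.path, "w") as f:
            f.write(content)

    def restore_stash(self):
        if self._stash is not None:
            self.write(self._stash)

path = os.path.join(tempfile.mkdtemp(), "plenary.tpl")
plenary = FilePlenary(path)
plenary.write("old contents\n")

plenary.stash()
try:
    plenary.write("new contents\n")
    raise RuntimeError("DSDB update failed")   # stand-in for the external step
except RuntimeError:
    plenary.restore_stash()                    # roll the template back

with open(path) as f:
    print(f.read())                            # -> old contents
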
Example #44
0
    def render(self, session, logger, hostname, machine, archetype,
               buildstatus, personality, osname, osversion, service, instance,
               model, machine_type, vendor, serial, cluster, guest_on_cluster,
               guest_on_share, member_cluster_share, domain, sandbox, branch,
               sandbox_owner, dns_domain, shortname, mac, ip, networkip,
               network_environment, exact_location, server_of_service,
               server_of_instance, grn, eon_id, fullinfo, **arguments):
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, network_environment)

        q = session.query(Host)

        if machine:
            dbmachine = Machine.get_unique(session, machine, compel=True)
            q = q.filter_by(machine=dbmachine)

        # Add the machine definition and the primary name. Use aliases to make
        # sure the end result will be ordered by primary name.
        PriDns = aliased(DnsRecord)
        PriFqdn = aliased(Fqdn)
        PriDomain = aliased(DnsDomain)
        q = q.join(Machine, (PriDns, PriDns.id == Machine.primary_name_id),
                   (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
                   (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
        q = q.order_by(PriFqdn.name, PriDomain.name)
        q = q.options(
            contains_eager('machine'),
            contains_eager('machine.primary_name', alias=PriDns),
            contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
            contains_eager('machine.primary_name.fqdn.dns_domain',
                           alias=PriDomain))
        q = q.reset_joinpoint()

        # Hardware-specific filters
        dblocation = get_location(session, **arguments)
        if dblocation:
            if exact_location:
                q = q.filter(Machine.location == dblocation)
            else:
                childids = dblocation.offspring_ids()
                q = q.filter(Machine.location_id.in_(childids))

        if model or vendor or machine_type:
            subq = Model.get_matching_query(session,
                                            name=model,
                                            vendor=vendor,
                                            machine_type=machine_type,
                                            compel=True)
            q = q.filter(Machine.model_id.in_(subq))

        if serial:
            self.deprecated_option(
                "serial",
                "Please use search machine --serial instead.",
                logger=logger,
                **arguments)
            q = q.filter(Machine.serial_no == serial)

        # DNS IP address related filters
        if mac or ip or networkip or hostname or dns_domain or shortname:
            # Inner joins are cheaper than outer joins, so make some effort to
            # use inner joins when possible
            if mac or ip or networkip:
                q = q.join(Interface)
            else:
                q = q.outerjoin(Interface)
            if ip or networkip:
                q = q.join(AddressAssignment, Network, from_joinpoint=True)
            else:
                q = q.outerjoin(AddressAssignment,
                                Network,
                                from_joinpoint=True)

            if mac:
                self.deprecated_option("mac", "Please use search machine "
                                       "--mac instead.",
                                       logger=logger,
                                       **arguments)
                q = q.filter(Interface.mac == mac)
            if ip:
                q = q.filter(AddressAssignment.ip == ip)
                q = q.filter(Network.network_environment == dbnet_env)
            if networkip:
                dbnetwork = get_network_byip(session, networkip, dbnet_env)
                q = q.filter(AddressAssignment.network == dbnetwork)

            dbdns_domain = None
            if hostname:
                (shortname, dbdns_domain) = parse_fqdn(session, hostname)
            if dns_domain:
                dbdns_domain = DnsDomain.get_unique(session,
                                                    dns_domain,
                                                    compel=True)

            if shortname or dbdns_domain:
                ARecAlias = aliased(ARecord)
                ARecFqdn = aliased(Fqdn)

                q = q.outerjoin(
                    (ARecAlias,
                     and_(ARecAlias.ip == AddressAssignment.ip,
                          ARecAlias.network_id
                          == AddressAssignment.network_id)),
                    (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
                if shortname:
                    q = q.filter(
                        or_(ARecFqdn.name == shortname,
                            PriFqdn.name == shortname))
                if dbdns_domain:
                    q = q.filter(
                        or_(ARecFqdn.dns_domain == dbdns_domain,
                            PriFqdn.dns_domain == dbdns_domain))
            q = q.reset_joinpoint()

        (dbbranch, dbauthor) = get_branch_and_author(session,
                                                     logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     branch=branch)
        if sandbox_owner:
            dbauthor = get_user_principal(session, sandbox_owner)

        if dbbranch:
            q = q.filter_by(branch=dbbranch)
        if dbauthor:
            q = q.filter_by(sandbox_author=dbauthor)

        if archetype:
            # Added to the searches as appropriate below.
            dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if personality and archetype:
            dbpersonality = Personality.get_unique(session,
                                                   archetype=dbarchetype,
                                                   name=personality,
                                                   compel=True)
            q = q.filter_by(personality=dbpersonality)
        elif personality:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(name=personality)
            q = q.reset_joinpoint()
        elif archetype:
            PersAlias = aliased(Personality)
            q = q.join(PersAlias).filter_by(archetype=dbarchetype)
            q = q.reset_joinpoint()

        if buildstatus:
            dbbuildstatus = HostLifecycle.get_unique(session,
                                                     buildstatus,
                                                     compel=True)
            q = q.filter_by(status=dbbuildstatus)

        if osname and osversion and archetype:
            # archetype was already resolved above
            dbos = OperatingSystem.get_unique(session,
                                              name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
            q = q.filter_by(operating_system=dbos)
        elif osname or osversion:
            q = q.join('operating_system')
            if osname:
                q = q.filter_by(name=osname)
            if osversion:
                q = q.filter_by(version=osversion)
            q = q.reset_joinpoint()

        if service:
            dbservice = Service.get_unique(session, service, compel=True)
            if instance:
                dbsi = get_service_instance(session, dbservice, instance)
                q = q.filter(Host.services_used.contains(dbsi))
            else:
                q = q.join('services_used')
                q = q.filter_by(service=dbservice)
                q = q.reset_joinpoint()
        elif instance:
            q = q.join('services_used')
            q = q.filter_by(name=instance)
            q = q.reset_joinpoint()

        if server_of_service:
            dbserver_service = Service.get_unique(session,
                                                  server_of_service,
                                                  compel=True)
            if server_of_instance:
                dbssi = get_service_instance(session, dbserver_service,
                                             server_of_instance)
                q = q.join('_services_provided')
                q = q.filter_by(service_instance=dbssi)
                q = q.reset_joinpoint()
            else:
                q = q.join('_services_provided', 'service_instance')
                q = q.filter_by(service=dbserver_service)
                q = q.reset_joinpoint()
        elif server_of_instance:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(name=server_of_instance)
            q = q.reset_joinpoint()

        if cluster:
            dbcluster = Cluster.get_unique(session, cluster, compel=True)
            if isinstance(dbcluster, MetaCluster):
                q = q.join('_cluster', 'cluster', '_metacluster')
                q = q.filter_by(metacluster=dbcluster)
            else:
                q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_cluster:
            # TODO: this does not handle metaclusters according to Wes
            dbcluster = Cluster.get_unique(session,
                                           guest_on_cluster,
                                           compel=True)
            q = q.join('machine', VirtualMachine, ClusterResource)
            q = q.filter_by(cluster=dbcluster)
            q = q.reset_joinpoint()
        if guest_on_share:
            #v2
            v2shares = session.query(
                Share.id).filter_by(name=guest_on_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(guest_on_share))

            NasAlias = aliased(VirtualDisk)
            q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if member_cluster_share:
            #v2
            v2shares = session.query(
                Share.id).filter_by(name=member_cluster_share).all()
            if not v2shares:
                raise NotFoundException(
                    "No shares found with name {0}.".format(guest_on_share))

            NasAlias = aliased(VirtualDisk)

            q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                       'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
            q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
            q = q.reset_joinpoint()

        if grn or eon_id:
            dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

            persq = session.query(Personality.id)
            persq = persq.outerjoin(PersonalityGrnMap)
            persq = persq.filter(
                or_(Personality.owner_eon_id == dbgrn.eon_id,
                    PersonalityGrnMap.eon_id == dbgrn.eon_id))
            q = q.outerjoin(HostGrnMap)
            q = q.filter(
                or_(Host.owner_eon_id == dbgrn.eon_id,
                    HostGrnMap.eon_id == dbgrn.eon_id,
                    Host.personality_id.in_(persq.subquery())))
            q = q.reset_joinpoint()

        if fullinfo:
            return q.all()
        return SimpleHostList(q.all())
Example #45
0
    def render(self, session, logger, machine, model, vendor, serial, chassis,
               slot, cpuname, cpuvendor, cpuspeed, cpucount, memory, cluster,
               comments, **arguments):
        dblocation = get_location(session,
                                  query_options=[subqueryload('parents'),
                                                 joinedload('parents.dns_maps')],
                                  **arguments)
        if chassis:
            dbchassis = Chassis.get_unique(session, chassis, compel=True)
            if slot is None:
                raise ArgumentError("The --chassis option requires a --slot.")
            if dblocation and dblocation != dbchassis.location:
                raise ArgumentError("{0} conflicts with chassis location "
                                    "{1}.".format(dblocation, dbchassis.location))
            dblocation = dbchassis.location
        elif slot is not None:
            raise ArgumentError("The --slot option requires a --chassis.")

        dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                   compel=True)

        if dbmodel.machine_type not in ['blade', 'rackmount', 'workstation',
                                        'aurora_node', 'virtual_machine']:
            raise ArgumentError("The add_machine command cannot add machines "
                                "of type %(type)s.  Try 'add %(type)s'." %
                    {"type": dbmodel.machine_type})

        if cluster:
            if dbmodel.machine_type != 'virtual_machine':
                raise ArgumentError("Only virtual machines can have a cluster "
                                    "attribute.")
            dbcluster = Cluster.get_unique(session, cluster,
                                           compel=ArgumentError)
            # This test could be either archetype or cluster_type
            if dbcluster.personality.archetype.name != 'esx_cluster':
                raise ArgumentError("Can only add virtual machines to "
                                    "clusters with archetype esx_cluster.")
            # TODO implement the same to vmhosts.
            if dbcluster.status.name == 'decommissioned':
                raise ArgumentError("Cannot add virtual machines to "
                                    "decommissioned clusters.")
            if dblocation and dbcluster.location_constraint != dblocation:
                raise ArgumentError("Cannot override cluster location {0} "
                                    "with location {1}.".format(
                                        dbcluster.location_constraint,
                                        dblocation))
            dblocation = dbcluster.location_constraint
        elif dbmodel.machine_type == 'virtual_machine':
            raise ArgumentError("Virtual machines must be assigned to a "
                                "cluster.")

        Machine.get_unique(session, machine, preclude=True)
        dbmachine = create_machine(session, machine, dblocation, dbmodel,
                                   cpuname, cpuvendor, cpuspeed, cpucount,
                                   memory, serial, comments)

        if chassis:
            # FIXME: Are virtual machines allowed to be in a chassis?
            dbslot = session.query(ChassisSlot).filter_by(chassis=dbchassis,
                                                          slot_number=slot).first()
            if not dbslot:
                dbslot = ChassisSlot(chassis=dbchassis, slot_number=slot)
            dbslot.machine = dbmachine
            session.add(dbslot)

        if cluster:
            if not dbcluster.resholder:
                dbcluster.resholder = ClusterResource(cluster=dbcluster)
            dbvm = VirtualMachine(machine=dbmachine, name=dbmachine.label,
                                  holder=dbcluster.resholder)
            dbcluster.validate()

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbmachine))
        if cluster:
            plenaries.append(Plenary.get_plenary(dbcluster))
            plenaries.append(Plenary.get_plenary(dbvm))

        # The check to make sure a plenary file is not written out for
        # dummy aurora hardware is within the call to write().  This way
        # it is consistent without altering (and forgetting to alter)
        # all the calls to the method.
        plenaries.write()
        return
Example #46
0
    args.extend([importer, str(dbswitch.primary_name)])

    try:
        out = run_command(args)
    except ProcessException, err:
        raise ArgumentError("Failed to run switch discovery: %s" % err)

    data = JSONDecoder().decode(out)

    # Safety net: if the discovery program did not manage to collect any usable
    # information, do not do anything.
    if not data["interfaces"]:
        raise ArgumentError("Discovery returned no interfaces, aborting.")

    if "model" in data and "vendor" in data:
        dbmodel = Model.get_unique(session, name=data["model"],
                                   vendor=data["vendor"])
    else:
        dbmodel = None

    if "serial" in data:
        serial_no = data["serial"]
    else:
        serial_no = None

    comments = dbswitch.comments or ""
    if "tags" in data:
        # Remove previous occurrences of the tags to ensure consistent casing and
        # ordering
        for tag in data["tags"]:
            pat = re.compile(r"\b" + re.escape(tag) + r"\b", re.I)
            comments = pat.sub("", comments)
Example #47
0
    def render(self, session, logger, interface, machine, mac, model, vendor,
               boot, pg, autopg, comments, master, clear_master, default_route,
               rename_to, **arguments):
        """This command expects to locate an interface based only on name
        and machine - all other fields, if specified, are meant as updates.

        If the machine has a host, dsdb may need to be updated.

        The boot flag can *only* be set to true.  This is mostly technical,
        as at this point in the interface it is difficult to tell if the
        flag was unset or set to false.  However, it also vastly simplifies
        the dsdb logic - we never have to worry about a user trying to
        remove the boot flag from a host in dsdb.

        """

        audit_results = []

        dbhw_ent = Machine.get_unique(session, machine, compel=True)
        dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                           name=interface, compel=True)

        oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

        if arguments.get('hostname', None):
            # Hack to set an initial interface for an aurora host...
            dbhost = dbhw_ent.host
            if dbhost.archetype.name == 'aurora' and \
               dbhw_ent.primary_ip and not dbinterface.addresses:
                assign_address(dbinterface, dbhw_ent.primary_ip,
                               dbhw_ent.primary_name.network, logger=logger)

        # We may need extra IP verification (or an autoip option)...
        # This may also throw spurious errors if attempting to set the
        # port_group to a value it already has.
        if pg is not None and dbinterface.port_group != pg.lower().strip():
            dbinterface.port_group = verify_port_group(
                dbinterface.hardware_entity, pg)
        elif autopg:
            dbinterface.port_group = choose_port_group(
                session, logger, dbinterface.hardware_entity)
            audit_results.append(('pg', dbinterface.port_group))

        if master:
            if dbinterface.addresses:
                # FIXME: as a special case, if the only address is the
                # primary IP, then we could just move it to the master
                # interface. However this can be worked around by bonding
                # the interface before calling "add host", so don't bother
                # for now.
                raise ArgumentError("Can not enslave {0:l} because it has "
                                    "addresses.".format(dbinterface))
            dbmaster = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                            name=master, compel=True)
            if dbmaster in dbinterface.all_slaves():
                raise ArgumentError("Enslaving {0:l} would create a circle, "
                                    "which is not allowed.".format(dbinterface))
            dbinterface.master = dbmaster

        if clear_master:
            if not dbinterface.master:
                raise ArgumentError("{0} is not a slave.".format(dbinterface))
            dbinterface.master = None

        if comments:
            dbinterface.comments = comments

        if boot:
            # Figure out if the current bootable interface also has the
            # default route set; the new bootable interface probably
            # wants to have the same settings.  Note that if
            # old_default_route is None there was no bootable interface.
            old_default_route = None
            for i in dbhw_ent.interfaces:
                if i.bootable:
                    old_default_route = i.default_route
                    break

            # Apply the bootable flag to the supplied interface, clearing
            # it on all other interfaces.
            for i in dbhw_ent.interfaces:
                if i == dbinterface:
                    i.bootable = True
                else:
                    i.bootable = False

            # If the user was not explicit about the default route flag
            # (default_route is None); there was an existing bootable
            # interface (old_default_route is not None); the new default
            # route setting differs from the old - then produce a warning.
            if (default_route is None and
                old_default_route is not None and
                dbinterface.default_route != old_default_route):
                if old_default_route:
                    logger.client_info("Warning: New boot interface {0} is no "
                                       "longer provides the default route; it "
                                       "did before!".format(dbinterface))
                else:
                    logger.client_info("Warning: New boot interface {0} now "
                                       "provides the default route; it didn't "
                                       "before!".format(dbinterface))

            # Should we also transfer the primary IP to the new boot interface?
            # That could get tricky if the new interface already has an IP
            # address...

        if default_route is not None:
            dbinterface.default_route = default_route
            if not first_of(dbhw_ent.interfaces, lambda x: x.default_route):
                logger.client_info("Warning: {0:l} has no default route, hope "
                                   "that's ok.".format(dbhw_ent))

        # Set the mac address last so that the interface can be updated to be
        # bootable *before* the mac address is added. This way the validation
        # that takes place in the interface class doesn't have to worry about
        # the order of updates to bootable=True and the mac address.
        if mac:
            q = session.query(Interface).filter_by(mac=mac)
            other = q.first()
            if other and other != dbinterface:
                raise ArgumentError("MAC address {0} is already in use by "
                                    "{1:l}.".format(mac, other))
            dbinterface.mac = mac

        if model or vendor:
            if not dbinterface.model_allowed:
                raise ArgumentError("Model/vendor can not be set for a {0:lc}."
                                    .format(dbinterface))

            dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                                       model_type=NicType.Nic, compel=True)
            dbinterface.model = dbmodel
        if rename_to:
            rename_interface(session, dbinterface, rename_to)

        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbhw_ent))
        # Interface renaming affects the host and service addresses
        if dbhw_ent.host:
            plenaries.append(Plenary.get_plenary(dbhw_ent.host))
        for addr in dbinterface.assignments:
            if addr.service_address:
                plenaries.append(Plenary.get_plenary(addr.service_address))

        with plenaries.get_key():
            try:
                plenaries.write(locked=True)

                if dbhw_ent.host and dbhw_ent.host.archetype.name != "aurora":
                    dsdb_runner = DSDBRunner(logger=logger)
                    dsdb_runner.update_host(dbhw_ent, oldinfo)
                    dsdb_runner.commit_or_rollback()
            except AquilonError, err:
                plenaries.restore_stash()
                raise ArgumentError(err)
            except: