Example #1
def InitNodeLimit(data):

    # query running network interfaces
    devs = sioc.gifconf()
    ips = {ip: dev for dev, ip in devs.items()}
    macs = {}
    for dev in devs:
        macs[sioc.gifhwaddr(dev).lower()] = dev

    for interface in data[KEY_NAME]:
        # Get interface name preferably from MAC address, falling
        # back on IP address.
        hwaddr = interface['mac']
        if hwaddr is not None:
            hwaddr = hwaddr.lower()
        if hwaddr in macs:
            dev = macs[hwaddr]
        elif interface['ip'] in ips:
            dev = ips[interface['ip']]
        else:
            logger.log('net: %s: no such interface with address %s/%s' % (interface['hostname'], interface['ip'], interface['mac']))
            continue

        # Get current node cap
        try:
            old_bwlimit = bwlimit.get_bwcap(dev)
        except Exception:
            old_bwlimit = None

        # Get desired node cap
        if interface['bwlimit'] is None or interface['bwlimit'] < 0:
            new_bwlimit = bwlimit.bwmax
        else:
            new_bwlimit = interface['bwlimit']

        if old_bwlimit != new_bwlimit:
            # Reinitialize bandwidth limits
            bwlimit.init(dev, new_bwlimit)
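
A minimal usage sketch, assuming the surrounding nodemanager module provides KEY_NAME, sioc, bwlimit, and logger; the interface keys below are the ones the function actually reads, while the concrete values are hypothetical:

# Hypothetical input; KEY_NAME's real value is defined elsewhere in the module.
data = {
    KEY_NAME: [
        {
            'hostname': 'node1.example.org',  # used only in log messages
            'mac': '00:11:22:33:44:55',       # preferred lookup key
            'ip': '192.0.2.10',               # fallback lookup key
            'bwlimit': 500000,                # None or < 0 falls back to bwlimit.bwmax
        },
    ]
}
InitNodeLimit(data)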
Example #2
def InitNodeLimit(data):

    # query running network interfaces
    devs = sioc.gifconf()
    ips = dict(zip(devs.values(), devs.keys()))
    macs = {}
    for dev in devs:
        macs[sioc.gifhwaddr(dev).lower()] = dev

    for interface in data[KEY_NAME]:
        # Get interface name preferably from MAC address, falling
        # back on IP address.
        hwaddr = interface['mac']
        if hwaddr is not None:
            hwaddr = hwaddr.lower()
        if hwaddr in macs:
            dev = macs[hwaddr]
        elif interface['ip'] in ips:
            dev = ips[interface['ip']]
        else:
            logger.log('net: %s: no such interface with address %s/%s' % (interface['hostname'], interface['ip'], interface['mac']))
            continue

        # Get current node cap
        try:
            old_bwlimit = bwlimit.get_bwcap(dev)
        except Exception:
            old_bwlimit = None

        # Get desired node cap
        if interface['bwlimit'] is None or interface['bwlimit'] < 0:
            new_bwlimit = bwlimit.bwmax
        else:
            new_bwlimit = interface['bwlimit']

        if old_bwlimit != new_bwlimit:
            # Reinitialize bandwidth limits
            bwlimit.init(dev, new_bwlimit)
Example #3
def sync(nmdbcopy):
    """
    Syncs tc, db, and bwmon.pickle.
    Then, starts new slices, kills old ones, and updates byte accounts for each running slice.
    Sends emails and caps those that went over their limit.
    """
    # Defaults
    global DB_FILE, \
        period, \
        default_MaxRate, \
        default_Maxi2Rate, \
        default_MaxKByte,\
        default_Maxi2KByte,\
        default_Share, \
        dev_default

    # All slices
    names = []
    # In case the limits have changed.
    default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
    default_Maxi2Rate = int(bwlimit.bwmax / 1000)

    # In case the default isn't set yet.
    if default_MaxRate == -1:
        default_MaxRate = 1000000

    # xxx $Id$
    # with svn we used to have a trick to detect upgrades of this file
    # this has gone with the move to git, without any noticeable effect on operations though
    try:
        f = open(DB_FILE, "rb")
        logger.verbose("bwmon: Loading %s" % DB_FILE)
        (version, slices, deaddb) = pickle.load(f)
        f.close()
        # Check version of data file
        if version != "$Id$":
            logger.log("bwmon: Not using old version '%s' data file %s" %
                       (version, DB_FILE))
            raise Exception
    except Exception:
        version = "$Id$"
        slices = {}
        deaddb = {}

    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")

    # Since root is required for sanity, it's not in the API/plc database, so pass {}
    # to use defaults.
    if root_xid not in slices:
        slices[root_xid] = Slice(root_xid, "root", {})
        slices[root_xid].reset({}, {})

    # Used by bwlimit.  pass {} since there is no rspec (like above).
    if default_xid not in slices:
        slices[default_xid] = Slice(default_xid, "default", {})
        slices[default_xid].reset({}, {})

    live = {}
    # Get running slivers that should be on this node (from plc). {xid: name}
    # db keys on name, bwmon keys on xid.  db doesn't have xid either.
    for plcSliver in nmdbcopy:
        live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]

    logger.verbose("bwmon: Found %s instantiated slices" %
                   live.keys().__len__())
    logger.verbose("bwmon: Found %s slices in dat file" %
                   slices.values().__len__())

    # Get actual running values from tc.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: Found %s running HTBs" %
                   kernelhtbs.keys().__len__())

    # The dat file has HTBs for slices, but the HTBs aren't running
    nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
    logger.verbose("bwmon: Found %s slices in dat but not running." %
                   nohtbslices.__len__())
    # Reset tc counts.
    for nohtbslice in nohtbslices:
        if nohtbslice in live:
            slices[nohtbslice].reset({}, live[nohtbslice]['_rspec'])
        else:
            logger.log("bwmon: Removing abondoned slice %s from dat." %
                       nohtbslice)
            del slices[nohtbslice]

    # The dat file doesn't have an HTB for the slice but the kernel does
    slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
    logger.verbose("bwmon: Found %s slices with HTBs but not in dat" %
                   slicesnodat.__len__())
    for slicenodat in slicesnodat:
        # But slice is running
        if slicenodat in live:
            # init the slice, which means start accounting over, since the
            # kernel htb was already there.
            slices[slicenodat] = Slice(slicenodat, live[slicenodat]['name'],
                                       live[slicenodat]['_rspec'])

    # Get new slices.
    # Slices in GetSlivers but not running HTBs
    newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
    logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())

    # Setup new slices
    for newslice in newslicesxids:
        # Delegated slices don't have xids (which are uids) since they haven't been
        # instantiated yet.
        if newslice is not None and '_rspec' in live[newslice]:
            # Check to see if we recently deleted this slice.
            if live[newslice]['name'] not in deaddb:
                logger.log("bwmon: new slice %s" % live[newslice]['name'])
                # _rspec is the computed rspec:  NM retrieved data from PLC, computed loans
                # and made a dict of computed values.
                slices[newslice] = Slice(newslice, live[newslice]['name'],
                                         live[newslice]['_rspec'])
                slices[newslice].reset({}, live[newslice]['_rspec'])
            # Double check time for dead slice in deaddb is within 24hr recording period.
            elif (time.time() <=
                  (deaddb[live[newslice]['name']]['slice'].time + period)):
                deadslice = deaddb[live[newslice]['name']]
                logger.log("bwmon: Reinstantiating deleted slice %s" %
                           live[newslice]['name'])
                slices[newslice] = deadslice['slice']
                slices[newslice].xid = newslice
                # Start the HTB
                newvals = {
                    "maxrate": deadslice['slice'].MaxRate * 1000,
                    "minrate": deadslice['slice'].MinRate * 1000,
                    "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
                    "usedbytes": deadslice['htb']['usedbytes'] * 1000,
                    "usedi2bytes": deadslice['htb']['usedi2bytes'],
                    "share": deadslice['htb']['share']
                }
                slices[newslice].reset(newvals, live[newslice]['_rspec'])
                # Bring up to date
                slices[newslice].update(newvals, live[newslice]['_rspec'])
                # Since the slice has been reinitialized, remove it from the dead database.
                del deaddb[deadslice['slice'].name]
                del newvals
        else:
            logger.log("bwmon: Slice %s doesn't have xid.  Skipping." %
                       live[newslice]['name'])

    # Move dead slices that exist in the pickle file, but
    # aren't instantiated by PLC into the dead dict until
    # recording period is over.  This is to avoid the case where a slice is dynamically created
    # and destroyed then recreated to get around byte limits.
    deadxids = set(slices.keys()) - set(live.keys())
    logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
    for deadxid in deadxids:
        if deadxid == root_xid or deadxid == default_xid:
            continue
        logger.log("bwmon: removing dead slice %s " % deadxid)
        if deadxid in slices and deadxid in kernelhtbs:
            # add slice (by name) to deaddb
            logger.log("bwmon: Saving bandwidth totals for %s." %
                       slices[deadxid].name)
            deaddb[slices[deadxid].name] = {
                'slice': slices[deadxid],
                'htb': kernelhtbs[deadxid]
            }
            del slices[deadxid]
        if deadxid in kernelhtbs:
            logger.verbose("bwmon: Removing HTB for %s." % deadxid)
            bwlimit.off(deadxid, dev=dev_default)

    # Clean up deaddb
    for deadslice in deaddb.keys():
        if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
            logger.log("bwmon: Removing dead slice %s from dat." \
                        % deaddb[deadslice]['slice'].name)
            del deaddb[deadslice]

    # Get actual running values from tc since we've added and removed buckets.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())

    # Update all byte limits on all slices
    for (xid, slice) in slices.iteritems():
        # Monitor only the specified slices
        if xid == root_xid or xid == default_xid: continue
        if names and slice.name not in names:
            continue

        if (time.time() >= (slice.time + period)) or \
            (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
            (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
            # Reset to defaults every 24 hours or if it appears
            # that the byte counters have overflowed (or, more
            # likely, the node was restarted or the HTB buckets
            # were re-initialized).
            slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
        elif ENABLE:
            logger.verbose("bwmon: Updating slice %s" % slice.name)
            # Update byte counts
            slice.update(kernelhtbs[xid], live[xid]['_rspec'])

    logger.verbose("bwmon: Saving %s slices in %s" %
                   (slices.keys().__len__(), DB_FILE))
    f = open(DB_FILE, "w")
    pickle.dump((version, slices, deaddb), f)
    f.close()
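
For reference, a minimal offline sketch of how the saved state could be inspected, assuming only the DB_FILE path from the defaults below and the (version, slices, deaddb) tuple that sync() pickles; note that unpickling the Slice objects requires the bwmon module to be importable:

import pickle

# Hypothetical inspection snippet; the tuple layout mirrors the
# pickle.dump((version, slices, deaddb), f) call at the end of sync().
with open("/var/lib/nodemanager/bwmon.pickle", "rb") as f:
    version, slices, deaddb = pickle.load(f)
print("version:", version)
print("tracked slices:", len(slices), "dead slices:", len(deaddb))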
Example #4
# Defaults
# Set DEBUG to True if you don't want to send emails
DEBUG = False
# Set ENABLE to False to setup buckets, but not limit.
ENABLE = True

DB_FILE = "/var/lib/nodemanager/bwmon.pickle"

# Constants
seconds_per_day = 24 * 60 * 60
bits_per_byte = 8

dev_default = tools.get_default_if()
# Burst to line rate (or node cap).  Set by NM, in kbit/s.
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# 5.4 GByte per day (5.4 * 1024 * 1024 KByte, i.e. ~5662310 KByte)
# 5.4 GByte per day max allowed transferred per recording period.
# 5.4 GByte/day is approx 512 kbit/s for 24 hrs (approx because the original math
# was wrong, but it's better to keep a higher byte total and keep people happy
# than to correct the problem and piss people off).
# default_MaxKByte = 5662310

# -- 6/1/09
# llp wants to double these, so we use the following
# 1mbit * 24hrs * 60mins * 60secs = bits/day
# 1000000 * 24 * 60 * 60 / (1024 * 8)
default_MaxKByte = 10546875

# 16.4 GByte per day max allowed transferred per recording period to I2
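
The arithmetic in the comment above checks out; a quick sketch (not part of the original module) verifying the conversion:

# 1 Mbit/s sustained for 24 hours, converted to KByte (1 KByte = 1024 bytes):
bits_per_day = 1000000 * 24 * 60 * 60
assert bits_per_day // (8 * 1024) == 10546875  # == default_MaxKByte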
Example #5
def sync(nmdbcopy):
    """
    Syncs tc, db, and bwmon.pickle.
    Then, starts new slices, kills old ones, and updates byte accounts for each running slice.
    Sends emails and caps those that went over their limit.
    """
    # Defaults
    global DB_FILE, \
        period, \
        default_MaxRate, \
        default_Maxi2Rate, \
        default_MaxKByte, \
        default_Maxi2KByte, \
        default_Share, \
        dev_default

    # All slices
    names = []
    # In case the limits have changed.
    default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
    default_Maxi2Rate = int(bwlimit.bwmax / 1000)

    # In case the default isn't set yet.
    if default_MaxRate == -1:
        default_MaxRate = 1000000

    # xxx $Id$
    # with svn we used to have a trick to detect upgrades of this file
    # this has gone with the move to git, without any noticeable effect on operations though
    try:
        f = open(DB_FILE, "rb")
        logger.verbose("bwmon: Loading %s" % DB_FILE)
        (version, slices, deaddb) = pickle.load(f)
        f.close()
        # Check version of data file
        if version != "$Id$":
            logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE))
            raise Exception
    except Exception:
        version = "$Id$"
        slices = {}
        deaddb = {}

    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")

    # Since root is required for sanity, it's not in the API/plc database, so pass {}
    # to use defaults.
    if root_xid not in slices:
        slices[root_xid] = Slice(root_xid, "root", {})
        slices[root_xid].reset({}, {})

    # Used by bwlimit.  pass {} since there is no rspec (like above).
    if default_xid not in slices:
        slices[default_xid] = Slice(default_xid, "default", {})
        slices[default_xid].reset({}, {})

    live = {}
    # Get running slivers that should be on this node (from plc). {xid: name}
    # db keys on name, bwmon keys on xid.  db doesn't have xid either.
    for plcSliver in nmdbcopy:
        live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]

    logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__())
    logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__())

    # Get actual running values from tc.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__())

    # The dat file has HTBs for slices, but the HTBs aren't running
    nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
    logger.verbose("bwmon: Found %s slices in dat but not running." % len(nohtbslices))
    # Reset tc counts.
    for nohtbslice in nohtbslices:
        if nohtbslice in live:
            slices[nohtbslice].reset({}, live[nohtbslice]['_rspec'])
        else:
            logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
            del slices[nohtbslice]

    # The dat file doesn't have an HTB for the slice but the kernel does
    slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
    logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
    for slicenodat in slicesnodat:
        # But slice is running
        if slicenodat in live:
            # init the slice, which means start accounting over, since the
            # kernel htb was already there.
            slices[slicenodat] = Slice(slicenodat,
                                       live[slicenodat]['name'],
                                       live[slicenodat]['_rspec'])

    # Get new slices.
    # Slices in GetSlivers but not running HTBs
    newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
    logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())

    # Setup new slices
    for newslice in newslicesxids:
        # Delegated slices don't have xids (which are uids) since they haven't been
        # instantiated yet.
        if newslice is not None and '_rspec' in live[newslice]:
            # Check to see if we recently deleted this slice.
            if live[newslice]['name'] not in deaddb:
                logger.log("bwmon: new slice %s" % live[newslice]['name'])
                # _rspec is the computed rspec:  NM retrieved data from PLC, computed loans
                # and made a dict of computed values.
                slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
                slices[newslice].reset({}, live[newslice]['_rspec'])
            # Double check time for dead slice in deaddb is within 24hr recording period.
            elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
                deadslice = deaddb[live[newslice]['name']]
                logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
                slices[newslice] = deadslice['slice']
                slices[newslice].xid = newslice
                # Start the HTB
                newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
                            "minrate": deadslice['slice'].MinRate * 1000,
                            "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
                            "usedbytes": deadslice['htb']['usedbytes'] * 1000,
                            "usedi2bytes": deadslice['htb']['usedi2bytes'],
                            "share":deadslice['htb']['share']}
                slices[newslice].reset(newvals, live[newslice]['_rspec'])
                # Bring up to date
                slices[newslice].update(newvals, live[newslice]['_rspec'])
                # Since the slice has been reinitialized, remove it from the dead database.
                del deaddb[deadslice['slice'].name]
                del newvals
        else:
            logger.log("bwmon: Slice %s doesn't have xid.  Skipping." % live[newslice]['name'])

    # Move dead slices that exist in the pickle file, but
    # aren't instantiated by PLC into the dead dict until
    # recording period is over.  This is to avoid the case where a slice is dynamically created
    # and destroyed then recreated to get around byte limits.
    deadxids = set(slices.keys()) - set(live.keys())
    logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
    for deadxid in deadxids:
        if deadxid == root_xid or deadxid == default_xid:
            continue
        logger.log("bwmon: removing dead slice %s " % deadxid)
        if deadxid in slices and deadxid in kernelhtbs:
            # add slice (by name) to deaddb
            logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
            deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
            del slices[deadxid]
        if deadxid in kernelhtbs:
            logger.verbose("bwmon: Removing HTB for %s." % deadxid)
            bwlimit.off(deadxid, dev=dev_default)

    # Clean up deaddb
    for deadslice in list(deaddb.keys()):
        if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
            logger.log("bwmon: Removing dead slice %s from dat." \
                        % deaddb[deadslice]['slice'].name)
            del deaddb[deadslice]

    # Get actual running values from tc since we've added and removed buckets.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__())

    # Update all byte limits on all slices
    for (xid, slice) in slices.items():
        # Monitor only the specified slices
        if xid == root_xid or xid == default_xid: continue
        if names and slice.name not in names:
            continue

        if (time.time() >= (slice.time + period)) or \
            (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
            (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
            # Reset to defaults every 24 hours or if it appears
            # that the byte counters have overflowed (or, more
            # likely, the node was restarted or the HTB buckets
            # were re-initialized).
            slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
        elif ENABLE:
            logger.verbose("bwmon: Updating slice %s" % slice.name)
            # Update byte counts
            slice.update(kernelhtbs[xid], live[xid]['_rspec'])

    logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE))
    f = open(DB_FILE, "w")
    pickle.dump((version, slices, deaddb), f)
    f.close()