Esempio n. 1
0
def initiate():
    """Collect the two bootstrap metric samples.

    Appends two snapshots to the module-level ``metrics`` list, separated
    by ``sleeptime`` seconds, so later calls to ``new_metrics`` have a
    previous sample to diff against.

    Relies on module globals: ``metrics``, ``hastats``, ``sleeptime``,
    ``epoch_start`` and the ``openstack`` factory.
    """
    # Boot first machines if not active:
    stack = openstack()
    backends = stack.backends()

    # First sample: no previous data, so the rates start at zero.
    data = {}
    data['acu'] = hastats.get_backend_cum_requests()['stot']
    data['diff'] = 0
    data['diffpt'] = 0
    data['date'] = datetime.datetime.now()
    data['active'] = len(stack.active_backends())
    data['haactive'] = len(hastats.get_backends_up())
    data['needed'] = None
    # total_seconds() instead of .seconds: .seconds wraps around at 24h.
    data['epoch'] = int((datetime.datetime.now() - epoch_start).total_seconds())
    metrics.append(data)
    print(metrics)
    time.sleep(sleeptime)

    # Second sample: request rate is computed against the first one.
    last = data
    data = {}
    data['acu'] = hastats.get_backend_cum_requests()['stot']
    data['diff'] = int(
        (float(data['acu']) - float(last['acu'])) / float(sleeptime))
    data['diffpt'] = data['diff'] * sleeptime
    data['date'] = datetime.datetime.now()
    data['needed'] = needed_servers(acu=data['acu'], diff=data['diff'])
    data['active'] = len(stack.active_backends())
    data['haactive'] = len(hastats.get_backends_up())
    data['epoch'] = int((datetime.datetime.now() - epoch_start).total_seconds())
    metrics.append(data)
    time.sleep(sleeptime)
Esempio n. 2
0
def scale_down(Needed=1):
    """Stop/delete backend nodes until only ``Needed`` active ones remain.

    Stopped (passive) nodes beyond one spare are deleted outright; active
    nodes above the target are stopped, but never below the module-level
    ``min_backends`` floor.

    Returns:
        True when a scale-down was performed, False when nothing could be
        removed (already at/below the minimum, or no excess nodes).
    """
    stack = openstack()
    active = stack.active_backends()
    passive = stack.passive_backends()

    print("active servs: %s, min backends: %s, needed: %s" %
          (str(len(active)), str(min_backends), str(Needed)))
    toremove = len(active) - Needed

    if len(passive) > 1:
        # Delete the stopped nodes, newest first, and leave one spare.
        for node in passive[1:][::-1]:
            handle_scaledown(node, delete=True)

    if toremove > 0:
        print("Want to remove %s nodes" % str(toremove))
        if len(active) <= min_backends:
            # Already at (or below) the floor: refuse to stop anything.
            return False
        elif (len(active) - toremove) > min_backends:
            # Removing the full excess still leaves us above the floor.
            for i in range(1, toremove + 1):
                if 'ACTIVE' in active[-i].status:
                    handle_scaledown(active[-i], stop=True)
        else:
            # Removing the full excess would dip below the floor; only
            # remove as many as keeps min_backends running.
            recalculate = len(active) - (Needed + min_backends)
            for i in range(1, recalculate + 1):
                if 'ACTIVE' in active[-i].status:
                    handle_scaledown(active[-i], stop=True)
    else:
        print("No nodes to stop/delete")
        return False
    return True
Esempio n. 3
0
def initiate():
    """Collect the two bootstrap metric samples.

    Appends two snapshots to the module-level ``metrics`` list, separated
    by ``sleeptime`` seconds, so later calls to ``new_metrics`` have a
    previous sample to diff against.

    Relies on module globals: ``metrics``, ``hastats``, ``sleeptime``,
    ``epoch_start`` and the ``openstack`` factory.
    """
    # Boot first machines if not active:
    stack = openstack()
    backends = stack.backends()

    # First sample: no previous data, so the rates start at zero.
    data = {}
    data['acu'] = hastats.get_backend_cum_requests()['stot']
    data['diff'] = 0
    data['diffpt'] = 0
    data['date'] = datetime.datetime.now()
    data['active'] = len(stack.active_backends())
    data['haactive'] = len(hastats.get_backends_up())
    data['needed'] = None
    # total_seconds() instead of .seconds: .seconds wraps around at 24h.
    data['epoch'] = int((datetime.datetime.now() - epoch_start).total_seconds())
    metrics.append(data)
    print(metrics)
    time.sleep(sleeptime)

    # Second sample: request rate is computed against the first one.
    last = data
    data = {}
    data['acu'] = hastats.get_backend_cum_requests()['stot']
    data['diff'] = int(
        (float(data['acu']) - float(last['acu'])) / float(sleeptime))
    data['diffpt'] = data['diff'] * sleeptime
    data['date'] = datetime.datetime.now()
    data['needed'] = needed_servers(acu=data['acu'], diff=data['diff'])
    data['active'] = len(stack.active_backends())
    data['haactive'] = len(hastats.get_backends_up())
    data['epoch'] = int((datetime.datetime.now() - epoch_start).total_seconds())
    metrics.append(data)
    time.sleep(sleeptime)
Esempio n. 4
0
def update_conf():
    """Regenerate and reload the HAproxy config when the backend set changed.

    Compares the number of backends OpenStack knows about against the number
    HAproxy is currently reporting stats for; on mismatch the config is
    recompiled and HAproxy restarted (``ha_reloaded`` is set only when the
    restart succeeds).

    Returns:
        True when a reload was attempted, False when config is in sync.
    """
    global ha_reloaded
    global ha_last_reload
    stats = hastats.get_stat_backends()
    stack = openstack()
    backends = stack.backends()
    if len(backends) != len(stats):
        ha = haproxy.HAproxy()
        ha.compile(backends)
        ha_last_reload = datetime.datetime.now()
        if ha.restart():
            ha_reloaded = True
        return True
    return False
Esempio n. 5
0
def update_conf():
    """Regenerate and reload the HAproxy config when the backend set changed.

    Compares the number of backends OpenStack knows about against the number
    HAproxy is currently reporting stats for; on mismatch the config is
    recompiled and HAproxy restarted (``ha_reloaded`` is set only when the
    restart succeeds).

    Returns:
        True when a reload was attempted, False when config is in sync.
    """
    global ha_reloaded
    global ha_last_reload
    stats = hastats.get_stat_backends()
    stack = openstack()
    backends = stack.backends()
    if len(backends) != len(stats):
        ha = haproxy.HAproxy()
        ha.compile(backends)
        ha_last_reload = datetime.datetime.now()
        if ha.restart():
            ha_reloaded = True
        return True
    return False
Esempio n. 6
0
def main():
    """Main autoscaling loop.

    Bootstraps the metrics history if empty, then every ``sleeptime``
    seconds samples HAproxy, decides whether to scale the OpenStack
    backends up or down, and reloads the HAproxy config when the backend
    set changed.  Ctrl-C flushes the collected metrics via write_data().
    """
    try:
        if not metrics:
            print("Gathering initial data...")
            # Gathering first data
            initiate()

        while True:
            current = new_metrics(hastats.get_backend_cum_requests()['stot'])
            print(metrics[-1])
            print("Needed servers: %s" %
                  str(needed_servers(diff=current['diff'])))

            # What to do? Scale up/down or are we happy?
            stack = openstack()
            active_backends = stack.active_backends()

            needed = needed_servers(diff=current['diff'])
            if needed > len(active_backends):
                print("Scaling up")
                scale_up(needed - len(active_backends))
            elif needed < len(active_backends):
                print("Scaling down")
                if not scale_down(Needed=needed):
                    print("Lowest number")
            else:
                # Happy with the current size; wait for the next sample.
                print("Sleeping one more round")

            if update_conf():
                print("HAproxy config reloaded")
                print(ha_last_reload)

            # Dump per-backend status for operator visibility.
            for line in hastats.get_stat_backends():
                print(line['svname'] + ', ' + line['status'])

            time.sleep(sleeptime)

    except KeyboardInterrupt:
        # Persist the collected metrics before exiting.
        write_data()
Esempio n. 7
0
def handle_scaledown(instance, delete=False, stop=False):
    """Drain an instance in HAproxy, then stop or delete it.

    Args:
        instance: OpenStack server object exposing .name, .stop(), .delete().
        delete: delete the instance after draining.
        stop: stop (power off) the instance after draining; checked first,
            so stop wins if both flags are set.
    """
    print("Starting to handle scaledown of %s" % instance.name)
    ha = haproxy.HAproxy()
    # Set the instance in draining mode: no new connections, active
    # requests are allowed to finish.
    ha.drain(instance)
    # Operating with short draintime (only static page)
    time.sleep(1)
    try:
        if stop:
            print("Stopping node %s" % instance.name)
            instance.stop()
        elif delete:
            print("Deleting node %s" % instance.name)
            instance.delete()
    except Exception:
        # Was a bare except: it also swallowed KeyboardInterrupt, which
        # main() relies on for a clean shutdown.  Best-effort: log and
        # keep the autoscaler running.
        print("Cant stop/delete instance %s" % instance.name)
        traceback.print_exc(file=sys.stdout)
Esempio n. 8
0
def handle_scaledown(instance, delete=False, stop=False):
    """Drain an instance in HAproxy, then stop or delete it.

    Args:
        instance: OpenStack server object exposing .name, .stop(), .delete().
        delete: delete the instance after draining.
        stop: stop (power off) the instance after draining; checked first,
            so stop wins if both flags are set.
    """
    print("Starting to handle scaledown of %s" % instance.name)
    ha = haproxy.HAproxy()
    # Set the instance in draining mode: no new connections, active
    # requests are allowed to finish.
    ha.drain(instance)
    # Operating with short draintime (only static page)
    time.sleep(1)
    try:
        if stop:
            print("Stopping node %s" % instance.name)
            instance.stop()
        elif delete:
            print("Deleting node %s" % instance.name)
            instance.delete()
    except Exception:
        # Was a bare except: it also swallowed KeyboardInterrupt, which
        # main() relies on for a clean shutdown.  Best-effort: log and
        # keep the autoscaler running.
        print("Cant stop/delete instance %s" % instance.name)
        traceback.print_exc(file=sys.stdout)
Esempio n. 9
0
def main():
    """Main autoscaling loop.

    Bootstraps the metrics history if empty, then every ``sleeptime``
    seconds samples HAproxy, decides whether to scale the OpenStack
    backends up or down, and reloads the HAproxy config when the backend
    set changed.  Ctrl-C flushes the collected metrics via write_data().
    """
    try:
        if not metrics:
            print("Gathering initial data...")
            # Gathering first data
            initiate()

        while True:
            current = new_metrics(hastats.get_backend_cum_requests()['stot'])
            print(metrics[-1])
            print("Needed servers: %s" %
                  str(needed_servers(diff=current['diff'])))

            # What to do? Scale up/down or are we happy?
            stack = openstack()
            active_backends = stack.active_backends()

            needed = needed_servers(diff=current['diff'])
            if needed > len(active_backends):
                print("Scaling up")
                scale_up(needed - len(active_backends))
            elif needed < len(active_backends):
                print("Scaling down")
                if not scale_down(Needed=needed):
                    print("Lowest number")
            else:
                # Happy with the current size; wait for the next sample.
                print("Sleeping one more round")

            if update_conf():
                print("HAproxy config reloaded")
                print(ha_last_reload)

            # Dump per-backend status for operator visibility.
            for line in hastats.get_stat_backends():
                print(line['svname'] + ', ' + line['status'])

            time.sleep(sleeptime)

    except KeyboardInterrupt:
        # Persist the collected metrics before exiting.
        write_data()
Esempio n. 10
0
def new_metrics(current_cumulated, hareset=False):
    """Build and record a new metrics sample from HAproxy's request counter.

    Computes the request rate (``diff``) against either the previous sample
    or, right after an HAproxy reload (which resets the counters), against
    zero over the time since ``ha_last_reload``.  The sample is appended to
    the module-level ``metrics`` list.

    Args:
        current_cumulated: HAproxy 'stot' cumulated request counter value.
        hareset: unused; kept for interface compatibility.

    Returns:
        dict with keys acu, date, diff, diffpt, needed, active, haactive,
        epoch.
    """
    global ha_reloaded
    global ha_last_reload
    current = {}
    current['acu'] = current_cumulated
    current['date'] = datetime.datetime.now()

    try:
        print("Current new cumulated connections: %s" % str(current_cumulated))
        print("Calculation: float(%s) - float(%s) / float(%s-%s.seconds (%s))" %
              (str(current_cumulated), metrics[-1]['acu'],
               str(current['date']), str(metrics[-1]['date']),
               str((current['date'] - metrics[-1]['date']).seconds)))

        if ha_reloaded:
            # Counters were reset by the reload: diff against zero since the
            # reload.  The division is now INSIDE the try so a zero-second
            # window raises ZeroDivisionError into the handler below instead
            # of crashing the sampler (original computed it unprotected).
            interval = (current['date'] - ha_last_reload).seconds
            current['diff'] = int(ceil(float(current_cumulated) / float(interval)))
            current['diffpt'] = current['diff'] * interval
            ha_reloaded = False
        else:
            interval = (current['date'] - metrics[-1]['date']).seconds
            current['diff'] = int(ceil(
                (float(current_cumulated) - float(metrics[-1]['acu'])) / float(interval)))
            current['diffpt'] = current['diff'] * interval
    except ZeroDivisionError:
        # Sampled twice within the same second: report a zero rate.
        current['diff'] = 0
        current['diffpt'] = 0  # was missing: consumers hit KeyError

    stack = openstack()
    current['needed'] = needed_servers(acu=current['acu'],
                                       diff=current['diff'])
    current['active'] = len(stack.active_backends())
    current['haactive'] = len(hastats.get_backends_up())
    # total_seconds() instead of .seconds: .seconds wraps around at 24h.
    current['epoch'] = int((datetime.datetime.now() - epoch_start).total_seconds())

    metrics.append(current)
    return current
Esempio n. 11
0
def new_metrics(current_cumulated, hareset=False):
    """Build and record a new metrics sample from HAproxy's request counter.

    Computes the request rate (``diff``) against either the previous sample
    or, right after an HAproxy reload (which resets the counters), against
    zero over the time since ``ha_last_reload``.  The sample is appended to
    the module-level ``metrics`` list.

    Args:
        current_cumulated: HAproxy 'stot' cumulated request counter value.
        hareset: unused; kept for interface compatibility.

    Returns:
        dict with keys acu, date, diff, diffpt, needed, active, haactive,
        epoch.
    """
    global ha_reloaded
    global ha_last_reload
    current = {}
    current['acu'] = current_cumulated
    current['date'] = datetime.datetime.now()

    try:
        print("Current new cumulated connections: %s" % str(current_cumulated))
        print("Calculation: float(%s) - float(%s) / float(%s-%s.seconds (%s))" %
              (str(current_cumulated), metrics[-1]['acu'],
               str(current['date']), str(metrics[-1]['date']),
               str((current['date'] - metrics[-1]['date']).seconds)))

        if ha_reloaded:
            # Counters were reset by the reload: diff against zero since the
            # reload.  The division is now INSIDE the try so a zero-second
            # window raises ZeroDivisionError into the handler below instead
            # of crashing the sampler (original computed it unprotected).
            interval = (current['date'] - ha_last_reload).seconds
            current['diff'] = int(ceil(float(current_cumulated) / float(interval)))
            current['diffpt'] = current['diff'] * interval
            ha_reloaded = False
        else:
            interval = (current['date'] - metrics[-1]['date']).seconds
            current['diff'] = int(ceil(
                (float(current_cumulated) - float(metrics[-1]['acu'])) / float(interval)))
            current['diffpt'] = current['diff'] * interval
    except ZeroDivisionError:
        # Sampled twice within the same second: report a zero rate.
        current['diff'] = 0
        current['diffpt'] = 0  # was missing: consumers hit KeyError

    stack = openstack()
    current['needed'] = needed_servers(acu=current['acu'],
                                       diff=current['diff'])
    current['active'] = len(stack.active_backends())
    current['haactive'] = len(hastats.get_backends_up())
    # total_seconds() instead of .seconds: .seconds wraps around at 24h.
    current['epoch'] = int((datetime.datetime.now() - epoch_start).total_seconds())

    metrics.append(current)
    return current
Esempio n. 12
0
def scale_down(Needed=1):
    """Stop/delete backend nodes until only ``Needed`` active ones remain.

    Stopped (passive) nodes beyond one spare are deleted outright; active
    nodes above the target are stopped, but never below the module-level
    ``min_backends`` floor.

    Returns:
        True when a scale-down was performed, False when nothing could be
        removed (already at/below the minimum, or no excess nodes).
    """
    stack = openstack()
    active = stack.active_backends()
    passive = stack.passive_backends()

    print("active servs: %s, min backends: %s, needed: %s" %
          (str(len(active)), str(min_backends), str(Needed)))
    toremove = len(active) - Needed

    if len(passive) > 1:
        # Delete the stopped nodes, newest first, and leave one spare.
        for node in passive[1:][::-1]:
            handle_scaledown(node, delete=True)

    if toremove > 0:
        print("Want to remove %s nodes" % str(toremove))
        if len(active) <= min_backends:
            # Already at (or below) the floor: refuse to stop anything.
            return False
        elif (len(active) - toremove) > min_backends:
            # Removing the full excess still leaves us above the floor.
            for i in range(1, toremove + 1):
                if 'ACTIVE' in active[-i].status:
                    handle_scaledown(active[-i], stop=True)
        else:
            # Removing the full excess would dip below the floor; only
            # remove as many as keeps min_backends running.
            recalculate = len(active) - (Needed + min_backends)
            for i in range(1, recalculate + 1):
                if 'ACTIVE' in active[-i].status:
                    handle_scaledown(active[-i], stop=True)
    else:
        print("No nodes to stop/delete")
        return False
    return True
Esempio n. 13
0
def scale_up(Number=1):
    """Bring up to ``Number`` additional backend nodes online.

    Prefers restarting stopped (sleeping) machines; only when none exist
    are brand-new instances created, in a background thread since creation
    is slow.

    Returns:
        False when the backend quota is already exhausted, True otherwise.
    """
    stack = openstack()
    sleeping = stack.sleeping_machine()

    if len(stack.backends()) == quota_limit:
        # Quota reached: cannot add any more backends.
        return False

    scaled = 0
    if sleeping:
        print(sleeping)
        if len(sleeping) > 1:
            for node in sleeping:
                if scaled < Number:
                    node.start()
                    scaled += 1
        else:
            if 'ACTIVE' not in sleeping[0].status:
                sleeping[0].start()
                scaled += 1
    else:
        if scaled < Number:
            # BUG FIX: the original Thread(target=stack.create_multiple(N))
            # invoked create_multiple immediately on the calling thread and
            # passed its RETURN VALUE as the target.  Pass the callable and
            # its argument separately so the work runs in the thread.
            thread = Thread(target=stack.create_multiple,
                            args=(Number - scaled,))
            thread.start()
    return True
Esempio n. 14
0
def scale_up(Number=1):
    """Bring up to ``Number`` additional backend nodes online.

    Prefers restarting stopped (sleeping) machines; only when none exist
    are brand-new instances created, in a background thread since creation
    is slow.

    Returns:
        False when the backend quota is already exhausted, True otherwise.
    """
    stack = openstack()
    sleeping = stack.sleeping_machine()

    if len(stack.backends()) == quota_limit:
        # Quota reached: cannot add any more backends.
        return False

    scaled = 0
    if sleeping:
        print(sleeping)
        if len(sleeping) > 1:
            for node in sleeping:
                if scaled < Number:
                    node.start()
                    scaled += 1
        else:
            if 'ACTIVE' not in sleeping[0].status:
                sleeping[0].start()
                scaled += 1
    else:
        if scaled < Number:
            # BUG FIX: the original Thread(target=stack.create_multiple(N))
            # invoked create_multiple immediately on the calling thread and
            # passed its RETURN VALUE as the target.  Pass the callable and
            # its argument separately so the work runs in the thread.
            thread = Thread(target=stack.create_multiple,
                            args=(Number - scaled,))
            thread.start()
    return True