def __init__(self, sfc_func_name='morton', sfc_func_dict=None, **args):
    LoadBalancer.__init__(self, **args)
    self.method = 'serial_sfc'
    if sfc_func_dict is None:
        sfc_func_dict = space_filling_curves.sfc_func_dict
    self.sfc_func_dict = sfc_func_dict
    self.sfc_func = sfc_func_name
class TestStringMethods(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.server = Server()
        cls.loadBalancer = LoadBalancer()

    def test_server_add(self):
        self.server.add_connection("192.168.1.1")
        self.assertTrue(len(self.server.connections) == 1)

    def test_server_close(self):
        self.server.close_connection("192.168.1.1")
        self.assertTrue(len(self.server.connections) == 0)

    def test_loadBalancer_add(self):
        self.loadBalancer.add_connection("fdca:83d2::f20d")
        self.assertTrue(len(self.loadBalancer.connections) == 1)
        self.assertTrue(len(self.loadBalancer.servers) == 1)

    def test_loadBalancer_avgAndAppend(self):
        avg = self.loadBalancer.avg_load()
        self.assertTrue(avg > 0)
        self.loadBalancer.servers.append(Server())
        newAvg = self.loadBalancer.avg_load()
        self.assertTrue(avg > newAvg)

    def test_loadBalancer_close(self):
        self.loadBalancer.close_connection("fdca:83d2::f20d")
        self.assertTrue(len(self.loadBalancer.connections) == 0)
        self.assertTrue(self.loadBalancer.avg_load() == 0)

    def test_loadBalancer_ensureAvailability(self):
        for connection in range(20):
            self.loadBalancer.add_connection(connection)
        self.assertTrue(self.loadBalancer.avg_load() < 50)
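# A minimal sketch of the Server/LoadBalancer interface the tests above assume.
# This is illustrative only (not the project's actual classes); the fixed 10%
# load per connection and the 50% growth threshold are assumptions inferred
# from the assertions.
import random


class Server:
    def __init__(self):
        self.connections = {}

    def add_connection(self, connection_id):
        # Model each connection as a fixed load of 10%.
        self.connections[connection_id] = 10

    def close_connection(self, connection_id):
        self.connections.pop(connection_id, None)

    def load(self):
        return sum(self.connections.values())


class LoadBalancer:
    def __init__(self):
        self.servers = [Server()]
        self.connections = {}

    def add_connection(self, connection_id):
        server = random.choice(self.servers)
        server.add_connection(connection_id)
        self.connections[connection_id] = server
        self.ensure_availability()

    def close_connection(self, connection_id):
        server = self.connections.pop(connection_id, None)
        if server is not None:
            server.close_connection(connection_id)

    def avg_load(self):
        return sum(s.load() for s in self.servers) / len(self.servers)

    def ensure_availability(self):
        # Grow the pool whenever the average load reaches 50%.
        if self.avg_load() >= 50:
            self.servers.append(Server())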
def __add_new_lb(self, cip, cport, sip, sport):
    p = multiprocessing.current_process()
    lb = LoadBalancer(cip, cport, sip, sport)
    print("****[Adding Load Balancer [{}]]".format(p.pid))
    # print(" CIP:{}, CPort:{}".format(lb.cip, lb.cport))
    # print(" SIP:{}, SPort:{}".format(lb.sip, lb.sport))
    # print("-------------------------------------")
    lb.start()
def add_lb(self, image, ip):
    # Replace an existing load balancer that serves the same image.
    for lb_index, lb in enumerate(self.lbs):
        if lb.get_image() == image:
            self.lbs[lb_index] = LoadBalancer(image, ip)
            return True
    # Otherwise register a new load balancer.
    load_balancer = LoadBalancer(image, ip)
    self.lbs.append(load_balancer)
    return True
def calc(self):
    """Calculate the cells in each process, the cell and particle loads,
    and the imbalance in the distribution."""
    self.proc_blocks = [cluster.cells for cluster in self.clusters]
    self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]
    self.particle_loads = [cluster.np for cluster in self.clusters]
    self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)
class BackendList:
    def __init__(self):
        self.load_balancer = LoadBalancer()
        self.current = 0

    def getserver(self):
        return self.load_balancer.get_server()
def get_distribution(self):
    """Return the list of cells and the number of particles in each cluster,
    to be used for distribution to processes."""
    self.calc()
    proc_blocks = self.proc_blocks
    proc_num_particles = self.particle_loads
    cell_proc = LoadBalancer.get_block_proc(proc_blocks=proc_blocks)
    return cell_proc, proc_num_particles
class TestLoad(TestCase):
    def setUp(self):
        self.machine = Machine(2)
        self.load_balancer = LoadBalancer()

    def test_up_tasks(self):
        self.assertEqual(self.machine.up_tasks(4, 1, 2), 0)

    # The machine runs the tasks and returns how many it could not execute.
    def testando_a_maquina_executa_as_tasks_e_retorna_o_valor_das_que_n_conseguiu_executar(self):
        self.assertEqual(self.machine.up_tasks(4, 6, 2), 4)

    # The machine closes the processed task and returns None.
    def testando_a_maquina_fecha_as_tasks_e_retorna_none(self):
        self.assertEqual(self.machine.close_processed_task(6), None)
        self.assertEqual(len(self.machine.tasks_death_tick), 0)

    # The machine records the created value in its task list.
    def testando_a_maquina_o_valor_que_a_maquina_criou_na_lista(self):
        self.assertEqual(self.machine.up_tasks(4, 6, 2), 4)
        self.assertEqual(self.machine.tasks_death_tick[0], 6)

    # adjust_list returns the correctly adjusted list.
    def testando_se_adjust_list_retorna_a_lista_ajustada_coretamente(self):
        lista = [1, 2, 3, 4]
        self.assertEqual(len(self.load_balancer.adjust_list(lista, 4)), 7)

    # begin_process sets up the attributes correctly.
    def testando_se_o_begin_process_adequa_as_variaveis_corretamente(self):
        self.load_balancer.begin_process()
        self.assertIsInstance(self.load_balancer.ticks, list)

    # ttask is greater than 0 and less than 11.
    def testando_se_o_ttask_esta_maior_que_0_menor_que_11(self):
        self.load_balancer.begin_process()
        self.assertGreaterEqual(self.load_balancer.ttask, 1)
        self.assertLessEqual(self.load_balancer.ttask, 10)

    # umax is greater than 0 and less than 11.
    def testando_se_o_umax_esta_maior_que_0_menor_que_11(self):
        self.load_balancer.begin_process()
        self.assertGreaterEqual(self.load_balancer.umax, 1)
        self.assertLessEqual(self.load_balancer.umax, 10)

    # The input file exists.
    def testando_se_input_existe(self):
        self.assertTrue(os.path.exists(self.load_balancer.input_file_name))

    # The output file is created.
    def testando_se_output_e_criado(self):
        self.load_balancer.run()
        self.assertTrue(os.path.exists(self.load_balancer.output_file_name))
def test_price_increase(self):
    load_balancer = LoadBalancer(MAX_USER_SERVER)
    users_creation = 3
    for user in range(users_creation):
        new_user = User(TASKS)
        load_balancer.allocate_new_user(new_user)
    load_balancer.execute_tick()
    assert load_balancer.price != 'R$ 0,00'
def gen_clusters(self, proc_cells=None, proc_num_particles=None, **kwargs):
    """Generate the clusters to operate on. This is automatically called by
    the constructor if its `init` argument is True (default)."""
    cell_np = {}
    for tmp_cells_np in self.proc_block_np:
        cell_np.update(tmp_cells_np)
    self.cell_np = cell_np
    if proc_cells is None:
        proc_cells, proc_num_particles = LoadBalancer.distribute_particles_geometric(
            self.cell_np, self.num_procs)
    self.np_req = numpy.average(proc_num_particles)
    self.clusters = [Cluster(cells, cell_np, self.np_req, **kwargs)
                     for cells in proc_cells]
    self.calc()
supervisors = []
hpaThreads = []
loadBalancers = []
loadBalancerThreads = []
supervisorThreads = []
count = 0

instructions = open("instructions.txt", "r")
commands = instructions.readlines()

for command in commands:
    cmdAttributes = command.split()
    print(str(command))
    with apiServer.etcdLock:
        if cmdAttributes[0] == 'Deploy':
            apiServer.CreateDeployment(cmdAttributes[1:])
            deployment = apiServer.GetDepByLabel(cmdAttributes[1])
            loadbalancer = LoadBalancer(kind, apiServer, deployment)
            lbThread = threading.Thread(target=loadbalancer)
            lbThread.start()
            loadBalancers.append(loadbalancer)
            loadBalancerThreads.append(lbThread)
        elif cmdAttributes[0] == 'AddNode':
            apiServer.CreateWorker(cmdAttributes[1:])
        elif cmdAttributes[0] == 'DeleteDeployment':
            # We have to make sure that our load balancer ends gracefully here.
            for loadBalancer in loadBalancers:
                if loadBalancer.deployment.deploymentLabel == cmdAttributes[1]:
                    loadBalancer.running = False
            apiServer.RemoveDeployment(cmdAttributes[1:])
        elif cmdAttributes[0] == 'ReqIn':
            apiServer.PushReq(cmdAttributes[1:])
        elif cmdAttributes[0] == 'CreateHPA':
def test_allocate_new_user(self):
    load_balancer = LoadBalancer(MAX_USER_SERVER)
    new_user = User(TASKS)
    status = load_balancer.allocate_new_user(new_user)
    assert status is True
from load_balancer import LoadBalancer, User
import os

if __name__ == '__main__':
    if os.path.exists('input.txt'):
        with open('input.txt', 'r') as file:
            lines = file.read().splitlines()
        ttask = int(lines[0])
        umax = int(lines[1])
        load_balancer = LoadBalancer(umax)
        print(50 * '=')
        tick = 0
        for tick, users in enumerate(lines[2:], start=1):
            if users == '':
                continue
            print(f'TICK: {tick}')
            if int(users) > 0:
                for user in range(int(users)):
                    new_user = User(ttask)
                    load_balancer.allocate_new_user(new_user)
            load_balancer.set_output(tick=tick, input=users)
            load_balancer.execute_tick()
            load_balancer.clean_servers()
            print(50 * '=')
def add_new_lb(cip, cport, sip, sport):
    p = multiprocessing.current_process()
    click.echo("Starting Load Balancer with pid[%s], at [%s:%s]" % (p.pid, cip, cport))
    lb = LoadBalancer(cip, cport, sip, sport)
    lb.start()
vcn = VCN(config)
vcn.create_vcn()
vcn.create_gateway()
vcn.create_route_rules()

subnet_threads = []
for ad in ['ad_1', 'ad_2']:
    thread = threading.Thread(target=vcn.create_subnet, args=(ad,))
    subnet_threads.append(thread)
    thread.start()
join_threads(subnet_threads)

vcn.create_security_rules()

install_threads = []
for subnet in vcn.subnets:
    thread = threading.Thread(target=install_mean_stack, args=(subnet, vcn))
    install_threads.append(thread)
    thread.start()
join_threads(install_threads)

lb = LoadBalancer(config, vcn)
lb.create_load_balancer()
lb.create_backend_set()
lb.create_backends()
lb.create_listener()
print('MEAN Stack URL: http://' + lb.public_ip + ':8080')
from load_balancer import LoadBalancer

if __name__ == '__main__':
    b = LoadBalancer()
    b.load_balance()
@classmethod
def setUpClass(cls):
    cls.server = Server()
    cls.loadBalancer = LoadBalancer()
umax_invalid_mesage = (
    "umax input is an invalid option. It should be a number between 1 and 10."
)

tasks = [int(i.rstrip("\n")) for i in filereader]

TTASK = int(tasks[0])
if TTASK < 1 or TTASK > 10:
    print(ttask_invalid_mesage)
    sys.exit()

UMAX = int(tasks[1])
if UMAX < 1 or UMAX > 10:
    print(umax_invalid_mesage)
    sys.exit()

load_balancer = LoadBalancer(TTASK, UMAX)
tasks = tasks[2:]

with open("output.txt", "w") as output_file:
    for i in tasks:
        load_balancer.run_tasks()
        load_balancer.assign_task_to_server(i)
        load_balancer.calculate_user()
        output_file.write(load_balancer.output())
        output_file.write("\n")
    while len(load_balancer.servers):
        load_balancer.run_tasks()
        load_balancer.calculate_user()
        output_file.write(load_balancer.output())
        output_file.write("\n")
from load_balancer import LoadBalancer
from provider import Provider

provider1 = Provider(provider_id='1')
provider2 = Provider(provider_id='2')

load_balancer = LoadBalancer()
load_balancer.register(provider=provider1)
load_balancer.register(provider=provider2)

load_balancer.get('random')
load_balancer.get('random')
load_balancer.get('random')
load_balancer.get('random')
load_balancer.get('round_robin')
load_balancer.get('round_robin')
load_balancer.get('round_robin')
load_balancer.get('random')
load_balancer.get('random')
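# A minimal sketch of a LoadBalancer that would satisfy the register()/get()
# calls above, assuming get() takes the strategy name and providers expose a
# provider_id. Illustrative only; not the load_balancer module's actual code.
import random


class Provider:
    def __init__(self, provider_id):
        self.provider_id = provider_id

    def get(self):
        # A real provider would do work; here we just return our id.
        return self.provider_id


class LoadBalancer:
    def __init__(self):
        self.providers = []
        self._next = 0  # round-robin cursor

    def register(self, provider):
        self.providers.append(provider)

    def get(self, strategy='random'):
        if strategy == 'round_robin':
            provider = self.providers[self._next % len(self.providers)]
            self._next += 1
        else:  # 'random'
            provider = random.choice(self.providers)
        return provider.get()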
def test_run_full(self):
    with open('input.txt', 'r') as file:
        lines = file.read()
    data = lines.split('\n')
    ttask = int(data[0])
    umax = int(data[1])
    load_balancer = LoadBalancer(umax)
    print(50 * '=')
    tick = 0
    for tick, users in enumerate(data[2:], start=1):
        if users == '':
            continue
        print(f'TICK: {tick}')
        if int(users) > 0:
            for user in range(int(users)):
                new_user = User(ttask)
                load_balancer.allocate_new_user(new_user)
        load_balancer.set_output(tick=tick, input=users)
        load_balancer.execute_tick()
        load_balancer.clean_servers()
        print(50 * '=')
    load_balancer.end_active_tasks(last_tick=tick)
    assert load_balancer.price == 'R$ 15,00'
def test_create_new_server(self):
    load_balancer = LoadBalancer(MAX_USER_SERVER)
    status = load_balancer.create_new_server()
    assert status == 'success'
class TestLoadBalancer(TestCase):
    def setUp(self) -> None:
        self.load_balancer = LoadBalancer(4, 2)

    def test_create_a_server_should_return_a_server_instance(self):
        server = self.load_balancer.create_a_server()
        self.assertIsInstance(server, Server)

    def test_calculate_user_should_return_one_when_exist_only_one_server_with_one_running_task(self):
        self.load_balancer.assign_task_to_server(1)
        expected = 1
        result = self.load_balancer.calculate_user()
        self.assertEqual(expected, result)

    def test_run_tasks_should_reduce_ttask_user_attribute_by_one(self):
        self.load_balancer.assign_task_to_server(1)
        self.load_balancer.run_tasks()
        expected = 3
        result = self.load_balancer.servers[0].users[0].ttask
        self.assertEqual(expected, result)

    def test_output_should_return_two_comma_two_when_exist_four_active_users(self):
        self.load_balancer.assign_task_to_server(4)
        expected = "2, 2"
        result = self.load_balancer.output()
        self.assertEqual(expected, result)

    def test_assign_task_to_server_should_create_a_server_for_a_task_when_there_is_no_server_running(self):
        self.load_balancer.assign_task_to_server(1)
        expected = 1
        result = len(self.load_balancer.servers)
        self.assertEqual(expected, result)

    def test_assign_task_to_server_should_not_create_a_server_for_a_task_when_there_is_an_available_server_running(self):
        self.load_balancer.assign_task_to_server(1)
        self.load_balancer.assign_task_to_server(1)
        expected = 1
        result = len(self.load_balancer.servers)
        self.assertEqual(expected, result)

    def test_assign_task_to_server_should_create_a_new_instance_when_there_is_a_server_running_but_not_available(self):
        self.load_balancer.assign_task_to_server(2)
        self.load_balancer.assign_task_to_server(1)
        expected = 2
        result = len(self.load_balancer.servers)
        self.assertEqual(expected, result)
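# A minimal sketch of the User/Server/LoadBalancer model the tests above
# exercise, inferred from the assertions (LoadBalancer(ttask, umax), servers of
# capacity umax, ttask-per-user countdown). Illustrative only; not the real code.
class User:
    def __init__(self, ttask):
        self.ttask = ttask


class Server:
    def __init__(self, umax):
        self.umax = umax
        self.users = []

    def has_capacity(self):
        return len(self.users) < self.umax

    def add_user(self, user):
        self.users.append(user)

    def run_tasks(self):
        # Each tick consumes one unit of every user's remaining ttask.
        for user in self.users:
            user.ttask -= 1
        self.users = [u for u in self.users if u.ttask > 0]


class LoadBalancer:
    def __init__(self, ttask, umax):
        self.ttask = ttask
        self.umax = umax
        self.servers = []

    def create_a_server(self):
        server = Server(self.umax)
        self.servers.append(server)
        return server

    def assign_task_to_server(self, new_users):
        # Fill the first server with spare capacity; otherwise start a new one.
        for _ in range(new_users):
            server = next((s for s in self.servers if s.has_capacity()), None)
            if server is None:
                server = self.create_a_server()
            server.add_user(User(self.ttask))

    def run_tasks(self):
        for server in self.servers:
            server.run_tasks()
        self.servers = [s for s in self.servers if s.users]

    def calculate_user(self):
        return sum(len(s.users) for s in self.servers)

    def output(self):
        return ", ".join(str(len(s.users)) for s in self.servers)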
    policy = 'weighted least connection'
    cfg = {
        'host': 'http://host.docker.internal',
        'main_endpoint': 'work',
        'port': 5000,
        'policy': policy,
        'hosts_configuration': {
            'emea': 1,
            'us': 2,
            'asia': 2
        },
        'req_num': _req_num
    }
    load_balancer = LoadBalancer(cfg)
    load_balancer.run()
    print('Finished requests')
else:
    # Create a config object for the load balancer.
    policy = None
    if 0 <= req_num < 100:
        policy = 'round robin'
    elif 100 <= req_num < 500:
        policy = 'randomized static'
    elif 500 <= req_num < 1250:
        policy = 'least connection'
    elif 1250 <= req_num < 2500:
def setUp(self) -> None:
    self.load_balancer = LoadBalancer(4, 2)
    ga_fwle = 'ga_fwle.log'
    logfile = open(ga_fwle, "w")
    for r in forwarding_unit_latency_estiamation:
        logfile.write(
            'Region: {} forward unit latency estimation: {}'.format(
                r, forwarding_unit_latency_estiamation[r]))
        logfile.write('\n')
    logfile.close()
elif param == 'policies':
    # Test the load balancer policies and compare them.
    rr_cfg = config['production_round_robin']
    print(rr_cfg)
    load_balancer = LoadBalancer(rr_cfg)
    load_balancer.run()

    wrr_cfg = config['production_weighted_round_robin']
    print(wrr_cfg)
    load_balancer = LoadBalancer(wrr_cfg)
    load_balancer.run()

    lc_cfg = config['production_least_connection']
    print(lc_cfg)
    load_balancer = LoadBalancer(lc_cfg)
    load_balancer.run()

    wlc_cfg = config['production_weighted_least_connection']
    print(wlc_cfg)
    load_balancer = LoadBalancer(wlc_cfg)
ec2_north_virginia = EC2(ubuntu20_nvirginia_img, ec2_nv_key_name, 't2.micro',
                         'us-east-1', 'secgroup-teste', script_machines, session)
nv_subnets_ids, nv_vpc_ids = ec2_north_virginia.describe_subnets()

print("creating server instance...")
ec2_id_north_virginia = ec2_north_virginia.create_instances(1)
print("Status - OK")

security_group_id = [
    instance.security_groups[0]['GroupId']
    for instance in ec2_id_north_virginia
]

# Load balancer - North Virginia __init__
lb = LoadBalancer(load_balancer_name, nv_subnets_ids, 'us-east-1')
print("creating load balancer...")
elb_obj = lb.create_elb(security_group_id)
elb_arn = elb_obj['LoadBalancers'][0]['LoadBalancerArn']
elb_dns = elb_obj['LoadBalancers'][0]['DNSName']

a_file = open("script", "r")
list_of_lines = a_file.readlines()
list_of_lines[6] = f"URL_SERVER = '{elb_dns}'\n"

a_file = open("script", "w")
a_file.writelines(list_of_lines)
a_file.close()
print("Status - OK")
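# A plausible shape for the LoadBalancer.create_elb wrapper used above, assuming
# it delegates to boto3's elbv2 client (the response dict then carries the
# LoadBalancerArn and DNSName read by the script). Illustrative only; the real
# class is not shown in this snippet.
import boto3


class LoadBalancer:
    def __init__(self, name, subnet_ids, region):
        self.name = name
        self.subnet_ids = subnet_ids
        self.client = boto3.client('elbv2', region_name=region)

    def create_elb(self, security_group_ids):
        # Return the raw API response so callers can read ARN and DNS name.
        return self.client.create_load_balancer(
            Name=self.name,
            Subnets=self.subnet_ids,
            SecurityGroups=security_group_ids,
            Type='application',
        )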
def setUp(self):
    self.machine = Machine(2)
    self.load_balancer = LoadBalancer()
def round_robin_invocation():
    load_balancer = LoadBalancer()
    for i in range(100):
        print(load_balancer.get_round_robin())
from flask import Flask, render_template, redirect, request
from html import *
from Apps.App_master.forms import SearchForm, DocQuery
from load_balancer import LoadBalancer

app = Flask(__name__)
app.config['SECRET_KEY'] = '5791628bb0b13ce1c676dfde280ba245'
load_balancer = LoadBalancer()


@app.route('/')
def landing():
    return redirect('/search', code=302)


@app.route('/search', methods=['GET'])
def search():
    forms = [SearchForm(), DocQuery()]
    return render_template(
        'search.html',
        title='Search',
        forms=forms,
        slaves=load_balancer.slave_names
    )


# Don't forget to add the templates folder to 'templates' in the project structure.
@app.route('/search', methods=['POST'])
def get_results():
    result = request.json
    print(request.get_data())
    query = str(request.get_data().split(b'&')[1])[8:-1]
def random_invocation():
    load_balancer = LoadBalancer()
    for i in range(100):
        print(load_balancer.get_random())