def test_remove_two_servers_while_request_in_progress():
    sim = SimulatorKernel(outputDirectory = None)

    server1 = MockServer(sim, latency = 10)
    server2 = MockServer(sim, latency = 10)
    server3 = MockServer(sim, latency = 10)

    lb = LoadBalancer(sim)
    lb.algorithm = 'SQF'
    lb.addBackend(server1)
    lb.addBackend(server2)
    lb.addBackend(server3)

    onShutdownCompleted = Mock()

    r1 = Request()
    r1.onCompleted = Mock()
    r2 = Request()
    r2.onCompleted = Mock()
    r3 = Request()
    r3.onCompleted = Mock()
    sim.add(0, lambda: lb.request(r1))
    sim.add(0, lambda: lb.request(r2))
    sim.add(0, lambda: lb.request(r3))
    sim.add(1, lambda: lb.removeBackend(server1, onShutdownCompleted))
    sim.add(2, lambda: lb.removeBackend(server2, onShutdownCompleted))
    sim.run()

    r1.onCompleted.assert_called_once_with()
    r2.onCompleted.assert_called_once_with()
    r3.onCompleted.assert_called_once_with()
    assert onShutdownCompleted.call_count == 2
    assert server1.numSeenRequests == 1
    assert server2.numSeenRequests == 1
    assert server3.numSeenRequests == 1

def test_remove_while_request_not_in_progress():
    sim = SimulatorKernel(outputDirectory = None)

    server1 = MockServer(sim, latency = 0.1)
    server2 = MockServer(sim, latency = 0.1)

    lb = LoadBalancer(sim)
    lb.addBackend(server1)
    lb.addBackend(server2)

    onShutdownCompleted = Mock()

    def remove_active_server():
        if server1.numSeenRequests:
            lb.removeBackend(server1, onShutdownCompleted)
        else:
            lb.removeBackend(server2, onShutdownCompleted)

    r1 = Request()
    r1.onCompleted = Mock()
    sim.add(0, lambda: lb.request(r1))
    sim.add(1, lambda: remove_active_server())
    sim.add(1, lambda: lb.request(Request()))
    sim.add(2, lambda: lb.request(Request()))
    sim.add(2, lambda: lb.request(Request()))
    sim.run()

    r1.onCompleted.assert_called_once_with()
    onShutdownCompleted.assert_called_once_with()
    assert server1.numSeenRequests == 1 or server2.numSeenRequests == 1
    assert server1.numSeenRequests == 3 or server2.numSeenRequests == 3

def test():
    sim = SimulatorKernel(outputDirectory = None)

    server1 = Mock()
    server2 = Mock()

    lb = LoadBalancer(sim)
    lb.addBackend(server1)
    lb.addBackend(server2)

    lb.removeBackend(server2)

    assert str(lb)
Example #5
 def test_balance(self):
     lb = LoadBalancer(umax=2, ttask=4)
     lb.process([1, 3, 0, 1, 0, 1])
     assert lb.snapshots == ['1', '2,2', '2,2', '2,2,1', '1,2,1', '2', '2', '1', '1', '0']
Example #6
 def test_invalid_ttask_max(self):
     with pytest.raises(Exception):
         assert LoadBalancer(umax=1, ttask=40)
Example #7
 def test_invalid_umax_size(self):
     with pytest.raises(Exception):
         assert LoadBalancer(umax=11, ttask=4)
Example #8
 def test_coast_calculator(self):
     lb = LoadBalancer(umax=2, ttask=4)
     lb.process([1, 3, 0, 1, 0, 1])
     assert lb.total_coast == 15
Example #9
class Simulator:

  def __init__(self, params):
    number_of_servers = int(params["number_of_servers"])

    self.number_of_requests = int(params["number_of_requests"])
    self.number_of_types = len(params["types_of_requests"])

    self.number_of_requests_per_sec = int(params["number_of_requests_per_sec"])
    self.generator = RequestGenerator(params["types_of_requests"], int(params["number_of_requests_per_sec"]))

    request_type_capacity = [x["output_capacity"] for x in params["types_of_requests"].values()]
    server_capacity = self.distribute_output_capacity(number_of_servers, request_type_capacity)

    #print "server capacity:", server_capacity

    servers = [Server(server_capacity[z]) for z in range(number_of_servers)]
    self.loadbalancer = LoadBalancer(servers=servers, method=params["method"])
  
  def distribute_output_capacity(self, number_of_servers, type_capacity):
    server_capacity = [[0 for z in range(self.number_of_types)]
                       for server in range(number_of_servers)]

    for z in range(self.number_of_types):
      while type_capacity[z] > 0:
        for server in range(number_of_servers):
          if type_capacity[z] <= 0:
            break
          server_capacity[server][z] += 1
          type_capacity[z] -= 1

    return server_capacity
        
  def number_of_servers(self):
    return self.loadbalancer.number_of_servers()

  def total_service_time(self):
    return self.loadbalancer.total_service_time()

  def total_wait_time(self):
    return self.loadbalancer.total_wait_time()

  def total_wait_requests(self):
    return self.loadbalancer.total_wait_requests()

  def number_of_requests_per_type(self):
    return self.loadbalancer.number_of_requests_per_type()

  def number_of_failed_requests_per_type(self):
    return self.loadbalancer.number_of_failed_requests_per_type()

  def simulate(self):
    arrival_time = 0
    reqno = 0
    while True:
      for req in self.generator.generate(number_of_requests=100):
        reqno += 1
        request_type, delay, service_time = req
        arrival_time += delay

        #print "--- %s arrive at %f" % (request_type, arrival_time)
        self.loadbalancer.process(request_type, arrival_time, service_time)

      if reqno >= self.number_of_requests:
        break
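
The round-robin split performed by distribute_output_capacity is easiest to see on a small input. The helper below is a hypothetical, self-contained sketch of the same loop (round_robin_split is not part of the original project), shown only to illustrate the result it produces:

def round_robin_split(number_of_servers, type_capacity):
  # Hand out one unit of each request type's capacity to each server in
  # turn until that type's capacity runs out (same loop as above).
  per_server = [[0] * len(type_capacity) for _ in range(number_of_servers)]
  remaining = list(type_capacity)
  for z in range(len(type_capacity)):
    while remaining[z] > 0:
      for server in range(number_of_servers):
        if remaining[z] <= 0:
          break
        per_server[server][z] += 1
        remaining[z] -= 1
  return per_server

# Capacities [5, 2] spread over 3 servers: earlier servers absorb the remainder.
assert round_robin_split(3, [5, 2]) == [[2, 1], [2, 1], [1, 0]]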
Example #10
import logging

# Third-party handler from the python-logstash-async package; project-local
# modules (LoadBalancer, Gateway, ResponseCaching, TwoPhaseCommit) are assumed
# to be imported elsewhere in this file.
from logstash_async.handler import AsynchronousLogstashHandler

host_logger = 'logstash'
port_logger = 5000

# Get a test logger
test_logger = logging.getLogger('python-logstash-logger')
# Set it to whatever level you want - default will be info
test_logger.setLevel(logging.DEBUG)
# Create a handler for it
async_handler = AsynchronousLogstashHandler(host_logger,
                                            port_logger,
                                            database_path=None)
# Add the handler to the logger
test_logger.addHandler(async_handler)

# Initialize load balancer
load_balancer = LoadBalancer()

gateway = Gateway()  #TODO: make singleton if needed????

# TODO: make it work with True!!!
SAVE_CACHE_RESPONSE = True
# SAVE_CACHE_RESPONSE = False

response_caching = ResponseCaching()

coordinator = TwoPhaseCommit()

###### Define possible cache statuses#####
SUCCESS = 1
CUSTOM_CACHE_FAILED = 2
REDIS_CACHE_FAILED = 3
Example #11
        else:
            raise Exception('Input file entered does not exist')

    if len(sys.argv) > 2:
        output_file = sys.argv[2]

    if not os.path.isfile(input_file):
        raise Exception('Default input file does not exist')

    data = []
    print('Loading file in: {}'.format(input_file))
    with open(input_file, 'r') as file:
        for i, val in enumerate(file.read().split('\n')):
            if val.isdigit():
                data.append(int(val))
            else:
                raise Exception('Input values need to be Integers: {} in line {}'.format(val, i))

    lb = LoadBalancer(ttask=data[0], umax=data[1])
    lb.process(tasks=data[2:])
    with open(output_file, 'w+') as file:
        file.write('\n'.join(lb.report))
    print('Done! Written file in {}'.format(output_file))
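
From the slicing above (ttask=data[0], umax=data[1], tasks=data[2:]), the input file is one integer per line: ttask first, then umax, then the number of new users arriving at each tick. A hypothetical input file matching the test data in Example #5 would be:

4
2
1
3
0
1
0
1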
Example #12
def main():
    template = Template()
    template.add_description("Example Server")

    for key, value in Mappings().mappings.iteritems():
        template.add_mapping(key, value)

    parameters = Parameters()
    for param in parameters.values():
        template.add_parameter(param)

    template.add_metadata({
        "AWS::CloudFormation::Interface": {
            "ParameterGroups": [
                {
                    "Label": {
                        "default": "Required parameters."
                    },
                    "Parameters": [
                        "DBPassword",
                        "KeyPair",
                    ]
                },
                {
                    "Label": {
                        "default": "Advanced: Database and instance"
                    },
                    "Parameters": [
                        "DBInstanceType", "DBStorageSize", "DBBackupRetention",
                        "EC2InstanceType"
                    ]
                },
            ],
            "ParameterLabels": {
                "DBPassword": {
                    "default": "Choose a database password"
                },
                "DBStorageSize": {
                    "default": "Database storage (advanced)"
                },
                "DBBackupRetention": {
                    "default": "How long to keep backups (advanced)"
                },
                "DBInstanceType": {
                    "default": "Database instance class (advanced)"
                },
                "KeyPair": {
                    "default": "Choose a key pair"
                },
                "EC2InstanceType": {
                    "default": "Instance class (advanced)"
                },
            }
        }
    })

    vpc = VPC()
    for res in vpc.values():
        template.add_resource(res)

    elb = LoadBalancer(vpc=vpc)
    for res in elb.values():
        template.add_resource(res)

    db = Database(parameters=parameters, vpc=vpc, loadbalancer=elb)
    for res in db.values():
        template.add_resource(res)

    ec2 = EC2(parameters=parameters, vpc=vpc, loadbalancer=elb)
    for res in ec2.values():
        template.add_resource(res)

    template.add_output(
        Output("LoadBalancerDNSName",
               Value=GetAtt(elb.load_balancer, "DNSName")))

    print(template.to_json())
Example #14
class Gateway:
    MAX_RETRIES = 5

    map_service_type_paths = {
        "init-student": "type1",
        "init_student": "type1",
        "nota": "type1",
        "nota-atestare": "type1",
        "nota_atestare": "type1",
        "nota-examen": "type2",
        "nota_examen": "type2",
        "pune-nota_atestare": "type2",
        "pune_nota_atestare": "type2",
        "nota-finala": "type2",
        "nota_finala": "type2",
        "get-all-exam-marks": "type2",
        "get-all-midterm-marks": "type2",
        "s2-nota-atestare": "type2",
        "s2-validate-student-marks": "type2",
        "s2-status": "type2",
        "s1-status": "type1",
        "status": "",
        "test-route": "type1",
        "test-route-t2": "type2"
    }

    def __init__(self):
        self.load_balancer = LoadBalancer()

    def is_path_allowed(self, path):
        allowed_paths = self.map_service_type_paths.keys()

        return path in allowed_paths

    def get_service_type(self, path):
        service_type = self.map_service_type_paths[path]

        if path == "s1-status":
            path = "status"
            service_type = "type1"
        elif path == "s2-status":
            path = "status"
            service_type = "type2"

        return service_type

    async def make_next_request(self,
                                path,
                                service_type,
                                data,
                                method,
                                counter=0):
        if not self.load_balancer.any_available(service_type):
            test_logger.error("ERROR: No service of type " + service_type +
                              " available")
            return {"status": "error", "message": "No services available"}

        test_logger.debug("Request data: " + str(data))

        parameters = {"path": path, "parameters": data}

        test_logger.debug("Parameters " + str(parameters))

        circuit_breaker = self.load_balancer.next(service_type)

        if circuit_breaker is None:
            return {
                "status": "error",
                "message": "Server error in load_balancer.next(...) method. No services found in cache."
            }

        service_response = await circuit_breaker.request(parameters, method)

        if "status" in service_response and service_response["status"] == "success":
            return {
                "status": "success",
                "response": service_response["response"]
            }

        if "status" in service_response and service_response["status"] == "error":
            if counter < self.MAX_RETRIES:
                counter += 1
                return await self.make_next_request(path, service_type, data,
                                                    method, counter)

            if "message" in service_response:
                return {
                    "status": "error",
                    "message": service_response["message"]
                }

            return {"status": "error"}

        return {"status": "error", "message": "Error in request to service"}
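
As a quick illustration of the routing table above, the class-level mapping alone already determines each path's service type. A hypothetical check (it only reads the dict, so no LoadBalancer instance is needed):

# Hypothetical sanity checks against the class-level routing table above.
assert Gateway.map_service_type_paths["nota-examen"] == "type2"
assert Gateway.map_service_type_paths["s1-status"] == "type1"
assert Gateway.map_service_type_paths["status"] == ""  # bare "status" has no type; get_service_type special-cases s1-/s2-status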