Example #1
    def request(self, request):
        newRequest = Request()
        newRequest.originalRequest = request
        newRequest.onCompleted = lambda: self.onCompleted(newRequest)
        self.loadBalancer.request(newRequest)

        action = self.controller.onRequest(newRequest)
        self.scaleBy(action)
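The hook above wraps the incoming request, forwards it to the load balancer, asks the controller for a scaling action, and applies it. A minimal sketch of a controller compatible with these hooks, assuming each hook returns an integer to scale by (0 meaning no change), as the mock controllers in the tests below do; the class name and signatures are illustrative, not the project's own:

class NoOpAutoScalerController:
    # Hypothetical controller: every hook returns 0, i.e. "do not scale".
    controlInterval = 1  # simulated time between onControlPeriod() calls

    def onRequest(self, request):
        return 0

    def onCompleted(self, request):
        return 0

    def onControlPeriod(self):
        return 0

    def onStatus(self, status):
        return 0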
def test_remove_two_servers_while_request_in_progress():
    sim = SimulatorKernel(outputDirectory = None)

    server1 = MockServer(sim, latency = 10)
    server2 = MockServer(sim, latency = 10)
    server3 = MockServer(sim, latency = 10)

    lb = LoadBalancer(sim)
    lb.algorithm = 'SQF'
    lb.addBackend(server1)
    lb.addBackend(server2)
    lb.addBackend(server3)

    onShutdownCompleted = Mock()

    r1 = Request()
    r1.onCompleted = Mock()
    r2 = Request()
    r2.onCompleted = Mock()
    r3 = Request()
    r3.onCompleted = Mock()
    sim.add(0, lambda: lb.request(r1))
    sim.add(0, lambda: lb.request(r2))
    sim.add(0, lambda: lb.request(r3))
    sim.add(1, lambda: lb.removeBackend(server1, onShutdownCompleted))
    sim.add(2, lambda: lb.removeBackend(server2, onShutdownCompleted))
    sim.run()

    r1.onCompleted.assert_called_once_with()
    r2.onCompleted.assert_called_once_with()
    r3.onCompleted.assert_called_once_with()
    assert onShutdownCompleted.call_count == 2
    assert server1.numSeenRequests == 1
    assert server2.numSeenRequests == 1
    assert server3.numSeenRequests == 1
def test_request_hooks():
    sim = SimulatorKernel(outputDirectory = None)

    loadBalancer = MockLoadBalancer(sim, latency = 1)
    autoScalerController = mock.Mock()
    autoScalerController.controlInterval = 1
    autoScalerController.onRequest = mock.Mock(return_value=0)
    autoScalerController.onCompleted = mock.Mock(return_value=0)
    autoScalerController.onControlPeriod = mock.Mock(return_value=0)
    autoScalerController.onStatus = mock.Mock(return_value=0)

    autoScaler = AutoScaler(sim, loadBalancer, controller = autoScalerController)
    assert str(autoScaler)

    server1 = mock.Mock(name = 'server1')
    server2 = mock.Mock(name = 'server2')
    autoScaler.addBackend(server1)
    autoScaler.addBackend(server2)

    r = Request()
    autoScaler.request(r)
    sim.add(100, lambda: autoScaler.scaleUp())
    sim.run(until = 1000)

    # TODO: Check exact call parameters
    assert autoScalerController.onRequest.call_count == 1, autoScalerController.onRequest.call_count
    assert autoScalerController.onCompleted.call_count == 1, autoScalerController.onCompleted.call_count
    assert autoScalerController.onStatus.call_count == 3, autoScalerController.onStatus.call_count
    assert autoScalerController.onControlPeriod.call_count == 1000, autoScalerController.onControlPeriod.call_count
Example #4
    def send(self, request: Request):
        if request.headers['Type'] == 'CONNECT':
            logging.info("Process CONNECT HTTP Verb on {}".format(
                request.headers['Host']))
            request.conn.send(
                b'HTTP/1.1 200 OK\r\nProxy-agent: Python Proxy/0.1.0 Draft 1\r\n\r\n'
            )
            request.connect_2_step()
            logging.info("CONNECT HTTP Successfully on {}".format(
                request.headers['Host']))

        logging.info("Send request {} to {}:{}".format(
            request.headers['Type'], request.headers['Host'],
            request.headers['Port']))
        # Initialize ssl client socket
        context = ssl.SSLContext(
            ssl.PROTOCOL_SSLv23)  # Negotiates the highest TLS version both sides support
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations("E:\\Desktop\\proxy\\certs\\ca-cert.pem")
        response = context.wrap_socket(socket.socket(socket.AF_INET,
                                                     socket.SOCK_STREAM),
                                       server_hostname=request.headers['Host'])
        response.connect((request.headers['Host'], 443))
        # Forward the client's request to the remote server
        response.send(request.raw_data)
        data = b''
        response.settimeout(10)
        while True:
            try:
                t = response.recv(self._BUFFER)
                if t != b'':
                    data += t
                else:
                    break
            except socket.timeout:
                break
        logging.info("Get correctly response of {}".format(
            request.headers['Host']))
        request.conn.send(data)
        request.conn.close()
        response.close()
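For the TLS client setup above, the standard library also offers ssl.create_default_context(), which enables certificate verification and hostname checking by default. A minimal sketch of the same outbound connection using it; the function name, CA bundle path, and timeout are placeholders, not values from this project:

import socket
import ssl

def open_verified_tls_connection(host, port=443, ca_file=None):
    # create_default_context() sets verify_mode = CERT_REQUIRED and check_hostname = True.
    context = ssl.create_default_context(cafile=ca_file)
    raw_sock = socket.create_connection((host, port), timeout=10)
    # Wrap the TCP socket; server_hostname drives SNI and hostname verification.
    return context.wrap_socket(raw_sock, server_hostname=host)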
def test_remove_while_request_not_in_progress():
    sim = SimulatorKernel(outputDirectory = None)

    server1 = MockServer(sim, latency = 0.1)
    server2 = MockServer(sim, latency = 0.1)

    lb = LoadBalancer(sim)
    lb.addBackend(server1)
    lb.addBackend(server2)

    onShutdownCompleted = Mock()

    def remove_active_server():
        if server1.numSeenRequests:
            lb.removeBackend(server1, onShutdownCompleted)
        else:
            lb.removeBackend(server2, onShutdownCompleted)

    r1 = Request()
    r1.onCompleted = Mock()
    sim.add(0, lambda: lb.request(r1))
    sim.add(1, lambda: remove_active_server())
    sim.add(1, lambda: lb.request(Request()))
    sim.add(2, lambda: lb.request(Request()))
    sim.add(2, lambda: lb.request(Request()))
    sim.run()

    r1.onCompleted.assert_called_once_with()
    onShutdownCompleted.assert_called_once_with()
    assert server1.numSeenRequests == 1 or server2.numSeenRequests == 1
    assert server1.numSeenRequests == 3 or server2.numSeenRequests == 3
def test_with_controller_always_yes():
    completedRequests = []

    controller = Mock()
    controller.withOptional.return_value = True, 1

    sim = SimulatorKernel(outputDirectory = None)
    server = Server(sim,
            serviceTimeY = 10, serviceTimeYVariance = 0,
            serviceTimeN =  1, serviceTimeNVariance = 0)
    server.controller = controller

    r = Request()
    r.onCompleted = lambda: completedRequests.append(r)
    sim.add(0, lambda: server.request(r))
    
    r2 = Request()
    r2.onCompleted = lambda: completedRequests.append(r2)
    sim.add(0, lambda: server.request(r2))

    sim.run()

    controller.withOptional.assert_called_with()

    assert set(completedRequests) == set([ r, r2 ])
    assert abs(server.getActiveTime() - 20.0) < eps, server.getActiveTime()
Example #7
def application(environ, start_response):
    # body = 'Request type: {}'.format(environ['QUERY_STRING'])
    # status = '200 OK'
    # headers = [
    #     ('Content-Type', 'text/plain'),
    #     ('Content-Length', str(len(body)))
    # ]
    # start_response(status, headers)
    # return body
    view = get_view(environ)
    # try:
    status, headers, body = view(Request(environ))
    # except Exception as error:
    #     status, headers, body = view_500(error)
    start_response(status, headers)
    return body
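The WSGI callable above can be served without any framework. A minimal sketch using the standard library's wsgiref server, assuming get_view and Request come from the surrounding project and that the host/port below are placeholders:

from wsgiref.simple_server import make_server

if __name__ == '__main__':
    # Serve the application on localhost:8000 until interrupted.
    with make_server('localhost', 8000, application) as httpd:
        httpd.serve_forever()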
def test_invalid_action():
    sim = SimulatorKernel(outputDirectory = None)

    loadBalancer = MockLoadBalancer(sim, latency = 1)
    autoScalerController = mock.Mock()
    autoScalerController.controlInterval = 1
    autoScalerController.onRequest = mock.Mock(return_value=-2)
    autoScalerController.onCompleted = mock.Mock(return_value=0)
    autoScalerController.onControlPeriod = mock.Mock(return_value=0)
    autoScalerController.onStatus = mock.Mock(return_value=0)

    autoScaler = AutoScaler(sim, loadBalancer, controller = autoScalerController)
    assert str(autoScaler)

    server1 = mock.Mock(name = 'server1')
    server2 = mock.Mock(name = 'server2')
    autoScaler.addBackend(server1)
    autoScaler.addBackend(server2)

    r = Request()
    autoScaler.request(r)
    sim.add(100, lambda: autoScaler.scaleUp())
    sim.run()
Example #9
    def listen(self):
        """
        This method accept petitions.
        :return: Request
        """
        if self._STATUS:
            cnn, ad = self._SOCK.accept()  # Accept connection from browser
            cnn.settimeout(0.5)  # Set a short receive timeout on the socket
            try:
                data = b''
                info = cnn.recv(self._BUFFER)  # Receive the first chunk
                while info:
                    data += info
                    try:
                        info = cnn.recv(self._BUFFER)  # Receive the next chunk
                    except socket.timeout:
                        break
            except socket.timeout:
                logging.warning('[!] Request response empty')
                data = b''

            return Request(data, cnn, ad)
        else:
            logging.warning("[!] Server is down, method don't execute...")
def test_without_controller():
    completedRequests = []

    sim = SimulatorKernel(outputDirectory = None)

    server = Server(sim, serviceTimeY = 1, serviceTimeYVariance = 0)

    r = Request()
    r.onCompleted = lambda: completedRequests.append(r)
    sim.add(0, lambda: server.request(r))
    
    r2 = Request()
    r2.onCompleted = lambda: completedRequests.append(r2)
    sim.add(0, lambda: server.request(r2))

    sim.run()

    assert set(completedRequests) == set([ r, r2 ])
    assert server.getActiveTime() == 2.0, server.getActiveTime()
Example #11
    def request(self, request):
        #self.sim.log(self, "Got request {0}", request)
        request.arrival = self.sim.now
        if self.algorithm in [
                'weighted-RR', 'theta-diff', 'theta-diff-plus', 'equal-thetas',
                'ctl-simplify'
        ]:
            chosenBackendIndex = \
             weightedChoice(zip(range(0, len(self.backends)), self.weights), self.random)
        elif self.algorithm == 'equal-thetas-SQF' or self.algorithm == 'equal-thetas-fast' or self.algorithm == 'equal-thetas-fast-mul':
            # Update controller in the -fast version
            if self.algorithm == 'equal-thetas-fast' or self.algorithm == 'equal-thetas-fast-mul':
                dt = self.sim.now - self.lastDecision
                if dt > 1: dt = 1
                for i in range(0, len(self.backends)):
                    # Gain
                    gamma = self.equal_thetas_fast_gain * dt

                    # Calculate the negative deviation from the average
                    e = self.lastThetas[i] - avg(self.lastThetas)
                    # Integrate the negative deviation from the average
                    self.queueOffsets[i] += gamma * e  # + Kp * (e - self.lastThetaErrors[i])
                    self.lastThetaErrors[i] = e
                self.lastDecision = self.sim.now

            # To prevent starvation, choose a random empty server..
            empty_servers = [i for i in range(0, len(self.queueLengths)) \
             if self.queueLengths[i] == 0]

            if empty_servers:
                chosenBackendIndex = self.random.choice(empty_servers)
            else:
                if self.algorithm == 'equal-thetas-fast-mul':
                    # ...or choose replica with shortest (queue * 2 ** queueOffset)
                    chosenBackendIndex = \
                     min(range(0, len(self.queueLengths)), \
                     key = lambda i: self.queueLengths[i] * (2 ** (-self.queueOffsets[i])))
                else:
                    # ...or choose replica with shortest (queue + queueOffset)
                    chosenBackendIndex = \
                     min(range(0, len(self.queueLengths)), \
                     key = lambda i: self.queueLengths[i]-self.queueOffsets[i])

        elif self.algorithm == 'theta-diff-plus-SQF':
            # choose replica with shortest (queue + queueOffset)
            chosenBackendIndex = \
             min(range(0, len(self.queueLengths)), \
             key = lambda i: self.queueLengths[i]-self.queueOffsets[i])
        elif self.algorithm == 'random':
            # pick a backend uniformly at random
            chosenBackendIndex = \
             self.random.choice(range(0, len(self.backends)))
        elif self.algorithm == 'RR':
            # round robin
            chosenBackendIndex = \
             (self.numRequests % len(self.backends)) - 1
        elif self.algorithm == 'SQF':
            # choose replica with shortest queue
            chosenBackendIndex = \
             min(range(0, len(self.queueLengths)), \
             key = lambda i: self.queueLengths[i])
        elif self.algorithm == 'SQF-plus':
            # choose replica with shortest queue
            minIndices = [
                i for i, x in enumerate(self.queueLengths)
                if x == min(self.queueLengths)
            ]
            if len(minIndices) == 1:
                chosenBackendIndex = minIndices[0]
            else:
                dimmers = [self.lastThetas[i] for i in minIndices]
                maxDimmerIndex = dimmers.index(max(dimmers))
                chosenBackendIndex = minIndices[maxDimmerIndex]
        elif self.algorithm == '2RC':
            maxlat = [max(x) if x else 0 for x in self.lastLatencies]
            if len(self.backends) == 1:
                chosenBackendIndex = 0
            # randomly select two backends and send the request to the one with the lower latency
            else:
                backends = list(range(0, len(self.backends)))
                randomlychosen = self.random.sample(backends, 2)
                if maxlat[randomlychosen[0]] > maxlat[randomlychosen[1]]:
                    chosenBackendIndex = randomlychosen[1]
                else:
                    chosenBackendIndex = randomlychosen[0]
        elif self.algorithm == 'FRF':
            # choose replica with minimum latency
            maxlat = [max(x) if x else 0 for x in self.lastLatencies]
            chosenBackendIndex = \
             maxlat.index(min(maxlat))
        elif self.algorithm == 'FRF-EWMA':
            # choose replica with minimum EWMA latency
            #self.sim.log(self, "EWMA RT {0}", self.ewmaResponseTime)
            chosenBackendIndex = \
             min(range(0, len(self.backends)), \
             key = lambda i: self.ewmaResponseTime[i])
        elif self.algorithm == 'predictive':
            maxlat = np.array([max(x) if x else 0 for x in self.lastLatencies])
            maxlatLast = np.array(
                [max(x) if x else 0 for x in self.lastLastLatencies])
            wlat = 0.2
            wqueue = 0.8
            points = wlat * (maxlat - maxlatLast) + wqueue * (
                np.array(self.queueLengths) - np.array(self.lastQueueLengths))
            # choose replica with shortest queue
            chosenBackendIndex = \
             min(range(0, len(points)), \
             key = lambda i: points[i])
        elif self.algorithm == "SRTF":
            # choose replica with shortest "time" queue
            #chosenBackendIndex = \
            #	min(range(0, len(self.backends)), \
            #	key = lambda i: sum([r.remainingTime if hasattr(r, 'remainingTime') else 0 for r in self.backends[i].activeRequests]))
            chosenBackendIndex = \
             min(range(0, len(self.queueLengths)), \
             key = lambda i: self.queueLengths[i] * (self.backends[i].serviceTimeY * self.lastThetas[i] + self.backends[i].serviceTimeN * (1 - self.lastThetas[i])))
        elif self.algorithm == 'theta-diff-plus-fast':
            dt = self.sim.now - self.lastDecision
            if dt > 1: dt = 1

            for i in range(0, len(self.backends)):
                # Gain
                Kp = 0.25
                Ti = 5.0
                gammaTr = .01

                # PI control law
                e = self.lastThetas[i] - self.lastLastThetas[i]
                self.queueOffsets[i] += (Kp * e +
                                         (Kp / Ti) * self.lastThetas[i]) * dt

                # Anti-windup
                self.queueOffsets[i] -= gammaTr * (self.queueOffsets[i] -
                                                   self.queueLengths[i]) * dt
                self.lastThetaErrors[i] = e

            self.lastDecision = self.sim.now

            # choose replica with shortest (queue + queueOffset)
            chosenBackendIndex = \
             min(range(0, len(self.queueLengths)), \
             key = lambda i: self.queueLengths[i]-self.queueOffsets[i])

        else:
            raise Exception("Unknown load-balancing algorithm " +
                            self.algorithm)

        request.chosenBackend = self.backends[chosenBackendIndex]
        newRequest = Request()
        newRequest.originalRequest = request
        newRequest.onCompleted = lambda: self.onCompleted(newRequest)
        #self.sim.log(self, "Directed request to {0}", chosenBackendIndex)
        self.queueLengths[chosenBackendIndex] += 1
        self.numRequestsPerReplica[chosenBackendIndex] += 1
        self.backends[chosenBackendIndex].request(newRequest)
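Several of the branches above delegate to a weightedChoice helper. A minimal sketch of such a helper, assuming it takes (item, weight) pairs and a random.Random instance and returns one item with probability proportional to its weight; the project's own implementation may differ:

def weightedChoice(choices, rng):
    # choices: iterable of (item, weight) pairs; rng: a random.Random instance.
    choices = list(choices)
    total = sum(weight for _, weight in choices)
    threshold = rng.uniform(0, total)
    cumulative = 0.0
    for item, weight in choices:
        cumulative += weight
        if cumulative >= threshold:
            return item
    return choices[-1][0]  # guard against floating-point rounding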