예제 #1
0
    def run_balance_test(self,
                         user=None,
                         default_route=None,
                         side_effect=None):
        """Query the /balance endpoint and return (body, status code)."""
        # Connect to the PB port and bring the connector up for this user.
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector(user, default_route,
                                                    side_effect)

        # Credentials forwarded to the HTTP balance endpoint.
        credentials = {
            'username': self.params['username'],
            'password': self.params['password'],
        }
        balance_url = 'http://127.0.0.1:1401/balance'

        # Issue the balance-check request through treq.
        http_client = HTTPClient(Agent(reactor))
        response = yield http_client.get(balance_url, params=credentials)
        body = yield text_content(response)
        status = response.code

        # Give SmppClientConnectors five seconds to settle before stopping.
        yield waitFor(5)
        yield self.stopSmppClientConnectors()

        defer.returnValue((body, status))
예제 #2
0
def main(reactor, *args):
    """Fetch a protected resource with basic auth and print the response."""
    client = HTTPClient(make_custom_agent(reactor))
    deferred = client.get('https://secure.example.net/area51',
                          auth=('admin', "you'll never guess!"))
    deferred.addCallback(print_response)
    return deferred
예제 #3
0
    def run_rate_test(self,
                      user=None,
                      content=None,
                      source_address=None,
                      destination_address=None,
                      default_route=None,
                      side_effect=None):
        """Issue a /rate request and return (body, status code)."""
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector(user, default_route,
                                                    side_effect)

        # Adjust request parameters; a None content means "omit it".
        if content is None:
            del self.params['content']
        else:
            self.params['content'] = content
        if source_address is not None:
            self.params['from'] = source_address
        if destination_address is not None:
            self.params['to'] = destination_address
        rate_url = 'http://127.0.0.1:1401/rate'

        # Query the rate endpoint through treq.
        http_client = HTTPClient(Agent(reactor))
        response = yield http_client.get(rate_url, params=self.params)
        body = yield text_content(response)
        status = response.code

        # Give SmppClientConnectors five seconds to settle before stopping.
        yield waitFor(5)
        yield self.stopSmppClientConnectors()

        defer.returnValue((body, status))
예제 #4
0
 def handle_outbound_message(self, message):
     """Forward an outbound message to the AfricasTalking HTTP endpoint."""
     # The transport does not make any attempt to
     # interpret AfricasTalking responses
     self.emit("consuming %s" % message)
     message_id = message['message_id']
     # Reject outright when mandatory fields are missing.
     missing_fields = self.ensure_message_values(message,
                                                 ['to_addr', 'content'])
     if missing_fields:
         returnValue(self.reject_message(message, missing_fields))
     # Payload shape expected by the bulk-SMS endpoint (bulkSMSMode=1).
     outbound_msg = {
         'username': self.username,
         'to': ','.join(message.payload['to_addr']),
         'message': message.payload['content'].encode('utf-8'),
         'bulkSMSMode': 1,
     }
     self.emit("outbound message {}".format(outbound_msg))
     http_client = HTTPClient(self.agent_factory)
     args = dict(url=self.outbound_url,
                 data=outbound_msg,
                 headers=self.headers,
                 allow_redirects=False)
     response = yield http_client.post(**args)
     # Validate the response, then record the delivery status.
     validate = yield self.validate_outbound(response)
     validate['message_id'] = message_id
     yield self.outbound_status(**validate)
예제 #5
0
 def validate_ticket(self, ticket, request):
     """
     Validate a CAS ticket: reconstruct the service URL (current URL with
     the ticket parameter removed), then query the service-validate
     endpoint and hand the body to ``parse_sv_results``.
     """
     service_name = self.service_name
     ticket_name = self.ticket_name

     # Strip the ticket parameter from the current URL; the remainder is
     # the service URL the ticket was issued for.
     this_url = self.get_url(request)
     p = urlparse.urlparse(this_url)
     qs_map = urlparse.parse_qs(p.query)
     if ticket_name in qs_map:
         del qs_map[ticket_name]
     # BUG FIX: parse_qs() maps each key to a *list* of values, so
     # urlencode() needs doseq=True; without it the lists are stringified
     # (e.g. "key=%5B%27v%27%5D") instead of re-encoded as key=v pairs.
     # This matches the other validate_ticket implementations in this file.
     param_str = urlencode(qs_map, doseq=True)
     # Index 4 of the 6-tuple is the query string; rebuild around it.
     p = urlparse.ParseResult(*tuple(p[:4] + (param_str,) + p[5:]))
     service_url = urlparse.urlunparse(p)

     # Compose the service-validate URL carrying service + ticket.
     params = {
         service_name: service_url,
         ticket_name: ticket,
     }
     param_str = urlencode(params, doseq=True)
     p = urlparse.urlparse(self.cas_info['service_validate_url'])
     p = urlparse.ParseResult(*tuple(p[:4] + (param_str,) + p[5:]))
     service_validate_url = urlparse.urlunparse(p)

     log.msg("[INFO] requesting URL '%s' ..." % service_validate_url)
     http_client = HTTPClient(self.agent)
     d = http_client.get(service_validate_url)
     d.addCallback(treq.content)
     d.addCallback(self.parse_sv_results, service_url, ticket, request)
     return d
예제 #6
0
 def __init__(self,
              dockerAddr=None,
              dockerPort=None,
              dockerSocket=None,
              path='',
              reactor=reactor,
              config=None):
     """
     A docker proxy resource which knows how to connect to real Docker
     daemon either via socket (dockerSocket specified) or address + port for
     TCP connection (dockerAddr + dockerPort specified).
     """
     if config is None:
         # Try to get the configuration from the default place on the
         # filesystem.
         self.config = PluginConfiguration()
     else:
         self.config = config
     self.config.read_and_parse()
     self.parser = EndpointParser(self.config)
     # Initialise both base classes explicitly (multiple inheritance).
     Resource.__init__(self)
     self.host = dockerAddr
     self.port = dockerPort
     self.socket = dockerSocket
     self.path = path
     self.reactor = reactor
     proxy.ReverseProxyResource.__init__(
         self, dockerAddr, dockerPort, path,
         reactor)  # NB dockerAddr is not actually used
     # treq client over a pool-less Agent for talking to the daemon.
     self.agent = Agent(reactor)  # no connectionpool
     self.client = HTTPClient(self.agent)
예제 #7
0
 def validate_ticket(self, ticket, request):
     """
     Validate a CAS ticket: rebuild the current URL without the ticket
     parameter (that is the service URL), then query the service-validate
     endpoint and parse its results.
     """
     service_name = self.service_name
     ticket_name = self.ticket_name
     this_url = self.get_url(request)
     # Strip the ticket parameter from the current URL's query string.
     p = urlparse.urlparse(this_url)
     qs_map = urlparse.parse_qs(p.query)
     if ticket_name in qs_map:
         del qs_map[ticket_name]
     # doseq=True because parse_qs maps each key to a list of values.
     param_str = urlencode(qs_map, doseq=True)
     # Index 4 of the URL 6-tuple is the query string; rebuild around it.
     p = urlparse.ParseResult(*tuple(p[:4] + (param_str, ) + p[5:]))
     service_url = urlparse.urlunparse(p)
     # Compose the service-validate URL carrying service + ticket.
     params = {
         service_name: service_url,
         ticket_name: ticket,
     }
     param_str = urlencode(params, doseq=True)
     p = urlparse.urlparse(self.cas_info['service_validate_url'])
     p = urlparse.ParseResult(*tuple(p[:4] + (param_str, ) + p[5:]))
     service_validate_url = urlparse.urlunparse(p)
     self.log("Requesting service-validate URL => '{0}' ...".format(
         service_validate_url))
     http_client = HTTPClient(self.cas_agent)
     d = http_client.get(service_validate_url)
     d.addCallback(treq.content)
     d.addCallback(self.parse_sv_results, service_url, ticket, request)
     return d
예제 #8
0
    def run_test(self, content, datacoding=None, port=1401):
        """Send *content* via /send and assert the gateway reports success."""
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()

        # Install the message content and (optionally) its datacoding.
        self.params['content'] = content
        if datacoding is not None:
            self.params['coding'] = datacoding
        elif 'coding' in self.params:
            del self.params['coding']
        baseurl = 'http://127.0.0.1:%s/send' % port

        # POST the MT message; the body starts with a status token.
        http_client = HTTPClient(Agent(reactor))
        response = yield http_client.post(baseurl, data=self.params)
        body = yield text_content(response)
        msgStatus = body[:7]

        # Pause two seconds so the connectors can settle, then stop them.
        pause = defer.Deferred()
        reactor.callLater(2, pause.callback, None)
        yield pause

        yield self.stopSmppClientConnectors()

        self.assertEqual(msgStatus, 'Success')
예제 #9
0
 def validate_ticket(self, ticket, request):
     """
     Validate a CAS ticket: rebuild the current URL without the ticket
     parameter (the service URL), then query the service-validate endpoint
     and parse its results.
     """
     service_name = self.service_name
     ticket_name = self.ticket_name
     this_url = self.get_url(request)
     # Strip the ticket parameter from the current URL's query string.
     p = urlparse.urlparse(this_url)
     qs_map = urlparse.parse_qs(p.query)
     if ticket_name in qs_map:
         del qs_map[ticket_name]
     # doseq=True because parse_qs maps each key to a list of values.
     param_str = urlencode(qs_map, doseq=True)
     p = urlparse.ParseResult(*tuple(p[:4] + (param_str,) + p[5:]))
     service_url = urlparse.urlunparse(p)
     # Compose the service-validate URL carrying service + ticket.
     params = {
             service_name: service_url,
             ticket_name: ticket,}
     param_str = urlencode(params, doseq=True)
     p = urlparse.urlparse(self.cas_info['service_validate_url'])
     p = urlparse.ParseResult(*tuple(p[:4] + (param_str,) + p[5:]))
     service_validate_url = urlparse.urlunparse(p)
     self.log(
         "Requesting service-validate URL => '{0}' ...".format(
             service_validate_url))
     http_client = HTTPClient(self.cas_agent)
     d = http_client.get(service_validate_url)
     d.addCallback(treq.content)
     d.addCallback(self.parse_sv_results, service_url, ticket, request)
     return d
예제 #10
0
    def test_rate_interceptorpb_not_connected(self):
        """Rate requests must fail with 503 while InterceptorPB is down."""
        # Snapshot interceptor counters for the delta assertions below.
        interceptor_count = self.stats_http.get('interceptor_count')
        interceptor_errors = self.stats_http.get('interceptor_error_count')

        # Fire a rate query at the HTTP interface.
        rate_url = 'http://127.0.0.1:1401/rate'
        query = {
            'to': '06155423',
            'username': self.u1.username,
            'password': self.u1_password
        }

        http_client = HTTPClient(Agent(reactor))
        response = yield http_client.get(rate_url, params=query)

        status = response.code
        body = yield text_content(response)

        # The request is rejected and only the error counter moves.
        self.assertEqual(status, 503)
        self.assertEqual(body, '"InterceptorPB not connected !"')
        self.assertEqual(interceptor_count,
                         self.stats_http.get('interceptor_count'))
        self.assertEqual(interceptor_errors + 1,
                         self.stats_http.get('interceptor_error_count'))
예제 #11
0
    def test_rate_syntax_error(self):
        """A broken interception script must yield a 400 error response."""
        # Snapshot interceptor counters for the delta assertions below.
        _ic = self.stats_http.get('interceptor_count')
        _iec = self.stats_http.get('interceptor_error_count')

        # Connect to InterceptorPB
        yield self.ipb_connect()

        # Send a SMS MT through http interface
        url = 'http://127.0.0.1:1401/rate'
        params = {
            'to': '06155423',
            'username': self.u1.username,
            'password': self.u1_password
        }

        agent = Agent(reactor)
        client = HTTPClient(agent)
        response = yield client.get(url, params=params)

        lastErrorStatus = response.code
        lastResponse = yield text_content(response)

        # Asserts: the request fails and only the error counter moves.
        self.assertEqual(lastErrorStatus, 400)
        self.assertEqual(
            lastResponse,
            '"Failed running interception script, check log for details"')
        self.assertEqual(_ic, self.stats_http.get('interceptor_count'))
        self.assertEqual(_iec + 1,
                         self.stats_http.get('interceptor_error_count'))
예제 #12
0
    def test_rate_success(self):
        """With a valid script and InterceptorPB connected, /rate succeeds."""
        # Snapshot interceptor counters for the delta assertions below.
        _ic = self.stats_http.get('interceptor_count')
        _iec = self.stats_http.get('interceptor_error_count')

        # Re-provision interceptor with correct script
        # NOTE(review): attribute name 'update_message_sript' (sic) comes
        # from the test fixture.
        mt_interceptor = MTInterceptorScript(self.update_message_sript)
        yield self.mtinterceptor_add(DefaultInterceptor(mt_interceptor), 0)

        # Connect to InterceptorPB
        yield self.ipb_connect()

        # Send a SMS MT through http interface
        url = 'http://127.0.0.1:1401/rate'
        params = {
            'to': '06155423',
            'username': self.u1.username,
            'password': self.u1_password
        }

        # Interceptor is connected and the script is valid: expect success.

        agent = Agent(reactor)
        client = HTTPClient(agent)
        response = yield client.get(url, params=params)

        lastErrorStatus = response.code
        lastResponse = yield text_content(response)

        # Asserts: success, and only the interceptor counter moves.
        self.assertEqual(lastErrorStatus, 200)
        self.assertEqual(_ic + 1, self.stats_http.get('interceptor_count'))
        self.assertEqual(_iec, self.stats_http.get('interceptor_error_count'))
예제 #13
0
    def setUp(self):
        """
        Construct a fake "Docker daemon" (one which does much less than the
        actual Docker daemon) and a Proxy instance.

        Pre- and post-hook API servers are provided by the individual tests.
        """
        # A pool-less Agent backing the treq client used by the tests.
        self.agent = Agent(reactor)
        self.client = HTTPClient(self.agent)
예제 #14
0
    def setUp(self):
        """Create a mocked Agent/HTTPClient pair and patch body producers."""
        self.agent = mock.Mock(Agent)
        self.client = HTTPClient(self.agent)

        # Patch out both body-producer classes for the test's lifetime,
        # keeping the patcher and started-mock attributes callers expect.
        for patcher_attr, producer_attr, target in (
                ('fbp_patcher', 'FileBodyProducer',
                 'treq.client.FileBodyProducer'),
                ('mbp_patcher', 'MultiPartProducer',
                 'treq.multipart.MultiPartProducer')):
            patcher = mock.patch(target)
            setattr(self, patcher_attr, patcher)
            setattr(self, producer_attr, patcher.start())
            self.addCleanup(patcher.stop)
예제 #15
0
    def run_send_test(self,
                      user=None,
                      content='anycontent',
                      hex_content=None,
                      dlr_level=None,
                      dlr_method=None,
                      source_address=None,
                      priority=None,
                      schedule_delivery_time=None,
                      validity_period=None,
                      destination_address=None,
                      default_route=None,
                      side_effect=None):
        """POST a MT message to /send and return (body, status code)."""
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector(user, default_route,
                                                    side_effect)

        # 'content' is special: None means "omit the parameter entirely".
        if content is None:
            del self.params['content']
        else:
            self.params['content'] = content
        # The remaining optional arguments map 1:1 onto request parameters.
        optional = (
            ('hex-content', hex_content),
            ('dlr-level', dlr_level),
            ('dlr-method', dlr_method),
            ('from', source_address),
            ('priority', priority),
            ('sdt', schedule_delivery_time),
            ('validity-period', validity_period),
            ('to', destination_address),
        )
        for key, value in optional:
            if value is not None:
                self.params[key] = value
        baseurl = 'http://127.0.0.1:1401/send'

        # POST the MT; on success the body carries a message id.
        http_client = HTTPClient(Agent(reactor))
        response = yield http_client.post(baseurl, data=self.params)
        body = yield text_content(response)
        status = response.code

        # Give SmppClientConnectors five seconds to settle before stopping.
        yield waitFor(5)
        yield self.stopSmppClientConnectors()

        defer.returnValue((body, status))
예제 #16
0
 def perform_task(reactor):
     """PUT to *url* and run the follow-up chain, then stop the reactor."""
     http = HTTPClient(Agent(reactor))
     # Chain: PUT -> check creation -> decode JSON -> install design doc
     # -> report -> decode JSON; errors are logged, then the reactor stops.
     d = http.put(url, auth=(admin, passwd))
     d.addCallback(check_created)
     d.addCallback(json_content)
     d.addCallback(create_design_doc, http, scheme, host, port, db, admin,
                   passwd)
     d.addCallback(report_status)
     d.addCallback(json_content)
     d.addErrback(log_error)
     d.addBoth(stop, reactor)
     return d
예제 #17
0
File: client.py  Project: ra2003/txacme
    def __init__(self,
                 agent,
                 key,
                 alg,
                 user_agent=u'txacme/{}'.format(__version__).encode('ascii')):
        """
        :param agent: Twisted web agent wrapped in a treq ``HTTPClient``
            for all requests.
        :param key: signing key (presumably the ACME account key -- confirm
            against the caller).
        :param alg: signature algorithm used with *key*.
        :param user_agent: ``User-Agent`` header value; note the default is
            an immutable ``bytes`` computed once at class-definition time.
        """
        self._treq = HTTPClient(agent=agent)
        self._agent = agent
        # Tracks the request in flight; None when idle.
        self._current_request = None
        self._key = key
        self._alg = alg
        self._user_agent = user_agent

        # Collected anti-replay nonces (a set, so each is used only once).
        self._nonces = set()
예제 #18
0
 def perform_task(reactor):
     """PUT to *url* and run the follow-up chain, then stop the reactor."""
     agent = Agent(reactor)
     http = HTTPClient(agent)
     # Chain: PUT -> check creation -> decode JSON -> install design doc
     # -> report -> decode JSON.
     d = http.put(url, auth=(admin, passwd))
     d.addCallback(check_created)
     d.addCallback(json_content)
     d.addCallback(create_design_doc, http, scheme, host, port, db, admin,
                   passwd)
     d.addCallback(report_status)
     d.addCallback(json_content)
     #d.addCallback(print_result)
     d.addErrback(log_error)
     # Stop the reactor whether the chain succeeded or failed.
     d.addBoth(stop, reactor)
     return d
예제 #19
0
class PathResource(resource.Resource):
    """
    Docker has asked us for the concrete on-disk location of an extant volume.
    If it hasn't already asked for it to be mounted, or is currently on another
    machine, this is an error.
    """
    def __init__(self, *args, **kw):
        self._agent = Agent(reactor) # no connectionpool
        self.client = HTTPClient(self._agent)
        return resource.Resource.__init__(self, *args, **kw)

    def render_POST(self, request):
        # TODO make a FlockerResource base class
        self.base_url = os.environ.get("FLOCKER_CONTROL_SERVICE_BASE_URL")
        # expect Name
        data = json.loads(request.content.read())
        print "path:", data
        d = self.client.get(self.base_url + "/configuration/datasets")
        d.addCallback(treq.json_content)
        def get_dataset(datasets):
            dataset_id = None
            # 1. find the flocker dataset_id of the named volume
            # 2. look up the path of that volume in the datasets current state
            for dataset in datasets:
                if dataset["metadata"]["name"] == data["Name"]:
                    dataset_id = dataset["dataset_id"]
            d = self.client.get(self.base_url + "/state/datasets")
            d.addCallback(treq.json_content)
            def get_path(datasets, dataset_id):
                if dataset_id is None:
                    path = None
                else:
                    for dataset in datasets:
                        if dataset["dataset_id"] == dataset_id:
                            path = dataset["path"]
                if path is not None:
                    request.write(json.dumps(dict(
                         Mountpoint=path,
                         Err=None,
                    )))
                else:
                    request.write(json.dumps(dict(
                         Mountpoint="",
                         Err="unable to find %s" % (data["Name"],),
                    )))
                request.finish()
            d.addCallback(get_path, dataset_id=dataset_id)
            return d
        d.addCallback(get_dataset)
        return server.NOT_DONE_YET
예제 #20
0
 def request(self, url: str, expected_certificate: x509.Certificate):
     """
     Send a HTTPS request to the given URL, ensuring that the given
     certificate is the one used via SPKI-hash-based pinning comparison.
     """
     # No persistent connections, so we don't have dirty reactor at the end
     # of the test.
     treq_client = HTTPClient(
         Agent(
             reactor,
             # TLS policy pinned to this certificate's SPKI hash.
             _StorageClientHTTPSPolicy(
                 expected_spki_hash=get_spki_hash(expected_certificate)),
             pool=HTTPConnectionPool(reactor, persistent=False),
         ))
     # Returns treq's Deferred firing with the response object.
     return treq_client.get(url)
예제 #21
0
File: scpd.py  Project: jackrobison/txupnp
 def __init__(self, gateway_address, service_port, control_url, service_id, method, param_names, returns,
              reactor=None):
     """Store the UPnP control-point call parameters and build an HTTP
     client for issuing the request.

     :param reactor: optional Twisted reactor; when falsy, the global
         reactor is imported and used.
     """
     if not reactor:
         # Late import deliberately rebinds the local name 'reactor'.
         from twisted.internet import reactor
     self._reactor = reactor
     self._pool = HTTPConnectionPool(reactor)
     # 1-second connect timeout for gateway requests.
     self.agent = Agent(reactor, connectTimeout=1)
     self._http_client = HTTPClient(self.agent, data_to_body_producer=StringProducer)
     self.gateway_address = gateway_address
     self.service_port = service_port
     self.control_url = control_url
     self.service_id = service_id
     self.method = method
     self.param_names = param_names
     self.returns = returns
예제 #22
0
def status(options):
    """
    ``magic-folder status`` entry-point.

    :param StatusOptions options: Values for configurable status parameters.

    :return Deferred: A ``Deferred`` which fires with an exit status for the
        process when the status operation has completed.
    """
    nodedir = options.parent.node_directory
    stdout, stderr = options.stdout, options.stderr

    # Create a client without persistent connections to simplify testing.
    # Connections will typically be to localhost anyway so there isn't
    # much performance difference.
    from twisted.internet import reactor
    treq = HTTPClient(Agent(reactor))

    name = options["name"].decode("utf-8")
    try:
        status_obj = yield _status(
            name,
            FilePath(nodedir),
            treq,
        )
    except Exception as e:
        # Any failure is reported to stderr and maps to exit status 1.
        print(e, file=stderr)
        returnValue(1)
    else:
        print(_format_status(datetime.now(), status_obj), file=stdout)
        returnValue(0)
예제 #23
0
    def test_failed_node_connection(self, folder_name, collective_dircap,
                                    upload_dircap):
        """
        If an HTTP request to the Tahoe-LAFS node fails, ``status`` returns a
        ``Deferred`` that fails with that failure.
        """
        assume(collective_dircap != upload_dircap)

        # Fake node directory with one configured magic folder.
        tempdir = FilePath(self.mktemp())
        node_directory = tempdir.child(u"node")
        node = self.useFixture(NodeDirectory(node_directory))

        node.create_magic_folder(
            folder_name,
            collective_dircap,
            upload_dircap,
            tempdir.child(u"folder"),
            60,
        )

        # An agent whose every request fails with our made-up exception.
        exception = Exception("Made up failure")
        treq = HTTPClient(FailingAgent(Failure(exception)))
        # status() must propagate exactly that exception.
        self.assertThat(
            status(folder_name, node_directory, treq),
            failed(AfterPreprocessing(
                lambda f: f.value,
                Equals(exception),
            ), ),
        )
예제 #24
0
    def __init__(self, base_url='http://localhost:8888', quiet_requests=True, **kwargs):
        """Build an HTTP client for the Marconi API, optionally wrapped in
        Keystone authentication.

        :param base_url: root URL of the Marconi service.
        :param quiet_requests: use a quieter client factory on the pool.
        :param kwargs: may carry auth_url, username, password, api_key.
        """
        self.client_id = str(uuid4())
        self.base_url = base_url
        # Persistent pool; agent follows redirects and decodes gzip.
        pool = HTTPConnectionPool(reactor, persistent=True)
        agent = ContentDecoderAgent(RedirectAgent(Agent(reactor, pool=pool)), [('gzip', GzipDecoder)])

        if quiet_requests:
            pool._factory = QuieterHTTP11ClientFactory

        auth_url = kwargs.get('auth_url')
        if auth_url:
            username = kwargs.get('username')
            password = kwargs.get('password')
            api_key = kwargs.get('api_key')

            if not username:
                raise RuntimeError('Marconi "auth_url" specified with no username')

            # Prefer api_key over password when both are supplied.
            if api_key:
                cred = api_key
                auth_type = 'api_key'
            elif password:
                cred = password
                auth_type = 'password'
            else:
                raise RuntimeError('Marconi "auth_url" specified with no "password" or "api_key"')

            agent = KeystoneAgent(agent, auth_url, (username, cred), auth_type=auth_type)

        self.http_client = HTTPClient(agent)
예제 #25
0
    def __attrs_post_init__(self):
        """Finish initialisation after attrs has populated the fields."""
        MultiService.__init__(self)
        # Default Tahoe client if the caller did not inject one.
        if self.tahoe_client is None:
            self.tahoe_client = create_tahoe_client(
                self.config.tahoe_client_url,
                HTTPClient(Agent(self.reactor)),
            )
        self._listen_endpoint = serverFromString(
            self.reactor,
            self.config.api_endpoint,
        )
        web_service = magic_folder_web_service(
            self._listen_endpoint,
            self.config,
            self,
            self._get_auth_token,
            self.tahoe_client,
            self.status_service,
        )
        web_service.setServiceParent(self)

        # We can create the services for all configured folders right now.
        # They won't do anything until they are started which won't happen
        # until this service is started.
        self._create_magic_folder_services()
예제 #26
0
    def __init__(self,
                 host='127.0.0.1',
                 port=8500,
                 scheme='http',
                 verify=True,
                 cert=None,
                 contextFactory=None,
                 **kwargs):
        """Build the base URI and a pooled treq client for the service."""
        self.host = host
        self.port = port
        self.scheme = scheme
        self.base_uri = '%s://%s:%s' % (self.scheme, self.host, self.port)

        # Extra keyword arguments are forwarded verbatim to the Agent.
        agent_kwargs = dict(kwargs)
        agent_kwargs['reactor'] = reactor
        agent_kwargs['pool'] = HTTPConnectionPool(reactor)
        if contextFactory is not None:
            # An explicit TLS context factory always wins.
            agent_kwargs['contextFactory'] = contextFactory
        elif not verify:
            # No context provided and verification disabled: fall back to
            # the insecure (non-verifying) context factory.
            agent_kwargs['contextFactory'] = InsecureContextFactory()

        self.client = TreqHTTPClient(Agent(**agent_kwargs))
예제 #27
0
    def from_nurl(cls,
                  nurl: DecodedURL,
                  reactor,
                  persistent: bool = True) -> StorageClient:
        """
        Create a ``StorageClient`` for the given NURL.

        ``persistent`` indicates whether to use persistent HTTP connections.
        """
        # Only v1 "pb" NURLs are supported.
        assert nurl.fragment == "v=1"
        assert nurl.scheme == "pb"
        # The swissnum is the first path segment; the expected certificate
        # SPKI hash travels in the userinfo part of the NURL.
        swissnum = nurl.path[0].encode("ascii")
        certificate_hash = nurl.user.encode("ascii")

        # TLS policy pinned to the server certificate's SPKI hash.
        treq_client = HTTPClient(
            Agent(
                reactor,
                _StorageClientHTTPSPolicy(expected_spki_hash=certificate_hash),
                pool=HTTPConnectionPool(reactor, persistent=persistent),
            ))

        https_url = DecodedURL().replace(scheme="https",
                                         host=nurl.host,
                                         port=nurl.port)
        return cls(https_url, swissnum, treq_client)
예제 #28
0
    def setUp(self):
        """Mock the Agent and patch treq's FileBodyProducer for the test."""
        self.agent = mock.Mock(Agent)
        self.client = HTTPClient(self.agent)

        patcher = mock.patch('treq.client.FileBodyProducer')
        self.fbp_patcher = patcher
        self.FileBodyProducer = patcher.start()
        self.addCleanup(patcher.stop)
예제 #29
0
File: api.py  Project: lapki/treq
def _client(*args, **kwargs):
    """Wrap the caller-supplied agent, or build a default pooled Agent."""
    agent = kwargs.get('agent')
    if agent is None:
        reactor = default_reactor(kwargs.get('reactor'))
        agent = Agent(
            reactor,
            pool=default_pool(reactor,
                              kwargs.get('pool'),
                              kwargs.get('persistent')))
    return HTTPClient(agent)
예제 #30
0
File: http.py  Project: patolin/txcas
def createVerifyingHTTPClient(
        reactor, 
        agent_kwds=None, 
        policy_factory=BrowserLikePolicyForHTTPS, 
        **kwds):
    """Create an HTTPClient whose Agent verifies TLS via *policy_factory*."""
    kwargs = normalizeDict_(agent_kwds)
    kwargs['contextFactory'] = policy_factory()
    agent = Agent(reactor, **kwargs)
    return HTTPClient(agent, **kwds)
예제 #31
0
 def http_put(self, url, data, callback=None):
     """PUT *data* to *url*, dispatching the response to *callback*.

     Failures are logged (best-effort delivery, nothing is propagated).
     """
     # Optional shared connection pool from configuration.
     connection_pool = self.config["pool"] if "pool" in self.config else None
     client = HTTPClient(
         Agent(self.reactor, contextFactory=DoNotVerifySSLContextFactory()))
     d = client.put(url, data, timeout=HTTP_TIMEOUT, pool=connection_pool)
     d.addCallbacks(
         lambda response, url=url, callback=callback:
             self.http_response(response, url, callback),
         # BUG FIX: the log message previously said "HTTP GET" although
         # this method performs a PUT.
         errback=lambda error, url=url: anode.Log(logging.ERROR).log(
             "Plugin", "error",
             lambda: "[{}] error processing HTTP PUT [{}] with [{}]".format(
                 self.name, url, error.getErrorMessage())))
예제 #32
0
def _default_client(jws_client, reactor, key, alg):
    """
    Make a client if we didn't get one.
    """
    if jws_client is not None:
        return jws_client
    # Build a pooled treq client and wrap it in a JWSClient.
    agent = Agent(reactor, pool=HTTPConnectionPool(reactor))
    return JWSClient(HTTPClient(agent=agent), key, alg)
예제 #33
0
def main():
    """Fetch https://wtfismyip.com/text through a local Tor SOCKS5 proxy.

    CLEANUP: removed the unused ``url`` variable and the dead
    ``ssl.ClientContextFactory``/``LineReceiver`` setup that belonged to
    the commented-out TLS-wrapped endpoint.
    """
    # Tor's SOCKS port on localhost.
    tor_endpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 9050)

    # treq client whose requests are tunnelled through the SOCKS5 agent.
    socks_agent = SOCKS5Agent(reactor, proxyEndpoint=tor_endpoint)
    socks_client = HTTPClient(socks_agent)

    d = socks_client.get("https://wtfismyip.com/text")
    d.addCallback(readBody)
    d.addCallback(foo)

    reactor.run()
예제 #34
0
    def setUp(self):
        """
        Construct a fake "Docker daemon" (one which does much less than the
        actual Docker daemon) and a Proxy instance.

        Pre- and post-hook API servers are provided by the individual tests.
        """
        # treq client over a pool-less Agent for use by the tests.
        self.agent = Agent(reactor) # no connectionpool
        self.client = HTTPClient(self.agent)
예제 #35
0
    def __init__(self,  base_url, username, password):
        """Store credentials and build a treq client.

        NOTE(review): relies on ``agent_factory``, ``pool_factory`` and
        ``clock`` being available on the instance/class -- confirm they
        are defined elsewhere.
        """
        self.base_url = base_url
        self.username = username
        self.password = password

        # HTTP client built from the injectable agent/pool factories.
        self.http_client = HTTPClient(self.agent_factory(
            self.clock, pool=self.pool_factory(self.clock)))

        # Semaphore with TPS_LIMIT tokens; presumably used by callers to
        # throttle concurrent requests -- confirm usage.
        self.semaphore = defer.DeferredSemaphore(TPS_LIMIT)
예제 #36
0
    def test_throughput_limit_rejection(self):
        """Exceeding a user's http_throughput quota must yield 403 errors."""
        user = copy.copy(self.user1)
        user.mt_credential.setQuota('http_throughput', 2)
        route = DefaultRoute(self.c1, rate=0.0)

        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector(user, route)

        # Set content
        self.params['content'] = 'Any Content'
        baseurl = 'http://127.0.0.1:1401/send'

        # PERF FIX: build the Agent/HTTPClient once instead of once per
        # iteration; each POST is still an independent request (no pool).
        agent = Agent(reactor)
        client = HTTPClient(agent)

        # Send a bunch of MT messages
        # We should receive a msg id for success and error when throughput is exceeded
        start_time = datetime.now()
        throughput_exceeded_errors = 0
        request_counter = 0
        for x in range(5000):
            response = yield client.post(baseurl, data=self.params)
            response_text = yield text_content(response)
            response_code = response.code

            request_counter += 1
            if response_code == 403 and response_text == 'Error "User throughput exceeded"':
                throughput_exceeded_errors += 1
        end_time = datetime.now()

        # Wait 2 seconds before stopping SmppClientConnectors
        yield waitFor(2)
        yield self.stopSmppClientConnectors()

        # Asserts (tolerance of -/+ 3 messages)
        throughput = 1 / float(user.mt_credential.getQuota('http_throughput'))
        dt = end_time - start_time
        # NOTE(review): dt.seconds ignores any whole days in the delta;
        # acceptable here since the loop runs for far less than a day.
        max_unsuccessfull_requests = request_counter - (dt.seconds /
                                                        throughput)
        unsuccessfull_requests = throughput_exceeded_errors

        self.assertGreaterEqual(unsuccessfull_requests,
                                max_unsuccessfull_requests - 3)
        self.assertLessEqual(unsuccessfull_requests,
                             max_unsuccessfull_requests + 3)
예제 #37
0
    def handle_outbound_message(self, message):
        """PUT the outbound message to the messages.json endpoint; publish a
        nack on a non-200 response, otherwise map ids and publish an ack."""
        headers = {
            'Content-Type': 'application/json; charset=utf-8',
        }
        headers.update(self.get_auth_headers())

        # JSON body forwarded to Vumi Go.
        params = {
            'to_addr': message['to_addr'],
            'content': message['content'],
            'message_id': message['message_id'],
            'in_reply_to': message['in_reply_to'],
            'session_event': message['session_event']
        }
        if 'helper_metadata' in message:
            params['helper_metadata'] = message['helper_metadata']

        http_client = HTTPClient(self.agent_factory())
        resp = yield http_client.put(self.get_url('messages.json'),
                                     data=json.dumps(params).encode('utf-8'),
                                     headers=headers)
        resp_body = yield resp.content()

        if resp.code != http.OK:
            # Submission rejected: report the failure and nack the message.
            log.warning('Unexpected status code: %s, body: %s' %
                        (resp.code, resp_body))
            self.update_status(
                status='down',
                component='submitted-to-vumi-go',
                type='bad_request',
                message='Message submission rejected by Vumi Go')
            yield self.publish_nack(message['message_id'],
                                    reason='Unexpected status code: %s' %
                                    (resp.code, ))
            return

        # Success: remember the remote/local id mapping, then ack.
        remote_message = json.loads(resp_body)
        yield self.map_message_id(remote_message['message_id'],
                                  message['message_id'])
        self.update_status(status='ok',
                           component='submitted-to-vumi-go',
                           type='good_request',
                           message='Message accepted by Vumi Go')
        yield self.publish_ack(user_message_id=message['message_id'],
                               sent_message_id=remote_message['message_id'])
예제 #38
0
    def handle_outbound_message(self, message):
        """PUT the outbound message to the messages.json endpoint; publish a
        nack on a non-200 response, otherwise map ids and publish an ack."""
        headers = {
            'Content-Type': 'application/json; charset=utf-8',
        }
        headers.update(self.get_auth_headers())

        # JSON body forwarded to Vumi Go.
        params = {
            'to_addr': message['to_addr'],
            'content': message['content'],
            'message_id': message['message_id'],
            'in_reply_to': message['in_reply_to'],
            'session_event': message['session_event']
        }
        if 'helper_metadata' in message:
            params['helper_metadata'] = message['helper_metadata']

        http_client = HTTPClient(self.agent_factory())
        resp = yield http_client.put(
            self.get_url('messages.json'),
            data=json.dumps(params).encode('utf-8'),
            headers=headers)
        resp_body = yield resp.content()

        if resp.code != http.OK:
            # Submission rejected: report the failure and nack the message.
            log.warning('Unexpected status code: %s, body: %s' % (
                resp.code, resp_body))
            self.update_status(
                status='down', component='submitted-to-vumi-go',
                type='bad_request',
                message='Message submission rejected by Vumi Go')
            yield self.publish_nack(message['message_id'],
                                    reason='Unexpected status code: %s' % (
                                        resp.code,))
            return

        # Success: remember the remote/local id mapping, then ack.
        remote_message = json.loads(resp_body)
        yield self.map_message_id(
            remote_message['message_id'], message['message_id'])
        self.update_status(
            status='ok', component='submitted-to-vumi-go',
            type='good_request', message='Message accepted by Vumi Go')
        yield self.publish_ack(user_message_id=message['message_id'],
                               sent_message_id=remote_message['message_id'])
예제 #39
0
def _client(*args, **kwargs):
    """
    Build an ``HTTPClient``, optionally routed through an HTTP proxy.

    Recognised keyword arguments: ``reactor``, ``pool``, ``persistent``
    and ``proxy`` (an ``(address, port)`` tuple).
    """
    reactor = default_reactor(kwargs.get('reactor'))
    pool = default_pool(reactor, kwargs.get('pool'), kwargs.get('persistent'))
    # Idiom fix: membership test directly on the dict (not `.keys()`) and
    # direct indexing once presence is known.
    if 'proxy' in kwargs:
        # Route every request through the given proxy endpoint.  NOTE: the
        # pool is intentionally unused on this branch, as before.
        address, port = kwargs['proxy']
        endpoint = TCP4ClientEndpoint(reactor, address, port)
        agent = ProxyAgent(endpoint)
    else:
        agent = Agent(reactor, pool=pool)
    return HTTPClient(agent)
예제 #40
0
def http_request_full(url,
                      data=None,
                      headers=None,
                      method='POST',
                      timeout=None,
                      data_limit=None,
                      context_factory=None,
                      agent_class=None,
                      reactor=None):
    """
    This is a drop in replacement for the original `http_request_full` method
    but it has its internals completely replaced by treq. Treq supports SNI
    and our implementation does not for some reason. Also, we do not want
    to continue maintaining this because we're favouring treq everywhere
    anyway.

    :param url: the URL to request.
    :param data: optional request body.
    :param headers: optional mapping of request headers.  (The previous
        mutable default ``headers={}`` has been replaced by ``None`` to
        avoid the shared-mutable-default pitfall; treq treats ``None`` the
        same as an empty mapping.)
    :param method: HTTP method, defaults to ``'POST'``.
    :param timeout: seconds after which the request is cancelled and the
        deferred fails with ``HttpTimeoutError``.
    :param data_limit: forwarded to ``SimplishReceiver``.
    :param context_factory: optional TLS context factory for the agent.
    :param agent_class: agent class to use; defaults to ``Agent``.
    :param reactor: reactor to use; defaults to the global reactor.
    """
    agent_class = agent_class or Agent
    if reactor is None:
        # The import replaces the local variable.
        from twisted.internet import reactor
    # Fresh non-persistent pool per call: no connection reuse across calls.
    kwargs = {'pool': HTTPConnectionPool(reactor, persistent=False)}
    if context_factory is not None:
        kwargs['contextFactory'] = context_factory
    agent = agent_class(reactor, **kwargs)
    client = HTTPClient(agent)

    def handle_response(response):
        return SimplishReceiver(response, data_limit).deferred

    d = client.request(method, url, headers=headers, data=data)
    d.addCallback(handle_response)

    if timeout is not None:
        cancelling_on_timeout = [False]

        def raise_timeout(reason):
            # Pass the failure through untouched unless *we* triggered the
            # cancel (and it isn't already an HttpTimeoutError), in which
            # case translate the CancelledError into a timeout error.
            if not cancelling_on_timeout[0] or reason.check(HttpTimeoutError):
                return reason
            return Failure(HttpTimeoutError("Timeout while connecting"))

        def cancel_on_timeout():
            cancelling_on_timeout[0] = True
            d.cancel()

        def cancel_timeout(r, delayed_call):
            # Success path: stop the pending timeout call, if still armed.
            if delayed_call.active():
                delayed_call.cancel()
            return r

        d.addErrback(raise_timeout)
        delayed_call = reactor.callLater(timeout, cancel_on_timeout)
        d.addCallback(cancel_timeout, delayed_call)

    return d
예제 #41
0
 def setUp(self):
     """
     Start a ``TxSyncMLResource`` site on an ephemeral localhost port and
     build an HTTP client (redirects disabled) for talking to it.
     """
     # Non-persistent pool: nothing is cached between tests.
     self.pool = HTTPConnectionPool(reactor, persistent=False)
     self.client = HTTPClient.with_config(
         pool=self.pool, allow_redirects=False, reactor=reactor)
     self.resource = TxSyncMLResource(reactor)
     self.site = server.Site(self.resource)
     # Port 0 lets the OS pick a free port; record it for the tests.
     self.listener = reactor.listenTCP(0, self.site, interface='localhost')
     self.listener_port = self.listener.getHost().port
     self.fixtures = FixtureHelper()
     self.addCleanup(self.listener.loseConnection)
     self.addCleanup(self.pool.closeCachedConnections)
예제 #42
0
File: test_client.py  Project: qq40660/treq
    def setUp(self):
        """
        Build a client around a mocked Agent and patch both body-producer
        classes so tests can inspect request-body construction without I/O.
        """
        agent = mock.Mock(Agent)
        self.agent = agent
        self.client = HTTPClient(agent)

        # (patcher attribute, started-mock attribute, patch target)
        producer_patches = (
            ("fbp_patcher", "FileBodyProducer",
             "treq.client.FileBodyProducer"),
            ("mbp_patcher", "MultiPartProducer",
             "treq.multipart.MultiPartProducer"),
        )
        for patcher_attr, mock_attr, target in producer_patches:
            patcher = mock.patch(target)
            setattr(self, patcher_attr, patcher)
            setattr(self, mock_attr, patcher.start())
            self.addCleanup(patcher.stop)
예제 #43
0
 def __init__(self, host='127.0.0.1', port=8500, scheme='http',
              verify=True):
     """
     Build a treq-based client for the given host/port/scheme, with TLS
     certificate verification toggled by ``verify``.
     """
     self.host = host
     self.port = port
     self.scheme = scheme
     self.base_uri = '%s://%s:%s' % (self.scheme, self.host, self.port)
     # Map the boolean onto the SSL certificate-requirement constant.
     if verify:
         self.verify = SSLSpec.CERT_REQUIRED
     else:
         self.verify = SSLSpec.CERT_NONE
     context_factory = AsyncClientSSLContextFactory(verify=self.verify)
     pool = HTTPConnectionPool(reactor)
     self.client = TreqHTTPClient(
         Agent(reactor=reactor, pool=pool, contextFactory=context_factory))
예제 #44
0
class StreamingEliotLogsTests(SyncTestCase):
    """
    Tests for the log streaming resources created by ``create_log_resources``.
    """
    def setUp(self):
        # Requests are routed directly at the in-memory resource tree via
        # RequestTraversalAgent; no network listener is involved.
        self.resource = create_log_resources()
        self.agent = RequestTraversalAgent(self.resource)
        self.client = HTTPClient(self.agent)
        return super(StreamingEliotLogsTests, self).setUp()

    def test_v1(self):
        """
        There is a resource at *v1*.
        """
        response_deferred = self.client.get(b"http:///v1")
        self.assertThat(
            response_deferred,
            succeeded(has_response_code(Equals(OK))),
        )
예제 #45
0
class TestFakeDockerServer(TestCase):
    """Exercise the FakeDockerServer over real (localhost) HTTP."""

    def setUp(self):
        """Listen on an ephemeral port and build a pool-less client."""
        self.dockerAPI = FakeDockerServer()
        self.dockerServer = reactor.listenTCP(0, self.dockerAPI)
        self.dockerPort = self.dockerServer.getHost().port
        self.agent = Agent(reactor)  # deliberately no connection pool
        self.client = HTTPClient(self.agent)

    def tearDown(self):
        """Stop listening so the port is released between tests."""
        return self.dockerServer.stopListening()

    def test_douglas_adams_would_be_proud(self):
        """The fake server echoes the JSON body plus a marker field."""
        url = 'http://127.0.0.1:%d/towel' % (self.dockerPort,)
        payload = json.dumps({"hiding": "things"})
        deferred = self.client.post(
            url, payload, headers={'Content-Type': ['application/json']})
        deferred.addCallback(treq.json_content)

        def check(body):
            self.assertEqual(
                body, {"hiding": "things", "SeenByFakeDocker": 42})

        deferred.addCallback(check)
        return deferred
예제 #46
0
    def __init__(self,
                 host='127.0.0.1',
                 port=8500,
                 scheme='http',
                 verify=True,
                 contextFactory=None,
                 **kwargs):
        """
        Build a treq-based client for the given endpoint.

        An explicitly supplied ``contextFactory`` always wins; otherwise,
        when ``verify`` is false, an insecure (non-verifying) context is
        used; otherwise the Agent's default TLS policy applies.  Extra
        keyword arguments are forwarded to ``Agent``.
        """
        self.host = host
        self.port = port
        self.scheme = scheme
        self.base_uri = '%s://%s:%s' % (self.scheme, self.host, self.port)

        # Decide on the TLS context first, then assemble the Agent kwargs.
        if contextFactory is not None:
            context = contextFactory
        elif not verify:
            context = InsecureContextFactory()
        else:
            context = None

        agent_kwargs = dict(
            reactor=reactor, pool=HTTPConnectionPool(reactor), **kwargs)
        if context is not None:
            agent_kwargs['contextFactory'] = context

        self.client = TreqHTTPClient(Agent(**agent_kwargs))
예제 #47
0
class RabbitmqManagementClient(object):
    """
    Thin client for the RabbitMQ management HTTP API.

    Concurrency is capped by a ``DeferredSemaphore`` of ``TPS_LIMIT`` so
    at most that many requests are in flight at once.
    """

    # Reactor used for scheduling; swappable in tests.
    clock = reactor

    @classmethod
    def pool_factory(cls, reactor):
        """
        Build a persistent connection pool capped at ``TPS_LIMIT``
        connections per host.
        """
        pool = HTTPConnectionPool(reactor, persistent=True)
        pool.maxPersistentPerHost = TPS_LIMIT
        # Bug fix: the pool was previously configured but never returned,
        # so agent_factory always received pool=None and the per-host cap
        # had no effect.
        return pool

    @classmethod
    def agent_factory(cls, reactor, pool=None):
        """Build the Agent used for requests; overridable in tests."""
        return Agent(reactor, pool=pool)

    def __init__(self, base_url, username, password):
        """
        :param base_url: host[:port] of the management API (no scheme).
        :param username: basic-auth user name.
        :param password: basic-auth password.
        """
        self.base_url = base_url
        self.username = username
        self.password = password

        self.http_client = HTTPClient(self.agent_factory(
            self.clock, pool=self.pool_factory(self.clock)))

        self.semaphore = defer.DeferredSemaphore(TPS_LIMIT)

    def get_queue(self, vhost, queue_name):
        """
        Fetch queue details as decoded JSON, rate limited by the semaphore.

        NOTE(review): ``queue_name`` is interpolated unquoted while
        ``vhost`` is URL-quoted -- confirm queue names never contain
        reserved characters.
        """
        url = 'http://%s/api/queues/%s/%s' % (
            self.base_url,
            urllib.quote(vhost, safe=''),
            queue_name
        )

        def _get_queue():
            d = self.http_client.get(url, auth=(self.username, self.password))
            d.addCallback(treq.json_content)
            return d

        return self.semaphore.run(_get_queue)
예제 #48
0
 def __init__(self, dockerAddr=None, dockerPort=None, dockerSocket=None,
         path='', reactor=reactor, config=None):
     """
     A docker proxy resource which knows how to connect to real Docker
     daemon either via socket (dockerSocket specified) or address + port for
     TCP connection (dockerAddr + dockerPort specified).

     :param dockerAddr: TCP host of the real Docker daemon, or None.
     :param dockerPort: TCP port of the real Docker daemon, or None.
     :param dockerSocket: UNIX socket path to the daemon, or None.
     :param path: remote path prefix for the reverse proxy.
     :param reactor: reactor used for outbound connections.
     :param config: optional pre-built PluginConfiguration; when omitted,
         one is loaded from the default filesystem location.
     """
     if config is None:
         # Try to get the configuration from the default place on the
         # filesystem.
         self.config = PluginConfiguration()
     else:
         self.config = config
     self.config.read_and_parse()
     self.parser = EndpointParser(self.config)
     # NOTE(review): both Resource.__init__ and the reverse-proxy base
     # __init__ are invoked below -- presumably intentional; confirm.
     Resource.__init__(self)
     self.host = dockerAddr
     self.port = dockerPort
     self.socket = dockerSocket
     self.path = path
     self.reactor = reactor
     proxy.ReverseProxyResource.__init__(self, dockerAddr, dockerPort, path, reactor) # NB dockerAddr is not actually used
     self.agent = Agent(reactor) # no connectionpool
     self.client = HTTPClient(self.agent)
    def setUp(self):
        """
        Ready the environment for tests which actually run docker
        with flocker-plugin enabled.

        * Log into each node in turn:
          * Load flocker-plugin into docker
        """
        self.agent = Agent(reactor) # no connectionpool
        self.client = HTTPClient(self.agent)
        d = get_test_cluster(self, 2)
        def got_cluster(cluster):
            self.cluster = cluster
            self.plugins = {}
            daemonReadyDeferreds = []
            self.ips = [node.address for node in cluster.nodes]
            # Build docker if necessary (if there's a docker submodule)
            self._buildDockerOnce()
            # NOTE(review): the shell()/run() calls below are blocking SSH
            # helpers (Python 2 era); the triple-quoted strings are
            # disabled alternative provisioning strategies kept for
            # reference, not docstrings.
            for ip in self.ips:
                # cleanup after previous test runs
                #run(ip, ["pkill", "-f", "flocker"])
                shell(ip, "sleep 5 && initctl stop docker || true")
                # Copy docker into the respective node
                self._injectDockerOnce(ip)
                # workaround https://github.com/calavera/docker/pull/4#issuecomment-100046383
                shell(ip, "mkdir -p %s" % (PLUGIN_DIR,))
                # cleanup stale sockets
                shell(ip, "rm -f %s/*" % (PLUGIN_DIR,))
                #shell(ip, "supervisorctl stop flocker-agent")
                #shell(ip, "supervisorctl start flocker-agent")
                """
                for container in ("flocker",):
                    try:
                        run(ip, ["docker", "rm", "-f", container])
                    except Exception:
                        print container, "was not running, not killed, OK."
                # start flocker-plugin
                FLOCKER_PLUGIN = "%s/flocker-plugin:%s" % (DOCKER_PULL_REPO, PF_VERSION)
                run(ip, ["docker", "pull", FLOCKER_PLUGIN])
                """
                # TODO - come up with cleaner/nicer way of flocker-plugin
                # being able to establish its own host uuid (or volume
                # mountpoints), such as API calls.
                # See https://github.com/ClusterHQ/flocker-plugin/issues/2
                # for how to do this now.
                """
                self.plugins[ip] = remote_service_for_test(self, ip,
                    ["docker", "run", "--name=flocker",
                        "-v", "%s:%s" % (PLUGIN_DIR, PLUGIN_DIR),
                        "-e", "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (self.cluster.base_url,),
                        "-e", "MY_NETWORK_IDENTITY=%s" % (ip,),
                        "-e", "MY_HOST_UUID=%s" % (host_uuid,),
                       FLOCKER_PLUGIN])
                """
                host_uuid = run(ip, ["python", "-c", "import json; "
                    "print json.load(open('/etc/flocker/volume.json'))['uuid']"]).strip()
                cmd = ("cd /root && if [ ! -e powerstrip-flocker ]; then "
                           "git clone https://github.com/clusterhq/powerstrip-flocker && "
                           "cd powerstrip-flocker && "
                           "git checkout %s && cd /root;" % (PF_VERSION,)
                       + "fi && cd /root/powerstrip-flocker && "
                       + "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (self.cluster.base_url,)
                       + " MY_NETWORK_IDENTITY=%s" % (ip,)
                       + " MY_HOST_UUID=%s" % (host_uuid,)
                       + " twistd -noy flockerdockerplugin.tac")
                print "CMD >>", cmd
                self.plugins[ip] = remote_service_for_test(self, ip,
                    ["bash", "-c", cmd])
                # XXX Better not to have sleep 5 in here but hey
                shell(ip, "sleep 5 && initctl start docker")
                print "Waiting for flocker-plugin to show up on", ip, "..."
                # XXX This will only work for the first test, need to restart
                # docker in tearDown.
                daemonReadyDeferreds.append(wait_for_plugin(ip))

            # Fire once every node's plugin has reported ready.
            d = defer.gatherResults(daemonReadyDeferreds)
            # def debug():
            #     services
            #     import pdb; pdb.set_trace()
            # d.addCallback(lambda ignored: deferLater(reactor, 1, debug))
            return d
        d.addCallback(got_cluster)
        return d
예제 #50
0
 def setUp(self):
     """Create a plain Agent (deliberately no connection pool) and client."""
     agent = Agent(reactor)
     self.agent = agent
     self.client = HTTPClient(agent)
예제 #51
0
class TestAdderPlugin(TestCase):
    """Tests for the AdderPlugin adapter's pre/post hook protocol."""

    def _getAdder(self, *args, **kw):
        # Start an AdderPlugin on an ephemeral port; flags like
        # explode/pre/post are forwarded to the plugin constructor.
        self.adderAPI = AdderPlugin(*args, **kw)
        self.adderServer = reactor.listenTCP(0, self.adderAPI)
        self.adderPort = self.adderServer.getHost().port

    def setUp(self):
        # Pool-less agent: one fresh connection per request.
        self.agent = Agent(reactor) # no connectionpool
        self.client = HTTPClient(self.agent)

    def tearDown(self):
        # Release the listening port between tests.
        return self.adderServer.stopListening()

    def test_adder_explode(self):
        """
        The adder adapter blows up (sends an HTTP 500) when asked to.
        """
        self._getAdder(explode=True)
        d = self.client.post('http://127.0.0.1:%d/adapter' % (self.adderPort,),
                      json.dumps({}),
                      headers={'Content-Type': ['application/json']})
        def verifyResponseCode(response):
            self.assertEqual(response.code, 500)
            return response
        d.addCallback(verifyResponseCode)
        d.addCallback(treq.content)
        def verify(body):
            self.assertEqual(body, "sadness for you, today.")
        d.addCallback(verify)
        return d

    def test_adder_pre(self):
        """
        The adder pre-hook increments an integer according to the protocol
        defined in the README.
        """
        self._getAdder(pre=True)
        # The hook body is itself JSON-encoded inside the outer payload.
        d = self.client.post('http://127.0.0.1:%d/adapter' % (self.adderPort,),
                      json.dumps({
                          "PowerstripProtocolVersion": 1,
                          "Type": "pre-hook",
                          "ClientRequest": {
                              "Method": "POST",
                              "Request": "/fictional",
                              "Body": json.dumps({"Number": 7})}}),
                      headers={'Content-Type': ['application/json']})
        def verifyResponseCode(response):
            self.assertEqual(response.code, 200)
            return response
        d.addCallback(verifyResponseCode)
        d.addCallback(treq.json_content)
        def verify(body):
            self.assertEqual(json.loads(body["ModifiedClientRequest"]["Body"])["Number"], 8)
        d.addCallback(verify)
        return d

    def test_adder_post(self):
        """
        The adder post-hook increments an integer according to the protocol
        defined in the README.
        """
        self._getAdder(post=True)
        d = self.client.post('http://127.0.0.1:%d/adapter' % (self.adderPort,),
                      json.dumps({
                          "Type": "post-hook",
                          "ClientRequest": {
                              "Method": "POST",
                              "Request": "/fictional",
                              "Body": json.dumps({}),},
                          "ServerResponse": {
                              "ContentType": "application/json",
                              "Body": json.dumps({"Number": 7}),
                              "Code": 200,},
                          }),
                      headers={'Content-Type': ['application/json']})
        def verifyResponseCode(response):
            self.assertEqual(response.code, 200)
            return response
        d.addCallback(verifyResponseCode)
        d.addCallback(treq.json_content)
        def verify(body):
            self.assertEqual(json.loads(body["ModifiedServerResponse"]["Body"])["Number"], 8)
        d.addCallback(verify)
        return d
예제 #52
0
 def setUp(self):
     """Start a fake Docker server on an ephemeral port; build a client."""
     self.dockerAPI = FakeDockerServer()
     server = reactor.listenTCP(0, self.dockerAPI)
     self.dockerServer = server
     self.dockerPort = server.getHost().port
     self.agent = Agent(reactor)  # deliberately no connection pool
     self.client = HTTPClient(self.agent)
예제 #53
0
class MarconiClient(object):
    """
    Async client for the OpenStack Marconi queueing API, built on treq.

    Transport-level failures listed in RETRYABLE_ERRORS are retried
    transparently; API errors surface as MarconiError.
    """
    USER_AGENT = 'txmarconi/{version}'.format(version=__version__)
    RETRYABLE_ERRORS = [RequestTransmissionFailed]

    def __init__(self, base_url='http://localhost:8888', quiet_requests=True, **kwargs):
        """
        :param base_url: root of the Marconi service.
        :param quiet_requests: silence per-connection log noise.
        :param kwargs: optional Keystone auth settings (``auth_url``,
            ``username`` and either ``password`` or ``api_key``).
        """
        self.client_id = str(uuid4())
        self.base_url = base_url
        pool = HTTPConnectionPool(reactor, persistent=True)
        agent = ContentDecoderAgent(RedirectAgent(Agent(reactor, pool=pool)), [('gzip', GzipDecoder)])

        if quiet_requests:
            pool._factory = QuieterHTTP11ClientFactory

        auth_url = kwargs.get('auth_url')
        if auth_url:
            username = kwargs.get('username')
            password = kwargs.get('password')
            api_key = kwargs.get('api_key')

            if not username:
                raise RuntimeError('Marconi "auth_url" specified with no username')

            # Prefer api_key over password when both are supplied.
            if api_key:
                cred = api_key
                auth_type = 'api_key'
            elif password:
                cred = password
                auth_type = 'password'
            else:
                raise RuntimeError('Marconi "auth_url" specified with no "password" or "api_key"')

            agent = KeystoneAgent(agent, auth_url, (username, cred), auth_type=auth_type)

        self.http_client = HTTPClient(agent)

    def _wrap_error(self, failure):
        # Normalise any non-MarconiError failure into a MarconiError.
        if not failure.check(MarconiError):
            log.err(failure)
            raise MarconiError(failure.value)

        log.err(failure.value)
        return failure

    def _handle_error_response(self, response):
        # Read the body and raise a MarconiError built from it (or from
        # the bare status code when the body is empty).
        def _raise_error(content_str):
            content_str = content_str.strip()
            if len(content_str) > 0:
                raise MarconiError(json.loads(content_str))
            else:
                msg = 'Received {code} response with empty body'.format(code=response.code)
                raise MarconiError(msg)

        d = content(response)
        d.addCallback(_raise_error)
        return d

    def _request(self, method, path, params=None, data=None):
        """
        Issue one API request, retrying on RETRYABLE_ERRORS.
        """
        url = '{base_url}{path}'.format(
            base_url=self.base_url,
            path=path,
        )

        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'User-Agent': self.USER_AGENT,
            'Client-ID': self.client_id,
        }

        def _possibly_retry(failure):
            # Either I'm doing something wrong (likely) or Marconi is doing
            # something unpleasant to connections after it returns a 201 to us,
            # because the next request always seems to get one of these.
            if failure.check(*self.RETRYABLE_ERRORS):
                return self._request(method, path, params=params, data=data)
            else:
                return failure

        if data:
            body = QuieterFileBodyProducer(StringIO(json.dumps(data)))
        else:
            body = None

        d = self.http_client.request(method, url, headers=headers, data=body, params=params)
        d.addErrback(_possibly_retry)
        return d

    def _expect_204(self, response):
        # Shared terminal callback for DELETE/PATCH-style endpoints.
        if response.code == 204:
            return None
        else:
            return self._handle_error_response(response)

    def ensure_queue(self, queue_name):
        """Create the queue if needed; returns its API path."""
        path = '/v1/queues/{queue_name}'.format(queue_name=queue_name)

        def _on_response(response):
            # 201 = created, 204 = already existed; both are success.
            if response.code in (201, 204):
                return path
            else:
                return self._handle_error_response(response)

        d = self._request('PUT', path)
        d.addCallback(_on_response)
        d.addErrback(self._wrap_error)
        return d

    def push_message(self, queue_name, body, ttl):
        """Post one message; returns a MarconiMessage for it."""
        path = '/v1/queues/{queue_name}/messages'.format(queue_name=queue_name)
        data = [
            {
                'ttl': ttl,
                'body': body,
            }
        ]

        def _construct_message(obj):
            return MarconiMessage(body=body, ttl=ttl, age=0, href=obj['resources'][0])

        def _on_response(response):
            if response.code == 201:
                return json_content(response).addCallback(_construct_message)
            else:
                return self._handle_error_response(response)

        d = self._request('POST', path, data=data)
        d.addCallback(_on_response)
        d.addErrback(self._wrap_error)
        return d

    def claim_message(self, queue_name, ttl, grace, polling_interval=1):
        """
        Claim a single message, polling every ``polling_interval`` seconds
        while the queue is empty (204), until one arrives.
        """
        path = '/v1/queues/{queue_name}/claims'.format(queue_name=queue_name)
        data = {
            'ttl': ttl,
            'grace': grace,
        }
        params = {
            'limit': 1,
        }

        # Outer deferred fires only when a message is finally claimed.
        d = defer.Deferred()

        def _construct_message(obj, response):
            claim_href = response.headers.getRawHeaders('location')[0]
            d.callback(ClaimedMarconiMessage(claim_href=claim_href, **obj[0]))

        def _on_response(response):
            if response.code == 201:
                json_content(response).addCallback(_construct_message, response)
            elif response.code == 204:
                # Nothing to claim yet: poll again later.
                reactor.callLater(polling_interval, _perform_call)
            else:
                return self._handle_error_response(response)

        def _perform_call():
            d1 = self._request('POST', path, data=data, params=params)
            d1.addCallback(_on_response)
            d1.addErrback(self._wrap_error)
            d1.addErrback(d.errback)

        _perform_call()
        return d

    def update_claim(self, claimed_message, ttl):
        """Refresh a claim's TTL; resolves to None on success."""
        data = {
            'ttl': ttl,
        }

        d = self._request('PATCH', claimed_message.claim_href, data=data)
        d.addCallback(self._expect_204)
        d.addErrback(self._wrap_error)
        return d

    def release_claim(self, claimed_message):
        """Release a claim; resolves to None on success."""
        d = self._request('DELETE', claimed_message.claim_href)
        d.addCallback(self._expect_204)
        d.addErrback(self._wrap_error)
        return d

    def delete_message(self, message):
        """Delete a message; resolves to None on success."""
        d = self._request('DELETE', message.href)
        d.addCallback(self._expect_204)
        d.addErrback(self._wrap_error)
        return d
class PowerstripFlockerTests(TestCase):
    """
    Real flocker-plugin tests against two nodes using the flocker
    acceptance testing framework.

    NOTE(review): Python 2 code (print statements); shell()/run() are
    blocking SSH helpers defined elsewhere in this module.
    """

    # Slow builds because initial runs involve pulling some docker images
    # (flocker-plugin).
    timeout = 1200


    def _buildDockerOnce(self):
        """
        Using blocking APIs, build docker once per test run.
        """
        # BUILD_ONCE is a module-level sentinel list: non-empty means done.
        if len(BUILD_ONCE):
            return
        if path.exists(DOCKER_PATH):
            dockerCmd = ("cd %(dockerDir)s;"
                   "docker build -t custom-docker .;"
                   "docker run --privileged --rm "
                       "-e DOCKER_EXPERIMENTAL=1 "
                       "-e DOCKER_GITCOMMIT=`git log -1 --format=%%h` "
                       "-v %(dockerDir)s:/go/src/github.com/docker/docker "
                       "custom-docker hack/make.sh binary" % dict(
                           dockerDir=DOCKER_PATH))
            print "Running docker command:", dockerCmd
            exit = system(dockerCmd)
            if exit > 0:
                raise Exception("failed to build docker")
        BUILD_ONCE.append(1)


    def _injectDockerOnce(self, ip):
        """
        Using blocking APIs, copy the docker binary from whence it was built in
        _buildDockerOnce to the given ip.
        """
        # INJECT_ONCE maps ip -> sentinel list, same pattern as BUILD_ONCE.
        if ip not in INJECT_ONCE:
            INJECT_ONCE[ip] = []
        if len(INJECT_ONCE[ip]):
            return

        if path.exists(DOCKER_PATH):
            # e.g. 1.5.0-plugins
            dockerVersion = "1.7.0-dev-experimental" # XXX Docker need to update their VERSION file open("%s/VERSION" % (DOCKER_PATH,)).read().strip()
            binaryPath = "%(dockerDir)s/bundles/%(dockerVersion)s/binary/docker-%(dockerVersion)s" % dict(
                    dockerDir=DOCKER_PATH, dockerVersion=dockerVersion)
            hostBinaryPath = "/usr/bin/docker"
            key = "/home/buildslave/.ssh/id_rsa_flocker"
            exit = system("scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
                          "-i %(key)s %(binaryPath)s root@%(ip)s:%(hostBinaryPath)s" % dict(
                            key=key, hostBinaryPath=hostBinaryPath, binaryPath=binaryPath, ip=ip))
            if exit > 0:
                raise Exception("failed to inject docker into %(ip)s" % dict(ip=ip))

        INJECT_ONCE[ip].append(1)


    def setUp(self):
        """
        Ready the environment for tests which actually run docker
        with flocker-plugin enabled.

        * Log into each node in turn:
          * Load flocker-plugin into docker
        """
        self.agent = Agent(reactor) # no connectionpool
        self.client = HTTPClient(self.agent)
        d = get_test_cluster(self, 2)
        def got_cluster(cluster):
            self.cluster = cluster
            self.plugins = {}
            daemonReadyDeferreds = []
            self.ips = [node.address for node in cluster.nodes]
            # Build docker if necessary (if there's a docker submodule)
            self._buildDockerOnce()
            # The triple-quoted strings below are disabled alternative
            # provisioning strategies kept for reference, not docstrings.
            for ip in self.ips:
                # cleanup after previous test runs
                #run(ip, ["pkill", "-f", "flocker"])
                shell(ip, "sleep 5 && initctl stop docker || true")
                # Copy docker into the respective node
                self._injectDockerOnce(ip)
                # workaround https://github.com/calavera/docker/pull/4#issuecomment-100046383
                shell(ip, "mkdir -p %s" % (PLUGIN_DIR,))
                # cleanup stale sockets
                shell(ip, "rm -f %s/*" % (PLUGIN_DIR,))
                #shell(ip, "supervisorctl stop flocker-agent")
                #shell(ip, "supervisorctl start flocker-agent")
                """
                for container in ("flocker",):
                    try:
                        run(ip, ["docker", "rm", "-f", container])
                    except Exception:
                        print container, "was not running, not killed, OK."
                # start flocker-plugin
                FLOCKER_PLUGIN = "%s/flocker-plugin:%s" % (DOCKER_PULL_REPO, PF_VERSION)
                run(ip, ["docker", "pull", FLOCKER_PLUGIN])
                """
                # TODO - come up with cleaner/nicer way of flocker-plugin
                # being able to establish its own host uuid (or volume
                # mountpoints), such as API calls.
                # See https://github.com/ClusterHQ/flocker-plugin/issues/2
                # for how to do this now.
                """
                self.plugins[ip] = remote_service_for_test(self, ip,
                    ["docker", "run", "--name=flocker",
                        "-v", "%s:%s" % (PLUGIN_DIR, PLUGIN_DIR),
                        "-e", "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (self.cluster.base_url,),
                        "-e", "MY_NETWORK_IDENTITY=%s" % (ip,),
                        "-e", "MY_HOST_UUID=%s" % (host_uuid,),
                       FLOCKER_PLUGIN])
                """
                host_uuid = run(ip, ["python", "-c", "import json; "
                    "print json.load(open('/etc/flocker/volume.json'))['uuid']"]).strip()
                cmd = ("cd /root && if [ ! -e powerstrip-flocker ]; then "
                           "git clone https://github.com/clusterhq/powerstrip-flocker && "
                           "cd powerstrip-flocker && "
                           "git checkout %s && cd /root;" % (PF_VERSION,)
                       + "fi && cd /root/powerstrip-flocker && "
                       + "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (self.cluster.base_url,)
                       + " MY_NETWORK_IDENTITY=%s" % (ip,)
                       + " MY_HOST_UUID=%s" % (host_uuid,)
                       + " twistd -noy flockerdockerplugin.tac")
                print "CMD >>", cmd
                self.plugins[ip] = remote_service_for_test(self, ip,
                    ["bash", "-c", cmd])
                # XXX Better not to have sleep 5 in here but hey
                shell(ip, "sleep 5 && initctl start docker")
                print "Waiting for flocker-plugin to show up on", ip, "..."
                # XXX This will only work for the first test, need to restart
                # docker in tearDown.
                daemonReadyDeferreds.append(wait_for_plugin(ip))

            # Fires once every node's plugin has reported ready.
            d = defer.gatherResults(daemonReadyDeferreds)
            # def debug():
            #     services
            #     import pdb; pdb.set_trace()
            # d.addCallback(lambda ignored: deferLater(reactor, 1, debug))
            return d
        d.addCallback(got_cluster)
        return d

    def test_create_a_dataset(self):
        """
        Running a docker container specifying a dataset name which has never
        been created before creates it in the API.
        """
        node1, node2 = sorted(self.ips)
        fsName = "test001"
        print "About to run docker run..."
        shell(node1, "docker run "
                     "-v %s:/data --volume-driver=flocker busybox "
                     "sh -c 'echo 1 > /data/file'" % (fsName,))
        url = self.cluster.base_url + "/configuration/datasets"
        d = self.client.get(url)
        d.addCallback(treq.json_content)
        def verify(result):
            self.assertTrue(len(result) > 0)
            self.assertEqual(result[0]["metadata"], {"name": fsName})
            #self.assertEqual(result[0]["primary"], node1)
        d.addBoth(verify)
        return d

    def test_create_a_dataset_manifests(self):
        """
        Running a docker container specifying a dataset name which has never
        been created before creates the actual filesystem and mounts it in
        place in time for the container to start.

        We can verify this by asking Docker for the information about which
        volumes are *actually* mounted in the container, then going and
        checking that the real volume path on the host contains the '1' written
        to the 'file' file specified in the docker run command...
        """
        node1, node2 = sorted(self.ips)
        fsName = "test001"
        shell(node1, "docker run -d "
                     "-v %s:/data --volume-driver=flocker busybox "
                     "sh -c 'echo fish > /data/file'" % (fsName,)).strip()
        # The volume that Docker now has mounted exists as a ZFS volume...
        zfs_volumes = shell(node1, "zfs list -t snapshot,filesystem -r flocker "
                                   "|grep flocker/ |wc -l").strip()
        self.assertEqual(int(zfs_volumes), 1)
        # ... and contains a file which contains the characters "fish".
        catFileOutput = shell(node1, "docker run "
                                     "-v %s:/data --volume-driver=flocker busybox "
                                     "cat /data/file" % (fsName,)).strip()
        self.assertEqual(catFileOutput, "fish")

    def test_create_two_datasets_same_name(self):
        """
        The metadata stored about a dataset name is checked to make sure that
        no two volumes with the same name are created.  (In fact, if two
        volumes are created with the same name on the same host, it's a shared
        volume.)
        """
        node1, node2 = sorted(self.ips)
        fsName = "test001"
        # First volume...
        container_id_1 = shell(node1, "docker run -d "
                                      "-v %s:/data --volume-driver=flocker busybox "
                                      "sh -c 'echo fish > /data/file'" % (fsName,)).strip()
        docker_inspect = json.loads(run(node1, ["docker", "inspect", container_id_1]))
        volume_1 = docker_inspect[0]["Volumes"].values()[0]

        # Second volume...
        container_id_2 = shell(node1, "docker run -d "
                                      "-v %s:/data --volume-driver=flocker busybox "
                                      "sh -c 'echo fish > /data/file'" % (fsName,)).strip()
        docker_inspect = json.loads(run(node1, ["docker", "inspect", container_id_2]))
        volume_2 = docker_inspect[0]["Volumes"].values()[0]
        # ... have the same flocker UUID.
        self.assertEqual(volume_1, volume_2)

    def test_move_a_dataset(self):
        """
        Running a docker container specifying a dataset name which has been
        created before but which is no longer running moves the dataset before
        starting the container.
        """
        node1, node2 = sorted(self.ips)
        fsName = "test001"
        # Write some bytes to a volume on one host...
        shell(node1, "docker run "
                     "-v %s:/data --volume-driver=flocker busybox "
                     "sh -c 'echo chicken > /data/file'" % (fsName,))
        # ... and read them from the same named volume on another...
        container_id = shell(node2, "docker run -d "
                                    "-v %s:/data --volume-driver=flocker busybox "
                                    "sh -c 'cat /data/file'" % (fsName,)).strip()
        output = run(node2, ["docker", "logs", container_id])
        self.assertEqual(output.strip(), "chicken")

    def test_move_a_dataset_check_persistence(self):
        """
        The data in the dataset between the initial instantiation of it and the
        second instantiation of it persists.
        """
        pass
    test_move_a_dataset_check_persistence.skip = "not implemented yet"

    def test_dataset_is_not_moved_when_being_used(self):
        """
        If a container (*any* container) is currently running with a dataset
        mounted, an error is reported rather than ripping it out from
        underneath a running container.
        """
        pass
    test_dataset_is_not_moved_when_being_used.skip = "not implemented yet"

    def test_two_datasets_one_move_one_create(self):
        """
        When a docker run command mentions two datasets, one which is currently
        not running on another host, and another which is new, the new one gets
        created and the extant one gets moved. Both operations complete before
        the container is started.
        """
        pass
    test_two_datasets_one_move_one_create.skip = "not implemented yet"
예제 #55
0
class DockerProxy(proxy.ReverseProxyResource):
    """
    A reverse-proxy resource in front of the real Docker daemon which runs
    configured "adapter" pre-hooks on request bodies and post-hooks on
    response bodies before relaying them.
    """
    # Factory for the client-side protocol talking to the real Docker
    # daemon; a class attribute so tests/subclasses can substitute it.
    proxyClientFactoryClass = DockerProxyClientFactory


    def __init__(self, dockerAddr=None, dockerPort=None, dockerSocket=None,
            path='', reactor=reactor, config=None):
        """
        A docker proxy resource which knows how to connect to real Docker
        daemon either via socket (dockerSocket specified) or address + port for
        TCP connection (dockerAddr + dockerPort specified).
        """
        if config is None:
            # Try to get the configuration from the default place on the
            # filesystem.
            self.config = PluginConfiguration()
        else:
            self.config = config
        self.config.read_and_parse()
        self.parser = EndpointParser(self.config)
        Resource.__init__(self)
        self.host = dockerAddr
        self.port = dockerPort
        self.socket = dockerSocket
        self.path = path
        self.reactor = reactor
        proxy.ReverseProxyResource.__init__(self, dockerAddr, dockerPort, path, reactor) # NB dockerAddr is not actually used
        # Agent without a connection pool: adapter hook requests each use
        # a fresh connection.
        self.agent = Agent(reactor) # no connectionpool
        self.client = HTTPClient(self.agent)


    def render(self, request, reactor=reactor):
        """
        Proxy *request* to the real Docker daemon, POSTing the request to
        each matching pre-hook adapter first and each post-hook adapter
        afterwards.  Returns NOT_DONE_YET; the response is written to the
        client asynchronously as the deferred chain completes.
        """
        # We are processing a leaf request.
        # Get the original request body from the client.
        skipPreHooks = False
        if request.requestHeaders.getRawHeaders('content-type') == ["application/json"]:
            originalRequestBody = request.content.read()
            request.content.seek(0) # rewind: the body is read again when proxied to Docker
        elif request.requestHeaders.getRawHeaders('content-type') == ["application/tar"]:
            # We can't JSON encode binary data, so don't even try.
            skipPreHooks = True
            originalRequestBody = None
        else:
            originalRequestBody = None
        # Collect the pre/post adapter hooks for every endpoint matching
        # this method + path (query string stripped).
        preHooks = []
        postHooks = []
        d = defer.succeed(None)
        for endpoint in self.parser.match_endpoint(request.method, request.uri.split("?")[0]):
            # It's possible for a request to match multiple endpoint
            # definitions.  Order of matched endpoint is not defined in
            # that case.
            adapters = self.config.endpoint(endpoint)
            preHooks.extend(adapters.pre)
            postHooks.extend(adapters.post)
        def callPreHook(result, hookURL):
            # Each pre-hook sees the body as (possibly) modified by the
            # previous one; the first sees the original client body.
            if result is None:
                newRequestBody = originalRequestBody
            else:
                newRequestBody = result["ModifiedClientRequest"]["Body"]
            return self.client.post(hookURL, json.dumps({
                        "PowerstripProtocolVersion": 1,
                        "Type": "pre-hook",
                        "ClientRequest": {
                            "Method": request.method,
                            "Request": request.uri,
                            "Body": newRequestBody,
                        }
                    }), headers={'Content-Type': ['application/json']})
        if not skipPreHooks:
            for preHook in preHooks:
                hookURL = self.config.adapter_uri(preHook)
                d.addCallback(callPreHook, hookURL=hookURL)
                d.addCallback(treq.json_content)
        def doneAllPrehooks(result):
            # Finally pass through the request to actual Docker.  For now we
            # mutate request in-place in such a way that ReverseProxyResource
            # understands it.
            if result is not None:
                requestBody = b""
                bodyFromAdapter = result["ModifiedClientRequest"]["Body"]
                if bodyFromAdapter is not None:
                    requestBody = bodyFromAdapter.encode("utf-8")
                request.content = StringIO.StringIO(requestBody)
                request.requestHeaders.setRawHeaders(b"content-length",
                        [str(len(requestBody))])
            ###########################
            # The following code is copied from t.w.proxy.ReverseProxy so that
            # clientFactory reference can be kept.
            if not self.socket:
                if self.port == 80:
                    host = self.host
                else:
                    host = "%s:%d" % (self.host, self.port)
                request.requestHeaders.setRawHeaders(b"host", [host])
            request.content.seek(0, 0)
            qs = urlparse.urlparse(request.uri)[4]
            if qs:
                rest = self.path + '?' + qs
            else:
                rest = self.path
            allRequestHeaders = request.getAllHeaders()
            # The body is sent with an explicit content-length below, so a
            # chunked transfer-encoding header would be wrong to forward.
            if allRequestHeaders.get("transfer-encoding") == "chunked":
                del allRequestHeaders["transfer-encoding"]
            # XXX Streaming the contents of the request body into memory could
            # cause OOM issues for large build contexts POSTed through
            # powerstrip. See https://github.com/ClusterHQ/powerstrip/issues/51
            body = request.content.read()
            allRequestHeaders["content-length"] = str(len(body))
            clientFactory = self.proxyClientFactoryClass(
                request.method, rest, request.clientproto,
                allRequestHeaders, body, request)
            ###########################
            if self.socket:
                self.reactor.connectUNIX(self.socket, clientFactory)
            else:
                self.reactor.connectTCP(self.host, self.port, clientFactory)
            d = defer.Deferred()
            clientFactory.onCreate(d)
            return d
        d.addCallback(doneAllPrehooks)
        def inspect(client):
            # If there are no post-hooks, allow the response to be streamed
            # back to the client, rather than buffered.
            d = defer.Deferred()
            client.registerListener(d)
            if not postHooks:
                client.setStreamingMode(True)
            return d
        d.addCallback(inspect)
        def callPostHook(result, hookURL):
            # Each post-hook sees the server response as (possibly)
            # modified by the previous post-hook.
            serverResponse = result["ModifiedServerResponse"]
            return self.client.post(hookURL, json.dumps({
                        # TODO Write tests for the information provided to the adapter.
                        "PowerstripProtocolVersion": 1,
                        "Type": "post-hook",
                        "ClientRequest": {
                            "Method": request.method,
                            "Request": request.uri,
                            "Body": originalRequestBody,
                            },
                        "ServerResponse": {
                            "ContentType": serverResponse["ContentType"],
                            "Body": serverResponse["Body"],
                            "Code": serverResponse["Code"],
                        },
                    }), headers={'Content-Type': ['application/json']})
        # XXX Need to skip post-hooks for tar archives from e.g. docker export.
        # https://github.com/ClusterHQ/powerstrip/issues/52
        for postHook in postHooks:
            hookURL = self.config.adapter_uri(postHook)
            d.addCallback(callPostHook, hookURL=hookURL)
            d.addCallback(treq.json_content)
        def sendFinalResponseToClient(result):
            resultBody = result["ModifiedServerResponse"]["Body"].encode("utf-8")
            # Update the Content-Length, since we're modifying the request object in-place.
            request.responseHeaders.setRawHeaders(
                b"content-length",
                [str(len(resultBody))]
            )
            # Write the final response to the client.
            request.write(resultBody)
            request.finish()
        d.addCallback(sendFinalResponseToClient)
        def squashNoPostHooks(failure):
            # Raised when streaming mode was enabled: the response has
            # already gone to the client, so there is nothing more to do.
            failure.trap(NoPostHooks)
        d.addErrback(squashNoPostHooks)
        d.addErrback(log.err, 'while running chain')
        return NOT_DONE_YET


    def getChild(self, path, request):
        """
        Return a child DockerProxy for *path*, carrying forward the
        connection settings and configuration so full-URI matching works
        on the leaf resource.
        """
        # NOTE(review): ``fragments`` is computed but never used.
        fragments = request.uri.split("/")
        fragments.pop(0)
        proxyArgs = (self.host, self.port, self.socket, self.path + '/' + urlquote(path, safe=""),
                     self.reactor)
        #if not request.postpath:
        resource = DockerProxy(*proxyArgs, config=self.config)
        return resource
예제 #56
0
    def reverse_proxy(self, request, protected=True):
        """
        Relay *request* to the proxied backend URL and copy the backend's
        response (status, headers, possibly transformed body) back onto the
        client-facing request.

        When ``protected`` is True the caller is assumed to have a valid
        session; the authenticated username is forwarded to the backend in
        the configured remote-user header.

        Returns a Deferred (or an interceptor's / websocket handler's
        result) that fires when the proxied response has been processed.
        """
        if protected:
            sess = request.getSession()
            valid_sessions = self.valid_sessions
            sess_uid = sess.uid
            username = valid_sessions[sess_uid]['username']
        # Normal reverse proxying.
        kwds = {}
        cookiejar = {}
        # Redirects are relayed to the client (with Location rewritten
        # below) rather than being followed by the proxy itself.
        kwds['allow_redirects'] = False
        kwds['cookies'] = cookiejar
        req_headers = self.mod_headers(dict(request.requestHeaders.getAllRawHeaders()))
        kwds['headers'] = req_headers
        if protected:
            kwds['headers'][self.remoteUserHeader] = [username]
        if request.method in ('PUT', 'POST'):
            kwds['data'] = request.content.read()
        url = self.proxied_url + request.uri
        # Determine if a plugin wants to intercept this URL.
        interceptors = self.interceptors
        for interceptor in interceptors:
            if interceptor.should_resource_be_intercepted(url, request.method, req_headers, request):
                return interceptor.handle_resource(url, request.method, req_headers, request)
        # Check if this is a request for a websocket.
        d = self.checkForWebsocketUpgrade(request)
        if d is not None:
            return d
        # Typical reverse proxying.
        self.log("Proxying URL => {0}".format(url))
        http_client = HTTPClient(self.proxy_agent)
        d = http_client.request(request.method, url, **kwds)

        def process_response(response, request):
            # Copy status code and headers from the backend response onto
            # the client-facing response.
            req_resp_headers = request.responseHeaders
            resp_code = response.code
            resp_headers = response.headers
            resp_header_map = dict(resp_headers.getAllRawHeaders())
            # Rewrite Location headers for redirects as required.
            if resp_code in (301, 302, 303, 307, 308) and "Location" in resp_header_map:
                values = resp_header_map["Location"]
                if len(values) == 1:
                    location = values[0]
                    if request.isSecure():
                        proxy_scheme = 'https'
                    else:
                        proxy_scheme = 'http'
                    new_location = self.proxied_url_to_proxy_url(proxy_scheme, location)
                    if new_location is not None:
                        resp_header_map['Location'] = [new_location]
            request.setResponseCode(response.code, message=response.phrase)
            for k,v in resp_header_map.iteritems():
                if k == 'Set-Cookie':
                    # Cookie attributes may need adjusting for the proxy.
                    v = self.mod_cookies(v)
                req_resp_headers.setRawHeaders(k, v)
            return response

        def mod_content(body, request):
            """
            Modify response content before returning it to the user agent.
            """
            # Chain every registered content modifier; with none present
            # the body passes through unchanged.
            d = None
            for content_modifier in self.content_modifiers:
                if d is None:
                    d = content_modifier.transform_content(body, request)
                else:
                    d.addCallback(content_modifier.transform_content, request)
            if d is None:
                return body
            else:
                return d

        d.addCallback(process_response, request)
        d.addCallback(treq.content)
        d.addCallback(mod_content, request)
        return d
예제 #57
0
class MountResource(resource.Resource):
    """
    A powerstrip pre-hook for container create.

    On POST, ensures the flocker dataset named in the request exists on
    this host — creating it, or moving it here via the flocker control
    service — and replies with its local mountpoint.
    """
    isLeaf = True

    def __init__(self, *args, **kw):
        # Agent without a connection pool: each control-service request
        # uses a fresh connection.
        self._agent = Agent(reactor) # no connectionpool
        self.client = HTTPClient(self._agent)
        return resource.Resource.__init__(self, *args, **kw)

    def render_POST(self, request):
        """
        Handle a pre-hook: either create a filesystem, or move it in place.
        """
        json_parsed = json.loads(request.content.read())
        print ">>> called with", json_parsed
        pprint.pprint(os.environ)
        # BASE_URL like http://control-service/v1/ ^

        self.base_url = os.environ.get("FLOCKER_CONTROL_SERVICE_BASE_URL")
        self.ip = os.environ.get("MY_NETWORK_IDENTITY")

        def wait_until_volume_in_place(result, fs):
            """
            Called after a dataset has been created or moved in the cluster's
            desired configuration. Wait until the volume shows up in the
            cluster actual state on the right host (either having been created
            or moved).

            :return: Deferred which fires with the tuple (fs, dataset_id) --
                that is, the filesystem and the corresponding flocker dataset
                uuid that the docker client asked for -- firing only once the
                filesystem has been created/moved and mounted (iow, exists on
                the right host in the cluster state).
            """
            print "wait_until_volume_in_place while processing", fs, "got result", result
            dataset_id = result["dataset_id"]
            def dataset_exists():
                # Poll the cluster's actual state for this dataset.
                d = self.client.get(self.base_url + "/state/datasets")
                d.addCallback(treq.json_content)
                def check_dataset_exists(datasets):
                    """
                    The /v1/state/datasets API seems to show the volume as
                    being on two hosts at once during a move. We assume
                    therefore that when it settles down to only show it on one
                    host that this means the move is complete.
                    """
                    print "Got", self.ip, self.host_uuid, "datasets:", datasets
                    matching_datasets = []
                    for dataset in datasets:
                        if dataset["dataset_id"] == dataset_id:
                            matching_datasets.append(dataset)
                    if len(matching_datasets) == 1:
                        if matching_datasets[0]["primary"] == self.host_uuid:
                            return matching_datasets[0]
                    return False
                d.addCallback(check_dataset_exists)
                return d
            d = loop_until(dataset_exists)
            d.addCallback(lambda dataset: (fs, dataset))
            return d

        # Resolve this host's uuid from the cluster state, then fetch the
        # desired dataset configuration.
        d = self.client.get(self.base_url + "/state/nodes")
        d.addCallback(treq.json_content)
        def find_my_uuid(nodes):
            for node in nodes:
                if node["host"] == self.ip:
                    self.host_uuid = node["uuid"]
                    break
            return self.client.get(self.base_url + "/configuration/datasets")
        d.addCallback(find_my_uuid)

        d.addCallback(treq.json_content)
        def got_dataset_configuration(configured_datasets):
            # form a mapping from names onto dataset objects
            configured_dataset_mapping = {}
            for dataset in configured_datasets:
                if dataset["metadata"].get("name"):
                    configured_dataset_mapping[dataset["metadata"].get("name")] = dataset

            # iterate over the datasets we were asked to create by the docker client
            fs_create_deferreds = []
            old_binds = []
            print "got json_parsed...", json_parsed
            if json_parsed['Name'] is not None and json_parsed['Name'] != "":
                binds = [json_parsed['Name']]
                for bind in binds:
                    fs, remainder = bind, ""
                    # TODO validation
                    # if "/" in fs:
                    #    raise Exception("Not allowed flocker filesystems more than one level deep")
                    old_binds.append((fs, remainder))
                    # if a dataset exists, and is in the right place, we're cool.
                    if fs in configured_dataset_mapping:
                        dataset = configured_dataset_mapping[fs]
                        if dataset["primary"] == self.host_uuid:
                            # check / wait for the state to match the desired
                            # configuration
                            fs_create_deferreds.append(wait_until_volume_in_place(dataset, fs=fs))
                        else:
                            # if a dataset exists, but is on the wrong server [TODO
                            # and is not being used], then move it in place.
                            d = self.client.post(
                                self.base_url + "/configuration/datasets/%s" % (
                                    dataset["dataset_id"].encode('ascii'),),
                                json.dumps({"primary": self.host_uuid}),
                                headers={'Content-Type': ['application/json']})
                            d.addCallback(treq.json_content)
                            d.addCallback(wait_until_volume_in_place, fs=fs)
                            fs_create_deferreds.append(d)
                    else:
                        # if a dataset doesn't exist at all, create it on this server.
                        d = self.client.post(self.base_url + "/configuration/datasets",
                            json.dumps({"primary": self.host_uuid, "metadata": {"name": fs}}),
                            headers={'Content-Type': ['application/json']})
                        d.addCallback(treq.json_content)
                        d.addCallback(wait_until_volume_in_place, fs=fs)
                        fs_create_deferreds.append(d)

            # Wait for every create/move to settle, then answer the docker
            # plugin request with the first (only) mountpoint.
            d = defer.gatherResults(fs_create_deferreds)
            def got_created_and_moved_datasets(list_new_datasets):
                dataset_mapping = dict(list_new_datasets)
                print "constructed dataset_mapping", dataset_mapping
                new_binds = []
                for fs, remainder in old_binds:
                    # forget about remainder...
                    new_binds.append(dataset_mapping[fs]["path"])
                new_json = {}
                if new_binds:
                    new_json["Mountpoint"] = new_binds[0]
                    new_json["Err"] = None
                else:
                    # This is how you indicate not handling this request
                    new_json["Mountpoint"] = ""
                    new_json["Err"] = "unable to handle"

                print "<<< responding with", new_json
                request.write(json.dumps(new_json))
                request.finish()
            d.addCallback(got_created_and_moved_datasets)
            return d
        d.addCallback(got_dataset_configuration)
        d.addErrback(log.err, 'while processing configured datasets')
        return server.NOT_DONE_YET
예제 #58
0
 def __init__(self, *args, **kw):
     """
     Initialise the resource with a dedicated, non-pooled HTTP client.
     """
     # Initialise the parent Resource first.  Do not ``return`` its
     # result: __init__ must return None, and returning the parent
     # call's (None) value misleadingly suggests it carries a value.
     resource.Resource.__init__(self, *args, **kw)
     # Agent without a connection pool: each request opens a fresh
     # connection rather than reusing a persistent one.
     self._agent = Agent(reactor)
     self.client = HTTPClient(self._agent)
예제 #59
0
파일: api.py 프로젝트: alex/treq
def _client(*args, **kwargs):
    """
    Construct an ``HTTPClient``, resolving any ``reactor``, ``pool`` and
    ``persistent`` keyword arguments into a concrete connection pool via
    ``default_pool`` first.
    """
    pool = default_pool(kwargs.get('reactor'),
                        kwargs.get('pool'),
                        kwargs.get('persistent'))
    kwargs['pool'] = pool
    return HTTPClient.with_config(**kwargs)
예제 #60
0
class HTTPClientTests(TestCase):
    def setUp(self):
        # A mocked Agent lets each test inspect exactly what
        # HTTPClient.request() forwards to Agent.request().
        self.agent = mock.Mock(Agent)
        self.client = HTTPClient(self.agent)

        # Patch FileBodyProducer so tests can capture the body object it
        # was constructed with (see assertBody) without real I/O.
        self.fbp_patcher = mock.patch('treq.client.FileBodyProducer')
        self.FileBodyProducer = self.fbp_patcher.start()
        self.addCleanup(self.fbp_patcher.stop)

    def assertBody(self, expected):
        """
        Assert that the body handed to the patched FileBodyProducer on its
        first call reads back as ``expected``.
        """
        first_call = self.FileBodyProducer.mock_calls[0]
        body_file = first_call[1][0]
        self.assertEqual(body_file.read(), expected)

    def test_request_case_insensitive_methods(self):
        """
        The HTTP method is uppercased before being handed to the agent.
        """
        target = 'http://example.com/'
        self.client.request('gEt', target)
        self.agent.request.assert_called_once_with(
            'GET', target, headers=Headers({}), bodyProducer=None)

    def test_request_query_params(self):
        """
        A ``params`` dict of single-item lists is encoded into the URL's
        query string.
        """
        base = 'http://example.com/'
        self.client.request('GET', base, params={'foo': ['bar']})
        self.agent.request.assert_called_once_with(
            'GET', base + '?foo=bar',
            headers=Headers({}), bodyProducer=None)

    def test_request_tuple_query_values(self):
        """
        Tuple values in ``params`` are encoded just like list values.
        """
        base = 'http://example.com/'
        self.client.request('GET', base, params={'foo': ('bar',)})
        self.agent.request.assert_called_once_with(
            'GET', base + '?foo=bar',
            headers=Headers({}), bodyProducer=None)

    def test_request_merge_query_params(self):
        """
        ``params`` are appended after any query string already present in
        the URL.
        """
        self.client.request('GET', 'http://example.com/?baz=bax',
                            params={'foo': ['bar', 'baz']})
        merged = 'http://example.com/?baz=bax&foo=bar&foo=baz'
        self.agent.request.assert_called_once_with(
            'GET', merged, headers=Headers({}), bodyProducer=None)

    def test_request_merge_tuple_query_params(self):
        """
        ``params`` given as a list of key/value tuples are appended after
        an existing query string.
        """
        self.client.request('GET', 'http://example.com/?baz=bax',
                            params=[('foo', 'bar')])
        merged = 'http://example.com/?baz=bax&foo=bar'
        self.agent.request.assert_called_once_with(
            'GET', merged, headers=Headers({}), bodyProducer=None)

    def test_request_dict_single_value_query_params(self):
        """
        Scalar (non-sequence) values in a ``params`` dict are encoded as a
        single query parameter.
        """
        base = 'http://example.com/'
        self.client.request('GET', base, params={'foo': 'bar'})
        self.agent.request.assert_called_once_with(
            'GET', base + '?foo=bar',
            headers=Headers({}), bodyProducer=None)

    def test_request_data_dict(self):
        """
        A ``data`` dict is form-urlencoded into the request body, with the
        matching Content-Type header set.
        """
        self.client.request('POST', 'http://example.com/',
                            data={'foo': ['bar', 'baz']})
        form_headers = Headers(
            {'Content-Type': ['application/x-www-form-urlencoded']})
        self.agent.request.assert_called_once_with(
            'POST', 'http://example.com/',
            headers=form_headers,
            bodyProducer=self.FileBodyProducer.return_value)
        self.assertBody('foo=bar&foo=baz')

    def test_request_data_single_dict(self):
        """
        Scalar values in a ``data`` dict are form-urlencoded as single
        fields.
        """
        self.client.request('POST', 'http://example.com/',
                            data={'foo': 'bar'})
        form_headers = Headers(
            {'Content-Type': ['application/x-www-form-urlencoded']})
        self.agent.request.assert_called_once_with(
            'POST', 'http://example.com/',
            headers=form_headers,
            bodyProducer=self.FileBodyProducer.return_value)
        self.assertBody('foo=bar')

    def test_request_data_tuple(self):
        """
        ``data`` given as a list of key/value tuples is form-urlencoded.
        """
        self.client.request('POST', 'http://example.com/',
                            data=[('foo', 'bar')])
        form_headers = Headers(
            {'Content-Type': ['application/x-www-form-urlencoded']})
        self.agent.request.assert_called_once_with(
            'POST', 'http://example.com/',
            headers=form_headers,
            bodyProducer=self.FileBodyProducer.return_value)
        self.assertBody('foo=bar')

    def test_request_data_file(self):
        """
        A file object passed as ``data`` is handed to FileBodyProducer and
        streamed as the request body, with no Content-Type set.
        """
        temp_fn = self.mktemp()

        with open(temp_fn, "w") as temp_file:
            temp_file.write('hello')

        # ``open`` instead of the deprecated Python 2 ``file`` builtin
        # (removed in Python 3; ``open`` is the equivalent here).
        self.client.request('POST', 'http://example.com/', data=open(temp_fn))

        self.agent.request.assert_called_once_with(
            'POST', 'http://example.com/',
            headers=Headers({}),
            bodyProducer=self.FileBodyProducer.return_value)

        self.assertBody('hello')

    def test_request_dict_headers(self):
        """
        Plain-dict headers are converted into a Headers object; scalar
        values become single-element lists, list values pass through.
        """
        raw_headers = {
            'User-Agent': 'treq/0.1dev',
            'Accept': ['application/json', 'text/plain'],
        }
        self.client.request('GET', 'http://example.com/', headers=raw_headers)
        expected = Headers({'User-Agent': ['treq/0.1dev'],
                            'Accept': ['application/json', 'text/plain']})
        self.agent.request.assert_called_once_with(
            'GET', 'http://example.com/',
            headers=expected,
            bodyProducer=None)