def main(reactor):
    """
    List every resource record set in one hard-coded Route53 hosted zone
    and pretty-print the result.

    Returns a Deferred that fires when printing is done.
    """
    region = AWSServiceRegion(AWSCredentials())
    client = get_route53_client(Agent(reactor), region)
    done = client.list_resource_record_sets(zone_id="Z2T2TSJ409GHZ9")
    done.addCallback(pprint)
    return done
def test_error_changes(self):
    """
    When the server rejects a change batch, the client fails with a
    L{Route53Error} carrying the parsed error details.
    """
    zone_id = "1234ABCDEF"
    error_resource = POSTableData(
        sample_create_resource_record_sets_error_result.xml,
        b"text/xml",
        BAD_REQUEST,
    )
    resource_tree = {
        b"2013-04-01": {
            b"hostedzone": {
                zone_id.encode("ascii"): {
                    b"rrset": error_resource,
                },
            },
        },
    }
    agent = RequestTraversalAgent(static_resource(resource_tree))
    aws = AWSServiceRegion(access_key="abc", secret_key="def")
    client = get_route53_client(agent, aws, uncooperator())
    change = create_rrset(
        sample_create_resource_record_sets_error_result.rrset)
    err = self.failureResultOf(
        client.change_resource_record_sets(
            zone_id=zone_id,
            changes=[change],
        ),
        Route53Error,
    )
    self.assertEqual(
        err.value.errors,
        [{
            'Code': 'InvalidChangeBatch',
            'Message': "[Tried to create resource record set [name='duplicate.example.invalid.', type='CNAME'] but it already exists]",
            'Type': 'Sender',
        }],
    )
def _getClient(self):
    """
    Build a txAWS S3 client using our stored credentials.
    """
    credentials = AWSCredentials(
        access_key=self.accessKey.encode('utf-8'),
        secret_key=self.secretKey.encode('utf-8'),
    )
    return AWSServiceRegion(creds=credentials).get_s3_client()
def test_creation_with_keys_and_creds(self):
    """
    creds take precedence over individual access key/secret key pairs.
    """
    region = AWSServiceRegion(self.creds, access_key="baz",
                              secret_key="quux")
    # assertEquals is a deprecated alias of assertEqual (emits a
    # DeprecationWarning); use the canonical name, as the other tests
    # in this suite already do.
    self.assertEqual(region.creds.access_key, "foo")
    self.assertEqual(region.creds.secret_key, "bar")
def __init__(self, engine):
    """
    Open an S3 connection via boto, and additionally a txAWS S3 client
    when ``settings.USE_BOTO`` is false.
    """
    self.engine = engine
    self.s3conn = boto.connect_s3(
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
    )
    if not settings.USE_BOTO:
        region = AWSServiceRegion(
            access_key=settings.AWS_ACCESS_KEY_ID,
            secret_key=settings.AWS_SECRET_ACCESS_KEY,
            s3_uri=S3_US[0]['endpoint'],
        )
        self.txs3conn = region.get_s3_client()
    self.botobucket = self.s3conn.get_bucket(settings.IMAGES_STORE)
def __init__(self, environment_name, config):
    """
    Build the EC2 and S3 clients this provider will use, resolving the
    EC2 endpoint from the configured region when no explicit URI is set.
    """
    super(MachineProvider, self).__init__(environment_name, config)
    ec2_uri = config.get("ec2-uri")
    if not ec2_uri:
        # Fall back to the well-known endpoint for the configured region.
        ec2_uri = get_region_uri(config.get("region", "us-east-1"))
    self._service = AWSServiceRegion(
        access_key=config.get("access-key", ""),
        secret_key=config.get("secret-key", ""),
        ec2_uri=ec2_uri,
        s3_uri=config.get("s3-uri", ""))
    self.s3 = self._service.get_s3_client()
    self.ec2 = self._service.get_ec2_client()
def test_some_zones(self):
    """
    ``list_hosted_zones`` parses the server's XML listing into
    ``HostedZone`` instances.
    """
    agent = RequestTraversalAgent(
        static_resource({
            b"2013-04-01": {
                b"hostedzone": Data(
                    sample_list_hosted_zones_result.xml,
                    b"text/xml",
                ),
            },
        }))
    aws = AWSServiceRegion(access_key="abc", secret_key="def")
    client = get_route53_client(agent, aws, uncooperator())
    zones = self.successResultOf(client.list_hosted_zones())
    expected = [HostedZone(**sample_list_hosted_zones_result.details)]
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(expected, zones)
def _client_for_rrsets(self, zone_id, rrsets_xml):
    """
    Return a Route53 client wired to an in-memory server that serves
    ``rrsets_xml`` as the rrset resource of ``zone_id``.
    """
    resource_tree = {
        b"2013-04-01": {
            b"hostedzone": {
                zone_id: {
                    b"rrset": Data(
                        rrsets_xml,
                        b"text/xml",
                    ),
                },
            },
        },
    }
    agent = RequestTraversalAgent(static_resource(resource_tree))
    region = AWSServiceRegion(access_key="abc", secret_key="def")
    return get_route53_client(agent, region, uncooperator())
def _finish_convergence_service(k8s_client, options, subscription_client):
    # Assemble the periodic convergence service: wrap the raw Kubernetes
    # client and read the AWS key pair from the files named in ``options``.
    k8s = KubeClient(k8s=k8s_client)
    access_key_id = FilePath(
        options["aws-access-key-id-path"]).getContent().strip()
    secret_access_key = FilePath(
        options["aws-secret-access-key-path"]).getContent().strip()
    aws = AWSServiceRegion(creds=AWSCredentials(
        access_key=access_key_id,
        secret_key=secret_access_key,
    ))
    # Log which key is in use; only a SHA-256 digest of the secret is
    # emitted so the secret itself never reaches the log stream.
    Message.log(
        event=u"convergence-service:key-notification",
        key_id=access_key_id.decode("ascii"),
        secret_key_hash=sha256(secret_access_key).hexdigest().decode("ascii"),
    )
    # XXX I get to leave a ton of fields empty because I happen to know
    # they're not used in this codepath. :/ Maybe this suggests something has
    # gone wrong ...
    config = DeploymentConfiguration(
        domain=options["domain"].decode("ascii"),
        kubernetes_namespace=options["kubernetes-namespace"].decode("ascii"),
        subscription_manager_endpoint=URL.fromText(
            options["endpoint"].decode("ascii")),
        s3_access_key_id=access_key_id.decode("ascii"),
        s3_secret_key=secret_access_key.decode("ascii"),
        introducer_image=options["introducer-image"].decode("ascii"),
        storageserver_image=options["storageserver-image"].decode("ascii"),
        log_gatherer_furl=None,
        stats_gatherer_furl=None,
    )
    # Re-run convergence forever, every ``interval`` seconds; errors are
    # diverted to the log instead of stopping the timer.
    return TimerService(
        options["interval"],
        divert_errors_to_log(converge, u"subscription_converger"),
        config,
        subscription_client,
        k8s,
        aws,
    )
def run(self):
    """
    Run the configured method and write the HTTP response status and
    text to the output stream.
    """
    # NOTE: this is Python 2 code (``print >> file`` statements).
    region = AWSServiceRegion(access_key=self.key, secret_key=self.secret,
                              uri=self.endpoint)
    query = self.query_factory(action=self.action, creds=region.creds,
                               endpoint=region.ec2_endpoint,
                               other_params=self.parameters)

    def write_response(response):
        # Success path: URL, status code, then the raw response body.
        print >> self.output, "URL: %s" % query.client.url
        print >> self.output
        print >> self.output, "HTTP status code: %s" % query.client.status
        print >> self.output
        print >> self.output, response

    def write_error(failure):
        # AWSError wraps the server's original message; for anything else
        # fall back to the failure's message, stripping the redundant
        # "Error Message: " prefix if present.
        if failure.check(AWSError):
            message = failure.value.original
        else:
            message = failure.getErrorMessage()
            if message.startswith("Error Message: "):
                message = message[len("Error Message: "):]
        print >> self.output, "URL: %s" % query.client.url
        print >> self.output
        # The status may be missing if the request never completed.
        if getattr(query.client, "status", None) is not None:
            print >> self.output, "HTTP status code: %s" % (
                query.client.status,)
            print >> self.output
        print >> self.output, message
        # Include the raw error response body when one was captured.
        if getattr(failure.value, "response", None) is not None:
            print >> self.output
            print >> self.output, failure.value.response

    deferred = query.submit()
    deferred.addCallback(write_response)
    deferred.addErrback(write_error)
    return deferred
def _finish_convergence_service(
        k8s_client, options, subscription_client, reactor,
):
    """
    Assemble the convergence service from command-line options: load AWS
    credentials from files, build the deployment configuration, and hand
    everything to ``_convergence_service``.
    """
    kube = KubeClient(k8s=k8s_client)
    aws_key = FilePath(options["aws-access-key-id-path"]).getContent().strip()
    aws_secret = FilePath(
        options["aws-secret-access-key-path"]).getContent().strip()
    region = AWSServiceRegion(creds=AWSCredentials(
        access_key=aws_key,
        secret_key=aws_secret,
    ))
    # Record the key in use; only a digest of the secret is logged.
    Message.log(
        event=u"convergence-service:key-notification",
        key_id=aws_key.decode("ascii"),
        secret_key_hash=sha256(aws_secret).hexdigest().decode("ascii"),
    )
    deployment = DeploymentConfiguration(
        domain=options["domain"].decode("ascii"),
        kubernetes_namespace=options["kubernetes-namespace"].decode("ascii"),
        subscription_manager_endpoint=URL.fromText(
            options["endpoint"].decode("ascii")),
        s3_access_key_id=aws_key.decode("ascii"),
        s3_secret_key=aws_secret.decode("ascii"),
        introducer_image=options["introducer-image"].decode("ascii"),
        storageserver_image=options["storageserver-image"].decode("ascii"),
        log_gatherer_furl=options["log-gatherer-furl"],
        stats_gatherer_furl=options["stats-gatherer-furl"],
    )
    return _convergence_service(
        reactor,
        options["interval"],
        deployment,
        subscription_client,
        kube,
        region,
    )
def test_some_changes(self):
    """
    A change batch POSTs one ``<Change>`` element per requested change,
    in CREATE/DELETE/UPSERT order, inside a single request document.
    """
    change_resource = POSTableData(
        sample_change_resource_record_sets_result.xml,
        b"text/xml",
    )
    zone_id = u"ABCDEF1234"
    resource_tree = {
        b"2013-04-01": {
            b"hostedzone": {
                zone_id.encode("ascii"): {
                    b"rrset": change_resource,
                }
            },
        },
    }
    agent = RequestTraversalAgent(static_resource(resource_tree))
    aws = AWSServiceRegion(access_key="abc", secret_key="def")
    client = get_route53_client(agent, aws, uncooperator())
    rrset = sample_change_resource_record_sets_result.rrset
    self.successResultOf(
        client.change_resource_record_sets(
            zone_id=zone_id,
            changes=[
                create_rrset(rrset),
                delete_rrset(rrset),
                upsert_rrset(rrset),
            ],
        ))
    # Ack, what a pathetic assertion.
    change_template = u"<Change><Action>{action}</Action><ResourceRecordSet><Name>example.invalid.</Name><Type>NS</Type><TTL>86400</TTL><ResourceRecords><ResourceRecord><Value>ns1.example.invalid.</Value></ResourceRecord><ResourceRecord><Value>ns2.example.invalid.</Value></ResourceRecord></ResourceRecords></ResourceRecordSet></Change>"
    changes = u"".join(
        change_template.format(action=action)
        for action in [u"CREATE", u"DELETE", u"UPSERT"]
    )
    expected = u"""\
<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/"><ChangeBatch><Changes>{changes}</Changes></ChangeBatch></ChangeResourceRecordSetsRequest>""".format(
        changes=changes).encode("utf-8")
    self.assertEqual((expected, ), change_resource.posted)
def __init__(self, reactor):
    # Status-icon constructor: load credentials, build the AWS clients,
    # and wire up the popup menu.  (Python 2 / PyGTK code.)
    from txaws.service import AWSServiceRegion
    gtk.StatusIcon.__init__(self)
    self.set_from_stock(gtk.STOCK_NETWORK)
    self.set_visible(True)
    self.reactor = reactor
    self.connect("activate", self.on_activate)
    self.probing = False
    # Nested import because otherwise we get "reactor already installed".
    self.password_dialog = None
    # Prefer credentials from the environment; fall back to the GNOME
    # keyring when they are absent (AWSCredentials raises ValueError).
    try:
        creds = AWSCredentials()
    except ValueError:
        creds = self.from_gnomekeyring()
    self.region = AWSServiceRegion(creds)
    self.create_client(creds)
    # UIManager markup for the icon's popup menu.  NOTE(review): the
    # original line breaks/indentation of this string are unknown; gtk's
    # XML parser is whitespace-insensitive so this layout is equivalent.
    menu = """
        <ui>
            <menubar name="Menubar">
                <menu action="Menu">
                    <menuitem action="Stop instances"/>
                </menu>
            </menubar>
        </ui>
    """
    actions = [
        ("Menu", None, "Menu"),
        ("Stop instances", gtk.STOCK_STOP, "_Stop instances...", None,
         "Stop instances", self.on_stop_instances),
    ]
    ag = gtk.ActionGroup("Actions")
    ag.add_actions(actions)
    self.manager = gtk.UIManager()
    self.manager.insert_action_group(ag, 0)
    self.manager.add_ui_from_string(menu)
    self.menu = self.manager.get_widget(
        "/Menubar/Menu/Stop instances").props.parent
    self.connect("popup-menu", self.on_popup_menu)
def test_creation_with_region_override(self):
    """
    Passing ``region`` selects that region's EC2 endpoint URI.
    """
    region = AWSServiceRegion(creds=self.creds, region=REGION_EU)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(region.ec2_endpoint.get_uri(), EC2_ENDPOINT_EU)
def test_creation_with_uri_and_region(self):
    """
    An explicit ``ec2_uri`` takes precedence over the ``region``
    shortcut.
    """
    region = AWSServiceRegion(
        creds=self.creds, region=REGION_EU, ec2_uri="http://foo/bar")
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(region.ec2_endpoint.get_uri(), "http://foo/bar")
def test_creation_with_uri_backwards_compatible(self):
    """
    The legacy ``uri`` parameter still sets the EC2 endpoint URI.
    """
    region = AWSServiceRegion(
        creds=self.creds, uri="http://foo/bar")
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(region.ec2_endpoint.get_uri(), "http://foo/bar")
# # - in case they leak out of the test suite somehow # # - in case the implementation is broken and does something destructive # # - in case malicious code is inserted somehow (eg, you run # tests on code submitted by another developer) # # As far as I can tell there's no way to isolate an API user # from _some_ of the parent account's S3 buckets. Therefore, # isolation probably involves registering a new top-level AWS # account and dedicating it to testing purposes. try: access_key = environ["TXAWS_INTEGRATION_AWS_ACCESS_KEY_ID"] secret_key = environ["TXAWS_INTEGRATION_AWS_SECRET_ACCESS_KEY"] except KeyError as e: case.skipTest("Missing {} environment variable.".format(e)) else: credentials = AWSCredentials( access_key=access_key, secret_key=secret_key, ) return AWSServiceRegion(credentials) def get_memory_service(case): return FakeAWSServiceRegion( access_key="fake access key", secret_key="fake secret key", )
def test_creation_with_keys(self):
    """
    Credentials can be built from a bare access/secret key pair.
    """
    region = AWSServiceRegion(access_key="baz", secret_key="quux")
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(region.creds.access_key, "baz")
    self.assertEqual(region.creds.secret_key, "quux")
def setUp(self):
    """Provide each test with fixed credentials and a matching region."""
    creds = AWSCredentials("foo", "bar")
    self.creds = creds
    self.region = AWSServiceRegion(creds=creds)
def setUp(self):
    """Build EC2 and S3 clients from a default-constructed region."""
    service_region = AWSServiceRegion()
    self.ec2 = service_region.get_ec2_client()
    self.s3 = service_region.get_s3_client()
def set_region(self, creds):
    """Store an AWS service region built from ``creds`` on this object."""
    # Function-scope import kept as in the original; presumably it avoids
    # import-time side effects elsewhere — TODO confirm.
    from txaws.service import AWSServiceRegion
    self.region = AWSServiceRegion(creds)