def test_perform(self):
    """
    ``WriteToS3`` has a performer that creates a new object with
    ``target_key`` and ``content`` in ``target_bucket``.
    """
    s3 = self.useFixture(S3BucketFixture(test_case=self))
    intent = WriteToS3(
        content=random_name(self).encode("ascii"),
        target_key=random_name(self),
        target_bucket=s3.bucket_name,
    )
    result = sync_perform(
        dispatcher=RealPerformers().dispatcher(),
        effect=Effect(intent=intent),
    )
    self.assertIs(None, result)
    self.assertEqual(
        intent.content,
        s3.get_object_content(key=intent.target_key),
    )
def __init__(self, test_case):
    super(S3BucketFixture, self).__init__()
    self.test_case = test_case
    # Bucket names must be a valid DNS label.
    # https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
    self.bucket_name = random_name(test_case).lower().replace("_", "")[-63:]
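# A hedged sketch (assumed name, and it would live in a ``TestCase`` rather
# than in the fixture itself) of a check for the normalisation above: the
# computed bucket name must be lowercase, contain no underscores, and be at
# most 63 characters, per the bucket naming requirements linked above.
# Only ``__init__`` runs here, so no real bucket is created.
def test_bucket_name_is_valid_dns_label(self):
    fixture = S3BucketFixture(test_case=self)
    self.assertTrue(fixture.bucket_name.islower())
    self.assertNotIn("_", fixture.bucket_name)
    self.assertTrue(len(fixture.bucket_name) <= 63)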
def test_configuration(self):
    """
    Source AMI ID, build region, and target regions can all be overridden
    in a chosen template.
    """
    expected_build_region = AWS_REGIONS.EU_WEST_1
    expected_publish_regions = [
        AWS_REGIONS.AP_NORTHEAST_1,
        AWS_REGIONS.AP_SOUTHEAST_1,
        AWS_REGIONS.AP_SOUTHEAST_2,
    ]
    expected_source_ami = random_name(self)
    intent = PackerConfigure(
        build_region=expected_build_region,
        publish_regions=expected_publish_regions,
        source_ami=expected_source_ami,
        template=u"docker",
        distribution=u"ubuntu-14.04",
    )
    # Call the performer.
    packer_configuration_path = sync_perform(
        dispatcher=RealPerformers(
            working_directory=self.make_temporary_directory()
        ).dispatcher(),
        effect=Effect(intent=intent),
    )
    with packer_configuration_path.open("r") as f:
        packer_configuration = json.load(f)
    [builder] = packer_configuration["builders"]
    build_region = builder["region"]
    build_source_ami = builder["source_ami"]
    publish_regions = builder["ami_regions"]
    [provisioner] = packer_configuration["provisioners"]
    self.assertEqual(
        (expected_build_region.value,
         set(c.value for c in expected_publish_regions),
         expected_source_ami),
        (build_region, set(publish_regions), build_source_ami),
    )
def test_upload_file(self):
    """
    A single file can be uploaded.
    """
    expected_content = random_name(self)
    local_path = self.make_temporary_file()
    local_path.setContent(expected_content)
    return self.assert_upload(local_path, FileContains(expected_content))
def test_perform(self):
    """
    ``StandardOut`` has a performer that writes content to sys.stdout.
    """
    fake_sys_module = FakeSysModule()
    intent = StandardOut(
        content=random_name(self).encode('ascii'),
    )
    result = sync_perform(
        dispatcher=RealPerformers(sys_module=fake_sys_module).dispatcher(),
        effect=Effect(intent=intent),
    )
    self.assertIs(None, result)
    self.assertEqual(intent.content, fake_sys_module.stdout.getvalue())
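# For context, a minimal sketch of the kind of wiring ``RealPerformers`` is
# expected to provide for ``StandardOut``.  ``standard_out_dispatcher`` and
# ``perform_standard_out`` are assumed names used only for illustration;
# ``sync_performer``, ``TypeDispatcher``, ``ComposedDispatcher`` and
# ``base_dispatcher`` are real ``effect`` APIs.
from effect import (
    ComposedDispatcher, TypeDispatcher, base_dispatcher, sync_performer,
)


def standard_out_dispatcher(sys_module):
    @sync_performer
    def perform_standard_out(dispatcher, intent):
        # Write the intent's content to the injected stdout; returning
        # None matches the ``assertIs(None, result)`` check above.
        sys_module.stdout.write(intent.content)

    return ComposedDispatcher([
        TypeDispatcher({StandardOut: perform_standard_out}),
        base_dispatcher,
    ])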
def test_invalid_direction(self):
    """
    The ``direction`` argument must be one of ``DOWNLOAD`` or ``UPLOAD``.
    """
    invalid_direction = random_name(self)
    exception = self.assertRaises(
        ValueError,
        scp,
        reactor=object(),
        username=u"Joe",
        host=u"example.com",
        remote_path=self.make_temporary_path(),
        local_path=self.make_temporary_path(),
        direction=invalid_direction,
    )
    self.assertIn(invalid_direction, unicode(exception))
def assert_upload(self, local_path, matcher):
    """
    Assert that the ``local_path`` can be uploaded to and then downloaded
    from a remote SSH server and that the contents are preserved.

    :param FilePath local_path: The local file or directory to upload.
    :param matcher: A ``testtools`` matcher which will be compared to the
        downloaded ``FilePath.path``.
    :returns: A ``Deferred`` that fires when the assertion is complete.
    """
    self.ssh_server = create_ssh_server(
        base_path=self.make_temporary_directory()
    )
    self.addCleanup(self.ssh_server.restore)
    username = u"root"
    host = bytes(self.ssh_server.ip)
    remote_file = self.ssh_server.home.child(random_name(self))
    d = upload(
        reactor=reactor,
        username=username,
        host=host,
        local_path=local_path,
        remote_path=remote_file,
        port=self.ssh_server.port,
        identity_file=self.ssh_server.key_path,
    )
    download_directory = self.make_temporary_directory()
    download_path = download_directory.child("download")
    d.addCallback(
        lambda ignored: download(
            reactor=reactor,
            username=username,
            host=host,
            remote_path=remote_file,
            local_path=download_path,
            port=self.ssh_server.port,
            identity_file=self.ssh_server.key_path,
        )
    )

    def check(ignored):
        self.assertThat(download_path.path, matcher)

    d.addCallback(check)
    return d
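# A hedged sketch of a companion case for directories, assuming the
# ``upload``/``download`` helpers used by ``assert_upload`` handle
# directories as well as single files (the real suite may or may not
# exercise this).  ``DirContains`` is a real ``testtools.matchers`` matcher.
from testtools.matchers import DirContains


def test_upload_directory(self):
    """
    A directory containing a file can be uploaded.
    """
    filename = random_name(self)
    local_dir = self.make_temporary_directory()
    local_dir.child(filename).setContent(b"")
    return self.assert_upload(local_dir, DirContains([filename]))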
def droplet_for_test(test_case, client):
    """
    Update a prototype set of droplet attributes with a random name and
    make API calls to create the droplet.

    :param TestCase test_case: The test for which to build and clean up
        the droplet.
    :param pyocean.DigitalOcean client: The client with which to make
        DigitalOcean v2 API calls.
    :returns: A ``pyocean.Droplet`` instance.
    """
    droplet_attributes = copy.deepcopy(TESTING_DROPLET_ATTRIBUTES)
    droplet_attributes['name'] = (
        test_case.id().replace('_', '-') + '-' + random_name()
    )
    droplet = retry_on_error(
        [pending_event], client.droplet.create, droplet_attributes
    )
    test_case.addCleanup(retry_on_error, [pending_event], droplet.destroy)
    return droplet
def test_configuration(self):
    """
    Source AMIs, build region, and target regions can all be overridden
    in a chosen template.
    """
    expected_build_region = AWS_REGIONS.EU_WEST_1
    expected_publish_regions = [
        AWS_REGIONS.AP_NORTHEAST_1,
        AWS_REGIONS.AP_SOUTHEAST_1,
        AWS_REGIONS.AP_SOUTHEAST_2,
    ]
    expected_source_ami_map = {
        AWS_REGIONS.EU_WEST_1: random_name(self),
    }
    intent = PackerConfigure(
        build_region=expected_build_region,
        publish_regions=expected_publish_regions,
        source_ami_map=expected_source_ami_map,
        template=u"docker",
    )
    # Call the performer.
    packer_configuration_path = sync_perform(
        dispatcher=RealPerformers(
            working_directory=self.make_temporary_directory()
        ).dispatcher(),
        effect=Effect(intent=intent),
    )
    with packer_configuration_path.open('r') as f:
        packer_configuration = json.load(f)
    [builder] = packer_configuration["builders"]
    build_region = builder['region']
    build_source_ami = builder['source_ami']
    publish_regions = builder['ami_regions']
    [_provisioner] = packer_configuration["provisioners"]
    self.assertEqual(
        (expected_build_region.value,
         set(c.value for c in expected_publish_regions),
         expected_source_ami_map[expected_build_region]),
        (build_region, set(publish_regions), build_source_ami),
    )
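# A hedged sketch of the transformation the ``PackerConfigure`` performer is
# expected to apply, inferred only from the assertions above: load the named
# template, override the builder's region, source AMI and publish regions,
# and write the result into the working directory.  The function name and
# the file-handling details are assumptions for illustration.
import json


def configure_packer_template_sketch(template_path, output_path, intent):
    with template_path.open('r') as f:
        configuration = json.load(f)
    [builder] = configuration["builders"]
    builder["region"] = intent.build_region.value
    builder["source_ami"] = intent.source_ami_map[intent.build_region]
    builder["ami_regions"] = [
        region.value for region in intent.publish_regions
    ]
    with output_path.open('w') as f:
        json.dump(configuration, f)
    return output_path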
def test_upload_content_type(self):
    """
    A content type can be set for an uploaded file.
    """
    filename = random_name(self)
    tmpdir = FilePath(self.mktemp())
    tmpdir.makedirs()
    tmpfile = tmpdir.child(filename)
    tmpfile.setContent('foo')

    s3 = boto.connect_s3()
    bucket = s3.get_bucket(bucket_name)
    self.addCleanup(bucket.delete_key, filename)

    sync_perform(
        dispatcher=ComposedDispatcher([boto_dispatcher, base_dispatcher]),
        effect=Effect(UploadToS3(
            source_path=tmpdir,
            target_bucket=bucket_name,
            target_key=filename,
            file=tmpfile,
            content_type='application/json',
        )),
    )

    key = bucket.get_key(filename)
    self.assertEqual('application/json', key.content_type)
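# For context, a hedged sketch of what the ``boto_dispatcher`` performer for
# ``UploadToS3`` plausibly does with ``content_type``.  The performer name is
# an assumption; ``connect_s3``, ``get_bucket``, ``new_key`` and
# ``set_contents_from_filename`` are real boto APIs.
import boto
from effect import sync_performer


@sync_performer
def perform_upload_to_s3_sketch(dispatcher, intent):
    bucket = boto.connect_s3().get_bucket(intent.target_bucket)
    key = bucket.new_key(intent.target_key)
    headers = {}
    if intent.content_type is not None:
        # Recorded by S3 and reported back as ``key.content_type``, which is
        # what the test above asserts on.
        headers['Content-Type'] = intent.content_type
    key.set_contents_from_filename(intent.file.path, headers=headers)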
def test_docker_compose_up_postgres(self):
    """
    A Flocker cluster, built using the CloudFormation template, has a
    client node.  That node has ``docker-compose`` and templates.  The
    first template creates a PostgreSQL server on one node.  The second
    template moves the PostgreSQL server to the second node.
    """
    client_username = b"ubuntu"
    client_home = FilePath('/home').child(client_username)
    remote_compose_directory = client_home.child(random_name(self))
    self.compose_node1 = (
        remote_compose_directory.child("docker-compose-node1.yml")
    )
    self.compose_node2 = (
        remote_compose_directory.child("docker-compose-node2.yml")
    )

    # Publish the compose files to the client.
    def upload_docker_compose_files():
        return upload(
            reactor=reactor,
            username=client_username,
            host=self.client_node_ip.encode('ascii'),
            local_path=FilePath(__file__).parent().descendant(
                ['installer', 'postgres']
            ),
            remote_path=remote_compose_directory,
        )

    d = retry_failure(
        reactor=reactor,
        function=upload_docker_compose_files,
        expected=(SCPConnectionError,),
        # Wait 60s for the client SSH server to accept connections.
        steps=repeat(1, 60),
    )

    def cleanup_container(ignored):
        self.addCleanup(
            remote_docker_compose,
            self.client_node_ip,
            self.docker_host,
            self.compose_node1.path,
            'down',
        )
    d.addCallback(cleanup_container)

    # docker-compose doesn't retry failed pulls, and pulls fail all the
    # time.
    def pull_postgres():
        return remote_docker_compose(
            self.client_node_ip,
            self.docker_host,
            self.compose_node1.path,
            'pull',
        )
    d.addCallback(
        lambda ignored: retry_failure(
            reactor=reactor,
            function=pull_postgres,
            expected=(ProcessTerminated,),
            steps=repeat(1, 5),
        )
    )

    # Create the PostgreSQL server on node1.  A Flocker dataset will be
    # created and attached by way of the Flocker Docker plugin.
    d.addCallback(
        lambda ignored: remote_docker_compose(
            self.client_node_ip,
            self.docker_host,
            self.compose_node1.path,
            'up', '-d',
        )
    )

    # docker-compose blocks until the container is running, but the
    # PostgreSQL server may not yet be ready to receive connections.
    d.addCallback(
        lambda ignored: self._wait_for_postgres(self.agent_node1_ip)
    )

    # Create a database and insert a record.
    d.addCallback(
        lambda ignored: remote_postgres(
            self.client_node_ip,
            self.agent_node1_ip,
            RECREATE_STATEMENT + INSERT_STATEMENT,
        )
    )

    # Stop and then remove the container.
    d.addCallback(
        lambda ignored: remote_docker_compose(
            self.client_node_ip,
            self.docker_host,
            self.compose_node1.path,
            'down',
        )
    )

    # Start the container on the other node.
    d.addCallback(
        lambda ignored: remote_docker_compose(
            self.client_node_ip,
            self.docker_host,
            self.compose_node2.path,
            'up', '-d',
        )
    )

    # The database server won't be immediately ready to receive
    # connections.
    d.addCallback(
        lambda ignored: self._wait_for_postgres(self.agent_node2_ip)
    )

    # Select the record.
    d.addCallback(
        lambda ignored: remote_postgres(
            self.client_node_ip,
            self.agent_node2_ip,
            SELECT_STATEMENT,
        )
    )

    # There should be one record and its value should be 1.
    d.addCallback(
        lambda (process_status, process_output): self.assertEqual(
            "1", process_output[2].strip()
        )
    )
    return d
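# A hedged sketch of the shell-level behaviour ``remote_docker_compose``
# presumably wraps: invoking ``docker-compose`` on the client node over SSH
# with ``DOCKER_HOST`` pointed at the chosen agent node, so compose talks to
# that node's Docker daemon.  Illustration only; the real helper's command
# line, options (key file, host checking) and return value are defined
# elsewhere in the installer tests.  ``getProcessOutputAndValue`` is a real
# Twisted API returning a Deferred that fires with (stdout, stderr, exit code).
from twisted.internet.utils import getProcessOutputAndValue


def remote_docker_compose_sketch(client_ip, docker_host, compose_file_path,
                                 *arguments):
    return getProcessOutputAndValue(
        b"ssh",
        [b"ubuntu@" + client_ip,
         b"DOCKER_HOST=" + docker_host,
         b"docker-compose", b"--file", compose_file_path] + list(arguments),
    )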