Example #1
    def test_attachments(self):
        """
        Test that the attachments are:
            - Filtered
            - Returned with the correct mountpoint
        """
        class MockConn(object):
            def __init__(self, volumes):
                self.requests = []
                self.volumes = volumes

            def get_all_volumes(self, filters):
                self.requests.append(filters)
                return self.volumes

        volume1 = self.Volume()
        volume1.attach_data = self.AttachmentSet()
        volume1.status = "in-use"
        volume1.attach_data.device = "/dev/sdg"
        volume1.size = 10

        volume2 = self.Volume()
        volume2.attach_data = self.AttachmentSet()
        volume2.attach_data.device = "/dev/sda"
        volume2.status = "in-use"
        volume2.size = 100
        volume2.iops = 100

        volume3 = self.Volume()
        volume3.status = "attaching"
        volume3.attach_data = self.AttachmentSet()
        volume3.size = 15

        volumes = [volume1, volume2, volume3]

        self.ec2_response._content = "i-1234"
        adapter = RepeatingTestAdapter(self.ec2_response)
        self.session.mount("http://169.254.169.254", adapter)

        with MockSession(self.session):
            cloud = Cloud()

        cloud._conn = MockConn(volumes)

        attachments = cloud.attachments

        with MockPathExists(["/dev/sda", "/dev/xvdg"]):
            self.assertItemsEqual(
                ["/dev/xvdg", "/dev/sda"],
                [attachment.device for attachment in attachments])

        self.assertEqual([True, True],
                         [attachment.persistent for attachment in attachments])

        self.assertEqual(1, len(cloud._conn.requests))
        self.assertDictEqual({"attachment.instance-id": "i-1234"},
                             cloud._conn.requests[0])
        self.assertSequenceEqual(
            [["EBS", "10 GB"], ["EBS", "100 GB", "100 PIOPS"]],
            [attachment.assets for attachment in attachments])
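
These tests rely on support helpers that are not shown here (RepeatingTestAdapter, PredictableTestAdapter, MockSession, MockPathExists, and the Volume/AttachmentSet stubs). As a rough guide to how the HTTP fixtures behave, the sketch below shows how such adapters could be written as requests transport adapters; the class names match the tests, but the bodies are assumptions rather than the suite's actual code. MockSession presumably patches whatever session Cloud() builds so that the mounted adapters answer instead of the network.

# Illustrative sketches only -- the real helpers may differ.
from requests.adapters import BaseAdapter


class RepeatingTestAdapter(BaseAdapter):
    """Serve the same canned response for every request."""
    def __init__(self, response):
        super(RepeatingTestAdapter, self).__init__()
        self.response = response

    def send(self, request, **kwargs):
        return self.response

    def close(self):
        pass


class PredictableTestAdapter(BaseAdapter):
    """Serve canned responses in order, one per request, so a test can
    assert that the list has been fully consumed."""
    def __init__(self, responses):
        super(PredictableTestAdapter, self).__init__()
        self.responses = list(responses)

    def send(self, request, **kwargs):
        return self.responses.pop(0)

    def close(self):
        pass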
Example #2
    def test_metadata(self):
        # Instance type
        response1 = requests.Response()
        response1.status_code = 200
        response1._content = six.b("m1.large")
        response1.encoding = "utf-8"

        # AZ
        response2 = requests.Response()
        response2.status_code = 200
        response2._content = six.b("us-east-1a")
        response2.encoding = "utf-8"
        # TODO: Add instance ID, and test request paths.

        adapter = PredictableTestAdapter(
            [self.ec2_response, response1, response2, response2])
        self.session.mount("http://169.254.169.254", adapter)

        with MockSession(self.session):
            cloud = Cloud()

        self.assertEqual("m1.large", cloud.instance_type)
        self.assertEqual("us-east-1a", cloud.availability_zone)
        self.assertEqual("us-east-1", cloud.location)
        self.assertEqual(0, len(adapter.responses))
        self.assertEqual("EC2", cloud.provider)
Example #3
    def test_rackspace(self):
        self.session.mount("http://169.254.169.254", UnreachableTestAdapter())
        self.session.mount("http://metadata", UnreachableTestAdapter())

        with MockSession(self.session), MockSubprocessCall(0, "Rackspace"):
            cloud = Cloud()

        self.assertEqual("RackspaceOpenCloud", cloud.__class__.__name__)
Example #4
    def test_metadata(self):
        # Instance type
        response1 = requests.Response()
        response1.status_code = 200
        response1._content = six.b("projects/1234/machineTypes/n1-standard-1-d")
        response1.encoding = "utf-8"

        # AZ
        response2 = requests.Response()
        response2.status_code = 200
        response2._content = six.b("projects/1234/zones/us-central1-b")
        response2.encoding = "utf-8"

        # Attachments
        response3 = requests.Response()
        response3.status_code = 200
        response3._content = six.b('[{"deviceName":"boot","index":0,"mode":"READ_WRITE","type":"EPHEMERAL"},{"deviceName":"ephemeral-disk-0","index":1,"mode":"READ_WRITE","type":"EPHEMERAL"},{"deviceName":"scalr-disk-1a043e80","index":2,"mode":"READ_WRITE","type":"PERSISTENT"}]')
        response3.encoding = "utf-8"


        adapter = PredictableTestAdapter([self.ec2_response, response1, response2, response2, response3])
        self.session.mount("http://metadata", adapter)

        with MockSession(self.session):
            cloud = Cloud()

        self.assertEqual("n1-standard-1-d", cloud.instance_type)
        self.assertEqual("us-central1-b", cloud.availability_zone)
        self.assertEqual("us-central1", cloud.location)
        self.assertEqual("GCE", cloud.provider)

        attachments = cloud.attachments

        with MockPathExists(["/dev/sda", "/dev/sdb", "/dev/sdc"]):
            self.assertSequenceEqual(["/dev/sda", "/dev/sdb", "/dev/sdc"],
                                     [attachment.device for attachment in attachments])

        self.assertSequenceEqual([False, False, True],
                                 [attachment.persistent for attachment in attachments])

        disk_structure = """{
  "creationTimestamp": "2013-09-30T01:14:23.599-07:00",
  "id": "7422554157413993697",
  "kind": "compute#disk",
  "name": "scalr-disk-1a043e80",
  "selfLink": "https://www.googleapis.com/compute/v1beta15/projects/scalr.com:scalr-labs/zones/us-central1-b/disks/scalr-disk-1a043e80",
  "sizeGb": "15",
  "status": "READY",
  "zone": "https://www.googleapis.com/compute/v1beta15/projects/scalr.com:scalr-labs/zones/us-central1-b"
}"""

        with MockSubprocessCall(0, disk_structure):
            self.assertEqual(["GCE Disk", "15 GB"], attachments[2].assets)

        self.assertEqual(0, len(adapter.responses))
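
The assets assertion implies that the persistent disk's description, returned here by MockSubprocessCall, is parsed for its sizeGb field. A hypothetical version of that parsing, consistent with the expected ["GCE Disk", "15 GB"]:

import json


def assets_from_disk_description(disk_json):
    # disk_json is a document like disk_structure above.
    disk = json.loads(disk_json)
    return ["GCE Disk", "{0} GB".format(disk["sizeGb"])]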
Example #5
    def test_gce(self):
        response = requests.Response()
        response.status_code = 200
        adapter = RepeatingTestAdapter(response)

        self.session.mount("http://metadata", adapter)
        self.session.mount("http://169.254.169.254", UnreachableTestAdapter())

        with MockSession(self.session), MockSubprocessCall(0, "Not Rackspace"):
            cloud = Cloud()

        self.assertEqual("GCE", cloud.__class__.__name__)
Example #6
    def test_error_propagation(self):
        response = requests.Response()
        response.status_code = 200
        adapter = RepeatingTestAdapter(response)
        self.session.mount("http://metadata", adapter)

        with MockSession(self.session):
            cloud = Cloud()

        response = requests.Response()
        response.status_code = 500
        adapter = RepeatingTestAdapter(response)
        self.session.mount("http://metadata", adapter)

        self.assertRaises(CloudAPIError, getattr, cloud, "location")
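
The test expects a failing metadata request to surface as CloudAPIError when a property such as location is read. Purely as an assumption about the Cloud internals, one way to get that behaviour is to wrap raise_for_status():

import requests


class CloudAPIError(Exception):
    """Placeholder for cloudbench's CloudAPIError in this sketch."""


def fetch_metadata(session, url):
    # Hypothetical helper: translate HTTP failures into CloudAPIError.
    response = session.get(url)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as exc:
        raise CloudAPIError(str(exc))
    return response.text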
Example #7
def main():
    parser = argparse.ArgumentParser(
        description="Benchmark your Cloud performance")
    parser.add_argument(
        "-c", "--config",
        help="Path to the cloudbench configuration file "
             "(defaults to {0})".format(DEFAULT_CONFIG_FILE),
        default=DEFAULT_CONFIG_FILE)

    args = parser.parse_args()

    config = configparser.ConfigParser({
        "fio": DEFAULT_FIO_PATH,
        "pidfile": DEFAULT_PID_FILE,
        "logfile": DEFAULT_LOG_FILE,
        "nobench": "",
        "size": DEFAULT_FILE_SIZE,
        "ramp": DEFAULT_RAMP_TIME,
        "duration": DEFAULT_DURATION,
        "retry_max": DEFAULT_RETRY_MAX,
        "retry_wait": DEFAULT_RETRY_WAIT,
        "retry_range": DEFAULT_RETRY_RANGE,
        "extra_assets": "",
    })
    config.add_section("environment")
    config.add_section("general")
    config.read(args.config)

    fio_bin = config.get("environment", "fio")
    pid_file = config.get("environment", "pidfile")
    log_file = config.get("environment", "logfile")
    no_bench = _cnf_get_list(config, "environment", "nobench")
    extra_assets = _cnf_get_list(config, "environment", "extra_assets")

    files_preserve = setup_logging(log_file)

    block_sizes = _cnf_get_list(config, "benchmarks", "blocksizes")
    depths = _cnf_get_list(config, "benchmarks", "depths")
    modes = _cnf_get_list(config, "benchmarks", "modes")

    size = config.get("general", "size")
    ramp = config.get("general", "ramp")
    duration = config.get("general", "duration")

    reporting_endpoint = config.get("reporting", "endpoint")
    reporting_username = config.get("reporting", "username")
    reporting_key = config.get("reporting", "apikey")

    # These three may fail, but that's fine: we haven't daemonized yet.
    reporting_retry_max = int(config.get("reporting", "retry_max"))
    reporting_retry_wait = int(config.get("reporting", "retry_wait"))
    reporting_retry_range = int(config.get("reporting", "retry_range"))

    logger.info("Cloudbench v{0}: starting".format(__version__))

    with DaemonContext(files_preserve=files_preserve,
                       pidfile=lockfile.pidlockfile.PIDLockFile(pid_file)):
        try:
            cloud = Cloud()
            benchmark_volumes = identify_benchmark_volumes(cloud, no_bench)

            api = Client(reporting_endpoint,
                         APIKeyAuth(reporting_username, reporting_key),
                         reporting_retry_max, reporting_retry_wait,
                         reporting_retry_range)

            logger.info("Provider: %s", cloud.provider)
            logger.info("Location: %s", cloud.location)
            logger.info("Instance type: %s", cloud.instance_type)
            logger.info("Number of volumes: %s", len(benchmark_volumes))
            for benchmark_volume in benchmark_volumes:
                logger.info(
                    "%s: %s, %s", benchmark_volume.device,
                    benchmark_volume.provider,
                    "Persistent" if benchmark_volume.persistent
                    else "Ephemeral")

            start_benchmark(cloud, api, benchmark_volumes, extra_assets,
                            fio_bin, block_sizes, depths, modes, size, ramp,
                            duration)
        except Exception as e:
            logger.critical("An error occurred: %s", e)
            response = getattr(e, "response", None)

            logger.exception("Fatal Exception")

            if response is not None:
                logger.critical("HTTP Error")
                logger.critical("URL: %s", response.request.url)
                logger.critical("Status: %s %s", response.status_code,
                                response.reason)
                logger.critical("Response: %s", response.text)

            logger.warning("Cloudbench v{0}: exiting".format(__version__))
            sys.exit(1)
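
For reference, a configuration file covering every section and option that main() reads could look like the example below. Only the section and option names are taken from the code above; all values are placeholders, the options given defaults in the ConfigParser constructor may be omitted, and the list syntax (comma-separated here) is an assumption about what _cnf_get_list accepts.

[environment]
fio = /usr/bin/fio
pidfile = /var/run/cloudbench.pid
logfile = /var/log/cloudbench.log
nobench = /dev/sda
extra_assets =

[general]
size = 10G
ramp = 30
duration = 60

[benchmarks]
blocksizes = 4k, 128k
depths = 1, 32
modes = read, write, randread, randwrite

[reporting]
endpoint = https://reporting.example.com/api
username = cloudbench
apikey = REPLACE_ME
retry_max = 5
retry_wait = 10
retry_range = 5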