Example #1
def configured_cluster_for_nodes(
    reactor, certificates, nodes, dataset_backend,
    dataset_backend_configuration, dataset_backend_config_file,
    provider=None
):
    """
    Get a ``Cluster`` with Flocker services running on the right nodes.

    :param reactor: The reactor.
    :param Certificates certificates: The certificates to install on the
        cluster.
    :param nodes: The ``ManagedNode``s on which to operate.
    :param NamedConstant dataset_backend: The ``DatasetBackend`` constant
        representing the dataset backend that the nodes will be configured to
        use when they are "started".
    :param dict dataset_backend_configuration: The backend-specific
        configuration the nodes will be given for their dataset backend.
    :param FilePath dataset_backend_config_file: A ``FilePath`` to the file
        in which the dataset backend configuration is stored.

    :returns: A ``Deferred`` which fires with ``Cluster`` when it is
        configured.
    """
    # XXX: There is duplication between the values here and those in
    # f.node.agents.test.blockdevicefactory.MINIMUM_ALLOCATABLE_SIZES. We want
    # the default volume size to be greater than or equal to the minimum
    # allocatable size.
    #
    # Ideally, the minimum allocatable size (and perhaps the default volume
    # size) would be something known by an object that represents the dataset
    # backend. Unfortunately:
    #  1. There is no such object
    #  2. There is existing confusion in the code around 'openstack' and
    #     'rackspace'
    #
    # Here, we special-case Rackspace (presumably) because it has a minimum
    # allocatable size that is different from other Openstack backends.
    #
    # FLOC-2584 also discusses this.
    default_volume_size = GiB(1)
    if dataset_backend_configuration.get('auth_plugin') == 'rackspace':
        default_volume_size = RACKSPACE_MINIMUM_VOLUME_SIZE

    cluster = Cluster(
        all_nodes=pvector(nodes),
        control_node=nodes[0],
        agent_nodes=nodes,
        dataset_backend=dataset_backend,
        default_volume_size=int(default_volume_size.to_Byte().value),
        certificates=certificates,
        dataset_backend_config_file=dataset_backend_config_file
    )

    configuring = perform(
        make_dispatcher(reactor),
        configure_cluster(cluster, dataset_backend_configuration, provider)
    )
    configuring.addCallback(lambda ignored: cluster)
    return configuring
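For reference, the ``default_volume_size`` handling above is plain bitmath arithmetic; a minimal standalone check (not Flocker-specific) is:

from bitmath import GiB

# GiB(1) expressed in bytes, as passed to Cluster(default_volume_size=...).
assert int(GiB(1).to_Byte().value) == 1073741824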
Example #2
def configured_cluster_for_nodes(
    reactor, certificates, nodes, dataset_backend,
    dataset_backend_configuration
):
    """
    Get a ``Cluster`` with Flocker services running on the right nodes.

    :param reactor: The reactor.
    :param Certificates certificates: The certificates to install on the
        cluster.
    :param nodes: The ``ManagedNode``s on which to operate.
    :param NamedConstant dataset_backend: The ``DatasetBackend`` constant
        representing the dataset backend that the nodes will be configured to
        use when they are "started".
    :param dict dataset_backend_configuration: The backend-specific
        configuration the nodes will be given for their dataset backend.

    :returns: A ``Deferred`` which fires with ``Cluster`` when it is
        configured.
    """
    default_volume_size = GiB(1)
    if dataset_backend_configuration.get('auth_plugin') == 'rackspace':
        default_volume_size = GiB(100)

    cluster = Cluster(
        all_nodes=pvector(nodes),
        control_node=nodes[0],
        agent_nodes=nodes,
        dataset_backend=dataset_backend,
        default_volume_size=int(default_volume_size.to_Byte().value),
        certificates=certificates
    )

    configuring = perform(
        make_dispatcher(reactor),
        configure_cluster(cluster, dataset_backend_configuration)
    )
    configuring.addCallback(lambda ignored: cluster)
    return configuring
Example #3
def parse_num(expression):
    """
    Parse a dataset size string such as ``10g`` or ``100kib`` into a
    usable size value.
    If the user doesn't supply a valid size, return
    the default size.

    :param expression: the dataset size expression to parse.
    """
    if not expression:
        return DEFAULT_SIZE
    if type(expression) is unicode:
        expression = str(expression)

    def _match(exp,
               search=re.compile(
                   r'^(\d+){1}([KMGTkmgt][IiBb]){0,1}([Bb]){0,1}').search):
        return bool(search(exp))

    if _match(expression):
        unit = expression.translate(None, "1234567890.")
        num = int(expression.replace(unit, ""))
        unit = unit.lower()
        if unit == 'tb' or unit == 't' or unit == 'tib':
            return TiB(num)
        elif unit == 'gb' or unit == 'g' or unit == 'gib':
            return GiB(num)
        elif unit == 'mb' or unit == 'm' or unit == 'mib':
            return MiB(num)
        elif unit == 'kb' or unit == 'k' or unit == 'kib':
            return KiB(num)
        elif unit == '':
            return Byte(num)
        else:
            return DEFAULT_SIZE
    else:
        return DEFAULT_SIZE
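A minimal usage sketch, assuming ``parse_num`` and the module's ``DEFAULT_SIZE`` constant are importable as shown above:

from bitmath import GiB, KiB

# Unit suffixes are case-insensitive and may be written as g, gb or gib, etc.
assert parse_num("10g") == GiB(10)
assert parse_num("100kib") == KiB(100)
# Anything that does not parse falls back to the default size.
assert parse_num("not-a-size") == DEFAULT_SIZE
assert parse_num(None) == DEFAULT_SIZE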
Example #4
    def parse_k8s_memory_value(memory_value):
        """Parse and convert Kubernetes specific memory value

        :param memory_value: memory value from Kubernetes manifest
        :type memory_value: str
        :raises NotImplementedError: raised if value postfix is unknown
        :return: parsed memory value
        :rtype: int
        """

        # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
        # https://medium.com/@betz.mark/understanding-resource-limits-in-kubernetes-memory-6b41e9a955f9
        _K8S_MEMORY_SUFFIXES_FIXED = ['E', 'P', 'T', 'G', 'M', 'K']
        _K8S_MEMORY_SUFFIXES_POWER = ['Ei', 'Pi', 'Ti', 'Gi', 'Mi', 'Ki']

        if type(memory_value) is str:
            # exponential notation e.g. 3e2 = 300
            if 'e' in memory_value:
                memory_value = float(memory_value)
            # check if power-of-two notation is used
            # it is important to check power-of-two first as fixed-point comparison would also match
            elif [
                    e for e in _K8S_MEMORY_SUFFIXES_POWER
                    if (e in memory_value)
            ]:
                if 'Ki' in memory_value:
                    memory_value = memory_value.strip('Ki')
                    memory_value = KiB(float(memory_value)).to_MB().value
                elif 'Mi' in memory_value:
                    memory_value = memory_value.strip('Mi')
                    memory_value = MiB(float(memory_value)).to_MB().value
                elif 'Gi' in memory_value:
                    memory_value = memory_value.strip('Gi')
                    memory_value = GiB(float(memory_value)).to_MB().value
                elif 'Ti' in memory_value:
                    memory_value = memory_value.strip('Ti')
                    memory_value = TiB(float(memory_value)).to_MB().value
                elif 'Pi' in memory_value:
                    memory_value = memory_value.strip('Pi')
                    memory_value = PiB(float(memory_value)).to_MB().value
                elif 'Ei' in memory_value:
                    memory_value = memory_value.strip('Ei')
                    memory_value = EiB(float(memory_value)).to_MB().value
                else:
                    raise NotImplementedError(
                        'Memory value unit of {} not implemented'.format(
                            memory_value))
            # check if fixed-point integer notation is used
            elif [
                    e for e in _K8S_MEMORY_SUFFIXES_FIXED
                    if (e in memory_value)
            ]:
                if 'M' in memory_value:
                    memory_value = memory_value.strip('M')
                elif 'K' in memory_value:
                    memory_value = memory_value.strip('K')
                    memory_value = kB(float(memory_value)).to_MB().value
                elif 'G' in memory_value:
                    memory_value = memory_value.strip('G')
                    memory_value = GB(float(memory_value)).to_MB().value
                elif 'T' in memory_value:
                    memory_value = memory_value.strip('T')
                    memory_value = TB(float(memory_value)).to_MB().value
                elif 'P' in memory_value:
                    memory_value = memory_value.strip('P')
                    memory_value = PB(float(memory_value)).to_MB().value
                elif 'E' in memory_value:
                    memory_value = memory_value.strip('E')
                    memory_value = EB(float(memory_value)).to_MB().value
                else:
                    raise NotImplementedError(
                        'Memory value unit of {} not implemented'.format(
                            memory_value))
        # direct definition in bytes - convert to MB
        else:
            memory_value = memory_value / float('1e+6')

        return int(memory_value)
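A rough sanity check of the conversion (illustrative only; it assumes the helper is reachable as a plain function and returns integer megabytes):

# Power-of-two suffixes are converted via bitmath; fixed-point suffixes use
# decimal units; a plain number is treated as bytes.
assert parse_k8s_memory_value('123Mi') == 128    # 123 * 2**20 bytes ~= 128 MB
assert parse_k8s_memory_value('1Gi') == 1073     # 2**30 bytes ~= 1073 MB
assert parse_k8s_memory_value('500M') == 500     # already in (decimal) megabytes
assert parse_k8s_memory_value(128974848) == 128  # bytes / 1e6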
Example #5
 def create_disk(self, disk_name, size_in_gibs):
     size_in_bytes = int(GiB(size_in_gibs).to_Byte().value)
     link = Vhd.create_blank_vhd(self._storage_client, self._disk_container,
                                 disk_name + '.vhd', size_in_bytes)
     return link
Example #6
def to_gib(value):
    b = parse_string_unsafe(value).to_Byte().bytes
    return GiB(bytes=b)
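For example, assuming ``parse_string_unsafe`` is bitmath's own lenient parser (as the helper above implies):

from bitmath import GiB

assert to_gib('2048 MiB') == GiB(2)
assert to_gib(1073741824) == GiB(1)  # a bare number is taken as bytes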
Example #7
def configured_cluster_for_nodes(
    reactor, certificates, nodes, dataset_backend,
    dataset_backend_configuration, dataset_backend_config_file,
    provider=None, logging_config=None
):
    """
    Get a ``Cluster`` with Flocker services running on the right nodes.

    :param reactor: The reactor.
    :param Certificates certificates: The certificates to install on the
        cluster.
    :param nodes: The ``ManagedNode``s on which to operate.
    :param NamedConstant dataset_backend: The ``DatasetBackend`` constant
        representing the dataset backend that the nodes will be configured to
        use when they are "started".
    :param dict dataset_backend_configuration: The backend-specific
        configuration the nodes will be given for their dataset backend.
    :param FilePath dataset_backend_config_file: A ``FilePath`` to the file
        in which the dataset backend configuration is stored.
    :param bytes provider: The provider of the nodes: ``aws``, ``rackspace``,
        or ``managed``.
    :param dict logging_config: A Python logging configuration dictionary,
        following the structure of PEP 391.

    :returns: A ``Deferred`` which fires with ``Cluster`` when it is
        configured.
    """
    # XXX: There is duplication between the values here and those in
    # f.node.agents.test.blockdevicefactory.MINIMUM_ALLOCATABLE_SIZES. We want
    # the default volume size to be greater than or equal to the minimum
    # allocatable size.
    #
    # Ideally, the minimum allocatable size (and perhaps the default volume
    # size) would be something known by an object that represents the dataset
    # backend. Unfortunately:
    #  1. There is no such object
    #  2. There is existing confusion in the code around 'openstack' and
    #     'rackspace'
    #
    # Here, we special-case Rackspace (presumably) because it has a minimum
    # allocatable size that is different from other Openstack backends.
    #
    # FLOC-2584 also discusses this.
    default_volume_size = GiB(1)
    if dataset_backend_configuration.get('auth_plugin') == 'rackspace':
        default_volume_size = RACKSPACE_MINIMUM_VOLUME_SIZE

    cluster = Cluster(
        all_nodes=pvector(nodes),
        control_node=nodes[0],
        agent_nodes=nodes,
        dataset_backend=dataset_backend,
        default_volume_size=int(default_volume_size.to_Byte().value),
        certificates=certificates,
        dataset_backend_config_file=dataset_backend_config_file
    )

    configuring = perform(
        make_dispatcher(reactor),
        configure_cluster(
            cluster, dataset_backend_configuration, provider, logging_config
        )
    )
    configuring.addCallback(lambda ignored: cluster)
    return configuring
Example #8
class CIOBlockDeviceAPIInterfaceTests(
        make_iblockdeviceapi_tests(
            blockdevice_api_factory=(
                lambda test_case: GetCioApiWithCleanup(test_case)
            ),
            minimum_allocatable_size=int(GiB(8).to_Byte().value),
            device_allocation_unit=int(GiB(8).to_Byte().value),
            unknown_blockdevice_id_factory=lambda test: unicode(uuid4())
        )
):
    """
    Interface adherence Tests for ``CIOBlockDeviceAPI``
    """

    def test_create_volume_gold_profile(self):
        """
        Requesting ``gold`` profile during volume creation honors
        ``gold`` attributes.
        """
        self._assert_create_volume_with_mandatory_profile(
            MandatoryProfiles.GOLD)

    def test_create_too_large_volume_with_profile(self):
        """
        Create a volume so large that none of the ``MandatoryProfiles``
        can be assigned to it.
        """
        self.assertRaises(Exception,
                          self._assert_create_volume_with_mandatory_profile,
                          MandatoryProfiles.GOLD,
                          size_GiB=1024 * 1024)

    def test_create_volume_silver_profile(self):
        """
        Requesting ``silver`` profile during volume creation honors
        ``silver`` attributes.
        """
        self._assert_create_volume_with_mandatory_profile(
            MandatoryProfiles.SILVER)

    def test_create_too_large_volume_silver_profile(self):
        """
        Too large volume (> 64TiB) for ``silver`` profile.
        """
        self.assertRaises(Exception,
                          self._assert_create_volume_with_mandatory_profile,
                          MandatoryProfiles.SILVER,
                          size_GiB=1024 * 1024)

    def test_create_volume_bronze_profile(self):
        """
        Requesting ``bronze`` profile during volume creation honors
        ``bronze`` attributes.
        """
        self._assert_create_volume_with_mandatory_profile(
            MandatoryProfiles.BRONZE)

    def _assert_create_volume_with_mandatory_profile(self, profile,
                                                     created_profile=None,
                                                     size_GiB=4):
        """
        Volume created with given profile has the attributes
        expected from the profile.

        :param ValueConstant profile: Name of profile to use for creation.
        :param ValueConstant created_profile: Name of the profile volume is
            expected to be created with.
        :param int size_GiB: Size of volume to be created.
        """
        if created_profile is None:
            created_profile = profile
        volume1 = self.api.create_volume_with_profile(
            dataset_id=uuid4(),
            size=self.minimum_allocatable_size * size_GiB,
            profile_name=profile.value)


class CIOBlockDeviceAPIImplementationTests(SynchronousTestCase):
    """
    Implementation specific tests for ``CIOBlockDeviceAPI``.
    """
    def test_cio_api(self):
        """
Example #9
from bitmath import GiB
from flocker.node.agents.testtools import get_blockdeviceapi_with_cleanup
from flocker.node.agents.testtools import make_iblockdeviceapi_tests
from flocker.node.agents.testtools import make_icloudapi_tests
from flocker.node.agents.testtools import require_backend
import six


@require_backend('digitalocean_flocker_plugin')
def do_blockdeviceapi_for_test(test_case):
    return get_blockdeviceapi_with_cleanup(test_case)


MIN_ALLOCATION_SIZE = GiB(1).to_Byte().value

MIN_ALLOCATION_UNIT = GiB(1).to_Byte().value


class DigitalOceanBlockDeviceAPITests(
        make_iblockdeviceapi_tests(
            blockdevice_api_factory=do_blockdeviceapi_for_test,
            unknown_blockdevice_id_factory=lambda x: six.text_type(2147483647))
):
    """Functional tests for DigitalOcean ``IBlockDeviceAPI`` implementation
        """


class DigitalOceanCloudAPITests(
        make_icloudapi_tests(
            blockdevice_api_factory=do_blockdeviceapi_for_test)):
    """Functional tests for DigitalOcean ``ICloudAPI`` implementation"""
Example #10
FAKE_CLUSTER_ID = UUID1_STR

VOL_NAME = '{}{}_{}'.format(
    driver.VOL_NAME_FLOCKER_PREFIX,
    UUID1_STR,
    UUID1_SLUG,
)
VOL_NAME_WITH_FAKE_CLUSTER_ID = '{}{}_{}'.format(
    driver.VOL_NAME_FLOCKER_PREFIX,
    UUID1_STR,
    UUID2_SLUG_FAKE,
)

BDV1 = BlockDeviceVolume(
    blockdevice_id=unicode(UUID1_STR),
    size=int(GiB(16).to_Byte().value),
    attached_to=None,
    dataset_id=UUID(UUID1_STR)
)

BDV3 = BlockDeviceVolume(
    blockdevice_id=unicode(UUID3_STR),
    size=int(GiB(32).to_Byte().value),
    attached_to=None,
    dataset_id=UUID(UUID3_STR)
)

IS_MULTIPATH_EXIST = 'ibm_storage_flocker_driver.lib.host_actions.' \
                     'HostActions.is_multipath_active'
LIST_VOLUMES = 'ibm_storage_flocker_driver.ibm_storage_blockdevice.' \
               'IBMStorageBlockDeviceAPI.list_volumes'
Example #11
    def _gibytes_to_bytes(self, size):
        return int(GiB(size).to_Byte().value)
Example #12
File: gce.py  Project: teazj/flocker
    def list_volumes(self):
        """
        For operations that can return long lists of results, GCE will
        require you to page through the result set, retrieving one
        page of results for each query.  You are done paging when the
        returned ``pageToken`` is ``None``.
        """
        with start_action(
            action_type=u"flocker:node:agents:gce:list_volumes",
        ) as action:
            disks = []
            page_token = None
            done = False
            while not done:
                response = self._operations.list_disks(
                    page_size=self._page_size,
                    page_token=page_token,
                )

                disks.extend(
                    response.get('items', [])
                )

                page_token = response.get('nextPageToken')
                done = not page_token

            # 'description' will not even be in the dictionary if no
            # description was specified.
            def disk_in_cluster(disk):
                if disk['name'].startswith(_PREFIX):
                    if 'description' in disk:
                        return (disk['description'] ==
                                self._disk_resource_description())
                    else:
                        Message.log(
                            message_type=u'flocker:node:agents:gce:'
                                         u'list_volumes:suspicious_disk',
                            log_level=u'ERROR',
                            message=u'Disk missing description, yet name '
                                    u'appears as if it came from the flocker '
                                    u'GCE dataset backend.',
                            disk=disk
                        )
                        return False
                return False

            ignored_volumes = []
            cluster_volumes = []
            for disk in disks:
                if disk_in_cluster(disk):
                    cluster_volumes.append(
                        BlockDeviceVolume(
                            blockdevice_id=unicode(disk['name']),
                            size=int(GiB(int(disk['sizeGb'])).to_Byte()),
                            attached_to=_extract_attached_to(disk),
                            dataset_id=_blockdevice_id_to_dataset_id(
                                disk['name'])
                        )
                    )
                else:
                    ignored_volumes.append(
                        {'name': disk['name'],
                         'description': disk.get('description')})

            Message.log(
                message_type=u'flocker:node:agents:gce:list_volumes:ignored',
                ignored_volumes=ignored_volumes
            )
            action.add_success_fields(
                cluster_volumes=list(
                    {
                        'blockdevice_id': v.blockdevice_id,
                        'size': v.size,
                        'attached_to': v.attached_to,
                        'dataset_id': unicode(v.dataset_id),
                    } for v in cluster_volumes)
            )
            return cluster_volumes
Example #13
class DigitalOceanDeviceAPI(object):
    """
    A block device implementation for DigitalOcean block storage.

    The following limitations apply:

    - You need separate flocker clusters per region because volumes cannot be
      moved between regions.
    - Only five volumes can be attached to a droplet at any given time.
    - It is possible for multiple flocker clusters to coexist, but they must
      not share dataset IDs.

    :ivar six.text_type _cluster_id: ID of the cluster
    :ivar Manager _manager: The DO manager object
    :ivar Metadata _metadata: Metadata of the node running the agent (nullable)
    :ivar float _poll: Interval for polling state changes of actions, in seconds
    :ivar float _timeout: Maximum duration to wait for an action to complete
    """

    # One GiB expressed in bytes; used as the allocation unit.
    _ONE_GIB = int(GiB(1).to_Byte().value)

    _PREFIX = six.text_type("flocker-v1-")  # Prefix for volume IDs

    # We reassign the Volume and Action class as attributes to help
    # ergonomics in our test suite.
    Volume = Vol
    Action = Act

    def __init__(self, cluster_id, token):
        self._cluster_id = six.text_type(cluster_id)
        self._manager = Manager(token=token)
        self._metadata = None
        self._poll = 1
        self._timeout = 60

    @property
    def metadata(self):
        """The metadata of the node running the agent. Lazily resolved
        :return: The metadata object describing the node.
        :rtype: Metadata
        """
        if not self._metadata:
            self._metadata = Metadata()
        if not self._metadata.droplet_id:
            with start_action(action_type=six.text_type(
                    "flocker:node:agents:do:load_metadata")) as a:
                self._metadata.load()
                a.add_success_fields(
                    droplet_metadata={
                        'droplet_id': self._metadata.droplet_id,
                        'hostname': self._metadata.hostname,
                        'region': self._metadata.region
                    })
        return self._metadata

    @property
    def volume_description(self):
        """ Returns the description this flocker cluster should use

        :return: The cluster ID property string to use as a description
        :rtype: six.text_type
        """
        return six.text_type("flocker-v1-cluster-id: {cluster_id}").format(
            cluster_id=self._cluster_id)

    def allocation_unit(self):
        return self._ONE_GIB

    def compute_instance_id(self):
        return six.text_type(self.metadata.droplet_id)

    def _get_volume(self, blockdevice_id):
        """Return the DigitalOcean volume associated with this block device ID

        :param blockdevice_id: The block device ID to look up
        :return: A ``digitalocean.Volume`` instance describing the block device
        :rtype: digitalocean.Volume.Volume
        """
        with start_action(
                action_type=six.text_type("flocker:node:agents:do:get_volume"),
                blockdevice_id=blockdevice_id) as a:
            vol = self._manager.get_volume(blockdevice_id)
            a.add_success_fields(
                volume={
                    'name': vol.name,
                    'region': vol.region["slug"],
                    'description': vol.description,
                    'attached_to': vol.droplet_ids
                })
            return vol

    @classmethod
    def _unmangle_dataset(cls, vol_name):
        """Unmangles the flocker dataset from a digital ocean volume name

        :param vol_name: The name of the digitalocean volume
        :return: The dataset UUID encoded therein or None, if not a flocker
                 volume
        :rtype: UUID
        """
        if vol_name and vol_name.startswith(cls._PREFIX):
            return UUID(vol_name[len(cls._PREFIX):])
        return None

    @classmethod
    def _mangle_dataset(cls, dataset_id):
        """Mangles a flocker dataset UUID into a digital ocean volume name.

        :param dataset_id: The UUID of the dataset
        :return: The volume name to use for the digitalocean volume
        """
        return cls._PREFIX + dataset_id.hex

    @staticmethod
    def _to_block_device_volume(do_volume):
        """Turns a digitalocean volume description into a flocker one

        :param do_volume: The digital ocean volume
        :type do_volume: digitalocean.Volume.Volume
        :return: The corresponding BlockDeviceVolume
        :rtype: BlockDeviceVolume
        """
        size = int(GiB(do_volume.size_gigabytes).to_Byte().value)
        attached = None
        if do_volume.droplet_ids:
            attached = six.text_type(do_volume.droplet_ids[0])
        dataset = DigitalOceanDeviceAPI._unmangle_dataset(do_volume.name)

        return BlockDeviceVolume(blockdevice_id=six.text_type(do_volume.id),
                                 size=size,
                                 attached_to=attached,
                                 dataset_id=dataset)

    def _categorize_do_volume(self, result_dict, vol):
        """ Reduce function to categorise whether a volume is usable.
        :param result_dict: A dictionary with three keys: ignored,
                            wrong_cluster, and okay
        :type result_dict: dict[str, list[digitalocean.Volume.Volume]]
        :param vol: A digitalocean volume
        :type vol: digitalocean.Volume.Volume
        :return: The result_dict with vol sorted into the correct slot
        :rtype: dict[str, list[digitalocean.Volume.Volume]]
        """
        if not six.text_type(vol.name).startswith(self._PREFIX):
            result_dict["ignored"].append(vol)
        elif six.text_type(vol.description) != self.volume_description:
            result_dict["wrong_cluster"].append(vol)
        else:
            result_dict["okay"].append(vol)
        return result_dict

    def _await_action_id(self, action_id):
        """Waits for an operation (specified by it's id) to complete

        :param action_id: The identifier of the action
        :type action_id: int
        :return: Whether the action was successful
        :rtype: bool
        """
        action = self.Action.get_object(self._manager.token, action_id)
        return self._await_action(action)

    def _await_action(self, action):
        """Waits for an operation to complete

        :param action: A action object to operate on
        :type action: ``digitalocean.Action.Action``
        :return: Whether the action was successful
        :rtype: bool
        """
        if action and action.status == 'completed':
            return True
        elif not action:
            return False
        with start_action(
                action_type=six.text_type('flocker:node:agents:do:await'),
                do_action_type=action.type,
                do_action_id=action.id) as ac:
            i = self._iterations_until(
                lambda x: not x or x.status != 'in-progress',
                lambda x: x.load_directly(), (action, ))

            if action.status == 'completed':
                ac.add_success_fields(iterations=i,
                                      do_action_status='completed')
            else:
                Message.log(message_type=six.text_type(
                    'flocker:node:agents:do:await:err'),
                            log_level=six.text_type('ERROR'),
                            message=six.text_type('Wait unsuccessful'),
                            iterations=i,
                            do_action_status=action.status)
                if action.status == 'in-progress':
                    raise DOException('Wait timeout')
                else:
                    raise DOException(
                        six.text_type('Action failed ({r})').format(
                            r=action.status))

        return action and action.status == 'completed'

    def _iterations_until(self, completed, update_state, argument):
        """ Poll for a state change to complete callable-s

        :param completed: A callable accepting argument, returning true if the
        state change has successfully completed.
        :param update_state: The action to execute in order to poll for a state
        change
        :param argument: The arguments on which to execute both the check and
        the action. Probably a tuple.
        :return: The number of iterations taken
        :rtype: int
        """

        if completed(*argument):
            return 0
        s = scheduler(time.time, time.sleep)
        i = 0
        started_at = time.time()
        while not completed(*argument) and not self._has_timed_out(started_at):
            delta = max(
                0, min(self._poll, self._timeout - (time.time() - started_at)))
            s.enter(delta, 0, update_state, argument)
            s.run()
            i += 1
        return i

    def _has_timed_out(self, from_time):
        """ Compare the current time to from_time and check for timeout.

        :param from_time: The time when the operation was started
        :return: Whether a timeout has occurred
        :rtype: bool
        """
        return time.time() - from_time >= self._timeout

    def list_volumes(self):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:list_volumes")) as a:
            res = reduce(
                self._categorize_do_volume, self._manager.get_all_volumes(),
                dict(wrong_cluster=list(), ignored=list(), okay=list()))

            if res["ignored"]:
                ty = six.text_type(
                    "flocker:node:agents:do:list_volumes:ignored")
                msg = six.text_type("Ignored {num} unrelated volumes").format(
                    num=len(res["ignored"]))
                Message.log(message_type=ty,
                            log_level=six.text_type("INFO"),
                            message=msg,
                            ignored_volumes=res["ignored"])

            if res["wrong_cluster"]:
                ty = six.text_type("flocker:node:agents:do") \
                     + six.text_type(":list_volumes:suspicious_disk")
                msg = six.text_type("Volume follows naming convention but") \
                    + six.text_type(" is not owned by our cluster.")
                for volume in res["wrong_cluster"]:
                    Message.log(message_type=ty,
                                log_level=six.text_type("ERROR"),
                                message=msg,
                                volume=volume.name,
                                description=volume.description)

            volumes = map(self._to_block_device_volume, res["okay"])
            a.add_success_fields(cluster_volumes=list(
                {
                    'blockdevice_id': v.blockdevice_id,
                    'size': v.size,
                    'attached_to': v.attached_to,
                    'dataset_id': six.text_type(v.dataset_id),
                } for v in volumes))
            return volumes

    def create_volume(self, dataset_id, size):
        gib = Byte(size).to_GiB()
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:create_volume"),
                          dataset_id=six.text_type(dataset_id),
                          size=size) as a:
            vol = self.Volume(token=self._manager.token)
            vol.name = self._mangle_dataset(dataset_id)
            vol.size_gigabytes = int(gib.value)
            vol.region = self.metadata.region
            vol.description = self.volume_description
            vol.create()
            a.add_success_fields(volume={
                'blockdevice_id': vol.id,
                'region': vol.region
            })
            return self._to_block_device_volume(vol)

    def destroy_volume(self, blockdevice_id):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:destroy_volume"),
                          blockdevice_id=blockdevice_id):
            try:
                vol = self._get_volume(blockdevice_id)
                if vol.droplet_ids:
                    # need to detach prior to deletion
                    ty = six.text_type('flocker:node:agents:do') + \
                         six.text_type(':destroy:detach_needed')
                    Message.log(message_type=ty,
                                log_level=six.text_type('INFO'),
                                message=six.text_type(
                                    'Volume needs to be detached first'),
                                volume=vol.id,
                                attached_to=vol.droplet_ids[0])
                    r = vol.detach(vol.droplet_ids[0], vol.region['slug'])
                    self._await_action_id(r['action']['id'])

                vol.destroy()
            except NotFoundError as _:
                raise UnknownVolume(blockdevice_id)

    def attach_volume(self, blockdevice_id, attach_to):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:attach_volume"),
                          blockdevice_id=blockdevice_id,
                          droplet_id=attach_to):
            try:
                vol = self._get_volume(blockdevice_id)
                if vol.droplet_ids:
                    raise AlreadyAttachedVolume(blockdevice_id)
                r = vol.attach(attach_to, vol.region["slug"])
                if self._await_action_id(r['action']['id']):
                    vol.droplet_ids = [attach_to]
                return self._to_block_device_volume(vol)
            except NotFoundError as _:
                raise UnknownVolume(blockdevice_id)

    def detach_volume(self, blockdevice_id):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:detach_volume"),
                          blockdevice_id=blockdevice_id) as a:
            try:
                vol = self._get_volume(blockdevice_id)
                if not vol.droplet_ids:
                    raise UnattachedVolume(blockdevice_id)
                detach_from = vol.droplet_ids[0]
                region = vol.region["slug"]
                r = vol.detach(detach_from, region)

                if self._await_action_id(r['action']['id']):
                    vol.droplet_ids = None
                a.add_success_fields(detached_from={
                    'droplet_id': detach_from,
                    'region': region
                })
                return self._to_block_device_volume(vol)
            except NotFoundError as _:
                raise UnknownVolume(blockdevice_id)

    def get_device_path(self, blockdevice_id):
        try:
            vol = self._get_volume(blockdevice_id)
            path = FilePath(
                six.text_type("/dev/disk/by-id/scsi-0DO_Volume_{name}").format(
                    name=vol.name))

            # Even if we are not attached, the agent needs to know the
            # expected path for the convergence algorithm
            # FIXME: The functional tests seem to indicate otherwise
            if not vol.droplet_ids:
                # return path
                raise UnattachedVolume(blockdevice_id)

            # But if we are attached, we might need to resolve the symlink
            # noinspection PyBroadException
            try:
                return path.realpath()
            except Exception as _:
                return path

        except NotFoundError as _:
            raise UnknownVolume(blockdevice_id)

    def list_live_nodes(self):
        return map(
            lambda x: six.text_type(x.id),
            filter(lambda x: x.status == 'active',
                   self._manager.get_all_droplets()))

    def start_node(self, compute_instance_id):
        droplet = self._manager.get_droplet(compute_instance_id)
        if droplet.status != 'active':
            action = droplet.power_on(return_dict=False)
            self._await_action(action)
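The ``_mangle_dataset`` / ``_unmangle_dataset`` pair above is a pure naming convention; a short round-trip sketch (the UUID is arbitrary):

from uuid import UUID

dataset_id = UUID('0dcbcbfe-4b28-4fd5-b261-1a4e80f00f69')
name = DigitalOceanDeviceAPI._mangle_dataset(dataset_id)
# name == u'flocker-v1-0dcbcbfe4b284fd5b2611a4e80f00f69'
assert DigitalOceanDeviceAPI._unmangle_dataset(name) == dataset_id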
Example #14
from klein import Klein

from ..restapi import structured
from ..control._config import dataset_id_from_name
from ..apiclient import DatasetAlreadyExists

SCHEMA_BASE = FilePath(__file__).sibling(b'schema')
SCHEMAS = {
    b'/types.json':
    yaml.safe_load(SCHEMA_BASE.child(b'types.yml').getContent()),
    b'/endpoints.json':
    yaml.safe_load(SCHEMA_BASE.child(b'endpoints.yml').getContent()),
}

# The default size of a created volume:
DEFAULT_SIZE = int(GiB(100).to_Byte().value)


def _endpoint(name):
    """
    Decorator factory for API endpoints, adding appropriate JSON in/out
    encoding.

    :param unicode name: The name of the endpoint in the schema.

    :return: Decorator for a method.
    """
    def decorator(f):
        @wraps(f)
        @structured(
            inputSchema={},
Example #15
 def allocation_unit(self):
     """
     Return a fixed allocation_unit for now; one which we observe
     to work on AWS.
     """
     return int(GiB(1).to_Byte().value)
Example #16
SCHEMAS = {
    b'/types.json': yaml.safe_load(
        SCHEMA_BASE.child(b'types.yml').getContent()),
    b'/endpoints.json': yaml.safe_load(
        SCHEMA_BASE.child(b'endpoints.yml').getContent()),
    }
# Metadata field we use to store volume names:
NAME_FIELD = u"name"

# The default size of a created volume. Pick a number that isn't the same
# as devicemapper loopback size (100GiB) so we don't trigger
# https://clusterhq.atlassian.net/browse/FLOC-2889 and that is large
# enough to hit Rackspace minimums. This is, obviously, not ideal.
DEFAULT_SIZE = RACKSPACE_MINIMUM_VOLUME_SIZE
if DEFAULT_SIZE == DEVICEMAPPER_LOOPBACK_SIZE:
    DEFAULT_SIZE = DEFAULT_SIZE + GiB(1)
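# A quick illustration of why the check above works: bitmath quantities
# support direct comparison and arithmetic across units, e.g.
#     GiB(100) + GiB(1) == GiB(101)  and  GiB(1) == MiB(1024)
# (this assumes DEFAULT_SIZE and DEVICEMAPPER_LOOPBACK_SIZE are bitmath
# quantities, as the ``+ GiB(1)`` bump implies).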


# A function to help parse size expressions for size opt
def parse_num(expression):
    """
    Parse a dataset size string such as ``10g`` or ``100kib`` into a
    usable size value.
    If the user doesn't supply a valid size, return
    the default size.

    :param expression: the dataset size expression to parse.
    """
    if not expression:
        return DEFAULT_SIZE
    if type(expression) is unicode:
Example #17
def test_sbatch_arguments_missing_required():
    with pytest.raises(ValueError):
        SbatchArguments(params=AllocationParameters(nodes=1,
                                                    cores=2,
                                                    memory_per_node=GiB(1)))
Example #18
            'Please set FLOCKER_FUNCTIONAL_TEST environment variable to '
            'run storage backend functional tests.'
        )

    try:
        api = get_blockdeviceapi(provider)
    except InvalidConfig as e:
        raise SkipTest(str(e))
    test_case.addCleanup(detach_destroy_volumes, api)
    return api


DEVICE_ALLOCATION_UNITS = {
    # Our redhat-openstack test platform uses a ScaleIO backend which
    # allocates devices in 8GiB intervals
    'redhat-openstack': GiB(8),
}


def get_device_allocation_unit():
    """
    Return a provider specific device allocation unit.

    This is mostly OpenStack / Cinder specific and represents the
    allocation interval used by the Cinder storage provider. For example:
    you ask Cinder for a 1GiB or 7GiB volume;
    the Cinder driver creates an 8GiB block device;
    the operating system sees an 8GiB device when it is attached;
    yet the Cinder API reports a 1GiB or 7GiB volume.

    :returns: An ``int`` allocation size in bytes for a
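A hedged sketch of the rounding behaviour that docstring describes; the helper name is illustrative and not part of the Flocker API:

from bitmath import GiB

def round_up_to_allocation_unit(requested_bytes, unit_bytes):
    # Round the requested size up to the next multiple of the allocation unit.
    return ((requested_bytes + unit_bytes - 1) // unit_bytes) * unit_bytes

# Asking a ScaleIO-backed Cinder for 7GiB actually allocates an 8GiB device.
assert round_up_to_allocation_unit(
    int(GiB(7).to_Byte().value),
    int(GiB(8).to_Byte().value)) == int(GiB(8).to_Byte().value)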
Example #19
class HedvigProfiledBlockDeviceAPIInterfaceTests(
        make_iprofiledblockdeviceapi_tests(profiled_blockdevice_api_factory=(
            lambda test_case: GetTestHedvigStorage(test_case)),
                                           dataset_size=int(
                                               GiB(1).to_Byte().value))):
    """
Example #20
            'Please set FLOCKER_FUNCTIONAL_TEST environment variable to '
            'run storage backend functional tests.'
        )

    try:
        api = get_blockdeviceapi(provider)
    except InvalidConfig as e:
        raise SkipTest(str(e))
    test_case.addCleanup(detach_destroy_volumes, api)
    return api


DEVICE_ALLOCATION_UNITS = {
    # Our redhat-openstack test platform uses a ScaleIO backend which
    # allocates devices in 8GiB intervals
    'redhat-openstack': GiB(8),
}


def get_device_allocation_unit():
    """
    Return a provider specific device allocation unit.

    This is mostly OpenStack / Cinder specific and represents the
    allocation interval used by the Cinder storage provider. For example:
    you ask Cinder for a 1GiB or 7GiB volume;
    the Cinder driver creates an 8GiB block device;
    the operating system sees an 8GiB device when it is attached;
    yet the Cinder API reports a 1GiB or 7GiB volume.

    :returns: An ``int`` allocation size in bytes for a
Example #21
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################

from uuid import uuid4
from bitmath import GiB, MiB
from flocker.node.agents.test.test_blockdevice import (
    make_iblockdeviceapi_tests, )
from ibm_storage_flocker_driver.testtools_ibm_storage_flocker_driver import \
    get_ibm_storage_blockdevice_api_for_test

# Smallest volume to create in tests
MIN_ALLOCATION_SIZE = int(GiB(1).to_Byte().value)

# Minimum unit of volume allocation
MIN_ALLOCATION_UNIT = int(MiB(1).to_Byte().value)


class IBMStorageBlockDeviceAPITests(
        make_iblockdeviceapi_tests(
            blockdevice_api_factory=(
                lambda test_case: get_ibm_storage_blockdevice_api_for_test(
                    uuid4(), test_case)),
            minimum_allocatable_size=MIN_ALLOCATION_SIZE,
            device_allocation_unit=MIN_ALLOCATION_UNIT,
            unknown_blockdevice_id_factory=lambda test: unicode(uuid4()))):
    """
    Basic interface tests for ``IBMStorageBlockDeviceAPITests``
Example #22
from ..control._config import dataset_id_from_name
from ..apiclient import DatasetAlreadyExists

SCHEMA_BASE = FilePath(__file__).sibling(b'schema')
SCHEMAS = {
    b'/types.json':
    yaml.safe_load(SCHEMA_BASE.child(b'types.yml').getContent()),
    b'/endpoints.json':
    yaml.safe_load(SCHEMA_BASE.child(b'endpoints.yml').getContent()),
}

# The default size of a created volume. Pick a number that isn't the same
# as devicemapper loopback size (100GiB) so we don't trigger
# https://clusterhq.atlassian.net/browse/FLOC-2889 and that is large
# enough to hit Rackspace minimums. This is, obviously, not ideal.
DEFAULT_SIZE = int(GiB(75).to_Byte().value)


def _endpoint(name, ignore_body=False):
    """
    Decorator factory for API endpoints, adding appropriate JSON in/out
    encoding.

    :param unicode name: The name of the endpoint in the schema.
    :param ignore_body: If true, ignore the contents of the body for all
        HTTP methods, including ``POST``. By default the body is only
        ignored for ``GET`` and ``HEAD``.

    :return: Decorator for a method.
    """
    def decorator(f):
Example #23
from twisted.internet.error import ConnectionDone
from twisted.internet import reactor
from twisted.trial.unittest import SynchronousTestCase, SkipTest
from twisted.internet.protocol import Factory, ProcessProtocol, Protocol
from twisted.test.proto_helpers import MemoryReactor
from twisted.python.procutils import which
from twisted.trial.unittest import TestCase
from twisted.python.logfile import LogFile

from .. import __version__
from ..common.script import (FlockerScriptRunner, ICommandLineScript)

# This is currently set to the minimum size for a SATA based Rackspace Cloud
# Block Storage volume. See:
# * http://www.rackspace.com/knowledge_center/product-faq/cloud-block-storage
REALISTIC_BLOCKDEVICE_SIZE = int(GiB(100).to_Byte().value)


@implementer(IProcessTransport)
class FakeProcessTransport(object):
    """
    Mock process transport to observe signals sent to a process.

    @ivar signals: L{list} of signals sent to process.
    """
    def __init__(self):
        self.signals = []

    def signalProcess(self, signal):
        self.signals.append(signal)
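A minimal usage sketch of the fake transport (illustrative only):

transport = FakeProcessTransport()
transport.signalProcess("TERM")
transport.signalProcess("KILL")
# Tests can then assert on exactly which signals were delivered.
assert transport.signals == ["TERM", "KILL"]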
Example #24
    IFlockerAPIV1Client, FakeFlockerClient, Dataset, DatasetAlreadyExists,
    DatasetState, FlockerClient, ResponseError, _LOG_HTTP_REQUEST,
    Lease, LeaseAlreadyHeld,
)
from ...ca import rest_api_context_factory
from ...ca.testtools import get_credential_sets
from ...testtools import find_free_port
from ...control._persistence import ConfigurationPersistenceService
from ...control._clusterstate import ClusterStateService
from ...control.httpapi import create_api_service
from ...control import NodeState, NonManifestDatasets, Dataset as ModelDataset
from ...restapi._logging import JSON_REQUEST
from ...restapi import _infrastructure as rest_api


DATASET_SIZE = int(GiB(1).to_Byte().value)


def make_clientv1_tests():
    """
    Create a ``TestCase`` for testing ``IFlockerAPIV1Client``.

    The presumption is that the state of datasets is completely under
    control of this process. So when testing a real client it will be
    talking to an in-process server.

    The ``TestCase`` should have two 0-argument methods:

    create_client: Returns a ``IFlockerAPIV1Client`` provider.
    synchronize_state: Make state match the configuration.
    """
Example #25
def vmax_allocation_unit(size_in_gb):
    return EMCVmaxBlockDeviceAPI.vmax_round_allocation(
        int(GiB(size_in_gb).to_Byte().value))