Example #1
def test_node_tunnel_fall_back_when_local_port_taken():
    """Checks that a tunnel falls back to a random port if the local port is
        taken."""
    user = USER_53
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))

        node = nodes[0]
        nodes.wait(timeout=SLURM_WAIT_TIMEOUT)

        there = get_free_remote_port(node=node)
        here = get_free_local_port()

        tunnel_1 = node.tunnel(there=there, here=here)
        stack.enter_context(close_tunnel_on_exit(tunnel_1))
        assert tunnel_1.here == here

        tunnel_2 = node.tunnel(there=there, here=here)
        stack.enter_context(close_tunnel_on_exit(tunnel_2))
        assert tunnel_2.here != here
Example #2
    def test_file_logging_rotation_5_files(self):
        """
        Only 5 logfiles are kept.
        """
        logfile = FilePath(self.mktemp()).child('foo.log')
        logfile.parent().makedirs()
        # This file will become foo.log.1
        with logfile.open('w') as f:
            f.write(b'0')
            f.truncate(int(MiB(100).to_Byte().value))
        # These file extensions will be incremented
        for i in range(1, 5):
            sibling = logfile.sibling(logfile.basename() + u'.' + unicode(i))
            with sibling.open('w') as f:
                f.write(bytes(i))

        d = self.run_script(EliotScript, options=['--logfile', logfile.path])

        def verify_logfiles(stdout_messages, logfile):
            logfile_dir = logfile.parent()
            self.assertEqual(
                # The contents of the files will now be an integer one less
                # than the integer in the file name.
                map(bytes, range(0, 4)),
                list(
                    logfile_dir.child('foo.log.{}'.format(i)).open().read(1)
                    for i in range(1, 5)))

        d.addCallback(verify_logfiles, logfile=logfile)

        return d
Example #3
    def upload(self, files, dataset_id, blocking=True):
        """
        Uploads the given JSON files to the given dataset ID using the
        ingestor.

        Args:
            files (list): Full paths and names of the files to upload.
            dataset_id (str): The dataset ID being uploaded to.
            blocking (bool): Whether to block and wait for a report of the
                upload's success or failure.

        Returns:
            response (str): The response from the Experience Platform stating
                whether the batch succeeded or failed.
        """
        if not self.validate(dataset_id):
            exit(0)
        batch_id = self.ingestor.start_batch(dataset_id, self.ims_org,
                                             self.access_token, self.api_key)
        for fileName in files:
            if os.path.getsize(fileName) <= MiB(256).to_Byte():
                self.ingestor.upload(fileName, batch_id, dataset_id,
                                     self.ims_org, self.access_token,
                                     self.api_key)
            else:
                self.ingestor.upload_large(fileName, batch_id, dataset_id,
                                           self.ims_org, self.access_token,
                                           self.api_key)
        return self.ingestor.finish_upload(batch_id, self.ims_org,
                                           self.access_token, self.api_key,
                                           self.cataloguer, blocking)
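
A hedged usage sketch for context: the Uploader instance and every value below are assumptions for illustration; only the upload signature comes from the snippet above.

# Hypothetical usage; the uploader construction is assumed, not shown above.
uploader = Uploader(ims_org='my-org@AdobeOrg',  # assumed constructor
                    access_token='...', api_key='...')
response = uploader.upload(
    files=['/data/part-1.json', '/data/part-2.json'],  # made-up paths
    dataset_id='<dataset-id>',
    blocking=True)  # wait for the batch success/failure report
print(response)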
Example #4
def check_remote_key_and_node_access(stack: ExitStack, user: str):
    public_key_value = get_public_key_value()

    cluster = show_cluster(name=TEST_CLUSTER)
    node = cluster.get_access_node()
    with set_password(get_test_user_password(user)):
        assert node.run('whoami') == user
    assert node.run('whoami') == user

    node.run("grep '{public_key_value}' ~/.ssh/authorized_keys".format(
        public_key_value=public_key_value))

    with pytest.raises(RuntimeError):
        node.run(
            "grep '{public_key_value}' ~/.ssh/authorized_keys.idact".format(
                public_key_value=public_key_value))

    nodes = cluster.allocate_nodes(nodes=2,
                                   cores=1,
                                   memory_per_node=MiB(100),
                                   walltime=Walltime(minutes=30))
    stack.enter_context(cancel_on_exit(nodes))
    print(nodes)

    nodes.wait(timeout=SLURM_WAIT_TIMEOUT)
    node.run("grep '{public_key_value}' ~/.ssh/authorized_keys.idact".format(
        public_key_value=public_key_value))

    # Access to node without password works.
    assert nodes[0].run('whoami') == user

    check_direct_access_from_access_node_does_not_work(nodes[0])
Example #5
def test_remove_runtime_dir_test():
    user = USER_15
    with ExitStack() as stack:
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))
        node = nodes[0]
        try:
            nodes.wait(timeout=SLURM_WAIT_TIMEOUT)
            assert nodes.running()

            check_will_remove_empty(node=node)
            check_will_ignore_non_existent(node=node)
            check_will_remove_files(node=node)
            check_will_not_remove_dotfiles(node=node)
            check_will_not_remove_nested_dirs(node=node)
        finally:
            node.run("rm -rf *")
Example #6
def test_dask_deployment_with_redeploy_failure():
    user = USER_42
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=2,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))

        assert idact.detail.dask.deploy_dask_impl.validate_worker
        stored_validate_worker = \
            idact.detail.dask.deploy_dask_impl.validate_worker

        def fake_validate_worker(worker: DaskWorkerDeployment):
            print("Fake worker validation.")
            raise ValueError("Fake worker validation fail.")

        try:
            idact.detail.dask.deploy_dask_impl.validate_worker = \
                fake_validate_worker

            with pytest.raises(RuntimeError):
                with deploy_dask_on_testing_cluster(nodes):
                    pass

        finally:
            idact.detail.dask.deploy_dask_impl.validate_worker = \
                stored_validate_worker
Example #7
def test_node_tunnel_stress():
    user = USER_40
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100))
        stack.enter_context(cancel_on_exit(nodes))
        run_tunnel_stress_test(stack=stack, user=user, nodes=nodes)
Example #8
def test_node_tunnel():
    """Allocates a node and creates a tunnel."""
    user = USER_5
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        run_tunnel_test(user=user, nodes=nodes)
Example #9
def to_scaleio_size(size_mb):
    """
    :param int size_mb: The size of the volume in megabytes, not yet
        reduced to ScaleIO restrictions.
    :return int: The size in MB, subject to the ALLOCATION_GRANULARITY
        minimum.
    """
    # Should we use self.allocation_unit()
    # instead of ALLOCATION_GRANULARITY?
    div = round(int(MiB(size_mb).to_GiB().value) / ALLOCATION_GRANULARITY)
    if div > 0:
        return size_mb
    else:
        # Minimum is 8 GB; should we use self.allocation_unit()?
        return int(GiB(8).to_MiB().value)
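
For intuition, a minimal sketch of the behavior, assuming ALLOCATION_GRANULARITY is 8 (GiB). Note that once a size clears the minimum, the function returns size_mb unchanged rather than rounding it up to the granularity.

ALLOCATION_GRANULARITY = 8  # assumed value, in GiB

assert to_scaleio_size(100) == 8192       # below 8 GiB: bumped to the minimum
assert to_scaleio_size(102400) == 102400  # 100 GiB: returned as-is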
Example #10
def test_node_tunnel_fall_back_when_local_port_free_but_fails():
    """Checks that a tunnel will fall back to a random port if the local port
        is initially free, but the tunnel cannot be created anyway (e.g.
        another process binds to it at the last moment)."""
    user = USER_54
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))

        node = nodes[0]
        nodes.wait(timeout=SLURM_WAIT_TIMEOUT)

        there = get_free_remote_port(node=node)
        here = get_free_local_port()

        real_build_tunnel = idact.detail.nodes.node_impl.build_tunnel
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        tries = [0]

        def fake_build_tunnel(*args, **kwargs) -> TunnelInternal:
            tries[0] += 1
            if tries[0] == 1:
                raise RuntimeError("Fake failure.")
            if tries[0] != 2:
                assert False

            return real_build_tunnel(*args, **kwargs)

        try:
            idact.detail.nodes.node_impl.build_tunnel = fake_build_tunnel
            tunnel = node.tunnel(there=there, here=here)
            stack.enter_context(close_tunnel_on_exit(tunnel))
            assert tries[0] == 2
            assert tunnel.here != here
        finally:
            idact.detail.nodes.node_impl.build_tunnel = real_build_tunnel
            sock.close()
Example #11
def test_jupyter_deployment():
    user = USER_6
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))

        with deploy_jupyter(nodes):
            pass
Example #12
    def test_logfile_override(self):
        """
        If `--logfile` is supplied, the Eliot logging destination wraps
        ``twisted.python.logfile.LogFile``.
        """
        options = self.options()
        # The command may otherwise give a UsageError
        # "Wrong number of arguments." if there are arguments required.
        # See https://clusterhq.atlassian.net/browse/FLOC-184 about a solution
        # which does not involve patching.
        self.patch(options, "parseArgs", lambda: None)
        expected_path = FilePath(self.mktemp()).path
        options.parseOptions(['--logfile={}'.format(expected_path)])
        logfile = options.eliot_destination.file
        self.assertEqual(
            (LogFile, expected_path, int(MiB(100).to_Byte().value), 5),
            (logfile.__class__, logfile.path, logfile.rotateLength,
             logfile.maxRotatedFiles))
Example #13
def test_node_tunnel_public_key():
    """Allocates a node and creates a tunnel, using public key authentication.
    """
    user = USER_13
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(
            reset_environment(user=user, auth=AuthMethod.PUBLIC_KEY))

        cluster = show_cluster(name=TEST_CLUSTER)

        with set_password(get_test_user_password(user)):
            nodes = cluster.allocate_nodes(nodes=1,
                                           cores=1,
                                           memory_per_node=MiB(100),
                                           walltime=Walltime(minutes=30))
        run_tunnel_test(user=user, nodes=nodes)
Example #14
def test_able_to_reach_nodes_when_using_password_based_authentication():
    """It should be possible to connect to compute nodes even when using
        password-based authentication, because the local public key is
        authorized for the compute nodes after the initial connection.
        However, a direct connection from the access node should fail.
        The password is still used between the client and the access node."""
    user = USER_10
    with ExitStack() as stack:
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user=user, auth=AuthMethod.ASK))
        stack.enter_context(set_password(get_test_user_password(user)))
        stack.enter_context(disable_pytest_stdin())
        cluster = show_cluster(TEST_CLUSTER)
        node = cluster.get_access_node()

        nodes = cluster.allocate_nodes(nodes=2,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))
        print(nodes)

        nodes.wait(timeout=SLURM_WAIT_TIMEOUT)

        compute_node = nodes[0]
        assert isinstance(compute_node, NodeInternal)

        public_key_value = get_public_key_value()

        # Local key was installed for the deployed sshd, allowing access
        # between the access node and compute nodes.
        assert nodes[0].run('whoami') == user

        # Local key was not installed for the access node
        with pytest.raises(RuntimeError):
            node.run("grep '{public_key_value}' ~/.ssh/authorized_keys".format(
                public_key_value=public_key_value))

        # But it was installed for compute nodes.
        node.run("grep '{public_key_value}'"
                 " ~/.ssh/authorized_keys.idact".format(
                     public_key_value=public_key_value))

        check_direct_access_from_access_node_does_not_work(nodes[0])
Example #15
def test_basic():
    user = USER_1
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        clusters = show_clusters()
        print(clusters)

        assert len(clusters) == 1

        cluster = show_cluster(name=TEST_CLUSTER)
        print(cluster)

        assert clusters[TEST_CLUSTER] == cluster

        nodes = cluster.allocate_nodes(nodes=2,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30),
                                       native_args={'--partition': 'debug'})
        with cancel_on_exit(nodes):
            assert len(nodes) == 2
            assert nodes[0] in nodes
            print(nodes)
            assert str(nodes) == repr(nodes)

            nodes.wait(timeout=SLURM_WAIT_TIMEOUT)
            assert nodes.running()

            print(nodes)
            print(nodes[0])

            assert nodes[0].run('whoami') == user
            assert nodes[1].run('whoami') == user

        assert not nodes.running()
        with pytest.raises(RuntimeError):
            nodes.wait()
        with pytest.raises(RuntimeError):
            nodes[0].run('whoami')
Example #16
    def test_file_logging_rotation_at_100MiB(self):
        """
        Logfiles are rotated when they reach 100MiB.
        """
        logfile = FilePath(self.mktemp()).child('foo.log')
        logfile.parent().makedirs()
        with logfile.open('w') as f:
            f.truncate(int(MiB(100).to_Byte().value - 1))

        d = self.run_script(EliotScript, options=['--logfile', logfile.path])

        def verify_logfiles(stdout_messages, logfile):
            self.assertEqual(
                set([logfile, logfile.sibling(logfile.basename() + u'.1')]),
                set(logfile.parent().children())
            )
        d.addCallback(verify_logfiles, logfile=logfile)

        return d
Example #17
def test_dask_deployment_with_redeploy_on_validation_failure():
    user = USER_41
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=2,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))

        assert idact.detail.dask.deploy_dask_impl.validate_worker
        stored_validate_worker = \
            idact.detail.dask.deploy_dask_impl.validate_worker

        fake_validation_counter = [0]

        # pylint: disable=unused-argument
        def fake_validate_worker(worker: DaskWorkerDeployment):
            current_count = fake_validation_counter[0]
            fake_validation_counter[0] = current_count + 1

            print("Fake worker validation.")
            if current_count == 0:
                raise RuntimeError("Fake worker validation: First node fail.")
            print("Deciding the worker is valid.")

        try:
            idact.detail.dask.deploy_dask_impl.validate_worker = \
                fake_validate_worker

            with deploy_dask_on_testing_cluster(nodes):
                pass

            assert fake_validation_counter[0] == 3

        finally:
            idact.detail.dask.deploy_dask_impl.validate_worker = \
                stored_validate_worker
Example #18
def test_dask_deployment_with_absolute_scratch_path():
    user = USER_24
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        cluster.config.scratch = '/home/user-24'

        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=10))
        stack.enter_context(cancel_on_exit(nodes))

        with deploy_dask_on_testing_cluster(nodes):
            pass
Example #19
def test_dask_deployment_with_setup_actions():
    user = USER_18
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        nodes = cluster.allocate_nodes(nodes=2,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))

        cluster.config.setup_actions.dask = ['echo ABC > file.txt',
                                             'mv file.txt file2.txt']
        with deploy_dask_on_testing_cluster(nodes) as node:
            assert node.run("cat file2.txt") == "ABC"
Example #20
    def test_parsenum_all_sizes(self, expr, size):
        """
        Send standard size expressions to ``parse_num`` in
        many sizes; we expect to get correct size results.

        :param str expr: A string representing the size expression
        :param int size: An integer representing the volume size
        """
        if expr == "KB":
            expected_size = int(KiB(size).to_Byte())
        elif expr == "MB":
            expected_size = int(MiB(size).to_Byte())
        elif expr == "GB":
            expected_size = int(GiB(size).to_Byte())
        elif expr == "TB":
            expected_size = int(TiB(size).to_Byte())
        else:
            expected_size = int(Byte(size).to_Byte())
        return self.assertEqual(expected_size,
                                int(parse_num(str(size) + expr).to_Byte()))
Example #21
def test_allocation_should_default_to_port_22_if_port_info_file_is_missing():
    user = USER_61
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)

        node = cluster.get_access_node()
        nodes = cluster.allocate_nodes(memory_per_node=MiB(100))
        stack.enter_context(cancel_on_exit(nodes))

        retry(lambda: node.run("rm ~/.idact/sshd_ports/alloc-*/*"),
              retries=SLURM_WAIT_TIMEOUT,
              seconds_between_retries=1)

        nodes.wait(timeout=SLURM_WAIT_TIMEOUT)

        assert nodes.running()
        assert nodes[0].port == 22
Example #22
def test_generic_deployment():
    user = USER_7
    with ExitStack() as stack:
        stack.enter_context(disable_pytest_stdin())
        stack.enter_context(set_up_key_location(user))
        stack.enter_context(reset_environment(user))
        stack.enter_context(set_password(get_test_user_password(user)))

        cluster = show_cluster(name=TEST_CLUSTER)
        print(cluster)
        nodes = cluster.allocate_nodes(nodes=1,
                                       cores=1,
                                       memory_per_node=MiB(100),
                                       walltime=Walltime(minutes=30))
        stack.enter_context(cancel_on_exit(nodes))
        node = nodes[0]

        nodes.wait(timeout=SLURM_WAIT_TIMEOUT)
        assert nodes.running()

        assert isinstance(node, NodeInternal)
        runtime_dir = create_runtime_dir(node=node)
        stack.enter_context(
            remove_runtime_dir_on_failure(node=node, runtime_dir=runtime_dir))
        script_contents = "echo ABC && sleep 30"

        deployment = deploy_generic(node=node,
                                    script_contents=script_contents,
                                    runtime_dir=runtime_dir)
        with cancel_on_exit(deployment):
            print(deployment)

            node.run("kill -0 {pid}".format(pid=deployment.pid))

        with pytest.raises(RuntimeError):
            node.run("kill -0 {pid}".format(pid=deployment.pid))
Example #23
def parse_num(expression):
    """
    Parse a string of a dataset size 10g, 100kib etc into
    a usable integer.
    If user doesn't submit a correct size, give back
    the default size.

    :param expression: the dataset expression to parse.
    """
    if not expression:
        return DEFAULT_SIZE
    if type(expression) is unicode:
        expression = str(expression)

    def _match(exp,
               search=re.compile(
                   r'^(\d+){1}([KMGTkmgt][IiBb]){0,1}([Bb]){0,1}').search):
        return bool(search(exp))

    if _match(expression):
        unit = expression.translate(None, "1234567890.")
        num = int(expression.replace(unit, ""))
        unit = unit.lower()
        if unit == 'tb' or unit == 't' or unit == 'tib':
            return TiB(num)
        elif unit == 'gb' or unit == 'g' or unit == 'gib':
            return GiB(num)
        elif unit == 'mb' or unit == 'm' or unit == 'mib':
            return MiB(num)
        elif unit == 'kb' or unit == 'k' or unit == 'kib':
            return KiB(num)
        elif unit == '':
            return Byte(num)
        else:
            return DEFAULT_SIZE
    else:
        return DEFAULT_SIZE
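
A few example calls for intuition (return values are bitmath objects per the branches above; this is Python 2 code, given unicode and the two-argument str.translate):

parse_num('10g')     # -> GiB(10)
parse_num('100kib')  # -> KiB(100)
parse_num('512')     # -> Byte(512)
parse_num('oops')    # no leading digits, so DEFAULT_SIZE
parse_num(None)      # falsy, so DEFAULT_SIZE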
Example #24
def is_allowed_file_size(f):
    """Check whether the given file's size is less than or equal to the
    maximum allowed in config.
    """
    max_size = get_storage_max_size()
    return Byte(get_file_size(f)) <= MiB(float(max_size))
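
The one-liner works because bitmath instances support comparisons across units; a minimal sketch:

from bitmath import Byte, MiB

assert Byte(1048576) <= MiB(1)        # exactly 1 MiB
assert not (Byte(1048577) <= MiB(1))  # one byte over the limit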
Example #25

from uuid import uuid4
from bitmath import GiB, MiB
from flocker.node.agents.test.test_blockdevice import (
    make_iblockdeviceapi_tests, )
from ibm_storage_flocker_driver.testtools_ibm_storage_flocker_driver import \
    get_ibm_storage_blockdevice_api_for_test

# Smallest volume to create in tests
MIN_ALLOCATION_SIZE = int(GiB(1).to_Byte().value)

# Minimum unit of volume allocation
MIN_ALLOCATION_UNIT = int(MiB(1).to_Byte().value)


class IBMStorageBlockDeviceAPITests(
        make_iblockdeviceapi_tests(
            blockdevice_api_factory=(
                lambda test_case: get_ibm_storage_blockdevice_api_for_test(
                    uuid4(), test_case)),
            minimum_allocatable_size=MIN_ALLOCATION_SIZE,
            device_allocation_unit=MIN_ALLOCATION_UNIT,
            unknown_blockdevice_id_factory=lambda test: unicode(uuid4()))):
    """
    Basic interface tests for ``IBMStorageBlockDeviceAPITests``
    """
Example #26
from zope.interface import implementer

from twisted.python.filepath import FilePath
from twisted.python.components import proxyForInterface

from .blockdevice import (
    BlockDeviceVolume,
    IBlockDeviceAPI,
    UnknownInstanceID,
    AlreadyAttachedVolume,
    UnattachedVolume,
    allocated_size,
    get_blockdevice_volume,
)

LOOPBACK_ALLOCATION_UNIT = int(MiB(1).to_Byte().value)
# Enough space for the ext4 journal:
LOOPBACK_MINIMUM_ALLOCATABLE_SIZE = int(MiB(16).to_Byte().value)


def _blockdevicevolume_from_dataset_id(dataset_id, size, attached_to=None):
    """
    Create a new ``BlockDeviceVolume`` with a ``blockdevice_id`` derived
    from the given ``dataset_id``.

    This is for convenience of implementation of the loopback backend (to
    avoid needing a separate data store for mapping dataset ids to block
    device ids and back again).

    Parameters accepted have the same meaning as the attributes of
    ``BlockDeviceVolume``.
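
The snippet cuts off mid-docstring. A minimal sketch of the derivation the docstring describes, where the function name and the u'block-' prefix are assumptions for illustration:

# Sketch only: derive the blockdevice_id from the dataset_id so no
# separate mapping store is needed (the prefix scheme is assumed).
def _sketch_blockdevicevolume_from_dataset_id(dataset_id, size,
                                              attached_to=None):
    return BlockDeviceVolume(
        blockdevice_id=u"block-{0}".format(dataset_id),
        size=size,
        attached_to=attached_to,
        dataset_id=dataset_id,
    )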
Example #27
def test_bit_size_var_default(value):
    def_val = MiB(2)
    var = BitSizeVar('V', def_val)
    assert var.read(value) == def_val
Example #28
try:
    from eliot.journald import JournaldDestination
except ImportError as e:
    # This platform doesn't have journald.
    JournaldDestination = None
    _missing_journald_reason = str(e)
    del e

__all__ = [
    'flocker_standard_options',
    'ICommandLineScript',
    'FlockerScriptRunner',
    'main_for_service',
]

LOGFILE_LENGTH = int(MiB(100).to_Byte().value)
LOGFILE_COUNT = 5


def flocker_standard_options(cls):
    """Add various standard command line options to flocker commands.

    :param type cls: The `class` to decorate.
    :return: The decorated `class`.
    """
    original_init = cls.__init__
    original_postOptions = cls.postOptions

    def __init__(self, *args, **kwargs):
        """Set the default verbosity to `0`
Example #29
def to_mib(value):
    return MiB(bytes=to_bytes(value))
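
bitmath constructors accept a bytes= keyword, which is what makes this one-liner work (assuming to_bytes returns a plain byte count):

from bitmath import MiB

assert MiB(bytes=1048576) == MiB(1)  # 2**20 bytes is exactly 1 MiB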
Example #30
    def parse_k8s_memory_value(memory_value):
        """Parse and convert Kubernetes specific memory value

        :param memory_value: memory value from Kubernetes manifest
        :type memory_value: str
        :raises NotImplementedError: raised if value postfix is unknown
        :return: parsed memory value
        :rtype: int
        """

        # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
        # https://medium.com/@betz.mark/understanding-resource-limits-in-kubernetes-memory-6b41e9a955f9
        _K8S_MEMORY_SUFFIXES_FIXED = ['E', 'P', 'T', 'G', 'M', 'K']
        _K8S_MEMORY_SUFFIXES_POWER = ['Ei', 'Pi', 'Ti', 'Gi', 'Mi', 'Ki']

        if type(memory_value) is str:
            # exponential notation e.g. 3e2 = 300
            if 'e' in memory_value:
                memory_value = float(memory_value)
            # check if power-of-two notation is used
            # it is important to check power-of-two first as fixed-point comparison would also match
            elif [
                    e for e in _K8S_MEMORY_SUFFIXES_POWER
                    if (e in memory_value)
            ]:
                if 'Ki' in memory_value:
                    memory_value = memory_value.strip('Ki')
                    memory_value = KiB(float(memory_value)).to_MB().value
                elif 'Mi' in memory_value:
                    memory_value = memory_value.strip('Mi')
                    memory_value = MiB(float(memory_value)).to_MB().value
                elif 'Gi' in memory_value:
                    memory_value = memory_value.strip('Gi')
                    memory_value = GiB(float(memory_value)).to_MB().value
                elif 'Ti' in memory_value:
                    memory_value = memory_value.strip('Ti')
                    memory_value = TiB(float(memory_value)).to_MB().value
                elif 'Pi' in memory_value:
                    memory_value = memory_value.strip('Pi')
                    memory_value = PiB(float(memory_value)).to_MB().value
                elif 'Ei' in memory_value:
                    memory_value = memory_value.strip('Ei')
                    memory_value = EiB(float(memory_value)).to_MB().value
                else:
                    raise NotImplementedError(
                        'Memory value unit of {} not implemented'.format(
                            memory_value))
            # check if fixed-point integer notation is used
            elif [
                    e for e in _K8S_MEMORY_SUFFIXES_FIXED
                    if (e in memory_value)
            ]:
                if 'M' in memory_value:
                    memory_value = memory_value.strip('M')
                elif 'K' in memory_value:
                    memory_value = memory_value.strip('K')
                    memory_value = kB(float(memory_value)).to_MB().value
                elif 'G' in memory_value:
                    memory_value = memory_value.strip('G')
                    memory_value = GB(float(memory_value)).to_MB().value
                elif 'T' in memory_value:
                    memory_value = memory_value.strip('T')
                    memory_value = TB(float(memory_value)).to_MB().value
                elif 'P' in memory_value:
                    memory_value = memory_value.strip('P')
                    memory_value = PB(float(memory_value)).to_MB().value
                elif 'E' in memory_value:
                    memory_value = memory_value.strip('E')
                    memory_value = EB(float(memory_value)).to_MB().value
                else:
                    raise NotImplementedError(
                        'Memory value unit of {} not implemented'.format(
                            memory_value))
        # direct definition in bytes - convert to MB
        else:
            memory_value = memory_value / float('1e+6')

        return int(memory_value)
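
Example calls for intuition (values follow from the branches above; the method is shown without self, so it presumably sits under a @staticmethod decorator not included in the snippet):

parse_k8s_memory_value('128Mi')    # -> 134   (128 * 2**20 bytes ~= 134.2 MB)
parse_k8s_memory_value('1Gi')      # -> 1073  (2**30 bytes ~= 1073.7 MB)
parse_k8s_memory_value('500M')     # -> 500   (fixed-point M is already MB)
parse_k8s_memory_value(134217728)  # -> 134   (plain bytes divided by 1e6)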