def test_parse_string_unsafe_github_issue_60(self):
    """parse_string_unsafe can parse the examples reported in issue #60

    https://github.com/tbielawa/bitmath/issues/60
    """
    issue_input1 = '7.5KB'
    _parsed1 = bitmath.parse_string_unsafe(issue_input1)
    expected_result1 = bitmath.kB(7.5)
    self.assertEqual(_parsed1, expected_result1)

    issue_input2 = '4.7MB'
    _parsed2 = bitmath.parse_string_unsafe(issue_input2)
    expected_result2 = bitmath.MB(4.7)
    self.assertEqual(_parsed2, expected_result2)

    issue_input3 = '4.7M'
    _parsed3 = bitmath.parse_string_unsafe(issue_input3)
    expected_result3 = bitmath.MB(4.7)
    self.assertEqual(_parsed3, expected_result3)
def test_parse_unsafe_NIST(self):
    """parse_string_unsafe can parse all accepted NIST inputs"""
    # Begin with the kilo unit because it's the most tricky (SI
    # defines the unit as a lower-case 'k')
    kilo_inputs = [
        '100ki',
        '100Ki',
        '100kib',
        '100KiB',
        '100kiB',
    ]
    expected_kilo_result = bitmath.KiB(100)
    for ki in kilo_inputs:
        _parsed = bitmath.parse_string_unsafe(ki)
        self.assertEqual(_parsed, expected_kilo_result)
        self.assertIs(type(_parsed), type(expected_kilo_result))

    # Now check the other, easier-to-parse prefixes
    other_inputs = [
        '100gi',
        '100Gi',
        '100gib',
        '100giB',
        '100GiB',
    ]
    expected_gig_result = bitmath.GiB(100)
    for gi in other_inputs:
        _parsed = bitmath.parse_string_unsafe(gi)
        self.assertEqual(_parsed, expected_gig_result)
        self.assertIs(type(_parsed), type(expected_gig_result))
def test_parse_string_unsafe_request_NIST(self):
    """parse_string_unsafe can convert to NIST on request"""
    unsafe_input = "100M"
    _parsed = bitmath.parse_string_unsafe(unsafe_input, system=bitmath.NIST)
    expected = bitmath.MiB(100)
    self.assertEqual(_parsed, expected)
    self.assertIs(type(_parsed), type(expected))

    unsafe_input2 = "100k"
    _parsed2 = bitmath.parse_string_unsafe(unsafe_input2, system=bitmath.NIST)
    expected2 = bitmath.KiB(100)
    self.assertEqual(_parsed2, expected2)
    self.assertIs(type(_parsed2), type(expected2))

    unsafe_input3 = "100"
    _parsed3 = bitmath.parse_string_unsafe(unsafe_input3, system=bitmath.NIST)
    expected3 = bitmath.Byte(100)
    self.assertEqual(_parsed3, expected3)
    self.assertIs(type(_parsed3), type(expected3))

    unsafe_input4 = "100kb"
    _parsed4 = bitmath.parse_string_unsafe(unsafe_input4, system=bitmath.NIST)
    expected4 = bitmath.KiB(100)
    self.assertEqual(_parsed4, expected4)
    self.assertIs(type(_parsed4), type(expected4))
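# For reference: the system= keyword selects which unit family is used for
# ambiguous, best-guess prefixes. A quick interactive demonstration (the
# repr values assume bitmath's standard SI and NIST classes):
#
#   >>> import bitmath
#   >>> bitmath.parse_string_unsafe("100M", system=bitmath.NIST)
#   MiB(100.0)
#   >>> bitmath.parse_string_unsafe("100M")  # SI is the default
#   MB(100.0)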
def test_parse_unsafe_handles_SI_K_unit(self):
    """parse_string_unsafe can parse the upper/lowercase SI 'thousand' (k)"""
    thousand_lower = "100k"
    thousand_upper = "100K"
    expected_result = bitmath.kB(100)
    self.assertEqual(bitmath.parse_string_unsafe(thousand_lower), expected_result)
    self.assertEqual(bitmath.parse_string_unsafe(thousand_upper), expected_result)
def test_parse_unsafe_good_number_input(self):
    """parse_string_unsafe can parse unitless number inputs"""
    number_input = 100
    string_input = "100"
    expected_result = bitmath.Byte(100)
    self.assertEqual(bitmath.parse_string_unsafe(number_input), expected_result)
    self.assertEqual(bitmath.parse_string_unsafe(string_input), expected_result)
def test_parse_unsafe_NIST_units(self):
    """parse_string_unsafe can parse abbreviated NIST units (Gi, Ki, ...)"""
    nist_input = "100 Gi"
    expected_result = bitmath.GiB(100)
    self.assertEqual(bitmath.parse_string_unsafe(nist_input), expected_result)
from typing import Type, Union

import bitmath


def convert_size(size: Union[str, int, float], unit: str,
                 to_unit: Type[bitmath.Bitmath]) -> float:
    """Parse an arbitrary `size` value (str, int, or float) given in
    `unit`, convert it to `to_unit` (a subclass of bitmath.Bitmath), and
    return the resulting value as a float.
    """
    return to_unit.from_other(
        bitmath.parse_string_unsafe("{} {}".format(size, unit))).value
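# A usage sketch for convert_size above (the expected values follow the
# standard conversion factors: 1 GiB = 1073741824 bytes, 1 MB = 10**6 bytes):
#
#   >>> convert_size(1, "GiB", bitmath.MB)
#   1073.741824
#   >>> convert_size("2.5", "GB", bitmath.MiB)
#   2384.185791015625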
def test_parse_unsafe_invalid_input(self):
    """parse_string_unsafe explodes when given invalid units"""
    invalid_input_str = "kitties!"
    with self.assertRaises(ValueError):
        bitmath.parse_string_unsafe(invalid_input_str)

    with self.assertRaises(ValueError):
        bitmath.parse_string_unsafe('100 CiB')

    with self.assertRaises(ValueError):
        bitmath.parse_string_unsafe('100 J')
def get_statefulset_pvc(
    owner_references: Optional[List[V1OwnerReference]],
    node_spec: Dict[str, Any],
) -> List[V1PersistentVolumeClaim]:
    size = format_bitmath(
        bitmath.parse_string_unsafe(node_spec["resources"]["disk"]["size"]))
    storage_class_name = node_spec["resources"]["disk"]["storageClass"]
    return [
        V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name=f"data{i}",
                                  owner_references=owner_references),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(requests={"storage": size}),
                storage_class_name=storage_class_name,
            ),
        ) for i in range(node_spec["resources"]["disk"]["count"])
    ]
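# format_bitmath is a helper defined elsewhere in this codebase. A minimal
# sketch of what such a helper could look like (an assumption for
# illustration, not the project's actual implementation):

def format_bitmath_sketch(value: bitmath.Byte) -> str:
    # Render with the best-fitting NIST prefix and strip the trailing "B"
    # so the result matches Kubernetes quantity suffixes, e.g.
    # Byte(2147483648) becomes "2Gi".
    best = value.best_prefix(system=bitmath.NIST)
    return best.format("{value:.0f}{unit}").rstrip("B")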
def get_statefulset_crate_env(
    node_spec: Dict[str, Any],
    jmx_port: int,
    prometheus_port: int,
    ssl: Optional[Dict[str, Any]],
) -> List[V1EnvVar]:
    crate_env = [
        V1EnvVar(
            name="CRATE_HEAP_SIZE",
            value=str(
                int(
                    bitmath.parse_string_unsafe(
                        node_spec["resources"]["memory"]).bytes *
                    node_spec["resources"]["heapRatio"])),
        ),
        V1EnvVar(
            name="CRATE_JAVA_OPTS",
            value=" ".join(
                get_statefulset_crate_env_java_opts(jmx_port, prometheus_port)),
        ),
    ]

    if ssl is not None:
        crate_env.extend([
            V1EnvVar(
                name="KEYSTORE_KEY_PASSWORD",
                value_from=V1EnvVarSource(secret_key_ref=V1SecretKeySelector(
                    key=ssl["keystoreKeyPassword"]["secretKeyRef"]["key"],
                    name=ssl["keystoreKeyPassword"]["secretKeyRef"]["name"],
                )),
            ),
            V1EnvVar(
                name="KEYSTORE_PASSWORD",
                value_from=V1EnvVarSource(secret_key_ref=V1SecretKeySelector(
                    key=ssl["keystorePassword"]["secretKeyRef"]["key"],
                    name=ssl["keystorePassword"]["secretKeyRef"]["name"],
                )),
            ),
        ])

    return crate_env
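# Worked example of the CRATE_HEAP_SIZE arithmetic above: with
# node_spec["resources"]["memory"] == "4Gi" and heapRatio == 0.5,
# bitmath.parse_string_unsafe("4Gi").bytes is 4294967296.0, so the
# environment variable is set to str(int(4294967296.0 * 0.5)),
# i.e. "2147483648".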
def get_statefulset_containers(
    node_spec: Dict[str, Any],
    http_port: int,
    jmx_port: int,
    postgres_port: int,
    prometheus_port: int,
    transport_port: int,
    crate_image: str,
    crate_command: List[str],
    crate_env: List[V1EnvVar],
    crate_volume_mounts: List[V1VolumeMount],
) -> List[V1Container]:
    # There is no official release of 0.6, so let's use our own build
    # from commit 1498107. Also, because it's a private registry, let's
    # use the official release during tests so we don't need Docker secrets.
    # https://github.com/free/sql_exporter/commit/1498107
    sql_exporter_image = "cloud.registry.cr8.net/crate/sql-exporter:1498107"
    if config.TESTING:
        sql_exporter_image = "githubfree/sql_exporter:latest"
    return [
        V1Container(
            command=[
                "/bin/sql_exporter",
                "-config.file=/config/sql-exporter.yaml",
                "-web.listen-address=:9399",
                "-web.metrics-path=/metrics",
            ],
            image=sql_exporter_image,
            name="sql-exporter",
            ports=[V1ContainerPort(container_port=9399, name="sql-exporter")],
            volume_mounts=[
                V1VolumeMount(
                    mount_path="/config",
                    name="crate-sql-exporter",
                    read_only=True,
                ),
            ],
        ),
        V1Container(
            command=crate_command,
            env=crate_env,
            image=crate_image,
            name="crate",
            ports=[
                V1ContainerPort(container_port=http_port, name="http"),
                V1ContainerPort(container_port=jmx_port, name="jmx"),
                V1ContainerPort(container_port=postgres_port, name="postgres"),
                V1ContainerPort(container_port=prometheus_port, name="prometheus"),
                V1ContainerPort(container_port=transport_port, name="transport"),
            ],
            readiness_probe=V1Probe(
                http_get=V1HTTPGetAction(path="/ready", port=prometheus_port),
                initial_delay_seconds=30,
                period_seconds=10,
            ),
            resources=V1ResourceRequirements(
                limits={
                    "cpu": str(node_spec["resources"]["cpus"]),
                    "memory": format_bitmath(
                        bitmath.parse_string_unsafe(
                            node_spec["resources"]["memory"])),
                },
                requests={
                    "cpu": str(node_spec["resources"]["cpus"]),
                    "memory": format_bitmath(
                        bitmath.parse_string_unsafe(
                            node_spec["resources"]["memory"])),
                },
            ),
            volume_mounts=crate_volume_mounts,
        ),
    ]
def upload_chunk(self, app_config, input_fp, start_offset=0, length=-1):
    """
    Uploads a chunk of data found in the given input file-like interface.

    start_offset and length are optional and should match a range header
    if any was given. Returns the total number of bytes uploaded after this
    upload has completed. Raises a BlobUploadException if the upload failed.
    """
    assert start_offset is not None
    assert length is not None

    if start_offset > 0 and start_offset > self.blob_upload.byte_count:
        logger.error("start_offset provided greater than blob_upload.byte_count")
        raise BlobRangeMismatchException()

    # Ensure that we won't go over the allowed maximum size for blobs.
    max_blob_size = bitmath.parse_string_unsafe(self.settings.maximum_blob_size)
    uploaded = bitmath.Byte(length + start_offset)
    if length > -1 and uploaded > max_blob_size:
        raise BlobTooLargeException(uploaded=uploaded.bytes,
                                    max_allowed=max_blob_size.bytes)

    location_set = {self.blob_upload.location_name}
    upload_error = None
    with CloseForLongOperation(app_config):
        if start_offset > 0 and start_offset < self.blob_upload.byte_count:
            # Skip the bytes which were received on a previous push, which
            # are already stored and included in the sha calculation.
            overlap_size = self.blob_upload.byte_count - start_offset
            input_fp = StreamSlice(input_fp, overlap_size)

            # Update our upload bounds to reflect the skipped portion of
            # the overlap.
            start_offset = self.blob_upload.byte_count
            length = max(length - overlap_size, 0)

        # We use this to escape early in case we have already processed all
        # of the bytes the user wants to upload.
        if length == 0:
            return self.blob_upload.byte_count

        input_fp = wrap_with_handler(input_fp, self.blob_upload.sha_state.update)

        if self.extra_blob_stream_handlers:
            for handler in self.extra_blob_stream_handlers:
                input_fp = wrap_with_handler(input_fp, handler)

        # If this is the first chunk and we're starting at the 0 offset, add
        # a handler to gunzip the stream so we can determine the uncompressed
        # size. We'll throw out this data if another chunk comes in, but in
        # the common case the docker client only sends one chunk.
        size_info = None
        if start_offset == 0 and self.blob_upload.chunk_count == 0:
            size_info, fn = calculate_size_handler()
            input_fp = wrap_with_handler(input_fp, fn)

        start_time = time.time()
        length_written, new_metadata, upload_error = self.storage.stream_upload_chunk(
            location_set,
            self.blob_upload.upload_id,
            start_offset,
            length,
            input_fp,
            self.blob_upload.storage_metadata,
            content_type=BLOB_CONTENT_TYPE,
        )

        if upload_error is not None:
            logger.error("storage.stream_upload_chunk returned error %s", upload_error)
            raise BlobUploadException(upload_error)

        # Update the chunk upload time and push bytes metrics.
        chunk_upload_duration.labels(list(location_set)[0]).observe(
            time.time() - start_time)
        pushed_bytes_total.inc(length_written)

    # Ensure we have not gone beyond the max layer size.
    new_blob_bytes = self.blob_upload.byte_count + length_written
    new_blob_size = bitmath.Byte(new_blob_bytes)
    if new_blob_size > max_blob_size:
        raise BlobTooLargeException(uploaded=new_blob_size.bytes,
                                    max_allowed=max_blob_size.bytes)

    # If we determined an uncompressed size and this is the first chunk,
    # add it to the blob. Otherwise, we clear the size from the blob as it
    # was uploaded in multiple chunks.
    uncompressed_byte_count = self.blob_upload.uncompressed_byte_count
    if size_info is not None and self.blob_upload.chunk_count == 0 and size_info.is_valid:
        uncompressed_byte_count = size_info.uncompressed_size
    elif length_written > 0:
        # Otherwise, if we wrote some bytes and the above conditions were
        # not met, then we don't know the uncompressed size.
        uncompressed_byte_count = None

    self.blob_upload = registry_model.update_blob_upload(
        self.blob_upload,
        uncompressed_byte_count,
        new_metadata,
        new_blob_bytes,
        self.blob_upload.chunk_count + 1,
        self.blob_upload.sha_state,
    )
    if self.blob_upload is None:
        raise BlobUploadException("Could not complete upload of chunk")

    return new_blob_bytes
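# The size guards above rely on bitmath's rich comparisons: parsing a
# configured limit such as "50G" yields GB(50.0), and a bitmath.Byte value
# compares against it by normalized byte count regardless of class:
#
#   >>> bitmath.parse_string_unsafe("50G") > bitmath.Byte(49 * 10**9)
#   True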
def get_resource_utilization(vim: KubernetesVim):
    config = client.Configuration()
    config.host = vim.url
    config.api_key = {"authorization": "Bearer " + vim.service_token}

    try:
        ca_cert = b64decode(vim.ccc, validate=True)
    except (Base64DecodingError, ValueError):
        raise VimConnectionError(
            "Error decoding the cluster CA certificate. "
            "Make sure it is a valid base64-encoded string.")

    # Write the CA certificate to a temp file for urllib
    ca_cert_file = NamedTemporaryFile(mode="w+b", delete=False)
    ca_cert_file.write(ca_cert)
    ca_cert_file_path = Path(ca_cert_file.name)
    ca_cert_file.close()

    config.verify_ssl = True
    config.ssl_ca_cert = ca_cert_file_path

    try:
        api = client.CoreV1Api(client.ApiClient(config))

        cores_total = 0
        cores_used = 0
        memory_total = bitmath.MB()
        memory_used = bitmath.MB()

        # Aggregate available resources for all nodes
        for node in api.list_node().items:
            allocatable = node.status.allocatable
            cores_total += float(allocatable["cpu"])
            memory_total += bitmath.parse_string_unsafe(allocatable["memory"])

        # Aggregate resource requests for all containers in all pods (like
        # `kubectl describe nodes` does)
        for pod in api.list_pod_for_all_namespaces().items:
            for container in pod.spec.containers:
                resource_requests = container.resources.requests
                if resource_requests is not None:
                    if "cpu" in resource_requests:
                        # CPU requests may be expressed in whole cores
                        # ("2") or millicores ("500m").
                        cpu = resource_requests["cpu"]
                        if cpu.endswith("m"):
                            cores_used += float(cpu[:-1]) / 1000
                        else:
                            cores_used += float(cpu)
                    if "memory" in resource_requests:
                        memory_used += bitmath.parse_string_unsafe(
                            resource_requests["memory"])

        return {
            "cores": {
                "used": round(cores_used, 3),
                "total": cores_total,
            },
            "memory": {
                "used": math.ceil(memory_used.value),
                "total": math.floor(memory_total.value),
            },
        }
    except (ApiException, MaxRetryError) as e:
        if isinstance(e, ApiException) and str(e).startswith("(401)"):
            raise VimConnectionError(
                "Authorization error. Please check the service token.")
        raise VimConnectionError(str(e))
    finally:
        # Remove the temporary CA certificate
        ca_cert_file_path.unlink()
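# Kubernetes reports memory quantities with bare NIST suffixes (no trailing
# "B", e.g. "512Mi" or "16208880Ki"); parse_string_unsafe accepts these,
# which is what lets the aggregation above consume node and pod values
# directly:
#
#   >>> bitmath.parse_string_unsafe("512Mi")
#   MiB(512.0)
#   >>> bitmath.parse_string_unsafe("512Mi").bytes
#   536870912.0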
def _parse_to_bytes(size: str) -> int:
    return int(bitmath.parse_string_unsafe(size).to_Byte())
def to_bytes(value):
    return parse_string_unsafe(value).to_Byte().bytes
def _write_file(path: str, size: str) -> None:
    """Write a sparse file of exactly `size` bytes."""
    with open(path, "wb") as f:
        bytes_ = int(bitmath.parse_string_unsafe(size).to_Byte())
        # Seek to one byte before the desired end so the single byte
        # written below lands at offset bytes_ - 1, giving a file of
        # exactly bytes_ bytes.
        f.seek(bytes_ - 1)
        f.write(b"0")
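# Example (the path is hypothetical): _write_file("/tmp/disk.img", "10 GiB")
# creates a 10 GiB sparse file; on most filesystems `du -h` reports
# near-zero actual usage while `ls -lh` shows the full apparent size.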
def to_gib(value):
    b = parse_string_unsafe(value).to_Byte().bytes
    return GiB(bytes=b)
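# The small parsing helpers above (_parse_to_bytes, to_bytes, to_gib)
# differ only in their return type and unit. Assuming bitmath's standard
# conversion factors:
#
#   >>> _parse_to_bytes("1.5GB")
#   1500000000
#   >>> to_bytes("1 GiB")
#   1073741824.0
#   >>> to_gib("2 GiB")
#   GiB(2.0)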
def test_parse_unsafe_bad_input_type(self):
    """parse_string_unsafe can identify invalid input types"""
    invalid_input = {'keyvalue': 'store'}
    with self.assertRaises(ValueError):
        bitmath.parse_string_unsafe(invalid_input)