Example #1
	def write(self, filename_or_fd, arcname=None, compress_type=None, *,
		compress_options=None, dir_fd=None, follow_symlinks=True,
		_default_open_flags=os.O_RDONLY | os.O_CLOEXEC
	):
		"""Adds a file to this archive.

		Like zipfile.ZipFile.write() with the following additions:

		 - "filename_or_fd" may be an int that represents an open file descriptor.
		 - Supports a "dir_fd" argument.
		 - Supports symbolic links.
		"""

		# Note: when "filename_or_fd" is a file descriptor, an explicit
		# "arcname" must be supplied; os.fspath() rejects plain ints.
		if arcname is None:
			arcname = os.fspath(filename_or_fd)

		if isinstance(filename_or_fd, int):
			open_flags = 0
			fd = filename_or_fd
		else:
			open_flags = _default_open_flags
			if not follow_symlinks:
				open_flags |= os.O_NOFOLLOW
			try:
				fd = os.open(filename_or_fd, open_flags, dir_fd=dir_fd)
			except OSError as ex:
				if not (ex.errno == errno.ELOOP and open_flags & os.O_NOFOLLOW):
					raise
				open_flags |= os.O_PATH
				fd = os.open(filename_or_fd, open_flags, dir_fd=dir_fd)

		fd = FileDescriptor(fd)
		with fd:
			fd_stat = os.fstat(fd.fd)

			if compress_type is not None:
				compress_options = self._parse_compress_options(
					compress_options, compress_type)
			elif (stat.S_ISDIR(fd_stat.st_mode) or
				fd_stat.st_size < self.compression_size_threshold
			):
				compress_type = zipfile.ZIP_STORED
				compress_options = None
			else:
				compress_type = self.compression
				compress_options = self.compress_options

			if stat.S_ISLNK(fd_stat.st_mode):
				target = os.readlink(b"", dir_fd=fd.fd)
				fd.close()
				info = zipfile.ZipInfo(arcname, time.localtime(fd_stat.st_mtime))
				info.external_attr |= (fd_stat.st_mode & 0xFFFF) << 16
				info.compress_type = compress_type
				info.compress_options = compress_options
				self.writestr(info, target, compress_type)
			else:
				assert not open_flags & os.O_PATH
				info = zipfile.ZipInfo.from_file(fd.fd, arcname)
				info.compress_type = compress_type
				info.compress_options = compress_options
				super().write(fd.release(), info, compress_type)

		return info
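A minimal usage sketch for the method above, assuming it lives in a zipfile.ZipFile subclass (the class name ExtendedZipFile and the paths are purely illustrative):

import os

with ExtendedZipFile("backup.zip", "w") as zf:
    # Regular file, resolved relative to an open directory descriptor.
    dir_fd = os.open("/etc", os.O_RDONLY)
    try:
        zf.write("hosts", arcname="etc/hosts", dir_fd=dir_fd)
    finally:
        os.close(dir_fd)
    # With follow_symlinks=False the link itself is archived, not its target.
    zf.write("/tmp/some-link", arcname="some-link", follow_symlinks=False)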
Example #2
import time
import zipfile
from zipfile_infolist import print_info
msg = b'This data did not exist in a file.'
with zipfile.ZipFile('writestr_zipinfo.zip',
                     mode='w',
                    ) as zf:
    info = zipfile.ZipInfo('from_string.txt',
                           date_time=time.localtime(time.time()),
                          )
    info.compress_type = zipfile.ZIP_DEFLATED
    info.comment = b'Remarks go here'
    info.create_system = 0
    zf.writestr(info, msg)
print_info('writestr_zipinfo.zip')
Example #3
def make_zip_of_tests(options,
                      test_parameters,
                      make_graph,
                      make_test_inputs,
                      extra_convert_options=ExtraConvertOptions(),
                      use_frozen_graph=False,
                      expected_tf_failures=0):
    """Helper to make a zip file of a bunch of TensorFlow models.

  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
  If the graph is built successfully, then make_test_inputs() is called to
  build expected input/output value pairs. The model is then converted to
  tflite, and the examples are serialized with the tflite model into a zip
  file (2 files per item in the cartesian product set).

  Args:
    options: An Options instance.
    test_parameters: Dictionary mapping to lists for each parameter.
      e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
    make_graph: function that takes current parameters and returns tuple
      `[input1, input2, ...], [output1, output2, ...]`
    make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
      `output_tensors` and returns tuple `(input_values, output_values)`.
    extra_convert_options: Additional convert options.
    use_frozen_graph: Whether or not to freeze the graph before conversion.
    expected_tf_failures: Number of times tensorflow is expected to fail in
      executing the input graphs. In some cases it is OK for TensorFlow to fail
      because one or more combinations of parameters are invalid.

  Raises:
    RuntimeError: if there are converter errors that can't be ignored.
  """
    zip_path = os.path.join(options.output_path, options.zip_to_output)
    parameter_count = 0
    for parameters in test_parameters:
        parameter_count += functools.reduce(
            operator.mul, [len(values) for values in parameters.values()])

    all_parameter_count = parameter_count
    if options.multi_gen_state:
        all_parameter_count += options.multi_gen_state.parameter_count
    if not options.no_tests_limit and all_parameter_count > _MAX_TESTS_PER_ZIP:
        raise RuntimeError(
            "Too many parameter combinations for generating '%s'.\n"
            "There are at least %d combinations while the upper limit is %d.\n"
            "Having too many combinations will slow down the tests.\n"
            "Please consider splitting the test into multiple functions.\n" %
            (zip_path, all_parameter_count, _MAX_TESTS_PER_ZIP))
    if options.multi_gen_state:
        options.multi_gen_state.parameter_count = all_parameter_count

    # TODO(aselle): Make this allow multiple inputs outputs.
    if options.multi_gen_state:
        archive = options.multi_gen_state.archive
    else:
        archive = zipfile.PyZipFile(zip_path, "w")
    zip_manifest = []
    convert_report = []
    converter_errors = 0

    processed_labels = set()

    if options.make_tf_ptq_tests:
        # For each case where fully_quantize is True, also generate a case
        # with fully_quantize False. Mark these cases as suitable for PTQ tests.
        parameter_count = 0
        for parameters in test_parameters:
            if True in parameters.get("fully_quantize", []):
                parameters.update({
                    "fully_quantize": [True, False],
                    "tf_ptq": [True]
                })
                # TODO(b/199054047): Support 16x8 quantization in TF Quantization.
                parameters.update({"quant_16x8": [False]})
                parameter_count += functools.reduce(
                    operator.mul,
                    [len(values) for values in parameters.values()])

    if options.make_edgetpu_tests:
        extra_convert_options.inference_input_type = tf.uint8
        extra_convert_options.inference_output_type = tf.uint8
        # Only count parameters when fully_quantize is True.
        parameter_count = 0
        for parameters in test_parameters:
            if True in parameters.get("fully_quantize",
                                      []) and False in parameters.get(
                                          "quant_16x8", [False]):
                parameter_count += functools.reduce(operator.mul, [
                    len(values) for key, values in parameters.items()
                    if key != "fully_quantize" and key != "quant_16x8"
                ])

    label_base_path = zip_path
    if options.multi_gen_state:
        label_base_path = options.multi_gen_state.label_base_path

    i = 1
    for parameters in test_parameters:
        keys = parameters.keys()
        for curr in itertools.product(*parameters.values()):
            label = label_base_path.replace(".zip", "_") + (",".join(
                "%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
            if label[0] == "/":
                label = label[1:]

            zip_path_label = label
            if len(os.path.basename(zip_path_label)) > 245:
                zip_path_label = label_base_path.replace(".zip", "_") + str(i)

            i += 1
            if label in processed_labels:
                # Do not populate data for the same label more than once. It will cause
                # errors when unzipping.
                continue
            processed_labels.add(label)

            param_dict = dict(zip(keys, curr))

            if options.make_tf_ptq_tests and not param_dict.get(
                    "tf_ptq", False):
                continue

            if options.make_edgetpu_tests and (
                    not param_dict.get("fully_quantize", False)
                    or param_dict.get("quant_16x8", False)):
                continue

            def generate_inputs_outputs(tflite_model_binary,
                                        min_value=0,
                                        max_value=255):
                """Generate input values and output values of the given tflite model.

        Args:
          tflite_model_binary: A serialized flatbuffer as a string.
          min_value: min value for the input tensor.
          max_value: max value for the input tensor.

        Returns:
          (input_values, output_values): Maps of input values and output values
          built.
        """
                interpreter = tf.lite.Interpreter(
                    model_content=tflite_model_binary)
                interpreter.allocate_tensors()

                input_details = interpreter.get_input_details()
                input_values = {}
                for input_detail in input_details:
                    input_value = create_tensor_data(input_detail["dtype"],
                                                     input_detail["shape"],
                                                     min_value=min_value,
                                                     max_value=max_value)
                    interpreter.set_tensor(input_detail["index"], input_value)
                    input_values.update({
                        _normalize_input_name(input_detail["name"]):
                        input_value
                    })

                interpreter.invoke()

                output_details = interpreter.get_output_details()
                output_values = {}
                for output_detail in output_details:
                    output_values.update({
                        _normalize_output_name(output_detail["name"]):
                        interpreter.get_tensor(output_detail["index"])
                    })

                return input_values, output_values

            def build_example(label, param_dict_real, zip_path_label):
                """Build the model with parameter values set in param_dict_real.

        Args:
          label: Label of the model
          param_dict_real: Parameter dictionary (arguments to the factories
            make_graph and make_test_inputs)
          zip_path_label: Filename in the zip

        Returns:
          (tflite_model_binary, report) where tflite_model_binary is the
          serialized flatbuffer as a string and report is a dictionary with
          keys `tflite_converter_log` (log of conversion), `tf_log` (log of tf
          conversion), `tflite_converter` (a string giving the success status
          of the conversion), `tf` (a string giving the success status of the
          TF run).
        """

                np.random.seed(RANDOM_SEED)
                report = {
                    "tflite_converter": report_lib.NOTRUN,
                    "tf": report_lib.FAILED
                }

                # Build graph
                report["tf_log"] = ""
                report["tflite_converter_log"] = ""
                tf.reset_default_graph()

                with tf.Graph().as_default():
                    with tf.device("/cpu:0"):
                        try:
                            inputs, outputs = make_graph(param_dict_real)
                            inputs = [x for x in inputs if x is not None]
                        except (tf.errors.UnimplementedError,
                                tf.errors.InvalidArgumentError, ValueError):
                            report["tf_log"] += traceback.format_exc()
                            return None, report

                    sess = tf.Session()
                    try:
                        baseline_inputs, baseline_outputs = (make_test_inputs(
                            param_dict_real, sess, inputs, outputs))
                        baseline_inputs = [
                            x for x in baseline_inputs if x is not None
                        ]
                        # Converts baseline inputs/outputs to maps. The signature input and
                        # output names are set to be the same as the tensor names.
                        input_names = [
                            _normalize_input_name(x.name) for x in inputs
                        ]
                        output_names = [
                            _normalize_output_name(x.name) for x in outputs
                        ]
                        baseline_input_map = dict(
                            zip(input_names, baseline_inputs))
                        baseline_output_map = dict(
                            zip(output_names, baseline_outputs))
                    except (tf.errors.UnimplementedError,
                            tf.errors.InvalidArgumentError, ValueError):
                        report["tf_log"] += traceback.format_exc()
                        return None, report
                    report["tflite_converter"] = report_lib.FAILED
                    report["tf"] = report_lib.SUCCESS

                    # Builds a saved model with the default signature key.
                    input_names, tensor_info_inputs = _get_tensor_info(
                        inputs, "input_", _normalize_input_name)
                    output_tensors, tensor_info_outputs = _get_tensor_info(
                        outputs, "output_", _normalize_output_name)
                    input_tensors = [(name, t.shape, t.dtype)
                                     for name, t in zip(input_names, inputs)]

                    inference_signature = (
                        tf.saved_model.signature_def_utils.build_signature_def(
                            inputs=tensor_info_inputs,
                            outputs=tensor_info_outputs,
                            method_name="op_test"))
                    saved_model_dir = tempfile.mkdtemp("op_test")
                    saved_model_tags = [tf.saved_model.tag_constants.SERVING]
                    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
                    builder = tf.saved_model.builder.SavedModelBuilder(
                        saved_model_dir)
                    builder.add_meta_graph_and_variables(
                        sess,
                        saved_model_tags,
                        signature_def_map={
                            signature_key: inference_signature,
                        },
                        strip_default_attrs=True)
                    builder.save(as_text=False)
                    # pylint: disable=g-long-ternary
                    graph_def = freeze_graph(
                        sess,
                        tf.global_variables() + inputs +
                        outputs) if use_frozen_graph else sess.graph_def

                if "split_tflite_lstm_inputs" in param_dict_real:
                    extra_convert_options.split_tflite_lstm_inputs = param_dict_real[
                        "split_tflite_lstm_inputs"]
                tflite_model_binary, converter_log = options.tflite_convert_function(
                    options,
                    saved_model_dir,
                    input_tensors,
                    output_tensors,
                    extra_convert_options=extra_convert_options,
                    test_params=param_dict_real)
                report["tflite_converter"] = (report_lib.SUCCESS if
                                              tflite_model_binary is not None
                                              else report_lib.FAILED)
                report["tflite_converter_log"] = converter_log

                if options.save_graphdefs:
                    zipinfo = zipfile.ZipInfo(zip_path_label + ".pbtxt")
                    archive.writestr(zipinfo,
                                     text_format.MessageToString(graph_def),
                                     zipfile.ZIP_DEFLATED)

                if tflite_model_binary:
                    if options.make_edgetpu_tests:
                        # Set proper min max values according to input dtype.
                        baseline_input_map, baseline_output_map = generate_inputs_outputs(
                            tflite_model_binary, min_value=0, max_value=255)
                    zipinfo = zipfile.ZipInfo(zip_path_label + ".bin")
                    archive.writestr(zipinfo, tflite_model_binary,
                                     zipfile.ZIP_DEFLATED)

                    example = {
                        "inputs": baseline_input_map,
                        "outputs": baseline_output_map
                    }

                    example_fp = StringIO()
                    write_examples(example_fp, [example])
                    zipinfo = zipfile.ZipInfo(zip_path_label + ".inputs")
                    archive.writestr(zipinfo, example_fp.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    example_fp2 = StringIO()
                    write_test_cases(example_fp2, zip_path_label + ".bin",
                                     [example])
                    zipinfo = zipfile.ZipInfo(zip_path_label + "_tests.txt")
                    archive.writestr(zipinfo, example_fp2.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    zip_manifest_label = zip_path_label + " " + label
                    if zip_path_label == label:
                        zip_manifest_label = zip_path_label

                    zip_manifest.append(zip_manifest_label + "\n")

                return tflite_model_binary, report

            _, report = build_example(label, param_dict, zip_path_label)

            if report["tflite_converter"] == report_lib.FAILED:
                ignore_error = False
                if not options.known_bugs_are_errors:
                    for pattern, bug_number in options.known_bugs.items():
                        if re.search(pattern, label):
                            print("Ignored converter error due to bug %s" %
                                  bug_number)
                            ignore_error = True
                if not ignore_error:
                    converter_errors += 1
                    print(
                        "-----------------\nconverter error!\n%s\n-----------------\n"
                        % report["tflite_converter_log"])

            convert_report.append((param_dict, report))

    if not options.no_conversion_report:
        report_io = StringIO()
        report_lib.make_report_table(report_io, zip_path, convert_report)
        if options.multi_gen_state:
            zipinfo = zipfile.ZipInfo("report_" +
                                      options.multi_gen_state.test_name +
                                      ".html")
            archive.writestr(zipinfo, report_io.getvalue())
        else:
            zipinfo = zipfile.ZipInfo("report.html")
            archive.writestr(zipinfo, report_io.getvalue())

    if options.multi_gen_state:
        options.multi_gen_state.zip_manifest.extend(zip_manifest)
    else:
        zipinfo = zipfile.ZipInfo("manifest.txt")
        archive.writestr(zipinfo, "".join(zip_manifest), zipfile.ZIP_DEFLATED)

    # Log statistics of what succeeded
    total_conversions = len(convert_report)
    tf_success = sum(1 for x in convert_report
                     if x[1]["tf"] == report_lib.SUCCESS)
    converter_success = sum(1 for x in convert_report
                            if x[1]["tflite_converter"] == report_lib.SUCCESS)
    percent = 0
    if tf_success > 0:
        percent = float(converter_success) / float(tf_success) * 100.
    tf.logging.info(("Archive %s considered %d graphs, %d TF evaluated graphs "
                     "and %d converted graphs (%.1f%%)."), zip_path,
                    total_conversions, tf_success, converter_success, percent)

    tf_failures = parameter_count - tf_success

    if tf_failures / parameter_count > 0.8:
        raise RuntimeError(
            ("Test for '%s' is not very useful. "
             "TensorFlow fails in %d percent of the cases.") %
            (zip_path, int(100 * tf_failures / parameter_count)))

    if tf_failures != expected_tf_failures and not (
            options.make_edgetpu_tests or options.make_tf_ptq_tests):
        raise RuntimeError(
            ("Expected TF to fail %d times while generating '%s', "
             "but that happened %d times") %
            (expected_tf_failures, zip_path, tf_failures))

    if not options.ignore_converter_errors and converter_errors > 0:
        raise RuntimeError("Found %d errors while generating models" %
                           converter_errors)
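For reference, the per-entry label scheme used above can be reproduced in isolation; the parameter dictionary below is hypothetical:

import itertools

test_parameters = [{"strides": [[1, 3, 3, 1], [1, 2, 2, 1]], "foo": [1.2, 1.3]}]
for parameters in test_parameters:
    keys = parameters.keys()
    for curr in itertools.product(*parameters.values()):
        # Sort by parameter name and strip spaces, as make_zip_of_tests does.
        label = ",".join("%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", "")
        print(label)  # e.g. foo=1.2,strides=[1,3,3,1]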
Example #4
def HermeticZipInfo(*args, **kwargs):
    """Creates a ZipInfo with a constant timestamp and external_attr."""
    ret = zipfile.ZipInfo(*args, **kwargs)
    ret.date_time = (2001, 1, 1, 0, 0, 0)
    ret.external_attr = (0o644 << 16)
    return ret
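A short usage sketch: because every entry gets the same timestamp and permission bits, rebuilding the archive from the same inputs yields byte-identical output (the file names here are illustrative):

import zipfile

with zipfile.ZipFile("hermetic.zip", "w") as zf:
    zf.writestr(HermeticZipInfo("a.txt"), b"alpha")
    zf.writestr(HermeticZipInfo("b.txt"), b"beta")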
Example #5
def _compress_zip(
        directory,
        quiet=True,
        verbose=False,
        display_progress=False,  # pylint: disable=too-many-locals
        flat=False,
        output=None):
    """Compress directory in a .zip file

    :param directory:        directory to add to the archive
    :param archive_basepath: output archive basepath (without extension)
    :param quiet:            quiet mode (print nothing)

    :return: path to the generated archive (archive_basepath.zip)

    """

    if quiet and verbose:
        mess = """Inconsistent arguments: both 'quiet' and 'verbose' options are set.
Please set only one of these two options to 'True'
"""
        raise ValueError(mess)
    ui.debug("Compressing", directory, "to", output)
    archive = zipfile.ZipFile(output, "w", zipfile.ZIP_DEFLATED)
    # a list of tuple src, arcname to be added in the archive
    to_add = list()
    for root, directories, filenames in os.walk(directory):
        # Build a new list instead of extending `directories` in place,
        # which would make os.walk try to descend into file names too.
        entries = directories + filenames
        for entry in entries:
            full_path = os.path.join(root, entry)
            # Do not zip ourselves
            if full_path == output:
                continue
            rel_path = os.path.relpath(full_path, directory)
            if flat:
                arcname = rel_path
            else:
                arcname = os.path.join(os.path.basename(directory), rel_path)
            to_add.append((full_path, arcname))

    for i, (full_path, arcname) in enumerate(to_add):
        if os.path.islink(full_path):
            content = os.readlink(full_path)
            attr = zipfile.ZipInfo(arcname)
            attr.create_system = 3
            # 0xA1ED0000 is the Unix symlink mode 0o120755 stored in the
            # high 16 bits of external_attr.
            attr.external_attr = 0xA1ED0000
            zip_call = archive.writestr
        elif os.path.isdir(full_path):
            continue
        else:
            # Regular file: archive.write(path, arcname) takes the on-disk
            # path first and the archive name second.
            attr = full_path
            content = arcname
            zip_call = archive.write
        if not quiet and not display_progress:
            rel_path = os.path.relpath(full_path, directory)
            sys.stdout.write("adding {0}\n".format(rel_path))
            sys.stdout.flush()
        if display_progress:
            ui.info_progress(i, len(to_add), "Done")
        zip_call(attr, content)

    archive.close()
    return output
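The magic constant is just a Unix file mode shifted into the high 16 bits of external_attr, which is easy to verify:

import stat

# S_IFLNK | 0o755 == 0o120755 == 0xA1ED, i.e. a symlink with rwxr-xr-x bits.
assert (stat.S_IFLNK | 0o755) << 16 == 0xA1ED0000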
Example #6
    def write(self, stream, nodes, mode=WorkspaceWriter.OutputMode.BinaryMode):
        application = Application.getInstance()
        machine_manager = application.getMachineManager()

        mesh_writer = application.getMeshFileHandler().getWriter("3MFWriter")

        if not mesh_writer:  # We need to have the 3mf mesh writer, otherwise we can't save the entire workspace
            self.setInformation(catalog.i18nc("@error:zip", "3MF Writer plug-in is corrupt."))
            Logger.error("3MF Writer class is unavailable. Can't write workspace.")
            return False

        # Indicate that the 3mf mesh writer should not close the archive just yet (we still need to add stuff to it).
        mesh_writer.setStoreArchive(True)
        mesh_writer.write(stream, nodes, mode)

        archive = mesh_writer.getArchive()
        if archive is None:  # This happens if there was no mesh data to write.
            archive = zipfile.ZipFile(stream, "w", compression = zipfile.ZIP_DEFLATED)

        global_stack = machine_manager.activeMachine

        try:
            # Add global container stack data to the archive.
            self._writeContainerToArchive(global_stack, archive)

            # Also write all containers in the stack to the file
            for container in global_stack.getContainers():
                self._writeContainerToArchive(container, archive)

            # Check if the machine has extruders and save all that data as well.
            for extruder_stack in global_stack.extruderList:
                self._writeContainerToArchive(extruder_stack, archive)
                for container in extruder_stack.getContainers():
                    self._writeContainerToArchive(container, archive)
        except PermissionError:
            self.setInformation(catalog.i18nc("@error:zip", "No permission to write the workspace here."))
            Logger.error("No permission to write workspace to this stream.")
            return False

        # Write preferences to archive
        original_preferences = Application.getInstance().getPreferences() #Copy only the preferences that we use to the workspace.
        temp_preferences = Preferences()
        for preference in {"general/visible_settings", "cura/active_mode", "cura/categories_expanded", "metadata/setting_version"}:
            temp_preferences.addPreference(preference, None)
            temp_preferences.setValue(preference, original_preferences.getValue(preference))
        preferences_string = StringIO()
        temp_preferences.writeToFile(preferences_string)
        preferences_file = zipfile.ZipInfo("Cura/preferences.cfg")
        try:
            archive.writestr(preferences_file, preferences_string.getvalue())

            # Save Cura version
            version_file = zipfile.ZipInfo("Cura/version.ini")
            version_config_parser = configparser.ConfigParser(interpolation = None)
            version_config_parser.add_section("versions")
            version_config_parser.set("versions", "cura_version", application.getVersion())
            version_config_parser.set("versions", "build_type", application.getBuildType())
            version_config_parser.set("versions", "is_debug_mode", str(application.getIsDebugMode()))

            version_file_string = StringIO()
            version_config_parser.write(version_file_string)
            archive.writestr(version_file, version_file_string.getvalue())

            self._writePluginMetadataToArchive(archive)

            # Close the archive & reset states.
            archive.close()
        except PermissionError:
            self.setInformation(catalog.i18nc("@error:zip", "No permission to write the workspace here."))
            Logger.error("No permission to write workspace to this stream.")
            return False
        except EnvironmentError as e:
            self.setInformation(catalog.i18nc("@error:zip", "The operating system does not allow saving a project file to this location or with this file name."))
            Logger.error("EnvironmentError when writing workspace to this stream: {err}".format(err = str(e)))
            return False
        mesh_writer.setStoreArchive(False)
        return True
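The preferences/version pattern above, in isolation: render an INI document to a string and store it under a fixed archive path (the file names here are hypothetical):

import configparser
import io
import zipfile

parser = configparser.ConfigParser(interpolation=None)
parser.add_section("versions")
parser.set("versions", "cura_version", "5.0")

buf = io.StringIO()
parser.write(buf)
with zipfile.ZipFile("workspace.3mf", "a") as archive:
    archive.writestr(zipfile.ZipInfo("Cura/version.ini"), buf.getvalue())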
Example #7
    def _WriteModules(self, fileName, finder):
        self.constantsModule.Create(finder)
        modules = [m for m in finder.modules \
                if m.name not in self.excludeModules]
        modules.sort(key = lambda m: m.name)

        if not self.silent:
            self._PrintReport(fileName, modules)
        finder.ReportMissingModules()

        targetDir = os.path.dirname(fileName)
        self._CreateDirectory(targetDir)

        # Prepare zip file
        if self.compress:
            compress_type = zipfile.ZIP_DEFLATED
        else:
            compress_type = zipfile.ZIP_STORED
        outFile = zipfile.PyZipFile(fileName, "w", compress_type)

        filesToCopy = []
        ignorePatterns = shutil.ignore_patterns("*.py", "*.pyc", "*.pyo",
                "__pycache__")
        for module in modules:

            # determine if the module should be written to the file system;
            # a number of packages make the assumption that files that they
            # require will be found in a location relative to where
            # they are located on disk; these packages will fail with strange
            # errors when they are written to a zip file instead
            includeInFileSystem = module.WillBeStoredInFileSystem()

            # if the module refers to a package, check to see if this package
            # should be included in the zip file or should be written to the
            # file system; if the package should be written to the file system,
            # any non-Python files are copied at this point if the target
            # directory does not already exist
            if includeInFileSystem and module.path is not None and \
                    module.file is not None:
                parts = module.name.split(".")
                targetPackageDir = os.path.join(targetDir, *parts)
                sourcePackageDir = os.path.dirname(module.file)
                if not os.path.exists(targetPackageDir):
                    print("Copying data from package", module.name + "...")
                    shutil.copytree(sourcePackageDir, targetPackageDir,
                            ignore = ignorePatterns)

            # if an extension module is found in a package that is to be
            # included in a zip file, copy the actual file to the build
            # directory because shared libraries cannot be loaded from a
            # zip file
            if module.code is None and module.file is not None \
                    and not includeInFileSystem:
                parts = module.name.split(".")[:-1]
                parts.append(os.path.basename(module.file))
                target = os.path.join(targetDir, ".".join(parts))
                filesToCopy.append((module, target))

            # starting with Python 3.3 the pyc file format contains the source
            # size; it is not actually used for anything except determining if
            # the file is up to date so we can safely set this value to zero
            if module.code is not None:
                if module.file is not None and os.path.exists(module.file):
                    st = os.stat(module.file)
                    mtime = int(st.st_mtime)
                    size = st.st_size & 0xFFFFFFFF
                else:
                    mtime = int(time.time())
                    size = 0
                if sys.version_info[:2] < (3, 7):
                    header = MAGIC_NUMBER + struct.pack("<ii", mtime, size)
                else:
                    header = MAGIC_NUMBER + struct.pack("<iii", 0, mtime, size)
                data = header + marshal.dumps(module.code)

            # if the module should be written to the file system, do so
            if includeInFileSystem and module.file is not None:
                parts = module.name.split(".")
                if module.code is None:
                    parts.pop()
                    parts.append(os.path.basename(module.file))
                    targetName = os.path.join(targetDir, *parts)
                    self._CopyFile(module.file, targetName,
                                   copyDependentFiles=True,
                                   relativeSource=True)
                else:
                    if module.path is not None:
                        parts.append("__init__")
                    targetName = os.path.join(targetDir, *parts) + ".pyc"
                    with open(targetName, "wb") as pyc_file:
                        pyc_file.write(data)

            # otherwise, write to the zip file
            elif module.code is not None:
                zipTime = time.localtime(mtime)[:6]
                # Use a fresh name here: "fileName" is the output zip path
                # passed into this method.
                archiveName = "/".join(module.name.split("."))
                if module.path:
                    archiveName += "/__init__"
                zinfo = zipfile.ZipInfo(archiveName + ".pyc", zipTime)
                zinfo.compress_type = compress_type
                outFile.writestr(zinfo, data)

            # put the distribution files metadata in the zip file
            if module.dist_files:
                for filepath, arcname in module.dist_files:
                    if arcname not in outFile.namelist():
                        outFile.write(filepath, arcname)

        # write any files to the zip file that were requested specially
        for sourceFileName, targetFileName in finder.zip_includes:
            if os.path.isdir(sourceFileName):
                for dirPath, _, fileNames in os.walk(sourceFileName):
                    basePath = dirPath[len(sourceFileName):]
                    targetPath = targetFileName + basePath.replace("\\", "/")
                    for name in fileNames:
                        outFile.write(os.path.join(dirPath, name),
                                targetPath + "/" + name)
            else:
                outFile.write(sourceFileName, targetFileName)

        outFile.close()

        # Copy Python extension modules from the list built above.
        origPath = os.environ["PATH"]
        for module, target in filesToCopy:
            try:
                if module.parent is not None:
                    path = os.pathsep.join([origPath] + module.parent.path)
                    os.environ["PATH"] = path
                self._CopyFile(module.file, target,
                               copyDependentFiles=True,
                               relativeSource=True)
            finally:
                os.environ["PATH"] = origPath
Example #8
def create_py3_base_library(libzip_filename, graph):
    """
    Package basic Python modules into .zip file. The .zip file with basic
    modules is necessary to have on PYTHONPATH for initializing libpython3
    in order to run the frozen executable with Python 3.
    """
    # Import strip_paths_in_code locally to avoid cyclic import between
    # building.utils and depend.utils (this module); building.utils
    # imports depend.bindepend, which in turn imports depend.utils.
    from ..building.utils import strip_paths_in_code
    # Construct regular expression for matching modules that should be bundled
    # into base_library.zip.
    # Excluded are plain 'modules' or 'submodules.ANY_NAME'.
    # The match has to be exact - start and end of string not substring.
    regex_modules = '|'.join([r'(^%s$)' % x for x in PY3_BASE_MODULES])
    regex_submod = '|'.join([r'(^%s\..*$)' % x for x in PY3_BASE_MODULES])
    regex_str = regex_modules + '|' + regex_submod
    module_filter = re.compile(regex_str)

    try:
        # Remove .zip from previous run.
        if os.path.exists(libzip_filename):
            os.remove(libzip_filename)
        logger.debug('Adding python files to base_library.zip')
        # Class zipfile.PyZipFile is not suitable for PyInstaller needs.
        with zipfile.ZipFile(libzip_filename, mode='w') as zf:
            zf.debug = 3
            # Sort the graph nodes by identifier to ensure repeatable builds
            graph_nodes = list(graph.flatten())
            graph_nodes.sort(key=lambda item: item.identifier)
            for mod in graph_nodes:
                if type(mod) in (modulegraph.SourceModule,
                                 modulegraph.Package):
                    # Bundling just required modules.
                    if module_filter.match(mod.identifier):
                        st = os.stat(mod.filename)
                        timestamp = int(st.st_mtime)
                        size = st.st_size & 0xFFFFFFFF
                        # Name inside the archive. The ZIP format
                        # specification requires forward slashes as
                        # directory separator.
                        # TODO use .pyo suffix if optimize flag is enabled.
                        if type(mod) is modulegraph.Package:
                            new_name = mod.identifier.replace('.', '/') \
                                + '/__init__.pyc'
                        else:
                            new_name = mod.identifier.replace('.', '/') \
                                + '.pyc'

                        # Write code to a file.
                        # This code is similar to py_compile.compile().
                        with io.BytesIO() as fc:
                            # Prepare all data in byte stream file-like object.
                            fc.write(BYTECODE_MAGIC)
                            if is_py37:
                                # Additional bitfield according to PEP 552
                                # 0b01 means hash based but don't check the hash
                                fc.write(struct.pack('<I', 0b01))
                                with open(mod.filename, 'rb') as fs:
                                    source_bytes = fs.read()
                                source_hash = importlib_source_hash(
                                    source_bytes)
                                fc.write(source_hash)
                            else:
                                fc.write(struct.pack('<II', timestamp, size))
                            code = strip_paths_in_code(mod.code)  # Strip paths
                            marshal.dump(code, fc)
                            # Use a ZipInfo to set timestamp for deterministic build
                            info = zipfile.ZipInfo(new_name)
                            zf.writestr(info, fc.getvalue())

    except Exception as e:
        logger.error('base_library.zip could not be created!')
        raise
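The importlib_source_hash helper used above presumably wraps the stdlib's importlib.util.source_hash (Python 3.7+); the hash-based header variant of PEP 552 looks like this:

import importlib.util
import struct

source_bytes = b"x = 1\n"
# Flags 0b01: hash-based .pyc whose hash is not rechecked at import time.
header = (importlib.util.MAGIC_NUMBER
          + struct.pack("<I", 0b01)
          + importlib.util.source_hash(source_bytes))  # 8-byte source hash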
Example #9
def transform(wldoc, verbose=False, style=None, html_toc=False,
              sample=None, cover=None, flags=None, hyphenate=False, ilustr_path='', output_type='epub'):
    """ produces a EPUB file

    sample=n: generate sample e-book (with at least n paragraphs)
    cover: a cover.Cover factory or True for default
    flags: less-advertising, without-fonts, working-copy
    """

    def transform_file(wldoc, chunk_counter=1, first=True, sample=None):
        """ processes one input file and proceeds to its children """

        replace_characters(wldoc.edoc.getroot())

        hyphenator = set_hyph_language(wldoc.edoc.getroot()) if hyphenate else None
        hyphenate_and_fix_conjunctions(wldoc.edoc.getroot(), hyphenator)

        # every input file will have a TOC entry,
        # pointing to starting chunk
        toc = TOC(wldoc.book_info.title, "part%d.html" % chunk_counter)
        chars = set()
        if first:
            # write book title page
            html_tree = xslt(wldoc.edoc, get_resource('epub/xsltTitle.xsl'), outputtype=output_type)
            chars = used_chars(html_tree.getroot())
            html_string = etree.tostring(
                html_tree, pretty_print=True, xml_declaration=True,
                encoding="utf-8",
                doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"' +
                        ' "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
            )
            zip.writestr('OPS/title.html', squeeze_whitespace(html_string))
            # add a title page TOC entry
            toc.add(u"Strona tytułowa", "title.html")
        elif wldoc.book_info.parts:
            # write title page for every parent
            if sample is not None and sample <= 0:
                chars = set()
                html_string = open(get_resource('epub/emptyChunk.html')).read()
            else:
                html_tree = xslt(wldoc.edoc, get_resource('epub/xsltChunkTitle.xsl'))
                chars = used_chars(html_tree.getroot())
                html_string = etree.tostring(
                    html_tree, pretty_print=True, xml_declaration=True,
                    encoding="utf-8",
                    doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"' +
                            ' "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
                )
            zip.writestr('OPS/part%d.html' % chunk_counter, squeeze_whitespace(html_string))
            add_to_manifest(manifest, chunk_counter)
            add_to_spine(spine, chunk_counter)
            chunk_counter += 1

        if len(wldoc.edoc.getroot()) > 1:
            # rdf before style master
            main_text = wldoc.edoc.getroot()[1]
        else:
            # rdf in style master
            main_text = wldoc.edoc.getroot()[0]
            if main_text.tag == RDFNS('RDF'):
                main_text = None

        if main_text is not None:
            for chunk_xml in chop(main_text):
                empty = False
                if sample is not None:
                    if sample <= 0:
                        empty = True
                    else:
                        sample -= len(chunk_xml.xpath('//strofa|//akap|//akap_cd|//akap_dialog'))
                chunk_html, chunk_toc, chunk_chars = transform_chunk(chunk_xml, chunk_counter, annotations, empty)

                toc.extend(chunk_toc)
                chars = chars.union(chunk_chars)
                zip.writestr('OPS/part%d.html' % chunk_counter, squeeze_whitespace(chunk_html))
                add_to_manifest(manifest, chunk_counter)
                add_to_spine(spine, chunk_counter)
                chunk_counter += 1

        for child in wldoc.parts():
            child_toc, chunk_counter, chunk_chars, sample = transform_file(
                child, chunk_counter, first=False, sample=sample)
            toc.append(child_toc)
            chars = chars.union(chunk_chars)

        return toc, chunk_counter, chars, sample

    document = deepcopy(wldoc)
    del wldoc

    if flags:
        for flag in flags:
            document.edoc.getroot().set(flag, 'yes')

    document.clean_ed_note()
    document.clean_ed_note('abstrakt')

    # add editors info
    editors = document.editors()
    if editors:
        document.edoc.getroot().set('editors', u', '.join(sorted(
            editor.readable() for editor in editors)))
    if document.book_info.funders:
        document.edoc.getroot().set('funders', u', '.join(
            document.book_info.funders))
    if document.book_info.thanks:
        document.edoc.getroot().set('thanks', document.book_info.thanks)

    opf = xslt(document.book_info.to_etree(), get_resource('epub/xsltContent.xsl'))
    manifest = opf.find('.//' + OPFNS('manifest'))
    guide = opf.find('.//' + OPFNS('guide'))
    spine = opf.find('.//' + OPFNS('spine'))

    output_file = NamedTemporaryFile(prefix='librarian', suffix='.epub', delete=False)
    zip = zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED)

    functions.reg_mathml_epub(zip)

    if os.path.isdir(ilustr_path):
        for i, filename in enumerate(os.listdir(ilustr_path)):
            file_path = os.path.join(ilustr_path, filename)
            zip.write(file_path, os.path.join('OPS', filename))
            image_id = 'image%s' % i
            manifest.append(etree.fromstring(
                '<item id="%s" href="%s" media-type="%s" />' % (image_id, filename, guess_type(file_path)[0])))

    # write static elements
    mime = zipfile.ZipInfo()
    mime.filename = 'mimetype'
    mime.compress_type = zipfile.ZIP_STORED
    mime.extra = b''
    zip.writestr(mime, b'application/epub+zip')
    zip.writestr(
        'META-INF/container.xml',
        b'<?xml version="1.0" ?>'
        b'<container version="1.0" '
        b'xmlns="urn:oasis:names:tc:opendocument:xmlns:container">'
        b'<rootfiles><rootfile full-path="OPS/content.opf" '
        b'media-type="application/oebps-package+xml" />'
        b'</rootfiles></container>'
    )
    zip.write(get_resource('res/wl-logo-small.png'),
              os.path.join('OPS', 'logo_wolnelektury.png'))
    zip.write(get_resource('res/jedenprocent.png'),
              os.path.join('OPS', 'jedenprocent.png'))
    if not style:
        style = get_resource('epub/style.css')
    zip.write(style, os.path.join('OPS', 'style.css'))

    if cover:
        if cover is True:
            cover = make_cover

        cover_file = BytesIO()
        bound_cover = cover(document.book_info)
        bound_cover.save(cover_file)
        cover_name = 'cover.%s' % bound_cover.ext()
        zip.writestr(os.path.join('OPS', cover_name), cover_file.getvalue())
        del cover_file

        cover_tree = etree.parse(get_resource('epub/cover.html'))
        cover_tree.find('//' + XHTMLNS('img')).set('src', cover_name)
        zip.writestr('OPS/cover.html', etree.tostring(
            cover_tree, pretty_print=True, xml_declaration=True,
            encoding="utf-8",
            doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" ' +
                    '"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
        ))

        if bound_cover.uses_dc_cover:
            if document.book_info.cover_by:
                document.edoc.getroot().set('data-cover-by', document.book_info.cover_by)
            if document.book_info.cover_source:
                document.edoc.getroot().set('data-cover-source', document.book_info.cover_source)

        manifest.append(etree.fromstring(
            '<item id="cover" href="cover.html" media-type="application/xhtml+xml" />'))
        manifest.append(etree.fromstring(
            '<item id="cover-image" href="%s" media-type="%s" />' % (cover_name, bound_cover.mime_type())))
        spine.insert(0, etree.fromstring('<itemref idref="cover"/>'))
        opf.getroot()[0].append(etree.fromstring('<meta name="cover" content="cover-image"/>'))
        guide.append(etree.fromstring('<reference href="cover.html" type="cover" title="Okładka"/>'))

    annotations = etree.Element('annotations')

    toc_file = etree.fromstring(
        b'<?xml version="1.0" encoding="utf-8"?><!DOCTYPE ncx PUBLIC '
        b'"-//NISO//DTD ncx 2005-1//EN" '
        b'"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">'
        b'<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" xml:lang="pl" '
        b'version="2005-1"><head></head><docTitle></docTitle><navMap>'
        b'</navMap></ncx>'
    )
    nav_map = toc_file[-1]

    if html_toc:
        manifest.append(etree.fromstring(
            '<item id="html_toc" href="toc.html" media-type="application/xhtml+xml" />'))
        spine.append(etree.fromstring(
            '<itemref idref="html_toc" />'))
        guide.append(etree.fromstring('<reference href="toc.html" type="toc" title="Spis treści"/>'))

    toc, chunk_counter, chars, sample = transform_file(document, sample=sample)

    if len(toc.children) < 2:
        toc.add(u"Początek utworu", "part1.html")

    # Last modifications in container files and EPUB creation
    if len(annotations) > 0:
        toc.add("Przypisy", "annotations.html")
        manifest.append(etree.fromstring(
            '<item id="annotations" href="annotations.html" media-type="application/xhtml+xml" />'))
        spine.append(etree.fromstring(
            '<itemref idref="annotations" />'))
        replace_by_verse(annotations)
        html_tree = xslt(annotations, get_resource('epub/xsltAnnotations.xsl'))
        chars = chars.union(used_chars(html_tree.getroot()))
        zip.writestr('OPS/annotations.html', etree.tostring(
            html_tree, pretty_print=True, xml_declaration=True,
            encoding="utf-8",
            doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" ' +
                    '"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
        ))

    toc.add("Wesprzyj Wolne Lektury", "support.html")
    manifest.append(etree.fromstring(
        '<item id="support" href="support.html" media-type="application/xhtml+xml" />'))
    spine.append(etree.fromstring(
        '<itemref idref="support" />'))
    html_string = open(get_resource('epub/support.html'), 'rb').read()
    chars.update(used_chars(etree.fromstring(html_string)))
    zip.writestr('OPS/support.html', squeeze_whitespace(html_string))

    toc.add("Strona redakcyjna", "last.html")
    manifest.append(etree.fromstring(
        '<item id="last" href="last.html" media-type="application/xhtml+xml" />'))
    spine.append(etree.fromstring(
        '<itemref idref="last" />'))
    html_tree = xslt(document.edoc, get_resource('epub/xsltLast.xsl'), outputtype=output_type)
    chars.update(used_chars(html_tree.getroot()))
    zip.writestr('OPS/last.html', squeeze_whitespace(etree.tostring(
        html_tree, pretty_print=True, xml_declaration=True,
        encoding="utf-8",
        doctype='<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" ' +
                '"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
    )))

    if not flags or 'without-fonts' not in flags:
        # strip fonts
        tmpdir = mkdtemp('-librarian-epub')
        try:
            cwd = os.getcwd()
        except OSError:
            cwd = None

        os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'font-optimizer'))
        for fname in 'DejaVuSerif.ttf', 'DejaVuSerif-Bold.ttf', 'DejaVuSerif-Italic.ttf', 'DejaVuSerif-BoldItalic.ttf':
            optimizer_call = ['perl', 'subset.pl', '--chars',
                              ''.join(chars).encode('utf-8'),
                              get_resource('fonts/' + fname),
                              os.path.join(tmpdir, fname)]
            env = {"PERL_USE_UNSAFE_INC": "1"}
            if verbose:
                print("Running font-optimizer")
                subprocess.check_call(optimizer_call, env=env)
            else:
                dev_null = open(os.devnull, 'w')
                subprocess.check_call(optimizer_call, stdout=dev_null, stderr=dev_null, env=env)
            zip.write(os.path.join(tmpdir, fname), os.path.join('OPS', fname))
            manifest.append(etree.fromstring(
                '<item id="%s" href="%s" media-type="application/x-font-truetype" />' % (fname, fname)))
        rmtree(tmpdir)
        if cwd is not None:
            os.chdir(cwd)
    zip.writestr('OPS/content.opf', etree.tostring(opf, pretty_print=True,
                 xml_declaration=True, encoding="utf-8"))
    title = document.book_info.title
    attributes = "dtb:uid", "dtb:depth", "dtb:totalPageCount", "dtb:maxPageNumber"
    for st in attributes:
        meta = toc_file.makeelement(NCXNS('meta'))
        meta.set('name', st)
        meta.set('content', '0')
        toc_file[0].append(meta)
    toc_file[0][0].set('content', str(document.book_info.url))
    toc_file[0][1].set('content', str(toc.depth()))
    set_inner_xml(toc_file[1], ''.join(('<text>', title, '</text>')))

    # write TOC
    if html_toc:
        toc.add(u"Spis treści", "toc.html", index=1)
        zip.writestr('OPS/toc.html', toc.html().encode('utf-8'))
    toc.write_to_xml(nav_map)
    zip.writestr('OPS/toc.ncx', etree.tostring(toc_file, pretty_print=True,
                 xml_declaration=True, encoding="utf-8"))
    zip.close()

    return OutputFile.from_filename(output_file.name)
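One detail worth noting in the static-elements section: the EPUB (OCF) spec requires the mimetype entry to be the first file in the container and to be stored uncompressed, which the explicit ZipInfo with ZIP_STORED guarantees. A quick check on a generated file (the path is illustrative):

import zipfile

with zipfile.ZipFile("book.epub") as zf:
    first = zf.infolist()[0]
    assert first.filename == "mimetype"
    assert first.compress_type == zipfile.ZIP_STORED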
Example #10
      def build_example(label, param_dict_real, zip_path_label):
        """Build the model with parameter values set in param_dict_real.

        Args:
          label: Label of the model
          param_dict_real: Parameter dictionary (arguments to the factories
            make_graph and make_test_inputs)
          zip_path_label: Filename in the zip

        Returns:
          (tflite_model_binary, report) where tflite_model_binary is the
          serialized flatbuffer as a string and report is a dictionary with
          keys `converter_log` (log of the conversion), `tf_log` (log of tf
          graph building), `converter` (a string giving the success status of
          the conversion), `tf` (a string giving the success status of the
          TF run).
        """

        np.random.seed(RANDOM_SEED)
        report = {"converter": report_lib.NOTRUN, "tf": report_lib.FAILED}

        # Build graph
        report["tf_log"] = ""
        report["converter_log"] = ""
        tf.reset_default_graph()

        with tf.Graph().as_default():
          with tf.device("/cpu:0"):
            try:
              inputs, outputs = make_graph(param_dict_real)
            except (tf.errors.UnimplementedError,
                    tf.errors.InvalidArgumentError, ValueError):
              report["tf_log"] += traceback.format_exc()
              return None, report

          sess = tf.Session()
          try:
            baseline_inputs, baseline_outputs = (
                make_test_inputs(param_dict_real, sess, inputs, outputs))
          except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
                  ValueError):
            report["tf_log"] += traceback.format_exc()
            return None, report
          report["converter"] = report_lib.FAILED
          report["tf"] = report_lib.SUCCESS
          # Convert graph to toco
          input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
                            input_tensor.dtype) for input_tensor in inputs]
          output_tensors = [_normalize_output_name(out.name) for out in outputs]
          # pylint: disable=g-long-ternary
          graph_def = freeze_graph(
              sess,
              tf.global_variables() + inputs +
              outputs) if use_frozen_graph else sess.graph_def

        if "split_tflite_lstm_inputs" in param_dict_real:
          extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
              "split_tflite_lstm_inputs"]
        tflite_model_binary, toco_log = options.tflite_convert_function(
            options,
            graph_def,
            input_tensors,
            output_tensors,
            extra_toco_options=extra_toco_options,
            test_params=param_dict_real)
        report["converter"] = (
            report_lib.SUCCESS
            if tflite_model_binary is not None else report_lib.FAILED)
        report["converter_log"] = toco_log

        if options.save_graphdefs:
          zipinfo = zipfile.ZipInfo(zip_path_label + ".pbtxt")
          archive.writestr(zipinfo, text_format.MessageToString(graph_def),
                           zipfile.ZIP_DEFLATED)

        if tflite_model_binary:
          if options.make_edgetpu_tests:
            # Set proper min max values according to input dtype.
            baseline_inputs, baseline_outputs = generate_inputs_outputs(
                tflite_model_binary, min_value=0, max_value=255)
          zipinfo = zipfile.ZipInfo(zip_path_label + ".bin")
          archive.writestr(zipinfo, tflite_model_binary, zipfile.ZIP_DEFLATED)
          example = {"inputs": baseline_inputs, "outputs": baseline_outputs}

          example_fp = StringIO()
          write_examples(example_fp, [example])
          zipinfo = zipfile.ZipInfo(zip_path_label + ".inputs")
          archive.writestr(zipinfo, example_fp.getvalue(), zipfile.ZIP_DEFLATED)

          example_fp2 = StringIO()
          write_test_cases(example_fp2, zip_path_label + ".bin", [example])
          zipinfo = zipfile.ZipInfo(zip_path_label + "_tests.txt")
          archive.writestr(zipinfo, example_fp2.getvalue(),
                           zipfile.ZIP_DEFLATED)

          zip_manifest_label = zip_path_label + " " + label
          if zip_path_label == label:
            zip_manifest_label = zip_path_label

          zip_manifest.append(zip_manifest_label + "\n")

        return tflite_model_binary, report
Example #11
def zipdir(dirPath=None, zipFilePath=None, includeDirInZip=False):
    """
    Attribution:  I wish I could remember where I found this on the
    web.  To the unknown sharer of knowledge - thank you.

    Create a zip archive from a directory.

    Note that this function is designed to put files in the zip archive with
    either no parent directory or just one parent directory, so it will trim any
    leading directories in the filesystem paths and not include them inside the
    zip archive paths. This is generally the case when you want to just take a
    directory and make it into a zip file that can be extracted in different
    locations.

    Keyword arguments:

    dirPath -- string path to the directory to archive. This is the only
    required argument. It can be absolute or relative, but only one or zero
    leading directories will be included in the zip archive.

    zipFilePath -- string path to the output zip file. This can be an absolute
    or relative path. If the zip file already exists, it will be updated. If
    not, it will be created. If you want to replace it from scratch, delete it
    prior to calling this function. (default is computed as dirPath + ".zip")

    includeDirInZip -- boolean indicating whether the top level directory should
    be included in the archive or omitted. (default False)
    """
    if not zipFilePath:
        zipFilePath = dirPath + ".zip"
    if not os.path.isdir(dirPath):
        raise OSError("dirPath argument must point to a directory. "
                      "'%s' does not." % dirPath)
    parentDir, dirToZip = os.path.split(dirPath)

    # Little nested function to prepare the proper archive path.
    def trimPath(path):
        archivePath = path.replace(parentDir, "", 1)
        if parentDir:
            archivePath = archivePath.replace(os.path.sep, "", 1)
        if not includeDirInZip:
            archivePath = archivePath.replace(dirToZip + os.path.sep, "", 1)
        return os.path.normcase(archivePath)

    outFile = zipfile.ZipFile(zipFilePath,
                              "w",
                              compression=zipfile.ZIP_DEFLATED)
    for (archiveDirPath, dirNames, fileNames) in os.walk(dirPath):
        for fileName in fileNames:
            filePath = os.path.join(archiveDirPath, fileName)
            outFile.write(filePath, trimPath(filePath))
        # Make sure we get empty directories as well.
        if not fileNames and not dirNames:
            zipInfo = zipfile.ZipInfo(trimPath(archiveDirPath) + "/")
            # Some sites suggest setting zipInfo.external_attr to 16 (0x10, the
            # MS-DOS directory flag) or 48 (0x30, directory + archive flags) so
            # that extractors recognize this as an empty directory; see the
            # sketch after this function.
            outFile.writestr(zipInfo, "")
    outFile.close()
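One resolution for the empty-directory question above, sketched under the convention used elsewhere in this collection (the helper name is mine, not from the original): put a Unix directory mode in the high 16 bits of external_attr and the MS-DOS directory flag in the low bits.

import zipfile

def add_empty_dir(archive, arcname):
    # Directory entries must end with a slash.
    info = zipfile.ZipInfo(arcname.rstrip("/") + "/")
    # drwxr-xr-x in the high 16 bits, MS-DOS directory flag (0x10) in the low bits.
    info.external_attr = (0o40755 << 16) | 0x10
    archive.writestr(info, b"")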
Example #12
0
#!/usr/bin/env python

import sys
import zipfile

date_options = [
  (1990, 4, 19, 11, 0, 0),
  (1984, 2, 5, 10, 0, 0),
]

zf = zipfile.ZipFile(sys.argv[1], 'w')
zf.writestr(zipfile.ZipInfo('file.txt', date_options[int(sys.argv[2])]), 'data')
zf.close()
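A quick read-back to confirm the stored timestamp (a hypothetical check, not part of the original script):

with zipfile.ZipFile(sys.argv[1]) as check:
    print(check.getinfo('file.txt').date_time)  # e.g. (1990, 4, 19, 11, 0, 0)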
Example #13
0
    def _write_workflow(workflow: Dict[Text, Any], package_path: Text = None):
        """Dump pipeline workflow into yaml spec and write out in the format specified by the user.

    Args:
      workflow: Workflow spec of the pipline, dict.
      package_path: file path to be written. If not specified, a yaml_text string
        will be returned.
    """
        # yaml_text = dump_yaml(workflow)
        yaml.Dumper.ignore_aliases = lambda *args: True
        yaml_text = yaml.dump(workflow,
                              default_flow_style=False)  # Tekton change

        # Use regex to replace all the Argo variables with Tekton variables. For
        # variables that are unique to Argo, we raise an error to alert users to
        # the unsupported variables. Here is the list of Argo variables:
        # https://github.com/argoproj/argo/blob/master/docs/variables.md
        # Since Argo variables can appear anywhere in the YAML, we dump the
        # whole YAML and then rewrite it with regular expressions.
        tekton_var_regex_rules = [{
            'argo_rule': '{{inputs.parameters.([^ \t\n.:,;{}]+)}}',
            'tekton_rule': r'$(inputs.params.\g<1>)'
        }, {
            'argo_rule': '{{outputs.parameters.([^ \t\n.:,;{}]+).path}}',
            'tekton_rule': r'$(results.\g<1>.path)'
        }, {
            'argo_rule': '{{workflow.uid}}',
            'tekton_rule': '$(context.pipelineRun.uid)'
        }, {
            'argo_rule': '{{workflow.name}}',
            'tekton_rule': '$(context.pipelineRun.name)'
        }, {
            'argo_rule': '{{workflow.namespace}}',
            'tekton_rule': '$(context.pipelineRun.namespace)'
        }, {
            'argo_rule': '{{workflow.parameters.([^ \t\n.:,;{}]+)}}',
            'tekton_rule': r'$(params.\g<1>)'
        }]
        for regex_rule in tekton_var_regex_rules:
            yaml_text = re.sub(regex_rule['argo_rule'],
                               regex_rule['tekton_rule'], yaml_text)

        unsupported_vars = re.findall(r"{{[^ \t\n.:,;{}]+\.[^ \t\n:,;{}]+}}",
                                      yaml_text)
        if unsupported_vars:
            raise ValueError(
                'These Argo variables are not supported in Tekton Pipeline: %s'
                % ", ".join(str(v) for v in set(unsupported_vars)))

        if '{{pipelineparam' in yaml_text:
            raise RuntimeError(
                'Internal compiler error: Found unresolved PipelineParam. '
                'Please create a new issue at https://github.com/kubeflow/kfp-tekton/issues '
                'attaching the pipeline DSL code and the pipeline YAML.')

        if package_path is None:
            return yaml_text

        if package_path.endswith('.tar.gz') or package_path.endswith('.tgz'):
            from contextlib import closing
            from io import BytesIO
            with tarfile.open(package_path, "w:gz") as tar:
                with closing(BytesIO(yaml_text.encode())) as yaml_file:
                    tarinfo = tarfile.TarInfo('pipeline.yaml')
                    tarinfo.size = len(yaml_file.getvalue())
                    tar.addfile(tarinfo, fileobj=yaml_file)
        elif package_path.endswith('.zip'):
            with zipfile.ZipFile(package_path, "w") as zip_file:
                zipinfo = zipfile.ZipInfo('pipeline.yaml')
                zipinfo.compress_type = zipfile.ZIP_DEFLATED
                zip_file.writestr(zipinfo, yaml_text)
        elif package_path.endswith('.yaml') or package_path.endswith('.yml'):
            with open(package_path, 'w') as yaml_file:
                yaml_file.write(yaml_text)
        else:
            raise ValueError(
                'The output path %s should end with one of the following formats: '
                '[.tar.gz, .tgz, .zip, .yaml, .yml]' % package_path)
Example #14
0
    def _zip_writestr(self, file_name, data):
        info = zipfile.ZipInfo(file_name)
        info.compress_type = zipfile.ZIP_DEFLATED
        # 0o100644 << 16 == 2175008768: regular file, rw-r--r-- permissions.
        info.external_attr = 2175008768
        self.archive.writestr(info, data)
        return True
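That magic constant is just a Unix mode shifted into the high 16 bits; a quick check (illustrative only):

import stat

attr = 0o100644 << 16
assert attr == 2175008768
print(stat.filemode(attr >> 16))  # -rw-r--r--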
Example #15
0
        if not line or line[0] == '#': continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print("failed to parse password file: ", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print("error reading password file: ", str(e))
    return result


def ZipWriteStr(zip, filename, data, perms=0o644, compression=None):
  # Use a fixed timestamp so the output is repeatable.
  zinfo = zipfile.ZipInfo(filename=filename,
                          date_time=(2009, 1, 1, 0, 0, 0))
  if compression is None:
    zinfo.compress_type = zip.compression
  else:
    zinfo.compress_type = compression
  zinfo.external_attr = perms << 16
  zip.writestr(zinfo, data)


class DeviceSpecificParams(object):
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.items():
      setattr(self, k, v)
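Why the fixed timestamp in ZipWriteStr matters: two runs over identical inputs should produce byte-identical archives. A hypothetical check, not from the original tool:

import io
import zipfile

buffers = [io.BytesIO(), io.BytesIO()]
for buf in buffers:
    with zipfile.ZipFile(buf, 'w') as zf:
        ZipWriteStr(zf, 'a.txt', 'hello', perms=0o644)
assert buffers[0].getvalue() == buffers[1].getvalue()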
Example #16
0
    def test_unless_you_make_your_own_zipinfo(self):
        info = zipfile.ZipInfo(self.make_file())
        archive = self.make_open_archive()
        archive.add_contents(info, 'foo.txt')
        archive.close()
        self.assertRaises(AssertionError, self.check_world_readable, archive)
Example #17
0
    def test_writestr_extended_local_header_issue1202(self):
        with zipfile.ZipFile(TESTFN2, 'w') as orig_zip:
            for data in 'abcdefghijklmnop':
                zinfo = zipfile.ZipInfo(data)
                zinfo.flag_bits |= 0x08  # Include an extended local header.
                orig_zip.writestr(zinfo, data)
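The point of setting bit 3: the sizes and CRC move into a data descriptor after the member data, and the archive must still read back cleanly. A minimal follow-up check (assumed, not part of the original test):

with zipfile.ZipFile(TESTFN2) as zf:
    assert zf.read('a') == b'a'
    assert zf.getinfo('a').flag_bits & 0x08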
Example #18
0
def create_zipinfo(filename,
                   mtime=None,
                   dir=False,
                   executable=False,
                   symlink=False,
                   comment=None):
    """Create a instance of `ZipInfo`.

    :param filename: file name of the entry
    :param mtime: modified time of the entry
    :param dir: if `True`, the entry is a directory
    :param executable: if `True`, the entry is a executable file
    :param symlink: if `True`, the entry is a symbolic link
    :param comment: comment of the entry
    """
    zipinfo = zipfile.ZipInfo()

    # The general purpose bit flag 11 is used to denote
    # UTF-8 encoding for path and comment. Only set it for
    # non-ascii files for increased portability.
    # See http://www.pkware.com/documents/casestudies/APPNOTE.TXT
    if any(ord(c) >= 128 for c in filename):
        zipinfo.flag_bits |= 0x0800
    zipinfo.filename = filename  # On Python 3, ZipInfo.filename must be str.

    if mtime is not None:
        mtime = to_datetime(mtime, utc)
        zipinfo.date_time = mtime.utctimetuple()[:6]
        # The "extended-timestamp" extra field is used for the
        # modified time of the entry in unix time. It avoids
        # extracting wrong modified time if non-GMT timezone.
        # See http://www.opensource.apple.com/source/zip/zip-6/unzip/unzip
        #     /proginfo/extra.fld
        zipinfo.extra += struct.pack(
            '<hhBl',
            0x5455,  # extended-timestamp extra block type
            1 + 4,  # size of this block
            1,  # modification time is present
            to_timestamp(mtime))  # time of last modification

    # external_attr is 4 bytes in size. The high order two
    # bytes represent UNIX permission and file type bits,
    # while the low order two contain MS-DOS FAT file
    # attributes, most notably bit 4 marking directories.
    if dir:
        if not zipinfo.filename.endswith('/'):
            zipinfo.filename += '/'
        zipinfo.compress_type = zipfile.ZIP_STORED
        zipinfo.external_attr = 0o40755 << 16  # permissions drwxr-xr-x
        zipinfo.external_attr |= 0x10  # MS-DOS directory flag
    else:
        zipinfo.compress_type = zipfile.ZIP_DEFLATED
        zipinfo.external_attr = 0o644 << 16  # permissions -rw-r--r--
        if executable:
            zipinfo.external_attr |= 0o755 << 16  # -rwxr-xr-x
        if symlink:
            zipinfo.compress_type = zipfile.ZIP_STORED
            zipinfo.external_attr |= 0o120000 << 16  # symlink file type

    if comment:
        zipinfo.comment = comment.encode('utf-8')

    return zipinfo
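A hedged usage sketch (the archive name and link target are made up): for a symlink entry, the link target is written as the member's data.

with zipfile.ZipFile('snapshot.zip', 'w') as zf:
    info = create_zipinfo('docs/latest', symlink=True)
    zf.writestr(info, 'v2.0/')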
Example #19
0
    def add_file(self, io, aname):
        if aname not in self.__zf.namelist():
            self.__zf.writestr(zipfile.ZipInfo(aname), io.getvalue(),
                               zipfile.ZIP_DEFLATED)
            io.seek(0)
        return self
Example #20
0
def download_plot(two_way_streets=False):
    loom_db = flask.current_app.loom_db

    if flask.request.method == 'POST':
        kwargs = {
            'saved_data': None,
            'n_processes': None,
            'search_radius': None,
            'plot_two_way_streets': None
        }
        kwargs_string_valued = {
            'process_uuid': None,
            'data_name': None,
            'progress_log': None
        }
        set_kwargs_from_request(
            kwargs,
            kwargs_string_valued,
            flask.request.form,
        )
    else:
        raise RuntimeError

    n_processes = kwargs['n_processes']
    search_radius = kwargs['search_radius']
    process_uuid = kwargs['process_uuid']
    data_name = kwargs['data_name']
    saved_data = kwargs['saved_data']

    result_queue = loom_db.get_result_queue(
        process_uuid,
        create=False,
    )
    if result_queue is not None:
        spectral_network_data = result_queue.get()
    else:
        full_data_dir = get_full_data_dir(
            process_uuid=process_uuid,
            data_name=data_name,
            saved_data=saved_data,
        )
        spectral_network_data = SpectralNetworkData(data_dir=full_data_dir)
    spectral_network_data.reset_z_rotation()

    data = {}
    if two_way_streets is False:
        plot_file_name = 'loom_plot_{}.html'.format(process_uuid)
        zip_file_prefix = plot_file_name
        data[plot_file_name] = render_plot_template(spectral_network_data,
                                                    download=True,
                                                    **kwargs)

    else:
        plot_range = eval(flask.request.form['plot_range'])
        zip_file_prefix = 'loom_streets_{}'.format(process_uuid)
        spectral_network_data.find_two_way_streets(
            n_processes=n_processes,
            search_radius=search_radius,
            improve=True,
            #replace=False,
        )
        #        for i, trees in enumerate(soliton_tree_data):
        #            for j, tree in enumerate(trees):
        #                soliton_tree_plot = SolitonTreePlot(
        #                    plot_range=plot_range,
        #                )
        #                # Make a plot title.
        #                Z = tree.Z
        #                title = (
        #                    'SN #{}, tree #{}, '.format(i, j) +
        #                    'Z = ({:.6}) + ({:.6})i'.format(Z.real, Z.imag)
        #                )
        #                soliton_tree_plot.draw(
        #                    title=title,
        #                    sw_data=spectral_network_data.sw_data,
        #                    soliton_tree=soliton_tree_data[i][j],
        #                )
        #                fp = BytesIO()
        #                soliton_tree_plot.figure.savefig(fp, format='pdf')
        #                fp.seek(0)
        #                file_name = '{}_{}.pdf'.format(i, j)
        #                data[file_name] = fp.read()
        for i, tree in enumerate(spectral_network_data.soliton_trees):
            soliton_tree_plot = SolitonTreePlot(plot_range=plot_range, )
            # Make a plot title.
            Z = tree.Z()
            title = (
                'Soliton tree #{} @ theta = {:.4}, '.format(i, tree.phase) +
                'Z = ({:.6}) + ({:.6})i'.format(Z.real, Z.imag))
            soliton_tree_plot.draw(
                title=title,
                sw_data=spectral_network_data.sw_data,
                soliton_tree=tree,
            )
            fp = BytesIO()
            soliton_tree_plot.figure.savefig(fp, format='pdf')
            fp.seek(0)
            file_name = 'soliton_tree_{}.pdf'.format(i)
            data[file_name] = fp.read()

    zip_fp = BytesIO()
    with zipfile.ZipFile(zip_fp, 'w') as zfp:
        for file_name, data_str in data.items():
            zip_info = zipfile.ZipInfo(file_name)
            zip_info.date_time = time.localtime(time.time())[:6]
            zip_info.compress_type = zipfile.ZIP_DEFLATED
            zip_info.external_attr = 0o40664 << 16
            zfp.writestr(zip_info, data_str)
    zip_fp.seek(0)

    if result_queue is not None:
        # Put back the data in the queue for a future use.
        result_queue.put(spectral_network_data)

    return flask.send_file(
        zip_fp,
        attachment_filename='{}.zip'.format(zip_file_prefix),
        as_attachment=True,
    )
Example #21
0
    def handle_request(filepath=''):
        """Handle an HTTP request (HEAD, GET, POST).
        """
        # replace SCRIPT_NAME with the custom if set
        if config['app']['base']:
            request.environ['SCRIPT_NAME'] = config['app']['base']

        query = request.values

        action = query.get('a', default='view')
        action = query.get('action', default=action)

        format = query.get('f')
        format = query.get('format', default=format)

        # handle authorization
        auth_result = handle_authorization(action=action, format=format)
        if auth_result is not None:
            return auth_result

        # determine primary variables
        #
        # filepath: the URL path below app base (not percent encoded)
        # localpath: the file system path corresponding to filepath
        # localtargetpath: localpath with symbolic link resolved
        # mimetype: the mimetype from localtargetpath
        # archivefile: the file system path of the ZIP archive file, or None
        # subarchivepath: the URL path below archivefile (not percent encoded)
        localpath = os.path.abspath(
            os.path.join(runtime['root'], filepath.strip('/\\')))
        localtargetpath = os.path.realpath(localpath)
        archivefile, subarchivepath = get_archive_path(filepath, localpath)
        mimetype, _ = mimetypes.guess_type(localtargetpath)

        # handle action
        if action == 'static':
            if format:
                return http_error(400, "Action not supported.", format=format)

            for i in runtime['statics']:
                f = os.path.join(i, filepath)
                if os.path.lexists(f):
                    return static_file(filepath, root=i)
            else:
                return http_error(404)

        elif action == 'source':
            if format:
                return http_error(400, "Action not supported.", format=format)

            if archivefile:
                response = handle_subarchive_path(
                    os.path.realpath(archivefile),
                    subarchivepath,
                    mimetype,
                    list_directory=False)
            else:
                response = static_file(filepath,
                                       root=runtime['root'],
                                       mimetype=mimetype)

            # show as inline plain text
            # @TODO: Chromium (80) seems to ignore header mimetype for certain types
            #        like image and zip
            encoding = query.get('e', 'utf-8')
            encoding = query.get('encoding', default=encoding)
            response.headers.set('Content-Type',
                                 'text/plain; charset=' + quote(encoding))
            response.headers.set('Content-Disposition', 'inline')

            return response

        elif action in ('exec', 'browse'):
            if not is_local_access():
                return http_error(400,
                                  "Command can only run on local device.",
                                  format=format)

            if not os.path.lexists(localpath):
                return http_error(404, "File does not exist.", format=format)

            if action == 'browse':
                util.view_in_explorer(localpath)

            elif action == 'exec':
                util.launch(localpath)

            if format:
                return http_response('Command run successfully.',
                                     format=format)

            return http_response(status=204)

        elif action == 'token':
            return http_response(token_handler.acquire(), format=format)

        elif action == 'list':
            if not format:
                return http_error(400, "Action not supported.", format=format)

            if os.path.isdir(localpath):
                recursive = query.get('recursive', type=bool)
                return handle_directory_listing(localtargetpath,
                                                recursive=recursive,
                                                format=format)

            return http_error(400, "This is not a directory.", format=format)

        elif action == 'config':
            if not format:
                return http_error(400, "Action not supported.", format=format)

            data = config.dump_object()

            # filter values for better security
            data = {k: v for k, v in data.items() if k in ('app', 'book')}
            data['app'] = {
                k: v
                for k, v in data['app'].items() if k in ('name', 'theme')
            }

            # add and rewrite values for client to better know the server
            data['app']['base'] = request.script_root
            data['app']['is_local'] = is_local_access()
            data['VERSION'] = __version__
            data['WSB_DIR'] = WSB_DIR
            data['WSB_LOCAL_CONFIG'] = WSB_LOCAL_CONFIG

            return http_response(data, format=format)

        elif action == 'edit':
            if format:
                return http_error(400, "Action not supported.", format=format)

            if os.path.lexists(localpath) and not os.path.isfile(localpath):
                return http_error(400, "Found a non-file here.", format=format)

            if archivefile:
                with zipfile.ZipFile(archivefile, 'r') as zip:
                    try:
                        info = zip.getinfo(subarchivepath)
                    except KeyError:
                        body = b''
                    else:
                        body = zip.read(info)
            else:
                try:
                    with open(localpath, 'rb') as f:
                        body = f.read()
                except FileNotFoundError:
                    body = b''

            encoding = query.get('e')
            encoding = query.get('encoding', default=encoding)

            try:
                body = body.decode(encoding or 'UTF-8')
            except (LookupError, UnicodeDecodeError):
                encoding = 'ISO-8859-1'
                body = body.decode(encoding)

            body = render_template(
                'edit.html',
                sitename=runtime['name'],
                is_local=is_local_access(),
                base=request.script_root,
                path=request.path,
                body=body,
                encoding=encoding,
            )

            return http_response(body, format=format)

        elif action == 'editx':
            if format:
                return http_error(400, "Action not supported.", format=format)

            if os.path.lexists(localpath) and not os.path.isfile(localpath):
                return http_error(400, "Found a non-file here.", format=format)

            if mimetype not in ("text/html", "application/xhtml+xml"):
                return http_error(400,
                                  "This is not an HTML file.",
                                  format=format)

            if archivefile:
                with zipfile.ZipFile(archivefile, 'r') as zip:
                    try:
                        info = zip.getinfo(subarchivepath)
                    except KeyError:
                        return http_error(404,
                                          "File does not exist.",
                                          format=format)
            else:
                if not os.path.lexists(localpath):
                    return http_error(404,
                                      "File does not exist.",
                                      format=format)

            body = render_template(
                'editx.html',
                sitename=runtime['name'],
                is_local=is_local_access(),
                base=request.script_root,
                path=request.path,
            )

            return http_response(body, format=format)

        elif action in ('lock', 'unlock', 'mkdir', 'save', 'delete', 'move',
                        'copy'):
            if request.method != 'POST':
                headers = {
                    'Allow': 'POST',
                }
                return http_error(405,
                                  'Method "{}" not allowed.'.format(
                                      request.method),
                                  format=format,
                                  headers=headers)

            # validate and revoke token
            token = query.get('token') or ''

            if not token_handler.validate(token):
                return http_error(400, 'Invalid access token.', format=format)

            token_handler.delete(token)

            # validate localpath
            if action not in ('lock', 'unlock'):
                if os.path.abspath(localpath) == runtime['root']:
                    return http_error(403,
                                      "Unable to operate the root directory.",
                                      format=format)

            # validate targetpath
            if action in ('lock', 'unlock'):
                name = query.get('name')
                if name is None:
                    return http_error(400,
                                      "Lock name is not specified.",
                                      format=format)

                targetpath = os.path.join(runtime['locks'], name)
                if not targetpath.startswith(os.path.join(
                        runtime['locks'], '')):
                    return http_error(400,
                                      'Invalid lock name "{}".'.format(name),
                                      format=format)

            # handle action

            # action lock
            # name: name of the lock file.
            # chkt: recheck until the lock file no longer exists, or fail when
            #   the timeout is reached.
            # chks: how long before the lock file is treated as stale.
            if action == 'lock':
                check_stale = query.get('chks', 300, type=int)
                check_timeout = query.get('chkt', 5, type=int)
                check_expire = time.time() + check_timeout
                check_delta = min(check_timeout, 0.1)

                while True:
                    try:
                        os.makedirs(targetpath)
                    except FileExistsError:
                        t = time.time()

                        if t >= check_expire or not os.path.isdir(targetpath):
                            return http_error(
                                500,
                                'Unable to acquire lock "{}".'.format(name),
                                format=format)

                        try:
                            lock_expire = os.stat(
                                targetpath).st_mtime + check_stale
                        except FileNotFoundError:
                            # Lock removed by another process during the short interval.
                            # Try acquire again.
                            continue

                        if t >= lock_expire:
                            # Lock expired. Touch it rather than remove and re-create, for atomicity.
                            try:
                                Path(targetpath).touch()
                            except:
                                traceback.print_exc()
                                return http_error(
                                    500,
                                    'Unable to regenerate stale lock "{}".'.
                                    format(name),
                                    format=format)
                            else:
                                break

                        time.sleep(check_delta)
                    except:
                        traceback.print_exc()
                        return http_error(
                            500,
                            'Unable to create lock "{}".'.format(name),
                            format=format)
                    else:
                        break

            elif action == 'unlock':
                try:
                    os.rmdir(targetpath)
                except FileNotFoundError:
                    pass
                except:
                    traceback.print_exc()
                    return http_error(
                        500,
                        'Unable to remove lock "{}".'.format(name),
                        format=format)

            elif action == 'mkdir':
                if os.path.lexists(localpath) and not os.path.isdir(localpath):
                    return http_error(400,
                                      "Found a non-directory here.",
                                      format=format)

                if archivefile:
                    try:
                        zip = zipfile.ZipFile(archivefile, 'a')
                        subarchivepath = subarchivepath + '/'

                        try:
                            info = zip.getinfo(subarchivepath)
                        except KeyError:
                            # subarchivepath does not exist
                            info = zipfile.ZipInfo(subarchivepath,
                                                   time.localtime())
                            zip.writestr(info,
                                         b'',
                                         compress_type=zipfile.ZIP_STORED)
                    except:
                        traceback.print_exc()
                        return http_error(500,
                                          "Unable to write to this ZIP file.",
                                          format=format)

                else:
                    try:
                        os.makedirs(localpath, exist_ok=True)
                    except OSError:
                        traceback.print_exc()
                        return http_error(500,
                                          "Unable to create a directory here.",
                                          format=format)

            elif action == 'save':
                if os.path.lexists(
                        localpath) and not os.path.isfile(localpath):
                    return http_error(400,
                                      "Found a non-file here.",
                                      format=format)

                if archivefile:
                    try:
                        zip0 = zip = zipfile.ZipFile(archivefile, 'a')

                        try:
                            # if subarchivepath exists, open a new zip file for writing.
                            try:
                                info = zip.getinfo(subarchivepath)
                            except KeyError:
                                info = zipfile.ZipInfo(subarchivepath,
                                                       time.localtime())
                            else:
                                info.date_time = time.localtime()
                                temp_path = archivefile + '.' + str(time_ns())
                                zip = zipfile.ZipFile(temp_path, 'w')

                            try:
                                # write to the zip file
                                file = request.files.get('upload')
                                if file is not None:
                                    with zip.open(info, 'w',
                                                  force_zip64=True) as fp:
                                        stream = file.stream
                                        while True:
                                            s = stream.read(8192)
                                            if not s:
                                                break
                                            fp.write(s)
                                else:
                                    bytes = query.get('text',
                                                      '').encode('ISO-8859-1')
                                    zip.writestr(
                                        info,
                                        bytes,
                                        compress_type=zipfile.ZIP_DEFLATED,
                                        compresslevel=9)

                                # copy zip0 content to zip
                                if zip is not zip0:
                                    for info in zip0.infolist():
                                        if info.filename == subarchivepath:
                                            continue
                                        zip.writestr(
                                            info,
                                            zip0.read(info),
                                            compress_type=info.compress_type,
                                            compresslevel=None
                                            if info.compress_type
                                            == zipfile.ZIP_STORED else 9)
                            except:
                                # remove the generated zip file if writing fails
                                if zip is not zip0:
                                    zip.close()
                                    os.remove(zip.filename)

                                raise
                            else:
                                # replace zip0 with the generated zip file
                                if zip is not zip0:
                                    zip0.close()
                                    zip.close()

                                    temp_path = archivefile + '.' + str(
                                        time_ns() + 1)
                                    os.rename(archivefile, temp_path)
                                    os.rename(zip.filename, archivefile)
                                    os.remove(temp_path)
                        finally:
                            zip0.close()
                    except:
                        traceback.print_exc()
                        return http_error(500,
                                          "Unable to write to this ZIP file.",
                                          format=format)

                else:
                    try:
                        os.makedirs(os.path.dirname(localpath), exist_ok=True)
                    except:
                        traceback.print_exc()
                        return http_error(500,
                                          "Unable to write to this path.",
                                          format=format)

                    try:
                        file = request.files.get('upload')
                        if file is not None:
                            file.save(localpath)
                        else:
                            bytes = query.get('text', '').encode('ISO-8859-1')
                            with open(localpath, 'wb') as f:
                                f.write(bytes)
                    except:
                        traceback.print_exc()
                        return http_error(500,
                                          "Unable to write to this file.",
                                          format=format)

            elif action == 'delete':
                if archivefile:
                    try:
                        zip0 = zipfile.ZipFile(archivefile, 'r')
                        temp_path = archivefile + '.' + str(time_ns())
                        zip = zipfile.ZipFile(temp_path, 'w')

                        try:
                            deleted = False
                            for info in zip0.infolist():
                                if (info.filename == subarchivepath
                                        or info.filename.startswith(
                                            subarchivepath + '/')):
                                    deleted = True
                                    continue

                                zip.writestr(
                                    info,
                                    zip0.read(info),
                                    compress_type=info.compress_type,
                                    compresslevel=None if info.compress_type
                                    == zipfile.ZIP_STORED else 9)
                        except:
                            # remove the generated zip file if writing fails
                            zip.close()
                            os.remove(zip.filename)

                            raise
                        else:
                            zip0.close()
                            zip.close()

                            if not deleted:
                                os.remove(zip.filename)
                                return http_error(
                                    404,
                                    "Entry does not exist in this ZIP file.",
                                    format=format)

                            # replace zip0 with the generated zip file
                            temp_path = archivefile + '.' + str(time_ns() + 1)
                            os.rename(archivefile, temp_path)
                            os.rename(zip.filename, archivefile)
                            os.remove(temp_path)
                    except:
                        traceback.print_exc()
                        return http_error(500,
                                          "Unable to write to this ZIP file.",
                                          format=format)

                else:
                    if not os.path.lexists(localpath):
                        return http_error(404,
                                          "File does not exist.",
                                          format=format)

                    if os.path.islink(localpath):
                        try:
                            os.remove(localpath)
                        except:
                            traceback.print_exc()
                            return http_error(500,
                                              "Unable to delete this link.",
                                              format=format)
                    elif os.path.isfile(localpath):
                        try:
                            os.remove(localpath)
                        except:
                            traceback.print_exc()
                            return http_error(500,
                                              "Unable to delete this file.",
                                              format=format)
                    elif os.path.isdir(localpath):
                        try:
                            try:
                                # Try rmdir for a possible Windows directory junction,
                                # which is not detected by os.path.islink.
                                os.rmdir(localpath)
                            except OSError:
                                # directory not empty
                                shutil.rmtree(localpath)
                        except:
                            traceback.print_exc()
                            return http_error(
                                500,
                                "Unable to delete this directory.",
                                format=format)

            elif action == 'move':
                if archivefile:
                    return http_error(400,
                                      "File is inside an archive file.",
                                      format=format)

                if not os.path.lexists(localpath):
                    return http_error(404,
                                      "File does not exist.",
                                      format=format)

                target = query.get('target')

                if target is None:
                    return http_error(400,
                                      'Target is not specified.',
                                      format=format)

                targetpath = os.path.normpath(
                    os.path.join(runtime['root'], target.strip('/')))

                if not targetpath.startswith(os.path.join(runtime['root'],
                                                          '')):
                    return http_error(
                        403,
                        "Unable to operate beyond the root directory.",
                        format=format)

                if os.path.lexists(targetpath):
                    return http_error(
                        400,
                        'Found something at target "{}".'.format(target),
                        format=format)

                ta, tsa = get_archive_path(target, targetpath)
                if ta:
                    return http_error(400,
                                      "Move target is inside an archive file.",
                                      format=format)

                os.makedirs(os.path.dirname(targetpath), exist_ok=True)

                try:
                    os.rename(localpath, targetpath)
                except:
                    traceback.print_exc()
                    return http_error(
                        500,
                        'Unable to move to target "{}".'.format(target),
                        format=format)

            elif action == 'copy':
                if archivefile:
                    return http_error(400,
                                      "File is inside an archive file.",
                                      format=format)

                if not os.path.lexists(localpath):
                    return http_error(404,
                                      "File does not exist.",
                                      format=format)

                target = query.get('target')

                if target is None:
                    return http_error(400,
                                      'Target is not specified.',
                                      format=format)

                targetpath = os.path.normpath(
                    os.path.join(runtime['root'], target.strip('/')))

                if not targetpath.startswith(os.path.join(runtime['root'],
                                                          '')):
                    return http_error(
                        403,
                        "Unable to operate beyond the root directory.",
                        format=format)

                if os.path.lexists(targetpath):
                    return http_error(
                        400,
                        'Found something at target "{}".'.format(target),
                        format=format)

                ta, tsa = get_archive_path(target, targetpath)
                if ta:
                    return http_error(400,
                                      "Copy target is inside an archive file.",
                                      format=format)

                os.makedirs(os.path.dirname(targetpath), exist_ok=True)

                try:
                    try:
                        shutil.copytree(localpath, targetpath)
                    except NotADirectoryError:
                        shutil.copy2(localpath, targetpath)
                except:
                    traceback.print_exc()
                    return http_error(
                        500,
                        'Unable to copy to target "{}".'.format(target),
                        format=format)

            if format:
                return http_response('Command run successfully.',
                                     format=format)

            return http_response(status=204)

        # "view" or undefined actions
        elif action == 'view':
            # show file information for other output formats
            if format:
                info = util.file_info(localpath)
                data = {
                    'name': info.name,
                    'type': info.type,
                    'size': info.size,
                    'last_modified': info.last_modified,
                    'mime': mimetype,
                }
                return http_response(data, format=format)

            # handle directory
            if os.path.isdir(localpath):
                return handle_directory_listing(localtargetpath)

            # handle file
            elif os.path.isfile(localpath):
                # view archive file
                if mimetype in ("application/html+zip", "application/x-maff"):
                    return handle_archive_viewing(localtargetpath, mimetype)

                # view markdown
                if mimetype == "text/markdown":
                    return handle_markdown_output(filepath, localtargetpath)

                # convert meta refresh to 302 redirect
                if localtargetpath.lower().endswith('.htm'):
                    target = util.parse_meta_refresh(localtargetpath).target

                    if target is not None:
                        # Keep several chars as javascript encodeURI do,
                        # plus "%" as target may have already been escaped.
                        new_url = urljoin(
                            request.url, quote(target,
                                               ";,/?:@&=+$-_.!~*'()#%"))
                        return redirect(new_url)

                # show static file for other cases
                response = static_file(filepath,
                                       root=runtime['root'],
                                       mimetype=mimetype)

            # handle sub-archive path
            elif archivefile:
                response = handle_subarchive_path(
                    os.path.realpath(archivefile), subarchivepath, mimetype)

            else:
                return http_error(404)

            # don't include charset
            m, p = parse_options_header(response.headers.get('Content-Type'))
            try:
                del p['charset']
            except KeyError:
                pass
            response.headers.set('Content-Type', dump_options_header(m, p))

            return response

        # unknown action
        else:
            return http_error(400, "Action not supported.", format=format)
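The save and delete branches above both rely on the same fact: the ZIP format has no in-place update or delete, so every other entry is copied into a fresh archive which then replaces the original file. Condensed into a standalone sketch (the helper name is mine, not from the original):

import os
import zipfile

def zip_delete(path, member):
    tmp = path + '.tmp'
    with zipfile.ZipFile(path) as src, zipfile.ZipFile(tmp, 'w') as dst:
        for info in src.infolist():
            # Skip the member itself and, if it is a directory, its children.
            if info.filename == member or info.filename.startswith(member + '/'):
                continue
            dst.writestr(info, src.read(info), compress_type=info.compress_type)
    os.replace(tmp, path)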
Example #22
0
    # Re-zip the result.
    resultOdtName = os.path.join(self.tempFolder, 'result.odt')
    try:
        resultOdt = zipfile.ZipFile(resultOdtName, 'w', zipfile.ZIP_DEFLATED)
    except RuntimeError:
        # zlib is unavailable: fall back to an uncompressed archive.
        resultOdt = zipfile.ZipFile(resultOdtName, 'w')
    for dir, dirnames, filenames in os.walk(self.unzipFolder):
        for f in filenames:
            folderName = dir[len(self.unzipFolder)+1:]
            resultOdt.write(os.path.join(dir, f),
                            os.path.join(folderName, f))
        if not dirnames and not filenames:
            # This is an empty leaf folder. We must create an entry in the
            # zip for it.
            folderName = dir[len(self.unzipFolder):]
            zInfo = zipfile.ZipInfo("%s/" % folderName, time.localtime()[:6])
            zInfo.external_attr = 48  # 0x30: MS-DOS directory + archive flags.
            resultOdt.writestr(zInfo, '')
    resultOdt.close()
    resultType = os.path.splitext(self.result)[1]
    try:
        if (resultType == '.odt') and not self.forceOoCall:
            # Simply move the ODT result to the result.
            os.rename(resultOdtName, self.result)
        else:
            if resultType.startswith('.'): resultType = resultType[1:]
            if resultType not in FILE_TYPES:
                raise PodError(BAD_RESULT_TYPE % (
                    self.result, FILE_TYPES.keys()))
            # Call OpenOffice to perform the conversion or document update.
            output = self.callOpenOffice(resultOdtName, resultType)
Example #23
0
    def write_file_str(zfile, fname, data):
        # Work around a permission bug in the zipfile library:
        # http://bugs.python.org/issue3394
        zi = zipfile.ZipInfo(fname)
        zi.external_attr = int("100644", 8) << 16  # == 0o100644: -rw-r--r--
        zfile.writestr(zi, data)
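The bug being worked around: a bare ZipInfo defaults external_attr to 0, so files extracted from the archive can end up with no permission bits at all. A quick illustration (not from the original):

import zipfile

print(zipfile.ZipInfo('x').external_attr)  # 0 -> no permission bits recorded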
Example #24
0
    def write(self, cname, data):
        # The fixed DOS-epoch timestamp (1980-01-01) keeps the output deterministic.
        info = zipfile.ZipInfo(cname + self.suffix, (1980, 1, 1, 0, 0, 0))
        self.zip.writestr(info, data)
        return 'zipfile'
Example #25
0
def ZipWriteStr(zip, filename, data, perms=0o644):
    # Use a fixed timestamp so the output is repeatable.
    zinfo = zipfile.ZipInfo(filename=filename, date_time=(2009, 1, 1, 0, 0, 0))
    zinfo.compress_type = zip.compression
    zinfo.external_attr = perms << 16
    zip.writestr(zinfo, data)
Example #26
0
def rewriteNotionZip(notionClient,
                     zipPath,
                     outputPath=".",
                     removeTopH1=False,
                     rewritePaths=True):
    """
  Takes a Notion .zip and prettifies the whole thing
  * Removes all Notion IDs from end of names, folders and files
  * Add icon to the start of folder/file name if Unicode character
  * For files had content in Notion, move them inside the folder, and set the
    name to something that will sort to the top
  * Fix links inside of files
  * Optionally remove titles at the tops of files

  @param {NotionClient} notionClient The NotionClient to use to query Notion with
  @param {string} zipPath The path to the Notion zip
  @param {string} [outputPath="."] Optional output path, otherwise will use cwd
  @param {boolean} [removeTopH1=False] To remove titles at the top of all the md files
  @param {boolean} [rewritePaths=True] To rewrite all the links and images in the Markdown files too
  @returns {string} Path to the output zip file
  """
    with tempfile.TemporaryDirectory() as tmpDir:
        # Unpack the whole thing first (probably faster than traversing it zipped, like with tar files)
        print(f"Extracting '{zipPath}' temporarily...")
        with zipfile.ZipFile(zipPath) as zf:
            zf.extractall(tmpDir)

        # Make new zip to begin filling
        zipName = os.path.basename(zipPath)
        newZipName = f"{zipName}.formatted"
        newZipPath = os.path.join(outputPath, newZipName)
        with zipfile.ZipFile(newZipPath, 'w', zipfile.ZIP_DEFLATED) as zf:

            # Traverse the files, renaming, modifying, and rewriting them back to the zip.
            renamer = NotionExportRenamer(notionClient, tmpDir)
            for tmpWalkDir, dirs, files in os.walk(tmpDir):
                walkDir = os.path.relpath(tmpWalkDir, tmpDir)
                for name in files:
                    realPath = os.path.join(tmpWalkDir, name)
                    relPath = os.path.join(
                        "" if walkDir == "." else walkDir, name
                    )  # Prevent paths starting with .\\ which, when written to the zip, do annoying things
                    # print(f"Reading '{root}' '{name}'")

                    # Rewrite the current path and get the times from Notion
                    newPath, createdTime, lastEditedTime = renamer.renamePathAndTimesWithNotion(
                        relPath)

                    if os.path.splitext(name)[1] == ".md":
                        # Grab the data from the file if md file
                        with open(realPath, "r", encoding='utf-8') as f:
                            mdFileData = f.read()
                        mdFileData = mdFileRewrite(renamer,
                                                   relPath,
                                                   mdFileContents=mdFileData,
                                                   removeTopH1=removeTopH1,
                                                   rewritePaths=rewritePaths)

                        print(
                            f"Writing '{newPath}' with time '{lastEditedTime}' renamed from '{relPath}'"
                        )
                        zi = zipfile.ZipInfo(newPath,
                                             lastEditedTime.timetuple()[:6])
                        zf.writestr(zi, mdFileData)
                    else:
                        print(f"Writing '{newPath}'")
                        zf.write(realPath, newPath)
    return newZipPath
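A hypothetical invocation (the client setup and the export name are placeholders, not from the original):

newZipPath = rewriteNotionZip(notionClient, 'Export.zip', outputPath='out',
                              removeTopH1=True)
print(newZipPath)  # out/Export.zip.formatted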
Example #27
0
    def archive(self, build_dir):
        # type: (str) -> None
        """Saves archive to provided build_dir.

        Used for saving downloaded VCS requirements as part of `pip download`.
        """
        assert self.source_dir

        create_archive = True
        archive_name = '%s-%s.zip' % (self.name, self.metadata["version"])
        archive_path = os.path.join(build_dir, archive_name)

        if os.path.exists(archive_path):
            response = ask_path_exists(
                'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort ' %
                display_path(archive_path), ('i', 'w', 'b', 'a'))
            if response == 'i':
                create_archive = False
            elif response == 'w':
                logger.warning('Deleting %s', display_path(archive_path))
                os.remove(archive_path)
            elif response == 'b':
                dest_file = backup_dir(archive_path)
                logger.warning(
                    'Backing up %s to %s',
                    display_path(archive_path),
                    display_path(dest_file),
                )
                shutil.move(archive_path, dest_file)
            elif response == 'a':
                sys.exit(-1)

        if not create_archive:
            return

        zip_output = zipfile.ZipFile(
            archive_path,
            'w',
            zipfile.ZIP_DEFLATED,
            allowZip64=True,
        )
        with zip_output:
            dir = os.path.normcase(
                os.path.abspath(self.unpacked_source_directory))
            for dirpath, dirnames, filenames in os.walk(dir):
                if 'pip-egg-info' in dirnames:
                    dirnames.remove('pip-egg-info')
                for dirname in dirnames:
                    dir_arcname = self._get_archive_name(
                        dirname,
                        parentdir=dirpath,
                        rootdir=dir,
                    )
                    zipdir = zipfile.ZipInfo(dir_arcname + '/')
                    zipdir.external_attr = 0x1ED << 16  # 0o755
                    zip_output.writestr(zipdir, '')
                for filename in filenames:
                    if filename == PIP_DELETE_MARKER_FILENAME:
                        continue
                    file_arcname = self._get_archive_name(
                        filename,
                        parentdir=dirpath,
                        rootdir=dir,
                    )
                    filename = os.path.join(dirpath, filename)
                    zip_output.write(filename, file_arcname)

        logger.info('Saved %s', display_path(archive_path))
Example #28
0
    def store_file(name, filepath):
        info = zipfile.ZipInfo(name)
        # Preserve the file's Unix mode in the high 16 bits of external_attr.
        info.external_attr = os.stat(filepath).st_mode << 16
        with open(filepath, 'rb') as f:
            zf.writestr(info, f.read())
Example #29
0
            def build_example(label, param_dict_real, zip_path_label):
                """Build the model with parameter values set in param_dict_real.

        Args:
          label: Label of the model
          param_dict_real: Parameter dictionary (arguments to the factories
            make_graph and make_test_inputs)
          zip_path_label: Filename in the zip

        Returns:
          (tflite_model_binary, report) where tflite_model_binary is the
          serialized flatbuffer as a string and report is a dictionary with
          keys `tflite_converter_log` (log of conversion), `tf_log` (log of tf
          conversion), `converter` (a string of success status of the
          conversion), `tf` (a string success status of the conversion).
        """

                np.random.seed(RANDOM_SEED)
                report = {
                    "tflite_converter": report_lib.NOTRUN,
                    "tf": report_lib.FAILED
                }

                # Build graph
                report["tf_log"] = ""
                report["tflite_converter_log"] = ""
                tf.reset_default_graph()

                with tf.Graph().as_default():
                    with tf.device("/cpu:0"):
                        try:
                            inputs, outputs = make_graph(param_dict_real)
                            inputs = [x for x in inputs if x is not None]
                        except (tf.errors.UnimplementedError,
                                tf.errors.InvalidArgumentError, ValueError):
                            report["tf_log"] += traceback.format_exc()
                            return None, report

                    sess = tf.Session()
                    try:
                        baseline_inputs, baseline_outputs = (make_test_inputs(
                            param_dict_real, sess, inputs, outputs))
                        baseline_inputs = [
                            x for x in baseline_inputs if x is not None
                        ]
                        # Converts baseline inputs/outputs to maps. The signature input and
                        # output names are set to be the same as the tensor names.
                        input_names = [
                            _normalize_input_name(x.name) for x in inputs
                        ]
                        output_names = [
                            _normalize_output_name(x.name) for x in outputs
                        ]
                        baseline_input_map = dict(
                            zip(input_names, baseline_inputs))
                        baseline_output_map = dict(
                            zip(output_names, baseline_outputs))
                    except (tf.errors.UnimplementedError,
                            tf.errors.InvalidArgumentError, ValueError):
                        report["tf_log"] += traceback.format_exc()
                        return None, report
                    report["tflite_converter"] = report_lib.FAILED
                    report["tf"] = report_lib.SUCCESS

                    # Builds a saved model with the default signature key.
                    input_names, tensor_info_inputs = _get_tensor_info(
                        inputs, "input_", _normalize_input_name)
                    output_tensors, tensor_info_outputs = _get_tensor_info(
                        outputs, "output_", _normalize_output_name)
                    input_tensors = [(name, t.shape, t.dtype)
                                     for name, t in zip(input_names, inputs)]

                    inference_signature = (
                        tf.saved_model.signature_def_utils.build_signature_def(
                            inputs=tensor_info_inputs,
                            outputs=tensor_info_outputs,
                            method_name="op_test"))
                    saved_model_dir = tempfile.mkdtemp("op_test")
                    saved_model_tags = [tf.saved_model.tag_constants.SERVING]
                    signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
                    builder = tf.saved_model.builder.SavedModelBuilder(
                        saved_model_dir)
                    builder.add_meta_graph_and_variables(
                        sess,
                        saved_model_tags,
                        signature_def_map={
                            signature_key: inference_signature,
                        },
                        strip_default_attrs=True)
                    builder.save(as_text=False)
                    # pylint: disable=g-long-ternary
                    graph_def = freeze_graph(
                        sess,
                        tf.global_variables() + inputs +
                        outputs) if use_frozen_graph else sess.graph_def

                if "split_tflite_lstm_inputs" in param_dict_real:
                    extra_convert_options.split_tflite_lstm_inputs = param_dict_real[
                        "split_tflite_lstm_inputs"]
                tflite_model_binary, converter_log = options.tflite_convert_function(
                    options,
                    saved_model_dir,
                    input_tensors,
                    output_tensors,
                    extra_convert_options=extra_convert_options,
                    test_params=param_dict_real)
                report["tflite_converter"] = (report_lib.SUCCESS if
                                              tflite_model_binary is not None
                                              else report_lib.FAILED)
                report["tflite_converter_log"] = converter_log

                if options.save_graphdefs:
                    zipinfo = zipfile.ZipInfo(zip_path_label + ".pbtxt")
                    archive.writestr(zipinfo,
                                     text_format.MessageToString(graph_def),
                                     zipfile.ZIP_DEFLATED)

                if tflite_model_binary:
                    if options.make_edgetpu_tests:
                        # Set proper min max values according to input dtype.
                        baseline_input_map, baseline_output_map = generate_inputs_outputs(
                            tflite_model_binary, min_value=0, max_value=255)
                    zipinfo = zipfile.ZipInfo(zip_path_label + ".bin")
                    archive.writestr(zipinfo, tflite_model_binary,
                                     zipfile.ZIP_DEFLATED)

                    example = {
                        "inputs": baseline_input_map,
                        "outputs": baseline_output_map
                    }

                    example_fp = StringIO()
                    write_examples(example_fp, [example])
                    zipinfo = zipfile.ZipInfo(zip_path_label + ".inputs")
                    archive.writestr(zipinfo, example_fp.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    example_fp2 = StringIO()
                    write_test_cases(example_fp2, zip_path_label + ".bin",
                                     [example])
                    zipinfo = zipfile.ZipInfo(zip_path_label + "_tests.txt")
                    archive.writestr(zipinfo, example_fp2.getvalue(),
                                     zipfile.ZIP_DEFLATED)

                    zip_manifest_label = zip_path_label + " " + label
                    if zip_path_label == label:
                        zip_manifest_label = zip_path_label

                    zip_manifest.append(zip_manifest_label + "\n")

                return tflite_model_binary, report
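Note that the `ZipInfo` objects above are created without an explicit `date_time`, so every entry gets zipfile's default timestamp of 1980-01-01 00:00:00; together with a fixed compression method this keeps repeated test runs reproducible. A small sketch of that default (names are illustrative):

import zipfile

info = zipfile.ZipInfo("model.bin")
print(info.date_time)  # (1980, 1, 1, 0, 0, 0) -- zipfile's default

with zipfile.ZipFile("artifacts.zip", mode="w") as archive:
    archive.writestr(info, b"\x00" * 16, zipfile.ZIP_DEFLATED)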
Example #30
def backup(owner, repo, z, auth):
    # Escape owner and repo suitably for use in a URL.
    owner = urllib.parse.quote(owner, safe="")
    repo = urllib.parse.quote(repo, safe="")

    now = datetime.datetime.utcnow()
    z.writestr(
        "README", f"""\
Archive of the GitHub repository https://github.com/{owner}/{repo}/
made {now.strftime("%Y-%m-%d %H:%M:%S")}.
""")

    # https://docs.github.com/en/free-pro-team@latest/rest/reference/issues#list-repository-issues
    issues_url = urllib.parse.urlparse(BASE_URL)._replace(
        path=f"/repos/{owner}/{repo}/issues", ).geturl()
    for r in get_paginated(issues_url, MEDIATYPE_REACTIONS, auth, {
            "sort": "created",
            "direction": "asc"
    }):
        for issue in r.json():
            check_url_origin(BASE_URL, issue["url"])
            zi = zipfile.ZipInfo(f"issues/{issue['id']}.json",
                                 timestamp_to_zip_time(issue["created_at"]))
            get_to_zipinfo(issue["url"], zi, MEDIATYPE_REACTIONS, auth)

            # There's no API for getting all reactions in a repository, so get
            # them per issue and per comment.
            # https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions#list-reactions-for-an-issue
            reactions_url = issue["reactions"]["url"]
            check_url_origin(BASE_URL, reactions_url)
            for r2 in get_paginated(reactions_url, MEDIATYPE_REACTIONS, auth):
                for reaction in r2.json():
                    zi = zipfile.ZipInfo(
                        f"issues/{issue['id']}/reactions/{reaction['id']}.json",
                        timestamp_to_zip_time(reaction["created_at"]))
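                    # ZipFile.open(..., mode="w") (Python 3.6+) writes the entry
                    # described by zi incrementally through a file object, instead
                    # of buffering the whole payload as writestr() does.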
                    with z.open(zi, mode="w") as f:
                        f.write(json.dumps(reaction).encode("utf-8"))

    # https://docs.github.com/en/free-pro-team@latest/rest/reference/issues#list-issue-comments-for-a-repository
    # Comments are linked to their parent issue via the issue_url field.
    comments_url = urllib.parse.urlparse(BASE_URL)._replace(
        path=f"/repos/{owner}/{repo}/issues/comments", ).geturl()
    for r in get_paginated(comments_url, MEDIATYPE_REACTIONS, auth):
        for comment in r.json():
            check_url_origin(BASE_URL, comment["url"])
            zi = zipfile.ZipInfo(f"issues/comments/{comment['id']}.json",
                                 timestamp_to_zip_time(comment["created_at"]))
            get_to_zipinfo(comment["url"], zi, MEDIATYPE_REACTIONS, auth)

            # There's no API for getting all reactions in a repository, so get
            # them per issue and per comment.
            # https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions#list-reactions-for-an-issue-comment
            reactions_url = comment["reactions"]["url"]
            check_url_origin(BASE_URL, reactions_url)
            for r2 in get_paginated(reactions_url, MEDIATYPE_REACTIONS, auth):
                for reaction in r2.json():
                    zi = zipfile.ZipInfo(
                        f"issues/comments/{comment['id']}/reactions/{reaction['id']}.json",
                        timestamp_to_zip_time(reaction["created_at"]))
                    with z.open(zi, mode="w") as f:
                        f.write(json.dumps(reaction).encode("utf-8"))

    labels_url = urllib.parse.urlparse(BASE_URL)._replace(
        path=f"/repos/{owner}/{repo}/labels", ).geturl()
    for r in get_paginated(labels_url, MEDIATYPE, auth):
        for label in r.json():
            check_url_origin(BASE_URL, label["url"])
            zi = zipfile.ZipInfo(f"labels/{label['id']}.json")
            get_to_zipinfo(label["url"], zi, MEDIATYPE, auth)