Example no. 1
 def test_pred(self):
     """Test with a custom predicate"""
     self.assertEqual(
         mi.first_true([2, 4, 6], pred=lambda x: x % 3 == 0), 6
     )
Example no. 2
 def test_nothing_true(self):
     """Test default return value."""
     self.assertEqual(mi.first_true([0, 0, 0]), False)
Example no. 3
 def test_default(self):
     """Test with a default keyword"""
     self.assertEqual(mi.first_true([0, 0, 0], default='!'), '!')
Example no. 4
 def test_nothing_true(self):
     """Test default return value."""
     self.assertIsNone(mi.first_true([0, 0, 0]))
Example no. 5
 def test_something_true(self):
     """Test with no keywords"""
     self.assertEqual(mi.first_true(range(10)), 1)
 def test_pred(self):
     """Test with a custom predicate"""
     self.assertEqual(mi.first_true([2, 4, 6], pred=lambda x: x % 3 == 0), 6)
Example no. 7
 def test_something_true(self):
     """Test with no keywords"""
     self.assertEqual(mi.first_true(range(10)), 1)
 def test_default(self):
     """Test with a default keyword"""
     self.assertEqual(mi.first_true([0, 0, 0], default="!"), "!")
 def test_nothing_true(self):
     """Test default return value."""
     self.assertIsNone(mi.first_true([0, 0, 0]))
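For reference, more_itertools.first_true comes from the itertools recipes; the recipe implementation below is a minimal sketch of what the tests above exercise (note that the two test_nothing_true variants disagree on the default return value, presumably reflecting different library versions):

def first_true(iterable, default=False, pred=None):
    """Return the first truthy item, or the first item for which pred(item) is truthy; return default if none is found."""
    return next(filter(pred, iterable), default)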
Example no. 10
 def find_by_type(self, idl_type):
     return first_true(self._definitions,
                       pred=lambda definition: definition.name == idl_type)
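The snippet above uses first_true as an attribute-based lookup over a sequence of definition objects. A minimal, self-contained sketch of the same pattern (the Definition type and sample data here are hypothetical, not part of the original codebase):

from collections import namedtuple
from more_itertools import first_true

Definition = namedtuple('Definition', ['name', 'members'])
definitions = [Definition('Node', []), Definition('Element', [])]

# First definition whose name matches; falls back to first_true's default if none does.
match = first_true(definitions, pred=lambda d: d.name == 'Element')
print(match.name)  # -> 'Element'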
Example no. 11
def template_to_filepath(template, metadata, template_patterns=None):
    """Create directory structure and file name based on metadata template.

	Note:

	A template meant to be a base directory for suggested
	names should have a trailing slash or backslash.

	Parameters:
		template (str or ~os.PathLike): A filepath which can include template patterns as defined by :param template_patterns:.

		metadata (~collections.abc.Mapping): A metadata dict.

		template_patterns (~collections.abc.Mapping): A dict of ``pattern: field`` pairs used to replace patterns with metadata field values.
			Default: :const:`~google_music_utils.constants.TEMPLATE_PATTERNS`

	Returns:
		~pathlib.Path: A filepath.
	"""

    path = Path(template)

    if template_patterns is None:
        template_patterns = TEMPLATE_PATTERNS

    suggested_filename = suggest_filename(metadata)

    if (path == Path.cwd() or path == Path('%suggested%')):
        filepath = Path(suggested_filename)
    elif any(template_pattern in path.parts
             for template_pattern in template_patterns):
        if template.endswith(('/', '\\')):
            template += suggested_filename

        path = Path(template.replace('%suggested%', suggested_filename))

        parts = []
        for part in path.parts:
            if part == path.anchor:
                parts.append(part)
            else:
                for key in template_patterns:
                    if (  # pragma: no branch
                            key in part
                            and any(field in metadata
                                    for field in template_patterns[key])):
                        field = more_itertools.first_true(
                            template_patterns[key],
                            pred=lambda k: k in metadata)

                        if key.startswith(('%disc', '%track')):
                            number = _split_number_field(
                                str(list_to_single_value(metadata[field])))

                            if key.endswith('2%'):
                                metadata[field] = number.zfill(2)
                            else:
                                metadata[field] = number

                        part = part.replace(
                            key, list_to_single_value(metadata[field]))

                parts.append(_replace_invalid_characters(part))

        filepath = Path(*parts)
    elif '%suggested%' in template:
        filepath = Path(template.replace('%suggested%', suggested_filename))
    elif template.endswith(('/', '\\')):
        filepath = path / suggested_filename
    else:
        filepath = path

    return filepath
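The first_true call in the middle of this function picks, for a given pattern key, the first candidate metadata field that is actually present. A minimal sketch of just that step, using a hypothetical template_patterns mapping rather than the real google_music_utils.constants.TEMPLATE_PATTERNS:

from more_itertools import first_true

template_patterns = {'%artist%': ['albumartist', 'artist']}  # illustrative only
metadata = {'artist': 'Some Artist', 'title': 'Some Song'}

key = '%artist%'
# The first candidate field for this pattern that exists in the metadata.
field = first_true(template_patterns[key], pred=lambda k: k in metadata)
print(field)  # -> 'artist'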
Example no. 12
    def test_log_data(
        self,
        mock_channel_tracker,
        pandas_data_frame,
        preview,
        size,
        schema,
        stats,
        histograms,
        path,
    ):
        """
        This test is a bit complicated but it cover any flag that there is in this function (almost)
        For each scenario we want to be sure that the expected output sent to the tracker.
        This mean that when a specific flag is set to false we expect that the relevant metrics will *not* be send.

        This is not a test to make sure that the histogram calculations or preview or schema output are as expected.
        This is only a test for the api of log_data/log_dataframe.

        !! This test help us see that this interface is not very intuitive !!
        """
        @task()
        def task_with_log_data():
            log_data(
                key="df",
                value=pandas_data_frame,
                with_preview=preview,
                with_size=size,
                with_schema=schema,
                with_stats=stats,
                with_histograms=histograms,
                path="/my/path/to_file.txt" if path else None,
            )

        task_with_log_data()
        metrics_info = list(get_log_metrics(mock_channel_tracker))
        map_metrics = {
            metric_info["metric"].key: metric_info["metric"]
            for metric_info in metrics_info
        }

        # note: the print below is a helper to use when debugging
        # side-note: I wish we didn't have to support py2 and could use f-strings
        # side-note: I wish we could use the f-string debug syntax available from py3.8
        # https://tirkarthi.github.io/programming/2019/05/08/f-string-debugging.html
        # >>> print(f"{preview=}, {size=}, {schema=}, {stats=},{histograms=}")
        print(
            "preview={preview}, size={size}, schema={schema}, stats={stats}, histograms={histograms}, path={path}"
            .format(
                preview=preview,
                size=size,
                schema=schema,
                stats=stats,
                histograms=histograms,
                path=path,
            ))

        for m in metrics_info:
            print(m["metric"], m["metric"].value)

        # no matter which configuration is set we expect to log the shape:
        assert "df.shape0" in map_metrics
        assert map_metrics["df.shape0"].value == 5
        assert "df.shape1" in map_metrics
        assert map_metrics["df.shape1"].value == 3

        # Tests for schema
        # ------------------
        # Only report the schema if the schema flag is on; the schema source is the user.
        assert if_and_only_if(
            schema,
            ("df.schema" in map_metrics
             and map_metrics["df.schema"].source == "user"),
        )
        #
        # The size flag is used only together with the schema flag;
        # together they add a size.bytes calculation to the schema value
        assert if_and_only_if(
            (schema and size),
            ("df.schema" in map_metrics
             and "size.bytes" in map_metrics["df.schema"].value),
        )

        # Tests for preview
        # ------------------
        # When preview is on we expect the value metric to be sent with a preview
        assert if_and_only_if(
            preview,
            ("df" in map_metrics
             and "value_preview" in map_metrics["df"].value),
        )
        #
        # When we have both preview and schema we expect the value metric to contain a schema section
        assert if_and_only_if(
            (preview and schema),
            ("df" in map_metrics and "schema" in map_metrics["df"].value),
        )
        #
        # When we use preview, schema and size, we expect the schema inside
        # the value metric to contain a size.bytes entry
        assert if_and_only_if(
            (preview and schema and size),
            ("df" in map_metrics and "schema" in map_metrics["df"].value
             and "size.bytes" in map_metrics["df"].value["schema"]),
        )

        # Tests for histograms
        # ---------------------
        # We only log the histogram metrics when we use the histogram flag
        assert if_and_only_if(
            histograms,
            ("df.histograms" in map_metrics
             and map_metrics["df.histograms"].source == "histograms"
             and "df.histogram_system_metrics" in map_metrics
             and map_metrics["df.histogram_system_metrics"].source
             == "histograms"),
        )
        #
        # This is a tricky one: when stats is on, we create
        # multiple histogram metrics for each of the columns.
        assert if_and_only_if(
            stats,
            all(
                any(header in metric_name for metric_name in map_metrics)
                for header in pandas_data_frame.columns),
        )

        if path:
            log_target = first_true(
                get_log_targets(mock_channel_tracker),
                pred=lambda t: not t.target_path.startswith("memory://"),
            )
            assert log_target.target_path == "/my/path/to_file.txt"
            # the data dimensions are taken from the data frame
            assert log_target.data_dimensions == (5, 3)

            has_data_schema = bool(eval(log_target.data_schema))
            assert if_and_only_if(schema or size, has_data_schema)
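The assertions above lean on an if_and_only_if helper from the test suite; it presumably expresses a logical biconditional, roughly the sketch below (an assumption; the actual dbnd test helper may differ):

def if_and_only_if(left, right):
    # Both conditions hold, or neither does.
    return bool(left) == bool(right)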