Example #1
    def test_get_all_hashes_no_future_keys(self) -> None:
        current = V(__version__.split("-", 1)[0])  # remove git hash, "-dirty", etc
        all_hashes = resources.get_all_sri_hashes()
        for key in all_hashes:
            assert (
                V(key) < current
            ), f"SRI hash dict contains version {key} which is newer than current version {__version__}"
Example #2
def check_version_order():
    if V(CONFIG.new_version) > V(CONFIG.last_any_version):
        passed("New version %r is newer than last version %r" %
               (CONFIG.new_version, CONFIG.last_any_version))
    else:
        failed("New version %r is NOT newer than last version %r" %
               (CONFIG.new_version, CONFIG.last_any_version))
Example #3
    def finish_epoch(self, sub_reporter: SubReporter) -> None:
        if self.epoch != sub_reporter.epoch:
            raise RuntimeError(
                f"Don't change epoch during observation: "
                f"{self.epoch} != {sub_reporter.epoch}"
            )

        # Calculate the mean of the current stats and record them as this epoch's stats
        stats = {}
        for key2, values in sub_reporter.stats.items():
            v = aggregate(values)
            stats[key2] = v

        stats["time"] = datetime.timedelta(
            seconds=time.perf_counter() - sub_reporter.start_time
        )
        stats["total_count"] = sub_reporter.total_count
        if V(torch.__version__) >= V("1.4.0"):
            if torch.cuda.is_initialized():
                stats["gpu_max_cached_mem_GB"] = (
                    torch.cuda.max_memory_reserved() / 2**30
                )
        else:
            if torch.cuda.is_available() and torch.cuda.max_memory_cached() > 0:
                stats["gpu_cached_mem_GB"] = torch.cuda.max_memory_cached() / 2**30

        self.stats.setdefault(self.epoch, {})[sub_reporter.key] = stats
        sub_reporter.finished()
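
The version gate above reflects the PyTorch 1.4 rename of max_memory_cached() to max_memory_reserved(). A condensed sketch of the same pattern, assuming torch and packaging are installed:

import torch
from packaging.version import Version as V

# Version() accepts local tags such as "2.1.0+cu121" from torch.__version__.
if torch.cuda.is_available():
    if V(torch.__version__) >= V("1.4.0"):
        peak_gb = torch.cuda.max_memory_reserved() / 2**30
    else:
        peak_gb = torch.cuda.max_memory_cached() / 2**30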
Example #4
    def inverse(
        self,
        input: Union[torch.Tensor, ComplexTensor],
        ilens: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Inverse STFT.

        Args:
            input: Tensor(batch, T, F, 2) or ComplexTensor(batch, T, F)
            ilens: (batch,)
        Returns:
            wavs: (batch, samples)
            ilens: (batch,)
        """
        if V(torch.__version__) >= V("1.6.0"):
            istft = torch.functional.istft
        else:
            try:
                import torchaudio
            except ImportError:
                raise ImportError(
                    "Please install torchaudio>=0.3.0 or use torch>=1.6.0")

            if not hasattr(torchaudio.functional, "istft"):
                raise ImportError(
                    "Please install torchaudio>=0.3.0 or use torch>=1.6.0")
            istft = torchaudio.functional.istft

        if self.window is not None:
            window_func = getattr(torch, f"{self.window}_window")
            if is_complex(input):
                datatype = input.real.dtype
            else:
                datatype = input.dtype
            window = window_func(self.win_length,
                                 dtype=datatype,
                                 device=input.device)
        else:
            window = None

        if is_complex(input):
            input = torch.stack([input.real, input.imag], dim=-1)
        elif input.shape[-1] != 2:
            raise TypeError("Invalid input type")
        input = input.transpose(1, 2)

        wavs = istft(
            input,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            win_length=self.win_length,
            window=window,
            center=self.center,
            normalized=self.normalized,
            onesided=self.onesided,
            length=ilens.max() if ilens is not None else ilens,
        )

        return wavs, ilens
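
A hedged round-trip check of the primitives this method wraps, using the modern complex-tensor API (torch >= 1.8); the STFT parameters are illustrative assumptions, not the module's defaults.

import torch

x = torch.randn(2, 16000)  # (batch, samples)
window = torch.hann_window(512)
spec = torch.stft(x, n_fft=512, hop_length=128, window=window,
                  return_complex=True)
y = torch.istft(spec, n_fft=512, hop_length=128, window=window,
                length=x.shape[-1])
print(torch.allclose(x, y, atol=1e-4))  # True up to numerical error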
Example #5
    def from_pretrained(
        model_tag: Optional[str] = None,
        vocoder_tag: Optional[str] = None,
        **kwargs: Optional[Any],
    ):
        """Build Text2Speech instance from the pretrained model.

        Args:
            model_tag (Optional[str]): Model tag of the pretrained models.
                Currently, the tags of espnet_model_zoo are supported.
            vocoder_tag (Optional[str]): Vocoder tag of the pretrained vocoders.
                Currently, the tags of parallel_wavegan are supported, which should
                start with the prefix "parallel_wavegan/".

        Returns:
            Text2Speech: Text2Speech instance.

        """
        if model_tag is not None:
            try:
                from espnet_model_zoo.downloader import ModelDownloader

            except ImportError:
                logging.error(
                    "`espnet_model_zoo` is not installed. "
                    "Please install via `pip install -U espnet_model_zoo`.")
                raise
            d = ModelDownloader()
            kwargs.update(**d.download_and_unpack(model_tag))

        if vocoder_tag is not None:
            if vocoder_tag.startswith("parallel_wavegan/"):
                try:
                    from parallel_wavegan.utils import download_pretrained_model

                except ImportError:
                    logging.error(
                        "`parallel_wavegan` is not installed. "
                        "Please install via `pip install -U parallel_wavegan`."
                    )
                    raise

                from parallel_wavegan import __version__

                # NOTE(kan-bayashi): Filelock download is supported from 0.5.2
                assert V(__version__) > V("0.5.1"), (
                    "Please install the latest parallel_wavegan "
                    "via `pip install -U parallel_wavegan`.")
                vocoder_tag = vocoder_tag.replace("parallel_wavegan/", "")
                vocoder_file = download_pretrained_model(vocoder_tag)
                vocoder_config = Path(vocoder_file).parent / "config.yml"
                kwargs.update(vocoder_config=vocoder_config,
                              vocoder_file=vocoder_file)

            else:
                raise ValueError(f"{vocoder_tag} is unsupported format.")

        return Text2Speech(**kwargs)
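
A hypothetical call; both tags are placeholders, and the dict access assumes the Text2Speech instance is callable and returns a "wav" entry as in espnet2's tts_inference module.

tts = Text2Speech.from_pretrained(
    model_tag="<espnet_model_zoo model tag>",
    vocoder_tag="parallel_wavegan/<pretrained vocoder tag>",
)
wav = tts("Hello world")["wav"]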
Example #6
def check_version_order(config: Config, system: System) -> None:
    if V(config.version) > V(config.last_any_version):
        passed(
            f"New version {config.version!r} is newer than last version {config.last_any_version!r}"
        )
    else:
        failed(
            f"New version {config.version!r} is NOT newer than last version {config.last_any_version!r}"
        )
        config.abort()
Example #7
def griffin_lim(
    spc: np.ndarray,
    n_fft: int,
    n_shift: int,
    win_length: Optional[int] = None,
    window: Optional[str] = "hann",
    n_iter: Optional[int] = 32,
) -> np.ndarray:
    """Convert linear spectrogram into waveform using Griffin-Lim.

    Args:
        spc: Linear spectrogram (T, n_fft // 2 + 1).
        n_fft: The number of FFT points.
        n_shift: Shift size in points.
        win_length: Window length in points.
        window: Window function type.
        n_iter: The number of iterations.

    Returns:
        Reconstructed waveform (N,).

    """
    # assert the size of input linear spectrogram
    assert spc.shape[1] == n_fft // 2 + 1

    if V(librosa.__version__) >= V("0.7.0"):
        # use librosa's fast Griffin-Lim algorithm
        spc = np.abs(spc.T)
        y = librosa.griffinlim(
            S=spc,
            n_iter=n_iter,
            hop_length=n_shift,
            win_length=win_length,
            window=window,
            center=True if spc.shape[1] > 1 else False,
        )
    else:
        # use the slower Griffin-Lim algorithm
        logging.warning(
            "librosa version is old. Using the slow version of the Griffin-Lim "
            "algorithm. If you want to use fast Griffin-Lim, please update librosa "
            "via `source ./path.sh && pip install librosa==0.7.0`."
        )
        cspc = np.abs(spc).astype(complex).T
        angles = np.exp(2j * np.pi * np.random.rand(*cspc.shape))
        y = librosa.istft(cspc * angles, n_shift, win_length, window=window)
        for i in range(n_iter):
            angles = np.exp(
                1j
                * np.angle(librosa.stft(y, n_fft, n_shift, win_length, window=window))
            )
            y = librosa.istft(cspc * angles, n_shift, win_length, window=window)

    return y
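
Usage sketch with a random magnitude spectrogram of shape (T, n_fft // 2 + 1) standing in for a real analysis STFT:

import numpy as np

spc = np.abs(np.random.randn(200, 1024 // 2 + 1)).astype(np.float32)
wav = griffin_lim(spc, n_fft=1024, n_shift=256, win_length=1024)
print(wav.shape)  # (N,)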
Example #8
def check_version_order(config: Config, system: System) -> ActionReturn:
    try:
        out = system.run("git for-each-ref --sort=-taggerdate --format '%(tag)' refs/tags")
        tags = [x.strip("'\"") for x in out.split("\n")]

        if all(V(config.version) > V(tag) for tag in tags if tag.startswith(config.release_level)):
            return PASSED(f"Version {config.version!r} is newer than any tag at release level {config.release_level!r}")
        else:
            return FAILED(f"Version {config.version!r} is older than an existing tag at release level {config.release_level!r}")

    except RuntimeError as e:
        return FAILED("Could compare tag version order", details=e.args)
Example #9
    def from_manifest(cls, path):
        shipped = is_shipped_plugin(path)
        manifest = path / 'manifest.ini'
        if not manifest.exists() or manifest.is_dir():
            raise ValueError(f'Not a plugin path: {path}')

        conf = configparser.ConfigParser()
        conf.remove_section('info')

        with manifest.open() as conf_file:
            try:
                conf.read_file(conf_file)
            except configparser.Error as error:
                raise ValueError(f'Error while parsing manifest: '
                                 f'{path.name}, {error}')

        for field in FIELDS:
            try:
                value = conf.get('info', field, fallback=None)
            except configparser.Error as error:
                raise ValueError(f'Error while parsing manifest: '
                                 f'{path.name}, {error}')

            if value is None:
                raise ValueError(f'No {field} found for {path.name}')

        name = conf.get('info', 'name')
        short_name = conf.get('info', 'short_name')
        description = p_(conf.get('info', 'description'))
        authors = conf.get('info', 'authors')
        homepage = conf.get('info', 'homepage')
        version = V(conf.get('info', 'version'))
        min_gajim_version = V(conf.get('info', 'min_gajim_version'))
        max_gajim_version = V(conf.get('info', 'max_gajim_version'))
        gajim_version = V(gajim.__version__.split('+', 1)[0])

        if not min_gajim_version <= gajim_version <= max_gajim_version:
            raise ValueError(
                f'Plugin {path.name} not loaded, '
                f'Gajim version out of supported range: '
                f'{min_gajim_version} <= {gajim_version} <= {max_gajim_version}'
            )

        return cls(name=name,
                   short_name=short_name,
                   description=description,
                   authors=authors,
                   homepage=homepage,
                   version=version,
                   min_gajim_version=min_gajim_version,
                   max_gajim_version=max_gajim_version,
                   shipped=shipped,
                   path=path)
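
The chained range comparison works because Version objects are totally ordered; a toy check with invented version numbers, assuming V is packaging.version.Version:

from packaging.version import Version as V

min_v, max_v = V("1.4"), V("2.0")
gajim_version = V("1.8.4+extra".split("+", 1)[0])  # strip local part as above
print(min_v <= gajim_version <= max_v)  # True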
Example #10
def check_docs_version_config(config: Config, system: System) -> ActionReturn:
    try:
        with open(Path("sphinx/switcher.json")) as fp:
            versions = json.load(fp)
            all_versions = versions["all"]
            latest_version = versions["latest"]
            if config.version not in all_versions:
                return FAILED(f"Version {config.version!r} is missing from 'all' versions")
            if V(config.version) > V(latest_version):
                return FAILED(f"Version {config.version!r} is not configured as 'latest' version")
            return PASSED("Docs versions config is correct")
    except RuntimeError as e:
        return FAILED("Could not check docs versions config", details=e.args)
Example #11
def publish_cdn(config: Config) -> None:
    subdir = "dev" if V(config.version).is_prerelease else "release"
    version = config.version

    access_key_id, secret_access_key = config.credentials["AWS"]

    buckets = []
    for bucket_name, bucket_region in [("cdn.bokeh.org", "us-east-1"),
                                       ("cdn-backup.bokeh.org", "us-west-2")]:
        conn = boto.s3.connect_to_region(
            bucket_region,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key,
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        )
        buckets.append(conn.get_bucket(bucket_name))

    content_type = "application/javascript"
    for name in ("bokeh", "bokeh-api", "bokeh-widgets", "bokeh-tables",
                 "bokeh-gl"):
        for suffix in ("js", "min.js"):
            local_path = "bokehjs/build/js/%s.%s" % (name, suffix)
            cdn_path = "bokeh/%s/%s-%s.%s" % (subdir, name, version, suffix)
            for bucket in buckets:
                upload_file_to_cdn(local_path, cdn_path, content_type, bucket)
Example #12
def upload_cdn(cdn_token, cdn_id, url=RSURL):
    subdir = 'dev' if V(CONFIG.version).is_prerelease else 'release'
    version = CONFIG.version

    content_type = "application/javascript"
    for name in ('bokeh', 'bokeh-api', 'bokeh-widgets', 'bokeh-tables',
                 'bokeh-gl'):
        for suffix in ('js', 'min.js'):
            local_path = 'bokehjs/build/js/%s.%s' % (name, suffix)
            cdn_path = 'bokeh/bokeh/%s/%s-%s.%s' % (subdir, name, version,
                                                    suffix)
            cdn_upload(local_path,
                       cdn_path,
                       content_type,
                       cdn_token,
                       cdn_id,
                       url=url)

    content_type = "text/css"
    for name in ('bokeh', 'bokeh-widgets', 'bokeh-tables'):
        for suffix in ('css', 'min.css'):
            local_path = 'bokehjs/build/css/%s.%s' % (name, suffix)
            cdn_path = 'bokeh/bokeh/%s/%s-%s.%s' % (subdir, name, version,
                                                    suffix)
            cdn_upload(local_path,
                       cdn_path,
                       content_type,
                       cdn_token,
                       cdn_id,
                       url=url)
Example #13
def publish_bokehjs_to_cdn(config: Config, system: System) -> ActionReturn:
    subdir = "dev" if V(config.version).is_prerelease else "release"
    version = config.version

    access_key_id = config.secrets["AWS_ACCESS_KEY_ID"]
    secret_access_key = config.secrets["AWS_SECRET_ACCESS_KEY"]

    try:
        buckets = []
        for bucket_name, bucket_region in [("cdn.bokeh.org", "us-east-1"),
                                           ("cdn-backup.bokeh.org",
                                            "us-west-2")]:
            conn = boto.s3.connect_to_region(
                bucket_region,
                aws_access_key_id=access_key_id,
                aws_secret_access_key=secret_access_key,
                calling_format=boto.s3.connection.OrdinaryCallingFormat(),
            )
            buckets.append(conn.get_bucket(bucket_name))

        content_type = "application/javascript"
        for name in ("bokeh", "bokeh-gl", "bokeh-api", "bokeh-widgets",
                     "bokeh-tables", "bokeh-mathjax"):
            for suffix in ("js", "min.js", "esm.js", "esm.min.js"):
                local_path = f"bokehjs/build/js/{name}.{suffix}"
                cdn_path = f"bokeh/{subdir}/{name}-{version}.{suffix}"
                for bucket in buckets:
                    _upload_file_to_cdn(local_path, cdn_path, content_type,
                                        bucket)
        return PASSED("Uploaded BokehJS to CDN")
    except Exception as e:
        return FAILED(f"BokehJS CDN upload failed: {e}", details=e.args)
Example #14
def upload_anaconda(token):
    channel = 'dev' if V(CONFIG.version).is_prerelease else 'main'
    for plat in PLATFORMS:
        files = glob.glob("%s/bokeh*.tar.bz2" % plat)
        for file in files:
            cmd = "anaconda -t %s upload -u bokeh %s -c dev --force --no-progress"
            run(cmd % (token, file), fake_cmd=cmd % ("<hidden>", file))
Example #15
def upload_docs():
    cd("sphinx")
    if V(CONFIG.version).is_prerelease:
        run("fab deploy:dev")
    else:
        run("fab deploy:%s" % CONFIG.version)
        run("fab latest:%s" % CONFIG.version)
    cd("..")
Example #16
def publish_npm(config: Config) -> None:
    if V(config.version).is_prerelease:
        skipped("Not updating NPM package for pre-releases")
        return

    cd("bokehjs")
    run("npm publish")
    cd("..")
Example #17
    def __call__(self, config: Config) -> ActionReturn:

        if config.dry_run and self._action_type is ActionType.CHECK:
            return SKIPPED(f"{self.name} skipped for dry run")

        if self._skip_prerelease and V(config.version).is_prerelease:
            return SKIPPED(f"{self.name} skipped for pre-release")

        return self.execute(config)
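
is_prerelease drives most of the skip decisions in these release actions:

from packaging.version import Version as V

print(V("3.0.0.dev3").is_prerelease)  # True
print(V("3.0.0rc1").is_prerelease)    # True
print(V("3.0.0").is_prerelease)       # False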
Example #18
    def __init__(self,
                 odim,
                 eprojs,
                 dropout_rate,
                 ctc_type="warpctc",
                 reduce=True):
        super().__init__()
        self.dropout_rate = dropout_rate
        self.loss = None
        self.ctc_lo = torch.nn.Linear(eprojs, odim)
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.probs = None  # for visualization

        # For PyTorch >= 1.7.0, CTC is always builtin
        self.ctc_type = ctc_type if V(
            torch.__version__) < V("1.7.0") else "builtin"

        if ctc_type != self.ctc_type:
            logging.warning(
                f"CTC was set to {self.ctc_type} due to PyTorch version.")

        if self.ctc_type == "builtin":
            reduction_type = "sum" if reduce else "none"
            self.ctc_loss = torch.nn.CTCLoss(reduction=reduction_type,
                                             zero_infinity=True)
        elif self.ctc_type == "cudnnctc":
            reduction_type = "sum" if reduce else "none"
            self.ctc_loss = torch.nn.CTCLoss(reduction=reduction_type)
        elif self.ctc_type == "warpctc":
            import warpctc_pytorch as warp_ctc

            self.ctc_loss = warp_ctc.CTCLoss(size_average=True, reduce=reduce)
        elif self.ctc_type == "gtnctc":
            from espnet.nets.pytorch_backend.gtn_ctc import GTNCTCLossFunction

            self.ctc_loss = GTNCTCLossFunction.apply
        else:
            raise ValueError(
                'ctc_type must be one of "builtin", "cudnnctc", "warpctc" '
                'or "gtnctc": {}'.format(self.ctc_type))

        self.ignore_id = -1
        self.reduce = reduce
Example #19
def publish_pypi(config: Config) -> None:
    if V(config.version).is_prerelease:
        skipped("Not updating PyPI package for pre-releases")
        return

    token = config.credentials["PYPI"]
    cmd = "twine upload -u __token__ -p %s %s"
    files = glob.glob("dist/bokeh*.tar.gz")
    for file in files:
        run(cmd % (token, file), fake_cmd=cmd % ("<hidden>", file))
Example #20
def detect_phantomjs(version='2.1'):
    ''' Detect if PhantomJS is available in PATH, at a minimum version.

    Args:
        version (str, optional) :
            Required minimum version for PhantomJS (mostly for testing)

    Returns:
        str, path to PhantomJS

    '''
    if settings.phantomjs_path() is not None:
        phantomjs_path = settings.phantomjs_path()
    else:
        if hasattr(shutil, "which"):
            phantomjs_path = shutil.which("phantomjs") or "phantomjs"
        else:
            # Python 2 lacks shutil.which; fall back to relying on PATH
            phantomjs_path = "phantomjs"

    try:
        proc = Popen([phantomjs_path, "--version"], stdout=PIPE, stderr=PIPE)
        proc.wait()
        out = proc.communicate()

        if len(out[1]) > 0:
            raise RuntimeError('Error encountered in PhantomJS detection: %r' %
                               out[1].decode('utf8'))

        required = V(version)
        installed = V(out[0].decode('utf8'))
        if installed < required:
            raise RuntimeError(
                'PhantomJS version too old. Version >= %s required, installed: %s'
                % (required, installed))

    except OSError:
        raise RuntimeError(
            'PhantomJS is not present in PATH or BOKEH_PHANTOMJS_PATH. Try "conda install phantomjs" or \
            "npm install -g phantomjs-prebuilt"')

    return phantomjs_path
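
Note that packaging's Version tolerates surrounding whitespace, so parsing the raw decoded --version output, trailing newline included, is safe:

from packaging.version import Version as V

print(V("2.1.1\n") >= V("2.1"))  # True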
Example #21
def upload_cdn(buckets):
    subdir = 'dev' if V(CONFIG.version).is_prerelease else 'release'
    version = CONFIG.version

    content_type = "application/javascript"
    for name in ('bokeh', 'bokeh-api', 'bokeh-widgets', 'bokeh-tables', 'bokeh-gl'):
        for suffix in ('js', 'min.js'):
            local_path = 'bokehjs/build/js/%s.%s' % (name, suffix)
            cdn_path = 'bokeh/%s/%s-%s.%s' % (subdir, name, version, suffix)
            for bucket in buckets:
                cdn_upload(local_path, cdn_path, content_type, bucket)
Example #22
def publish_docs(config: Config, system: System) -> None:
    system.cd("sphinx")
    sync_cmd = "aws s3 sync build/html s3://docs.bokeh.org/en/%s/ --acl bucket-owner-full-control --cache-control max-age=31536000,public"
    invalidate_cmd = "aws cloudfront create-invalidation --distribution-id E2OC6Q27H5UQ63 --paths %s"

    if V(config.version).is_prerelease:
        system.run(sync_cmd % "dev")
        system.run(invalidate_cmd % "/en/dev*")
    else:
        system.run(sync_cmd % config.version)
        system.run(sync_cmd % "latest")
        paths = "/en/latest* /versions.json"
        system.run(invalidate_cmd % paths)
    system.cd("..")
Example #23
def upload_docs():
    cd("sphinx")
    sync_cmd = "aws s3 sync build/html s3://docs.bokeh.org/en/%s/ --acl bucket-owner-full-control --cache-control max-age=31536000,public"
    invalidate_cmd = "aws cloudfront create-invalidation --distribution-id E2OC6Q27H5UQ63 --paths %s"

    if V(CONFIG.version).is_prerelease:
        run(sync_cmd % "dev")
        run(invalidate_cmd % "/en/dev")
    else:
        run(sync_cmd % CONFIG.version)
        run(sync_cmd % "latest")
        paths = "/en/latest /versions.json" % CONFIG.version
        run(invalidate_cmd % paths)
    cd("..")
Example #24
def update_hash_manifest(config: Config) -> None:
    if V(config.version).is_prerelease:
        skipped("Not updating SRH hash manifest for pre-releases")
        return

    try:
        cd("scripts")
        run(f"python sri.py {config.version}")
        cd("..")
        passed("Updated SRI hash manifest")
        commit(config, "bokeh/_sri.json")
    except CalledProcessError as e:
        failed("SRI hash manifest update failed", e.output.split("\n"))
        config.abort()
Example #25
    def update_config(self, old_version, new_version):
        old = V(old_version)

        if old < V('0.16.4.1'):
            self.update_config_to_01641()
        if old < V('0.16.10.1'):
            self.update_config_to_016101()
        if old < V('0.16.10.2'):
            self.update_config_to_016102()
        if old < V('0.16.10.4'):
            self.update_config_to_016104()
        if old < V('0.16.10.5'):
            self.update_config_to_016105()
        if old < V('0.98.3'):
            self.update_config_to_0983()
        if old < V('1.1.93'):
            self.update_config_to_1193()
        if old < V('1.1.94'):
            self.update_config_to_1194()
        if old < V('1.1.95'):
            self.update_config_to_1195()

        app.config.set('version', new_version)
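
Why Version comparison rather than plain string comparison for a migration chain like this: lexicographic order mishandles multi-digit components (assuming V is packaging.version.Version).

from packaging.version import Version as V

print("0.16.10.1" < "0.16.4.1")        # True  -- string order is wrong
print(V("0.16.10.1") < V("0.16.4.1"))  # False -- version order is right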
Example #26
def update_changelog(config: Config) -> None:
    if V(config.version).is_prerelease:
        skipped("Not updating CHANGELOG for pre-releases")
        return

    try:
        cd("scripts")
        run(f"python issues.py -p {config.last_full_version} -r {config.version}")
        cd("..")
        passed("Updated CHANGELOG with new closed issues")
        filename = join(config.repo_top_dir, "CHANGELOG")
        commit(config, filename)
    except CalledProcessError as e:
        if "HTTP Error 403: Forbidden" in e.output:
            failed("CHANGELOG cannot be updated right now due to GitHub rate limiting")
        else:
            failed("CHANGELOG update failed", e.output.split("\n"))
Example #27
def publish_anaconda(config: Config, system: System) -> None:
    if V(config.version).is_prerelease:
        cmd = "anaconda -t %s upload -u bokeh %s -l dev --force --no-progress"
    else:
        cmd = "anaconda -t %s upload -u bokeh %s -l dev -l main --force --no-progress"

    try:
        conda_base_dir = system.run("conda info --base").strip()
    except CalledProcessError as e:
        failed("Could not get conda base dir", str(e).split("\n"))
        config.abort()

    token = config.credentials["ANACONDA"]
    files = glob.glob(f"{conda_base_dir}/conda-bld/noarch/bokeh*.tar.bz2")
    for file in files:
        system.run(cmd % (token, file), fake_cmd=cmd % ("<hidden>", file))

    files = glob.glob("dist/bokeh*.tar.gz")
    for file in files:
        system.run(cmd % (token, file), fake_cmd=cmd % ("<hidden>", file))
Example #28
import os.path
__dir__ = os.path.split(os.path.abspath(os.path.realpath(__file__)))[0]
data_location = os.path.join(__dir__, "data")
src = "https://git.llvm.org/git/compiler-rt.git/"

# Module version
version_str = "0.0.post6206"
version_tuple = (0, 0, 6206)
try:
    from packaging.version import Version as V
    pversion = V("0.0.post6206")
except ImportError:
    pass

# Data version info
data_version_str = "0.0.post6140"
data_version_tuple = (0, 0, 6140)
try:
    from packaging.version import Version as V
    pdata_version = V("0.0.post6140")
except ImportError:
    pass
data_git_hash = "81fb4f00c2cfe13814765968e09931ffa93b5138"
data_git_describe = "v0.0-6140-g81fb4f00c"
data_git_msg = """\
commit 81fb4f00c2cfe13814765968e09931ffa93b5138
Author: Chris Bieneman <*****@*****.**>
Date:   Fri Nov 6 23:19:29 2015 +0000

    [CMake] Need to filter ${arch}/*.c builtins as well as ${arch}/*.S builtins.
"""
Example #29
import math
from abc import ABC, abstractmethod
from functools import reduce

import torch
import torch.nn.functional as F
from packaging.version import parse as V

from espnet2.enh.layers.complex_utils import complex_norm, is_complex, new_complex_like
from espnet2.enh.loss.criterions.abs_loss import AbsEnhLoss

is_torch_1_9_plus = V(torch.__version__) >= V("1.9.0")

EPS = torch.finfo(torch.get_default_dtype()).eps


def _create_mask_label(mix_spec, ref_spec, noise_spec=None, mask_type="IAM"):
    """Create mask label.

    Args:
        mix_spec: ComplexTensor(B, T, [C,] F)
        ref_spec: List[ComplexTensor(B, T, [C,] F), ...]
        noise_spec: ComplexTensor(B, T, [C,] F)
            only used for IBM and IRM
        mask_type: str
    Returns:
        labels: List[Tensor(B, T, [C,] F), ...] or List[ComplexTensor(B, T, F), ...]
    """

    # Must be upper case
    mask_type = mask_type.upper()
Example #30
import os.path
__dir__ = os.path.split(os.path.abspath(os.path.realpath(__file__)))[0]
data_location = os.path.join(__dir__, "system_verilog")
src = "https://github.com/enjoy-digital/black-parrot.git"

# Module version
version_str = "0.0.post1817"
version_tuple = (0, 0, 1817)
try:
    from packaging.version import Version as V
    pversion = V("0.0.post1817")
except ImportError:
    pass

# Data version info
data_version_str = "0.0.post1751"
data_version_tuple = (0, 0, 1751)
try:
    from packaging.version import Version as V
    pdata_version = V("0.0.post1751")
except ImportError:
    pass
data_git_hash = "03d2514542557199a79373443c8e405f0bfee53f"
data_git_describe = "v0.0-1751-g03d25145"
data_git_msg = """\
commit 03d2514542557199a79373443c8e405f0bfee53f
Merge: 30b599ca 10401383
Author: enjoy-digital <*****@*****.**>
Date:   Fri May 15 12:47:57 2020 +0200

    Merge pull request #2 from scanakci/master