Example #1
# Imports as used elsewhere in the Clarity baseline code; the exact module
# paths may differ between versions of the repository.
import logging
from pathlib import Path

import MSBG
import clarity_core.signal as ccs
from clarity_core.config import CONFIG


def run_demo(input_filename, output_filename):
    """Demo of using MSBG to process a signal according to various
    levels of hearing impairment, as specified by audiograms.

    Arguments:
        input_filename {str} -- Name of the input wav file
        output_filename {str} -- Stem name of output wav files
    """

    signal = ccs.read_signal(input_filename)
    logging.info(f"Signal shape is {signal.shape}")
    # Make the list of ears. Each has a different audiogram.
    audiograms = MSBG.audiogram.standard_audiograms()
    ears = [
        MSBG.Ear(audiogram=audiogram, src_pos="ff") for audiogram in audiograms
    ]

    # process the signal with each ear in the list of ears
    outputs = [ear.process(signal, add_calibration=True) for ear in ears]

    # Output the signals
    for i, output in enumerate(outputs):
        outfile = Path(output_filename)
        ccs.write_signal(
            f"{outfile.parent}/{outfile.stem}_{i}.wav",
            output[0],
            CONFIG.fs,
            floating_point=True,
        )
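
A hypothetical invocation of run_demo might look as follows; the file names are placeholders, not part of the example:

run_demo("speech.wav", "out/processed.wav")
# With the standard audiograms this writes out/processed_0.wav, out/processed_1.wav, ...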
Example #2
    def process_files(self, infile_names, outfile_name):
        """Read the signals in infile_names, process them, and write the result to outfile_name."""
        signals = [read_signal(infile) for infile in infile_names]
        output_signal = self.process_signals(signals)
        write_signal(
            outfile_name,
            output_signal,
            self.fs,
            floating_point=True,
        )
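
A sketch of how this method could be called, assuming an instance enhancer of the enclosing class (the class itself is not part of this excerpt) and placeholder file names:

enhancer.process_files(["scene_CH1.wav", "scene_CH2.wav"], "scene_enhanced.wav")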
Example #3
# Imports as used elsewhere in the Clarity baseline code; the exact module
# paths may differ between versions of the repository.
import logging

import numpy as np

import clarity_core.signal as ccs
from clarity_core.config import CONFIG


def generate_HA_inputs(scene, input_path, output_path, fs, channels,
                       tail_duration):
    """Generate all HA input signals for a given scene.

    Args:
        scene (dict): dictionary defining the scene to be generated
        input_path (str): path to the input data
        output_path (str): path to the output data
        fs (int): sample frequency
        channels (list): list of HA channels to process
        tail_duration (float): length in seconds to append for the
            reverberation tail
    """
    logging.debug("In generate_HA_inputs")
    logging.debug(scene)

    n_tail = int(tail_duration * fs)

    pre_samples = scene["pre_samples"]
    post_samples = scene["post_samples"]
    dataset = scene["dataset"]
    target = scene["target"]["name"]
    noise_type = scene["interferer"]["type"]
    interferer = scene["interferer"]["name"]
    room = scene["room"]["name"]
    brir_stem = f"{input_path}/{dataset}/rooms/brir/brir_{room}"
    anechoic_brir_stem = f"{input_path}/{dataset}/rooms/brir/anech_brir_{room}"

    target_fn = f"{input_path}/{dataset}/targets/{target}.wav"
    interferer_fn = f"{input_path}/{dataset}/interferers/{noise_type}/{interferer}.wav"

    target = ccs.read_signal(target_fn)
    target = np.pad(target, [(pre_samples, post_samples)])

    offset = scene["interferer"]["offset"]  # Offset in samples
    interferer = ccs.read_signal(interferer_fn,
                                 offset=offset,
                                 nsamples=len(target),
                                 offset_is_samples=True)

    if len(target) != len(interferer):
        logging.debug("Target and interferer have different lengths")

    # Apply a 500 ms half-cosine ramp (duration set by CONFIG.ramp_duration)
    interferer = ccs.apply_ramp(interferer, dur=CONFIG.ramp_duration)

    prefix = f"{output_path}/{scene['scene']}"
    outputs = [
        (f"{prefix}_target.wav", target),
        (f"{prefix}_interferer.wav", interferer),
    ]

    snr_ref = None
    for channel in channels:
        # Load scene BRIRs
        target_brir_fn = f"{brir_stem}_t_CH{channel}.wav"
        interferer_brir_fn = f"{brir_stem}_i1_CH{channel}.wav"
        target_brir = ccs.read_signal(target_brir_fn)
        interferer_brir = ccs.read_signal(interferer_brir_fn)

        # Apply the BRIRs
        target_at_ear = ccs.apply_brir(target, target_brir, n_tail=n_tail)
        interferer_at_ear = ccs.apply_brir(interferer,
                                           interferer_brir,
                                           n_tail=n_tail)

        # Scale interferer to obtain SNR specified in scene description
        snr_dB = scene["SNR"]
        logging.info(
            f"Scaling interferer to obtain mixture SNR = {snr_dB} dB.")

        if snr_ref is None:
            # snr_ref computed for first channel in the list and then
            # same scaling applied to all
            snr_ref = ccs.compute_snr(
                target_at_ear,
                interferer_at_ear,
                pre_samples=pre_samples,
                post_samples=post_samples,
            )
            logging.debug(f"Using channel {channel} as reference.")

        # Apply snr_ref reference scaling to get 0 dB and then scale to target snr_dB
        interferer_at_ear = interferer_at_ear * snr_ref
        interferer_at_ear = interferer_at_ear * 10**((-snr_dB) / 20)
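        # snr_dB is expressed in decibels, and a dB value maps to an amplitude
        # ratio via 10 ** (dB / 20), hence the factor 10 ** (-snr_dB / 20)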

        # Sum target and scaled and ramped interferer
        signal_at_ear = ccs.sum_signals([target_at_ear, interferer_at_ear])
        outputs.extend([
            (f"{prefix}_mixed_CH{channel}.wav", signal_at_ear),
            (f"{prefix}_target_CH{channel}.wav", target_at_ear),
            (f"{prefix}_interferer_CH{channel}.wav", interferer_at_ear),
        ])

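    # If no channels were processed above, still load one target BRIR so that
    # the anechoic BRIR below can be padded to a matching length.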
    if channels == []:
        target_brir_fn = f"{brir_stem}_t_CH0.wav"
        target_brir = ccs.read_signal(target_brir_fn)

    # Construct the anechoic target reference signal
    anechoic_brir_fn = f"{anechoic_brir_stem}_t_CH1.wav"  # CH1 used for the anechoic signal
    anechoic_brir = ccs.read_signal(anechoic_brir_fn)
    # Padding the anechoic BRIR is inefficient but keeps the code simple
    anechoic_brir_pad = ccs.pad(anechoic_brir, len(target_brir))
    target_anechoic = ccs.apply_brir(target, anechoic_brir_pad, n_tail=n_tail)

    outputs.append((f"{prefix}_target_anechoic.wav", target_anechoic))

    # Write all output files
    for (filename, signal) in outputs:
        ccs.write_signal(filename, signal, CONFIG.fs)
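
The SNR handling above is worth unpacking: snr_ref is the gain that would bring the interferer to 0 dB SNR relative to the target, and multiplying by 10**(-snr_dB / 20) then sets the SNR requested by the scene. The following self-contained sketch shows the same arithmetic; snr_gain is a stand-in written for illustration, not the ccs.compute_snr implementation:

import numpy as np

def snr_gain(target, interferer, snr_db):
    """Gain for the interferer so that the target-to-interferer SNR is snr_db."""
    target_rms = np.sqrt(np.mean(target ** 2))
    interferer_rms = np.sqrt(np.mean(interferer ** 2))
    # Normalise the interferer to 0 dB SNR, then scale to the requested SNR
    return (target_rms / interferer_rms) * 10 ** (-snr_db / 20)

rng = np.random.default_rng(0)
target = rng.standard_normal(16000)
interferer = rng.standard_normal(16000)
mixture = target + snr_gain(target, interferer, snr_db=6.0) * interferer  # approx +6 dB SNR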
Example #4
    def process_files(self, infile_names, outfile_name):
        """Process a set of input signals and generate an output.

        Args:
            infile_names (list[str]): List of input wav files, one stereo wav
                file per hearing device channel
            outfile_name (str): Name of the wav file in which to store the output
        """
        dirname = os.path.abspath(
            os.path.join(os.path.dirname(__file__), os.pardir))

        logging.info(
            f"Processing {outfile_name} with listener {self.listener}")
        audiogram = GHA.audiogram(self.listener)
        logging.info(f"Audiogram severity is {audiogram.severity}")
        audiogram = audiogram.select_subset_of_cfs(self.audf)

        # Get gain table with noisegate correction
        gaintable = GHA.get_gaintable(
            audiogram,
            self.noisegatelevels,
            self.noisegateslope,
            self.cr_level,
            self.max_output_level,
        )
        formatted_sGt = GHA.format_gaintable(gaintable, noisegate_corr=True)

        cfg_template = f"{dirname}/cfg_files/{self.cfg_file}_template.cfg"

        # Merge CH1 and CH3 files. This is the baseline configuration.
        # CH2 is ignored.
        fd_merged, merged_filename = tempfile.mkstemp(prefix="clarity-merged-",
                                                      suffix=".wav")
        # Only need file name; must immediately close the unused file handle.
        os.close(fd_merged)

        ccs.create_HA_inputs(infile_names, merged_filename)

        # Create the openMHA config file from the template
        fd_cfg, cfg_filename = tempfile.mkstemp(prefix="clarity-openmha-",
                                                suffix=".cfg")
        # Again, only need file name; must immediately close the unused file handle.
        os.close(fd_cfg)

        with open(cfg_filename, "w") as f:
            f.write(
                GHA.create_configured_cfgfile(
                    merged_filename,
                    outfile_name,
                    formatted_sGt,
                    cfg_template,
                    self.ahr,
                ))

        # Process the file using the configured cfg file.
        # openMHA console output is suppressed with -q; comment the flag out when testing.
        # A log of the openMHA commands is appended to logfile.txt.
        subprocess.run([
            "mha",
            "-q",
            "--log=logfile.txt",
            f"?read:{cfg_filename}",
            "cmd=start",
            "cmd=stop",
            "cmd=quit",
        ])

        # Delete temporary files.
        os.remove(merged_filename)
        os.remove(cfg_filename)

        # Check output signal has energy in every channel
        sig = ccs.read_signal(outfile_name)

        if len(np.shape(sig)) == 1:
            sig = np.expand_dims(sig, axis=1)

        if not np.all(np.sum(np.abs(sig), axis=0)):
            raise ValueError("Output signal has at least one empty channel.")

        ccs.write_signal(outfile_name, sig, CONFIG.fs, floating_point=True)

        logging.info("OpenMHA processing complete")
Example #5
# Imports as used elsewhere in the Clarity baseline code; the exact module
# paths may differ between versions of the repository.
import logging

import numpy as np
from scipy.signal import unit_impulse

import MSBG
from clarity_core.config import CONFIG
from clarity_core.signal import read_signal, write_signal


def run_HL_processing(scene, listener, input_path, output_path, fs):
    """Run baseline HL processing.

    Applies the MSBG model of hearing loss.

    Args:
        scene (dict): dictionary defining the scene to be generated
        listener (dict): dictionary containing listener data
        input_path (str): path to the input data
        output_path (str): path to the output data
        fs (float): sampling rate
    """
    logging.debug(f"Running HL processing: Listener {listener['name']}")
    logging.debug("Listener data")
    logging.debug(listener["name"])

    # Get audiogram centre frequencies
    cfs = np.array(listener["audiogram_cfs"])

    # Read HA output and mixed signals
    signal = read_signal(
        f"{input_path}/{scene['scene']}_{listener['name']}_HA-output.wav")

    mixture_signal = read_signal(
        f"{input_path}/{scene['scene']}_mixed_CH0.wav")

    # Create discrete delta function (DDF) signal for time alignment
    ddf_signal = np.zeros(np.shape(signal))
    ddf_signal[:, 0] = unit_impulse(len(signal), int(fs / 2))
    ddf_signal[:, 1] = unit_impulse(len(signal), int(fs / 2))

    # Get flat-0dB ear audiograms
    flat0dB_audiogram = MSBG.Audiogram(cfs=cfs, levels=np.zeros(np.shape(cfs)))
    flat0dB_ear = MSBG.Ear(audiogram=flat0dB_audiogram, src_pos="ff")

    # Process the signal through the pair of flat-0dB ears to obtain an unimpaired reference
    flat0dB_HL_outputs = listen(signal, [flat0dB_ear, flat0dB_ear])

    # Get listener audiograms and build a pair of ears
    audiogram_left = np.array(listener["audiogram_levels_l"])
    left_audiogram = MSBG.Audiogram(cfs=cfs, levels=audiogram_left)
    audiogram_right = np.array(listener["audiogram_levels_r"])
    right_audiogram = MSBG.Audiogram(cfs=cfs, levels=audiogram_right)
    audiograms = [left_audiogram, right_audiogram]
    ears = [
        MSBG.Ear(audiogram=audiogram, src_pos="ff") for audiogram in audiograms
    ]

    # Process the HA output signal, the raw mixed signal, and the ddf signal
    outputs = listen(signal, ears)
    mixture_outputs = listen(mixture_signal, ears)
    ddf_outputs = listen(ddf_signal, ears)

    # Write the outputs
    outfile_stem = f"{output_path}/{scene['scene']}_{listener['name']}"
    signals_to_write = [
        (
            flat0dB_HL_outputs,
            f"{output_path}/{scene['scene']}_flat0dB_HL-output.wav",
        ),
        (outputs, f"{outfile_stem}_HL-output.wav"),
        (ddf_outputs, f"{outfile_stem}_HLddf-output.wav"),
        (mixture_outputs, f"{outfile_stem}_HL-mixoutput.wav"),
    ]
    for signal, filename in signals_to_write:
        write_signal(filename, signal, CONFIG.fs, floating_point=True)
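
The listen helper is called here but not defined in the excerpt. Judging from the call sites (a two-channel signal plus a [left, right] pair of MSBG.Ear objects), a plausible sketch is the following; it is an assumption about the helper, not the baseline's actual implementation:

def listen(signal, ears):
    """Pass each channel of a stereo signal through the matching ear model.

    Sketch only: the real helper may differ, e.g. in calibration options or
    in how it aligns outputs of unequal length.
    """
    outputs = [ear.process(signal[:, i])[0] for i, ear in enumerate(ears)]
    return np.stack(outputs, axis=1)  # back to shape (samples, 2)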