コード例 #1
0
    def run(self, input_filename1, input_filename2, output_filename):
        """ Run the Repetition Automatic Detection annotation.

        :param input_filename1: Name of the file with aligned tokens of
            speaker 1 (the source)
        :param input_filename2: Name of the file with aligned tokens of
            speaker 2 (the echo) for other-repetition, or None for
            self-repetition
        :param output_filename: Name of the file to save the result
        :raises: Exception if an input tokens tier is empty

        """
        # Report inputs, options and diagnosis in the log.
        self.print_filename(input_filename1)
        self.print_options()
        self.print_diagnosis(input_filename1)
        if input_filename2 is not None:
            self.print_diagnosis(input_filename2)
        if self.logfile is not None:
            self.logfile.print_message("Span = " + str(self._span), indent=3)
            self.logfile.print_message("Alpha = " + str(self._alpha), indent=3)

        # Aligned tokens of the main speaker (the source).
        trs_input1 = sppas.src.annotationdata.aio.read(input_filename1)
        tier1 = sppasSearchTier.aligned_tokens(trs_input1)
        if tier1.IsEmpty() is True:
            raise Exception("Empty tokens tier (main speaker).\n")

        # Aligned tokens of the echoing speaker, only for other-repetition.
        tier2 = None
        if input_filename2 is not None:
            trs_input2 = sppas.src.annotationdata.aio.read(input_filename2)
            tier2 = sppasSearchTier.aligned_tokens(trs_input2)
            if tier2.IsEmpty() is True:
                raise Exception("Empty tokens tier (echoing speaker).\n")

        # Optionally replace tokens by their lemmas before detection.
        if self._use_lemmatize:
            tier1 = self.lemmatize(tier1)
            tier2 = self.lemmatize(tier2) if tier2 is not None else None

        # Detect repetitions: self-repetition with one speaker,
        # other-repetition with two.
        if tier2 is None:
            src_tier, echo_tier = self.self_detection(tier1)
        else:
            src_tier, echo_tier = self.other_detection(tier1, tier2)

        # Save both resulting tiers into the output file.
        trs_output = Transcription("Repetitions")
        for result_tier in (src_tier, echo_tier):
            trs_output.Append(result_tier)
        sppas.src.annotationdata.aio.write(output_filename, trs_output)
コード例 #2
0
ファイル: ipamapping.py プロジェクト: jeanbaptisteb/sppas
# read content
trs_input = aio.read(args.i)

# ----------------------------------------------------------------------------
# Convert input file

# Output transcription holding the converted tiers; named after the input.
trs = Transcription(name=trs_input.GetName()+"-IPA")

# args.n is a comma-separated list of tier names: each one is searched
# (case-insensitive), mapped to IPA and appended; unknown names are skipped.
for n in args.n.split(','):
    print(" -> Tier {:s}:".format(n))
    tier = trs_input.Find(n, case_sensitive=False)
    if tier is not None:
        new_tier = mapping.map_tier(tier)
        new_tier.SetName(n+"-IPA")
        # NOTE(review): this shares the metadata object with the source tier
        # (assignment, not a copy) -- confirm that is intended.
        new_tier.metadata = tier.metadata
        trs.Append(new_tier)
    else:
        print(" [IGNORED] Wrong tier name.")

# Set the other members
trs.metadata = trs_input.metadata

# ----------------------------------------------------------------------------
# Write converted tiers

# Nothing was converted: report and exit with an error status; no file created.
if trs.GetSize() == 0:
    print("No tier converted. No file created.")
    sys.exit(1)

# Output file name: same extension as the input, with "-ipa" inserted.
infile, inext = os.path.splitext(args.i)
filename = infile + "-ipa" + inext
コード例 #3
0
    # All tiers, as 1-based indices (the matching `if` header is above
    # this excerpt).
    tiersnumbs = range(1, (trsinput.GetSize() + 1))
elif args.t:
    # Only the tier numbers given on the command line.
    tiersnumbs = args.t

# ----------------------------------------------------------------------------
# Fill

# Output transcription receiving the filled/cleaned tiers.
trsout = Transcription()

for i in tiersnumbs:
    # Tier numbers are 1-based on the command line, 0-based in the data.
    tier = trsinput[i - 1]

    # Extend the tier so it spans the whole transcription time range.
    tier = fill_gaps(tier, trsinput.GetMinTime(), trsinput.GetMaxTime())
    # Make sure the filler label (args.f) is accepted by the controlled
    # vocabulary of the tier, if the tier has one.
    ctrlvocab = tier.GetCtrlVocab()
    if ctrlvocab is not None:
        if ctrlvocab.Contains(args.f) is False:
            ctrlvocab.Append(args.f, descr="Filler")

    # NOTE(review): Python 2 print statements below, but `print()` on the
    # last line -- under Python 2 that prints "()". Confirm the intended
    # interpreter version.
    print "Tier: ", tier.GetName()
    print "Fill empty intervals with", args.f, "(and merge with previous or following if any)"
    tier = fct_fill(tier, args.f)
    print "Merge intervals during less than", args.d
    tier = fct_clean(tier, args.f, args.d)
    print()
    trsout.Append(tier)

# ----------------------------------------------------------------------------
# Write

sppas.src.annotationdata.aio.write(args.o, trsout)
コード例 #4
0
    # (tail of an `if` statement whose condition is above this excerpt)
    print("ERROR: MarsaTag plugin requires SPPAS alignment files "
          "(i.e. with -palign in its name).")
    sys.exit(1)

# Not already a TextGrid: convert it (MarsaTag presumably expects
# TextGrid input -- confirm).
if fext.lower() != "textgrid":

    # read content
    trs_input = aio.read(filename)
    tier = trs_input.Find("TokensAlign", case_sensitive=False)
    if tier is None:
        print("ERROR: A tier with name TokensAlign is required.")
        sys.exit(1)

    # write as textgrid, keeping only the TokensAlign tier
    trs = Transcription(name="TokensAlign")
    trs.Append(tier)
    # `filename` is re-bound: the rest of the script works on the TextGrid.
    filename = fname + ".TextGrid"
    aio.write(filename, trs)

# ----------------------------------------------------------------------------
# Get MarsaTag path

# A directory containing the MarsaTag tool is mandatory (args.p).
if len(args.p) == 0:
    print("ERROR: No given directory for MarsaTag software tool.")
    sys.exit(1)

if os.path.isdir(args.p) is False:
    print("ERROR: {:s} is not a valid directory.".format(args.p))
    sys.exit(1)

# Expected location of the MarsaTag jar inside the given directory.
MARSATAG = os.path.join(args.p, "lib", "MarsaTag-UI.jar")
コード例 #5
0
ファイル: sppasipusseg.py プロジェクト: lym0302/sppas
    def get_transcription(self, input_filename, tier_idx=None):
        """ Extract transcription from a file, either time-aligned or not.

        If input is a simple text file, it must be formatted like:
            - each line is supposed to be at least one unit;
            - each '#' symbol is considered as a unit boundary.
            - both can be combined.

        If input is a time-aligned file, the expected tier names for the
        transcription are:
            - priority: trans in the tier name;
            - secondary: trs, ortho, toe or ipu in the tier name.
        It also extracts IPUs file names if any, i.e. a tier with name
        "Name" or "File".

        :param input_filename: (str) Name of the input file
        :param tier_idx: (int) Force the tier index for the transcription
        :returns: Transcription
        :raises: IOError if a text input does not contain exactly one tier

        """
        # No input at all: return an empty transcription.
        if input_filename is None:
            return Transcription()

        trs_input = sppas.src.annotationdata.aio.read(input_filename)

        # Raw text input: the reader already produced the expected
        # single-tier transcription; just check and return it.
        if input_filename.lower().endswith("txt"):
            if trs_input.GetSize() != 1:
                raise IOError(
                    'Error while reading file (expected one tier. Got %d)' %
                    trs_input.GetSize())
            return trs_input

        # Time-aligned input: pick the transcription tier, either forced
        # by its index or found by its name.
        if tier_idx is not None:
            trs_tier = trs_input[tier_idx]
        else:
            trs_tier = sppasSearchTier.transcription(trs_input)

        trs_output = Transcription("Output")
        if self.logfile:
            self.logfile.print_message("IPUs+Transcription tier found: %s" %
                                       trs_tier.GetName(),
                                       indent=3,
                                       status=INFO_ID)
        trs_tier.SetName('Transcription')
        trs_output.Append(trs_tier)

        # Also keep the first tier that looks like IPUs file names, if any.
        for tier in trs_input:
            lowered = tier.GetName().lower()
            if "name" not in lowered and "file" not in lowered:
                continue
            if self.logfile:
                self.logfile.print_message("IPUs file names found: %s" %
                                           tier.GetName(),
                                           indent=3,
                                           status=INFO_ID)
            tier.SetName("Name")
            trs_output.Append(tier)
            break

        return trs_output
コード例 #6
0
    def create_chunks(self, inputaudio, phontier, toktier, diralign):
        """ Create time-aligned tiers from raw input tiers.

        :param inputaudio: (str) Name of the audio file
        :param phontier: (Tier) the tier with phonetization
        :param toktier:  (Tier) the tier with tokenization to split
        :param diralign: (str) the working directory to store temporary data.
        :returns: (Transcription) with "Chunks-Tokenized" and
            "Chunks-Phonetized" tiers (plus intermediate anchor tiers when
            self.ANCHORS is enabled)
        :raises: IOError if phonetization and tokenization do not contain
            the same number of items

        """
        trsoutput = Transcription("Chunks")

        # Extract channel 0 of the audio and convert it with
        # autils.format_channel(..., 16000, 2) -- presumably 16 kHz,
        # 2-byte samples for the ASR engine; confirm the parameter meaning.
        channel = autils.extract_audio_channel(inputaudio, 0)
        channel = autils.format_channel(channel, 16000, 2)

        # Extract the lists of tokens and their corresponding pronunciations.
        # Both must be the same length: item i of one matches item i of the
        # other.
        pronlist = self._tier2raw(phontier, map=True).split()
        toklist = self._tier2raw(toktier, map=False).split()
        if len(pronlist) != len(toklist):
            raise IOError("Inconsistency between the number of items in "
                          "phonetization %d and tokenization %d." % (len(pronlist), len(toklist)))

        # At a first stage, we'll find anchors.
        anchor_tier = AnchorTier()
        anchor_tier.set_duration(channel.get_duration())
        anchor_tier.set_ext_delay(1.)
        anchor_tier.set_out_delay(0.5)

        # Search silences and use them as anchors.
        if self.SILENCES is True:
            anchor_tier.append_silences(channel)

        # Estimates the speaking rate (amount of tokens/sec. in average)
        self._spkrate.eval_from_duration(channel.get_duration(), len(toklist))

        # Multi-pass ASR to find anchors.
        nb_anchors = -1      # number of anchors in the preceding pass
        ngram = self.N  # decreasing N-gram value
        win_length = self.W  # decreasing window length

        # Iterate while the previous pass added anchors and
        # check_holes_ntokens(NBT) is still False (presumably: the gaps
        # between anchors still contain too many tokens -- confirm).
        while nb_anchors != anchor_tier.GetSize() and anchor_tier.check_holes_ntokens(self.NBT) is False:

            anchor_tier.set_win_delay(win_length)
            nb_anchors = anchor_tier.GetSize()

            logging.debug(" =========================================================== ")
            logging.debug(" Number of anchors: %d" % nb_anchors)
            logging.debug(" N-gram:   %d" % ngram)
            logging.debug(" W-length: %d" % win_length)

            # perform ASR and append new anchors in the anchor tier (if any)
            self._asr(toklist, pronlist, anchor_tier, channel, diralign, ngram)

            # append the anchor tier as intermediate result
            if self.ANCHORS is True and nb_anchors != anchor_tier.GetSize():
                Chunks._append_tier(anchor_tier, trsoutput)
                out_name = os.path.join(diralign, "ANCHORS-%d.xra" % anchor_tier.GetSize())
                sppas.src.annotationdata.aio.write(out_name, trsoutput)

            # prepare next pass: shrink the window and the N-gram size,
            # bounded below by self.WMIN / self.NMIN.
            win_length = max(win_length-1., self.WMIN)
            ngram = max(ngram-1, self.NMIN)

        # Then, anchors are exported as tracks: one tier for tokens,
        # one for pronunciations.
        tiert = anchor_tier.export(toklist)
        tiert.SetName("Chunks-Tokenized")
        tierp = anchor_tier.export(pronlist)
        tierp.SetName("Chunks-Phonetized")
        trsoutput.Append(tiert)
        trsoutput.Append(tierp)

        return trsoutput