def test_search_stresses(self):
    words = pronouncing.search_stresses('^000100$')
    self.assertEqual(words, [
        'phytogeography', 'uninterruptible', 'uninterruptible',
        'variability'
    ])
    words = pronouncing.search_stresses('^[12]0[12]0[12]0[12]$')
    self.assertEqual(
        words, ['dideoxycytidine', 'homosexuality', 'hypersensitivity'])
Example #3
def stress_pattern():
    phones_list = pronouncing.phones_for_word("snappiest")
    meter = pronouncing.stresses(phones_list[0])
    print(meter)  # 102
    # 1: primary stress, 2: secondary stress, 0: unstressed
    # search by stress pattern
    stress_first = pronouncing.search_stresses('100100')
    stress_either = pronouncing.search_stresses(
        '^00[12]00[12]$')  # either 1 or 2 in the []
    print(stress_first)
    print(stress_either)
Example #4
def get_choices(word_pool, pat):
    choices = pronouncing.search_stresses(pat)
    new_list = []
    for word in word_pool:
        if word in choices:
            new_list.append(word)

    return new_list
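A minimal usage sketch for get_choices; the import and the word pool below are illustrative assumptions, not part of the original source.

import pronouncing

# Keep only the pool words whose stress pattern matches '^10$' (stressed-unstressed);
# the pool itself is made up for the example.
word_pool = ["python", "giraffe", "mountain", "delay"]
print(get_choices(word_pool, "^10$"))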
Example #5
def stress_search(stress_pattern):
    regex = '^' + stress_pattern.replace('1', '[12]') + '$'
    pat_len = len(stress_pattern)
    results = []
    while(not results):
        results = pronouncing.search_stresses(regex)
        pat_len -= 1
        regex = '^' + stress_pattern[:pat_len].replace('1','[12]') + '$'
    return results, pat_len + 1
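A quick call to stress_search, shown as an illustration (the pattern is an arbitrary choice): the function keeps trimming the pattern from the right until pronouncing.search_stresses returns matches, and reports how many characters were actually used.

import pronouncing

# '1' in the input is widened to '[12]' so primary and secondary stress both count;
# if the full pattern finds nothing, shorter and shorter prefixes are tried.
words, matched_len = stress_search("11010")
print(matched_len, words[:5])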
Example #6
def evaluate(prime_str=['f**k'], predict_len=4, temperature=0.8):
    hidden = decoder.init_hidden()
    prime_input = char_tensor(prime_str)
    predicted = list(prime_str)  # copy, so the mutable default argument is not modified across calls
    target_meter = "1010101010"
    target_rhyme = ""

    # Use priming string to "build up" hidden state
    for p in range(len(prime_str) - 1):
        _, hidden = decoder(prime_input[p], hidden)
    inp = prime_input[-1]
   
    
    while predicted.count("\n") < predict_len:
        output, hidden = decoder(inp, hidden)
        
        # Sample from the network as a multinomial distribution
        output_dist = output.data.view(-1).div(temperature).exp()
        # print(output_dist)
        top_i = torch.multinomial(output_dist, 1)[0]
        # print(top_i)
        
        # Add predicted character to string and use as next input
        predicted_char = vocab[top_i]
        # print(predicted_char)

        if predicted_char.strip() != "":
          if pronouncing.stresses_for_word(predicted_char):
            word_meter = adjusted_meter(pronouncing.stresses_for_word(predicted_char)[0])
            if target_meter.startswith(word_meter):
              predicted += [predicted_char]
              inp = char_tensor([predicted_char])
              target_meter = target_meter[len(word_meter):]
              if target_meter == "":
                final_word = predicted.pop()
                if target_rhyme != "":
                  meter_candidates = pronouncing.search_stresses(pronouncing.stresses_for_word(final_word)[0])
                  rhyme_candidates = pronouncing.rhymes(target_rhyme)
                  candidates = list(set(meter_candidates) & set(rhyme_candidates) & set(vocab))
                  if candidates:
                    #candidates = [output_dist[char_tensor([candidate])] for candidate in candidates if candidate in vocab]
                    #print([char_tensor([candidate]) for candidate in candidates])
                    final_word = random.choice(candidates)
                predicted += [final_word, "\n"]
                print(final_word)
                if target_rhyme == "":
                     target_rhyme = final_word
                else:
                     target_rhyme = ""
                target_meter = "1010101010"  
              elif target_meter == "0":
                target_meter = "1"

    return predicted
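The adjusted_meter helper used above (and in the similar function further down) is not part of this excerpt. As a purely hypothetical placeholder, one way it could behave is to fold secondary stress into primary so CMU stress strings can be compared with the "10"-only target meter; this is an assumption, not the author's code.

def adjusted_meter(stresses):
    # Hypothetical stand-in: treat secondary stress (2) as primary (1), so a
    # stress string like "201" becomes "101" and can be matched against the
    # iambic target "1010101010".
    return stresses.replace("2", "1")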
Example #7
def generate_joo_joo_eyeball(syllable_count):
    text = [0]
    while(text[0] != syllable_count):
        text = random.choice(joo_joo_eyeball)
    text = text[1] # Discard count, we don't need it
    result = []
    for word in text.split():
        pronunciations = pronouncing.phones_for_word(word)
        pat = pronouncing.stresses(pronunciations[0])
        replacement = random.choice(pronouncing.search_stresses("^"+pat+"$"))
        result.append(replacement)
    return ' '.join(result)
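generate_joo_joo_eyeball depends on a module-level joo_joo_eyeball list that is not part of this excerpt. Below is a minimal sketch of the assumed shape, with made-up entries (each pairs a syllable count with a line whose words are expected to be in the CMU dictionary); note the while loop only terminates if some entry carries the requested count.

import random

import pronouncing

# Illustrative data only: text[0] is the syllable count checked in the loop,
# text[1] is the line whose words get swapped for same-stress replacements.
joo_joo_eyeball = [
    (4, "come together"),
    (3, "over me"),
]

print(generate_joo_joo_eyeball(3))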
Example #8
def steal_voices():
    words_ending_with_sounds = pr.search("S IH0 [ZS]$")
    words_with_stresses = pr.search_stresses("[12][12]0")
    words_with_sounds_and_stresses = set(
        words_ending_with_sounds).intersection(words_with_stresses)

    # Find words with correct syllable count
    three_syllable_words = []
    for word in words_with_sounds_and_stresses:
        word_phones = pr.phones_for_word(word)
        syllable_count = pr.syllable_count(word_phones[0])
        if syllable_count == 3:
            three_syllable_words.append(word)

    return three_syllable_words
def summon_seas():
    # Find words with matching stress patterns
    caspian_phones = pr.phones_for_word("caspian")
    caspian_stresses = pr.stresses(caspian_phones[0])
    words_with_stress_pattern = pr.search_stresses(caspian_stresses)

    # Find words with correct syllable count
    three_syllable_words = []
    for word in words_with_stress_pattern:
        word_phones = pr.phones_for_word(word)
        syllable_count = pr.syllable_count(word_phones[0])
        if syllable_count == 3:
            sea = string.capwords((word + " sea"))
            three_syllable_words.append(sea)

    return three_syllable_words
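The pr name in steal_voices and summon_seas (and in find_words below) is presumably the pronouncing module under an alias; a minimal setup sketch under that assumption:

import string

import pronouncing as pr  # assumed alias used by the functions above and below

print(steal_voices()[:10])   # three-syllable words ending in an S IH0 Z/S sound
print(summon_seas()[:10])    # "<Word> Sea" names sharing caspian's stress pattern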
Example #10
def find_words():
    # Find words with matching stress patterns
    beluga_phones = pr.phones_for_word("beluga")
    last_beluga_phone = beluga_phones[0].split(" ")[-1]
    beluga_stresses = pr.stresses(beluga_phones[0])
    words_with_stress_pattern = pr.search_stresses(beluga_stresses)

    # Find words with matching end phone
    words_ending_with_ah = pr.search(last_beluga_phone + "$")

    # Find words with correct syllable count
    words_with_stress_and_ending = list(
        set(words_with_stress_pattern).intersection(words_ending_with_ah))
    three_syllable_words = []
    for word in words_with_stress_and_ending:
        word_phones = pr.phones_for_word(word)
        syllable_count = pr.syllable_count(word_phones[0])
        if syllable_count == 3:
            three_syllable_words.append(word)

    return three_syllable_words
Example #11
def evaluate_prob(prime_str=['f**k'], predict_len=4, temperature=0.8):
    hidden = decoder.init_hidden()
    prime_input = char_tensor(prime_str)
    predicted = list(prime_str)  # copy, so the mutable default argument is not modified across calls
    target_meter = "1010101010"
    target_rhyme = ""

    # Use priming string to "build up" hidden state
    for p in range(len(prime_str) - 1):
        _, hidden = decoder(prime_input[p], hidden)
    inp = prime_input[-1]
   
    count = 0
    while predicted.count("\n") < predict_len:
        count = count + 1
        output, hidden = decoder(inp, hidden)
        
        # Sample from the network as a multinomial distribution
        output_dist = output.data.view(-1).div(temperature).exp()
        top_i = torch.multinomial(output_dist, 1)[0]
#         max_prob = 0.0
#         top_i = 0
#         for j in range(len(output_dist)):
#           if output_dist[j] > max_prob:
#             max_prob = output_dist[j]
#             top_i = j
        
        # Add predicted character to string and use as next input
        predicted_char = vocab[top_i]
        
        while predicted_char.strip() == "":
#           output_dist[top_i] = 0.0
#           max_prob = 0.0
#           top_i = 0
#           for j in range(len(output_dist)):
#             if output_dist[j] > max_prob:
#               max_prob = output_dist[j]
#               top_i = j
          top_i = torch.multinomial(output_dist, 1)[0]
          predicted_char = vocab[top_i]

        if predicted_char.strip() != "":
          meter = True
          while meter:
            if pronouncing.stresses_for_word(predicted_char):
              word_meter = adjusted_meter(pronouncing.stresses_for_word(predicted_char)[0])
              if target_meter.startswith(word_meter):
                meter = False
              else:
                top_i = torch.multinomial(output_dist, 1)[0]
                predicted_char = vocab[top_i]
            else:
              top_i = torch.multinomial(output_dist, 1)[0]
              predicted_char = vocab[top_i]
          if pronouncing.stresses_for_word(predicted_char):
            word_meter = adjusted_meter(pronouncing.stresses_for_word(predicted_char)[0])
            if target_meter.startswith(word_meter):
              predicted += [predicted_char]
              inp = char_tensor([predicted_char])
              target_meter = target_meter[len(word_meter):]
              if target_meter == "":
                final_word = predicted.pop()
                if target_rhyme != "":
                  meter_candidates = pronouncing.search_stresses(pronouncing.stresses_for_word(final_word)[0])
                  rhyme_candidates = pronouncing.rhymes(target_rhyme)
                  candidates = list(set(meter_candidates) & set(rhyme_candidates) & set(vocab))
                  if candidates:
                    #candidates = [output_dist[char_tensor([candidate])] for candidate in candidates if candidate in vocab]
                    #print([char_tensor([candidate]) for candidate in candidates])
                    max_prob = 0.0
                    max_index = 0
                    for i in range(len(output_dist)):
                      if vocab[i] in candidates and output_dist[i] > max_prob:
                        max_prob = output_dist[i]
                        max_index = i
                    final_word = vocab[max_index]
                predicted += [final_word, "\n"]
                inp = char_tensor([final_word])
                if target_rhyme == "":
                     target_rhyme = final_word
                else:
                     target_rhyme = ""
                target_meter = "1010101010"  
              elif target_meter == "0":
                target_meter = "1"

    #print(count)
    return predicted
Example #12
            Quoth the Raven “Nevermore.”

    And the Raven, never flitting, still is sitting, still is sitting
On the pallid bust of Pallas just above my chamber door;
    And his eyes have all the seeming of a demon’s that is dreaming,
    And the lamp-light o’er him streaming throws his shadow on the floor;
And my soul from out that shadow that lies floating on the floor
            Shall be lifted—nevermore!"""
poem = poem.translate(str.maketrans('', '', string.punctuation))
poem = poem.translate(str.maketrans('', '', '1234567890'))
newpoem = []
for line in poem.split("\n"):
    for word in line.lower().split():
        phones = pronouncing.phones_for_word(word)
        if phones:
            stress_list = [pronouncing.stresses(phone) for phone in phones]
            # Build an alternation matching any of the word's stress patterns,
            # e.g. "^101$|^100$", and swap in a random word with the same stresses.
            search = "^" + stress_list[0] + "$"
            for stress in stress_list[1:]:
                search += "|^%s$" % stress
            word = random.choice(pronouncing.search_stresses(search))
        for letter in word:
            sys.stdout.write(letter)
            sys.stdout.flush()
            time.sleep(.1)
        sys.stdout.write(" ")
        sys.stdout.flush()
    print("\n")