コード例 #1
0
ファイル: voicebox.py プロジェクト: vtbassmatt/pt-voicebox
 def add_voice(self):
     """Interactively build a new Voice from the corpora in ``texts/``.

     Repeatedly prompts the user to pick a text file and a weight for it
     until they answer 'n', then names the voice, normalizes its corpus
     weights, and registers it in ``self.voices``.
     """
     # Start with an unnamed voice holding an empty mapping of corpora.
     new_voice = voice.Voice({})
     texts = os.listdir('texts')
     add_another_corpus = ''
     while add_another_corpus != 'n':
         # 1-based menu of the remaining (not yet chosen) texts.
         for i, text_name in enumerate(texts):
             print("%s %s" % (i + 1, text_name))
         choice = input(
             'Enter the number of the corpus you want to load:\n')
         corpus_name = texts[int(choice) - 1]
         path = 'texts/%s' % corpus_name
         # Use a context manager so the handle is closed promptly
         # (the original opened the file and never closed it).
         with open(path, 'r') as f:
             text = f.read()
         corpus_weight_prompt = 'Enter the weight for %s:\n' % corpus_name
         corpus_weight = float(input(corpus_weight_prompt))
         new_voice.add_corpus(corpus.Corpus(text, corpus_name),
                              corpus_weight)
         # A corpus can only be added once per voice.
         texts.remove(corpus_name)
         add_another_corpus = input(
             'Add another corpus to this voice? y/n\n')
     voicename = input('Name this voice:\n')
     new_voice.name = voicename
     new_voice.normalize_weights()
     self.voices[voicename] = new_voice
コード例 #2
0
ファイル: generate-voice.py プロジェクト: uliska/gruppen
def main():
    """Entry point: generate a new voice for a Gruppen project and write
    its segment, part-concatenation and standalone part files."""
    info('Gruppen - generate-voice\n')
    info('Generate voice')

    # Register this script's extra CLI flags before parsing.
    add_commandline_args()

    # Parse the command line and derive the voice configuration from it.
    cli_args = commandline.parse()
    props = get_voice_props(cli_args)

    # Open the target repository/project.
    project = script.open_project(cli_args)

    # Build the Voice and echo its configuration to the user.
    created = voice.Voice(project, props)
    created.print_props()

    # Render every cell template file to disk.
    segments = created.segments
    target_dir = os.path.join(project['paths']['root'], created.music_dir)
    info('Writing {} segments to {}'.format(
        len(segments._segments_list), target_dir))
    written_files = segments.write_segments()

    # Part concatenation template, then the standalone part file.
    segments.write_part_concat_file(written_files)
    segments.write_part_file()
コード例 #3
0
    def deserialize(self, buf):
        """Restore this object's state from the JSON string *buf*.

        Returns self so calls can be chained.
        """
        # Numeric codes in the serialized form map to drawable classes.
        eye_classes = {
            1: eye.Eye,
            2: glasses.Glasses,
            3: eyelashes.Eyelashes,
            4: halfmoon.Halfmoon,
            5: sunglasses.Sunglasses,
            6: wireframes.Wireframes,
            7: sleepy.Sleepy,
        }
        mouth_classes = {
            1: mouth.PeakMouth,
            2: waveform_mouth.WaveformMouth,
            3: fft_mouth.FFTMouth,
        }

        state = json.loads(buf)
        voice_info = state['voice']
        self.voice = voice.Voice(voice_info['language'], voice_info['name'])
        self.pitch = state['pitch']
        self.rate = state['rate']
        # A face may have several eyes but only a single mouth.
        self.eyes = [eye_classes[code] for code in state['eyes']]
        self.mouth = mouth_classes[state['mouth']]

        return self
コード例 #4
0
ファイル: main.py プロジェクト: GabsGear/MakerCar
def main():
    """Recognize a face from the camera: greet known people by name,
    start the registration flow for unknown ones."""
    recognizer = recognition.Recognition()
    speaker = voice.Voice()

    name = recognizer.getFromCam()
    if name == 'Unknown':
        # Unrecognized face: capture and register a new person.
        recognizer.RegisterNewPerson()
    else:
        speaker.sayHello(str(name))
コード例 #5
0
    def deserialize(self, buf):
        """Restore this object's state from the JSON string *buf*.

        Returns self so calls can be chained.
        """
        # Only one class is available for each feature in this variant.
        eye_classes = {1: eye.Eye}
        mouth_classes = {1: mouth.Mouth}

        state = json.loads(buf)
        voice_info = state['voice']
        self.voice = voice.Voice(voice_info['language'], voice_info['name'])
        self.pitch = state['pitch']
        self.rate = state['rate']
        self.eyes = [eye_classes[code] for code in state['eyes']]
        self.mouth = mouth_classes[state['mouth']]

        return self
コード例 #6
0
def generateMelody(bar, length, scale, chords, start=0, theme=None):
    """Generate a melody voice and return its music data.

    Args:
        bar: beats per bar.
        length: melody length in beats (relative to *start*).
        scale, chords: pitch material consumed by ``getNextNote``.
        start: beat offset at which the melody begins.
        theme: optional opening material, syntax [[rhythm, note], ...].

    Returns:
        The result of ``melody.getMusic()``.
    """
    melody = voice.Voice(start)
    templength = 0
    beat = start  # which beat we are currently on
    spaceInFrase = 0
    oddsAndEnds = 0
    length = length + beat  # convert length to an absolute end beat

    # Append the theme to the start of the melody.
    # Bug fix: the original iterated over `theme` unconditionally and
    # raised TypeError whenever the default theme=None was used.
    for note in (theme or []):
        if length > beat:
            melody.nextRythm(note[0])
            melody.nextNote(note[1])
            # Here one could e.g. add transposition to the chords.
            beat = beat + note[0]
        else:
            # Theme is longer than the requested melody; stop early.
            break

    while length > beat:
        frase = nextFrase(bar)
        if frase > (length - beat):
            # Clamp the final phrase to the beats that remain.
            frase = length - beat
        spaceInFrase = frase
        while spaceInFrase > 0:  # fill one phrase at a time
            templength = probabilities.getRythm(
                spaceInFrase, melody.getLastRythm(),
                music.respirationAtTheMoment(beat))
            spaceInFrase = spaceInFrase - templength
            melody.nextRythm(templength)
            melody.nextNote(getNextNote(melody, beat, scale, chords, bar))
            # Accent the first beat of every bar.
            if beat % bar == 0:
                force = 4
            else:
                force = 0
            melody.forceEvent(music.getForce(force, beat))
            # Beats must be counted in ints, because you can't use a
            # float as the index of chords[].
            if templength == int(templength):
                beat = beat + templength
            else:
                oddsAndEnds = oddsAndEnds + templength
                beat = beat + int(oddsAndEnds)
                # Remember the remainder of the beat, so we don't mess
                # up calculations.
                oddsAndEnds = oddsAndEnds - int(oddsAndEnds)
    return melody.getMusic()
コード例 #7
0
    def deserialize(self, buf):
        """Restore this object's state from the cjson-encoded *buf*.

        Returns self so calls can be chained.
        """
        # Numeric codes in the serialized form map to drawable classes.
        eye_classes = {1: eye.Eye, 2: glasses.Glasses}
        mouth_classes = {
            1: mouth.Mouth,
            2: fft_mouth.FFTMouth,
            3: waveform_mouth.WaveformMouth,
        }

        state = cjson.decode(buf)
        voice_info = state['voice']
        self.voice = voice.Voice(voice_info['language'], voice_info['name'])
        self.pitch = state['pitch']
        self.rate = state['rate']
        self.eyes = [eye_classes[code] for code in state['eyes']]
        self.mouth = mouth_classes[state['mouth']]

        return self
コード例 #8
0
def voices_generator(measure1, measure_length, tied_tones):
    """Translate raw measure data into Voice objects, one per inner voice.

    Each tone's duration is expressed as a fraction of the measure length
    and classified against the (value, lower, upper) lookup tables below;
    rests (tones marked 'X') and notes use separate tables.  Progress is
    appended to ``log1.txt``.  Yields one Voice per voice in *measure1*.
    """
    cons_pause_threshold = 0.07
    cons_note_threshold = 0.01
    # (duration value, lower bound, upper bound) — bounds are fractions
    # of the measure length.
    notes = ((1, 0.01, 0.07), (2, 0.07, 0.13), (3, 0.13, 0.19),
             (4, 0.19, 0.26), (6, 0.26, 0.39), (8, 0.39, 0.52),
             (10, 0.52, 0.64), (12, 0.64, 0.77), (14, 0.77, 0.88),
             (16, 0.88, 1.00))
    # NOTE(review): the first two rest boundaries (0.07, 0.012) look
    # garbled compared to the notes table (0.012 < 0.07 makes the first
    # interval empty) — confirm the intended values before changing them.
    rests = ((1, 0.07, 0.012), (2, 0.012, 0.18), (3, 0.18, 0.24),
             (4, 0.24, 0.31), (6, 0.31, 0.44), (8, 0.44, 0.57),
             (10, 0.57, 0.69), (12, 0.69, 0.82), (14, 0.82, 0.93),
             (16, 0.93, 1.00))
    log = open('log1.txt', 'a')

    for current_voice in measure1:
        voice_object = voice.Voice()
        for tone in current_voice:
            # Fraction of the measure this tone occupies (a percentage).
            temp = (float(tone[1]) / float(measure_length))
            is_rest = tone[0] == 'X'
            if is_rest and temp > cons_pause_threshold:
                log.write('z: ' + str(temp) + '\n')
                for value, min_v, max_v in rests:
                    if min_v < temp <= max_v:
                        voice_object.add(rest.Rest(value))
            elif not is_rest and temp > cons_note_threshold:
                log.write(tone[0] + ': ' + str(temp) + '\n')
                for value, min_v, max_v in notes:
                    if min_v < temp <= max_v:
                        voice_object.add(note.Note(tone[0], value))

        log.write(
            str(measure_length) + '---------------------voice-------------\n')

        # Mark tones that are tied into the following measure.
        for tone in tied_tones:
            if tone == voice_object.notes_chords_rests[-1].str_format:
                voice_object.notes_chords_rests[-1].tie = 'start'

        voice_object.improve_rhythm()

        yield voice_object

    log.write('---------------------measure-------------\n')
    log.close()
コード例 #9
0
def generateAccompaniment0(bar, length, scale, chords, starttime):
    """Build an accompaniment that repeats each bar's chord tone on
    quarter-beat (0.25) rhythms, accenting selected steps of the bar.

    Returns the result of ``accompaniment.getMusic()``.
    """
    accompaniment = voice.Voice(starttime)
    beat = starttime
    # The chords are ordered by bar, so convert the start time to a
    # bar index before iterating.
    first_bar = int(starttime / bar)
    for bar_index in range(first_bar, first_bar + int(length / bar)):
        # One chord per bar; four sixteenth-steps per beat.
        chord_tone = scale[chords[bar_index]]
        for step in range(bar * 4):
            accompaniment.nextNote(chord_tone)
            accompaniment.nextRythm(0.25)
            # Steps 0, 3, 7 and 10 get a stronger accent.
            emphasis = -4 if step in (0, 3, 7, 10) else -11
            accompaniment.forceEvent(music.getForce(emphasis, beat))
            beat = beat + 0.25
    return accompaniment.getMusic()
コード例 #10
0
ファイル: voicebox.py プロジェクト: vtbassmatt/pt-voicebox
 def load_voices_from_transcript(self):
     """Interactively load voices for the biggest characters of a transcript.

     Prompts for a transcript directory under ``texts/transcripts`` and a
     number of voices, then builds one single-corpus Voice per character
     and registers it in ``self.voices``.
     """
     transcripts = os.listdir('texts/transcripts')
     # 1-based menu of the available transcripts.
     for i, transcript in enumerate(transcripts):
         print("%s %s" % (i + 1, transcript))
     choice = input(
         'Enter the number of the transcript you want to load:\n')
     transcript_name = transcripts[int(choice) - 1]
     number = int(input('Enter the number of voices to load:\n'))
     for charname, size in self.biggest_characters(transcript_name, number):
         print(charname)
         path = 'texts/transcripts/%s/%s' % (transcript_name, charname)
         # Close the file promptly instead of leaking the handle
         # (the original used open(path).read()).
         with open(path) as source_file:
             source_text = source_file.read()
         corpus_name = charname
         # Each character's voice gets a single corpus with weight 1.
         weighted_corpora = {
             charname: [corpus.Corpus(source_text, corpus_name), 1]
         }
         self.voices[charname] = voice.Voice(weighted_corpora, charname)
コード例 #11
0
def generateAccompaniment2(bar, length, scale, chords, starttime):
	"""Build a whole-beat accompaniment: one chord tone per beat of each bar.

	NOTE(review): `acc` is incremented by 4 and then immediately reset to
	0 by the `acc >= 4` check, so the scale is always indexed with
	chords[i] + 0. Compare the `acc == 6` cycle in generateAccompaniment1
	— this threshold looks like a bug, but the intended offset pattern is
	unknown; confirm before changing behaviour.
	"""
	accompaniment = voice.Voice(starttime)
	acc = 0
	note = None
	beat = starttime
	starttime = int(starttime/bar) #because the chords are ordered by bar
	for i in range (starttime, starttime+int(length/bar)):#process one chord and bar at a time
		for j in range (0, bar):
			note = scale[chords[i]+acc]
			accompaniment.nextNote(note)
			acc = acc + 4
			if (acc >= 4):	
				acc = 0
			# One full beat per note, played at a constant soft force.
			accompaniment.nextRythm(1)
			beat = beat + 1	
			force = -6
			force = music.getForce(force, beat)
			accompaniment.forceEvent(force)
	return accompaniment.getMusic()
コード例 #12
0
ファイル: photoface.py プロジェクト: leonardcj/speak
    def deserialize(self, buf):
        """Rebuild the face (voice, eyes, mouth, photo) from the JSON
        string *buf* and return self."""
        state = json.loads(buf)

        voice_info = state['voice']
        self.voice = voice.Voice(voice_info['language'], voice_info['name'])
        self.pitch = state['pitch']
        self.rate = state['rate']

        # Eyes are stored as a center coordinate pair plus a circumference.
        for attr in ('left_eye', 'right_eye'):
            eye_info = state[attr]
            setattr(self, attr,
                    Eye(tuple(eye_info['center']), eye_info['circ']))
        self.pixbuf = _b64_to_pixbuf(state['pixbuf'])

        # The mouth carries its own base64-encoded pixbuf plus geometry.
        mouth_info = state['mouth']
        self.mouth = Mouth()
        self.mouth.from_values(mouth_info['x'], mouth_info['y'],
                               mouth_info['w'], mouth_info['h'],
                               _b64_to_pixbuf(mouth_info['pixbuf']))

        return self
コード例 #13
0
    def update_info(self):
        """Record a voice command, run speech recognition, and display the
        chatbot's reply in the info label.

        Reschedules itself to run again in 5 seconds regardless of the
        outcome; any failure shows a fallback message instead.
        """
        asr_engine = yuyin.Baiduyuyin()
        chatbot = tuling.Tuling()
        recorder = voice.Voice()

        try:
            print('听取命令')
            recorder.my_record()
            print('收到命令,进行识别')
            recognized = asr_engine.asr('01.wav')['result'][0]
            # Bug fix: the original used '%S', which is not a valid
            # printf-style format character and raised ValueError here.
            print('识别结果为:%s' % recognized)
            print('启动图灵机器人对话')
            answer = chatbot.get_answer(recognized)
            print('图灵机巧人反馈:%s' % answer)
            self.root.ids.lable_info.text = answer
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            self.root.ids.lable_info.text = "主人,没有听懂你的命令,请重新下命令吧"
        finally:
            Clock.schedule_once(lambda dt: self.update_info(), 5)
コード例 #14
0
def generateAccompaniment1(bar, length, scale, chords, starttime):
    """Build a half-beat accompaniment that cycles through scale offsets
    0, 2, 4 above each bar's chord tone, accenting bar downbeats.

    Returns the result of ``accompaniment.getMusic()``.
    """
    accompaniment = voice.Voice(starttime)
    offset = 0
    beat = starttime
    # The chords are ordered by bar, so convert the start time to a
    # bar index before iterating.
    first_bar = int(starttime / bar)
    for bar_index in range(first_bar, first_bar + int(length / bar)):
        # Two half-beat notes per beat of the bar.
        for _ in range(bar * 2):
            accompaniment.nextNote(scale[chords[bar_index] + offset])
            # Cycle the offset 0 -> 2 -> 4 -> 0 ...
            offset += 2
            if offset == 6:
                offset = 0
            accompaniment.nextRythm(0.5)
            # Accent the downbeat of each bar, play the rest softly.
            emphasis = 2 if beat % bar == 0 else -8
            accompaniment.forceEvent(music.getForce(emphasis, beat))
            beat = beat + 0.5
    return accompaniment.getMusic()
コード例 #15
0
    nextSlideFromAnswer4.exposed = True

    def nextSlideFromAnswer5(self, q):
        """Web handler: advance the presentation based on answer 5 of the
        current question; *q* is forwarded to respondToAnswer."""
        return respondToAnswer(5, q)

    # Mark the handler as routable by the web framework.
    nextSlideFromAnswer5.exposed = True

    def nextSlideFromAnswer6(self, q):
        """Web handler: advance the presentation based on answer 6 of the
        current question; *q* is forwarded to respondToAnswer."""
        return respondToAnswer(6, q)

    # Mark the handler as routable by the web framework.
    nextSlideFromAnswer6.exposed = True


# Module-level state shared by the request handlers above/below:
# the question sequence being presented, and a single Voice instance
# reused for all spoken output.
seq = objects.Sequence()
voiceInstance = voice.Voice()


def speakAndReturnForm():
    # Check for visited answers. If found, do not re-read question
    noVisitedAnswers = True
    for a in seq.sequence[seq.onQuestion].answers:
        if a.visited:
            noVisitedAnswers = False
    if noVisitedAnswers:
        speakList(seq.sequence[seq.onQuestion].questionTexts)
        for a in seq.sequence[seq.onQuestion].answers:
            speakList([a.answerText])
    linkToShow = seq.sequence[seq.onQuestion].linkToShow

    if linkToShow.lower().endswith(".pdf"):
コード例 #16
0
    'female': ['f1', 'f2', 'f3', 'f4', 'f5'],
    'male': ['m1', 'm2', 'm3', 'm4', 'm4'],  #m5 has an error or something
    'other': ['whisper', 'croak']
}
pitches = {
    'low': 0,
    'normal': 50,
    'high': 99
}
speed = {
    'slow': 100,
    'normal': 150,
    'fast': 180
}
voices = [
    voice.Voice('Female 1', tones['female'][3], pitches['normal'],
                speed['normal']),
    voice.Voice('Female 2', tones['female'][1], pitches['low'],
                speed['normal']),
    voice.Voice('Female 3', tones['female'][1], pitches['high'],
                speed['normal']),
    voice.Voice('Female 4', tones['female'][2], pitches['normal'],
                speed['normal']),
    voice.Voice('Female 5', tones['female'][3], pitches['high'],
                speed['normal']),
    voice.Voice('Female Fast', tones['female'][2], pitches['high'],
                speed['fast']),
    voice.Voice('Female Slow', tones['female'][4], pitches['low'],
                speed['slow']),
    voice.Voice('Whisper', tones['other'][0], pitches['normal'],
                speed['normal']),
    voice.Voice('Male 1', tones['male'][3], pitches['normal'],
コード例 #17
0
def process_file(fp_in,
                 fp_out,
                 xref_str,
                 pat,
                 sel_all,
                 search_field,
                 info=None):
    """Read the tune file *fp_in* line by line, parse each music or
    tablature line, and forward it to the output processing chain.

    Args:
        fp_in: path of the input file to read.
        fp_out: output target passed through to the line processors.
        xref_str, pat, sel_all, search_field: selection criteria
            forwarded to ``field.process_line`` (semantics defined
            elsewhere in the project).
        info: module providing ``Field``. NOTE(review): defaults to None
            but ``info.Field()`` is called unconditionally below, so a
            None default would crash — confirm against the call sites.
    """
    # int type;
    common.within_tune = False
    common.within_block = False
    common.do_this_tune = False

    # This is where there is the statements of verbose.
    # Just need to understand what the verbose number means to
    #  debug, warning, info, error, critical

    with open(fp_in) as f:
        lines = f.readlines()
    # A leading command line is consumed before normal processing.
    if parse.is_cmdline(lines[0].strip()):
        subs.process_cmdline(lines[0])
        del lines[0]
    for line in lines:
        line = line.strip()
        if not line:
            continue
        # Pseudo-comments carry directives; plain comments are dropped.
        if parse.is_pseudocomment(line):
            process_ps_comment(fp_in, fp_out, line)
            continue
        if parse.is_comment(line):
            continue

        # NOTE(review): the return value of decomment_line is discarded
        # and the original `line` is used below — confirm intentional.
        parse.decomment_line(line)

        # reset_info(default_info)
        field = info.Field()
        if field.is_field(line):
            # skip after history field. Nightmarish syntax, that.
            # Only the history ('H:') field needs accumulation here.
            k, v = line.split(':', 1)
            if k != 'H':
                pass
            else:
                field.history(v)

        # NOTE(review): these return from the whole function, aborting
        # the remainder of the file — confirm that is the intent.
        if not common.do_music:
            return
        if voice.parse_vocals(line):
            return

        # now parse a real line of music
        if not common.voices:
            # No voice seen yet: create the default voice lazily.
            common.ivc = voice.Voice().switch_voice(DEFVOICE)

        # Symbol count before parsing, used for the debug log below.
        n_sym_0 = len(common.voices[common.ivc].syms)

        # music or tablature?
        if tab.is_tab_line(line):
            tab.parse_tab_line(line)
        else:
            parse.parse_music_line(line)

        log.debug(
            f"  parsed music symbols {n_sym_0} to"
            f" {len(common.voices[common.ivc].syms)-1} for voice {common.ivc}")
        field.process_line(fp_out, xref_str, pat, sel_all, search_field)

    # Flush buffered output unless producing EPS figures.
    if not common.epsf:
        buffer.buffer_eob(fp_out)
        buffer.write_buffer(fp_out)
コード例 #18
0
ファイル: test_info.py プロジェクト: curtis-penner/bac
 def setUp(self):
     """Create a fresh Voice instance for each test case."""
     self.v = voice.Voice()