Example #1
def test_scan_no_init():
    src = Stream(None)
    s = record(scan(lambda acc, x: acc + ' ' + x, None, src))
    src('hello')
    src('world,')
    src('frpy!')
    assert s.footprint == ['hello', 'hello world,', 'hello world, frpy!']
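Throughout examples #1-#7, #9 and #12, `record` wraps a stream and collects every value it emits in a `footprint` list so the assertions can inspect the full history. The library's real `Stream`, `record` and `scan` are not shown on this page; the following is only a minimal, self-contained sketch of the behaviour the tests rely on (all names re-implemented here for illustration, not frpy's actual code):

# Illustration only -- a toy model of what the tests assume, not frpy's code.
class Stream:
    def __init__(self, clock=None):
        self.clock = clock
        self.listeners = []

    def __call__(self, value):
        # Pushing a value notifies every downstream listener.
        for listener in self.listeners:
            listener(value)


def record(src):
    # Remember everything `src` emits in a `footprint` list.
    src.footprint = []
    src.listeners.append(src.footprint.append)
    return src


def scan(fn, init, src):
    # Emit a running fold of the source stream (acc = fn(acc, value)).
    out = Stream(src.clock)
    acc = init

    def on_value(value):
        nonlocal acc
        acc = value if acc is None else fn(acc, value)
        out(acc)

    src.listeners.append(on_value)
    return out

With these stand-ins the assertion in example #1 holds: pushing 'hello', 'world,' and 'frpy!' yields the three accumulated strings.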
Example #2
def test_diff_no_init():
    src = Stream(None)
    s = record(diff(lambda x, y: y - x, None, src))
    src(5)
    src(11)
    src(19)
    assert s.footprint == [5, 6, 8]
Example #3
def test_scan():
    src = Stream(None)
    s = record(scan(lambda acc, x: acc + x, -1, src))
    src(2)
    src(6)
    src(10)
    assert s.footprint == [1, 7, 17]
Example #4
def test_fmap():
    src = Stream(None)
    s = record(fmap(lambda x: x + 3, src))
    src(5)
    src(208)
    src(176)
    src(1021)
    assert s.footprint == [8, 211, 179, 1024]
Example #5
def test_diff():
    src = Stream(None)
    s = record(diff(lambda x, y: y != x, '', src))
    src('aaa')
    src('aaa')
    src('bbb')
    src('aaa')
    src('bbb')
    assert s.footprint == [True, False, True, True, True]
Example #6
def test_repeat_fraction():
    clk = Stream(None)
    clk.clock = clk
    s = record(repeat(2.5, clk))
    clk(0)
    clk(1)
    clk(2)
    clk(3)
    clk(4)
    clk(5)
    clk(6)
    assert s.footprint == [0, 3, 6]
Example #7
def test_merge_topic():
    s1 = Stream(None)
    s2 = Stream(None)
    s3 = Stream(None)
    s = record(merge([s1, s2, s3], ['s1', 's2', 's3']))
    s1(123)
    s2(456)
    s1(12)
    s1(42)
    s3(500)
    s2(789)
    assert s.footprint == [('s1', 123), ('s2', 456), ('s1', 12), ('s1', 42),
                           ('s3', 500), ('s2', 789)]
Example #8
File: Slexy.py  Project: alican/dumpmon
	def monitor(self, bot, l_lock, t_lock):
		self.update()
		while(1):
			while not self.empty():
				paste = self.get()
				self.ref_id = paste.id
				with l_lock:
					helper.log('[*] Checking ' + paste.url)
				paste.text = helper.download(paste.url)
				with l_lock:
					tweet = helper.build_tweet(paste)
				if tweet:
					print tweet
					with t_lock:
						helper.record(tweet)
						bot.PostUpdate(tweet)
			self.update()
			# If no new results... sleep for 5 sec
			while self.empty():
				with l_lock:
					helper.log('[*] No results... sleeping')
				sleep(SLEEP_SLEXY)
				self.update()
Example #9
def test_sequence_ended():
    clk = Stream(None)
    clk.clock = clk
    s = record(sequence(2, iter(range(42, 46, 2)), clk))
    clk(0)
    clk(1)
    clk(2)
    clk(3)
    clk(4)
    clk(5)
    clk(6)
    clk(7)
    clk(8)
    assert s.footprint == [42, 44]
Example #10
File: Pastie.py  Project: alican/dumpmon
	def monitor(self, bot, l_lock, t_lock):
		self.update()
		while(1):
			while not self.empty():
				paste = self.get()
				self.ref_id = paste.id
				with l_lock:
					helper.log('[*] Checking ' + paste.url)
				# goober pastie - Not actually showing *raw* text.. Still need to parse it out
				paste.text = BeautifulSoup(helper.download(paste.url)).pre.text
				with l_lock:
					tweet = helper.build_tweet(paste)
				if tweet:
					print tweet
					with t_lock:
						helper.record(tweet)
						bot.PostUpdate(tweet)
			self.update()
			# If no new results... sleep for 5 sec
			while self.empty():
				with l_lock:
					helper.log('[*] No results... sleeping')
				sleep(SLEEP_PASTIE)
				self.update()
Example #11
File: Site.py  Project: Telco/dumpmon
 def monitor(self, bot, t_lock):
     self.update()
     while(1):
         while not self.empty():
             paste = self.get()
             self.ref_id = paste.id
             logging.info('[*] Checking ' + paste.url)
             # goober pastie - Not actually showing *raw* text.. Still need
             # to parse it out
             paste.text = self.get_paste_text(paste)
             tweet = helper.build_tweet(paste)
             if tweet:
                 logging.info(tweet)
                 with t_lock:
                     helper.record(tweet)
                     try:
                         bot.statuses.update(status=tweet)
                     except TwitterError:
                         pass
         self.update()
         while self.empty():
             logging.debug('[*] No results... sleeping')
             time.sleep(self.sleep)
             self.update()
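In examples #8, #10 and #11 the call `helper.record(tweet)` is dumpmon's own bookkeeping step, executed under the tweet lock before posting; its implementation is not included in these snippets. Purely as an illustration of the role it plays in the loop (persist the tweet before `bot.PostUpdate` runs), a hypothetical stand-in could look like:

# Hypothetical stand-in for helper.record -- not dumpmon's actual code.
import time

def record(tweet, path='tweets.log'):
    # Append the tweet with a timestamp so it is saved even if posting fails.
    with open(path, 'a') as f:
        f.write('%s\t%s\n' % (time.strftime('%Y-%m-%d %H:%M:%S'), tweet))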
Example #12
def test_combine_deps():
    # Sum two stream values when either changes
    s1 = Stream(None)
    s2 = Stream(None)

    def sum_upstreams(deps, s, src, value):
        return sum(dep() for dep in deps if dep() is not None)

    # existing value will also be pushed
    s1(1)
    s = record(combine(sum_upstreams, [s1, s2]))
    s2(3)
    s1(2)
    s1(5)
    s2(6)
    assert s.footprint == [1, 4, 5, 8, 11]
Example #13
    out = args.output
    inp = args.input
    rate = args.rate
    channels = args.channels
    delay = args.delay

    if not out.endswith('/'):
        out = out + '/'
    if not inp.endswith('/'):
        inp = inp + '/'

    if not os.path.exists(out):
        os.makedirs(out)

    for text in sorted(os.listdir(inp)):
        while True:
            print('press n to record ' + os.path.join(inp, text) +
                  ' or q to quit')
            ans = input()
            if ans == "n":
                break
            if ans == "q":
                sys.exit("Exiting...")
        with open(os.path.join(inp, text), 'r') as f:
            print('Text to dictate:')
            print(f.read())
            time.sleep(delay)
            wav_path = os.path.join(out, text + '.wav')
            if not os.path.exists(wav_path):
                record(rate, channels, wav_path)
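The `record(rate, channels, wav_path)` helper called at the end of this loop is not defined in the snippet. A plausible sketch, built on the sounddevice and soundfile packages, is shown below; the stop-on-Enter behaviour and the helper's internals are assumptions for illustration, not the project's actual implementation:

# Assumed implementation sketch -- not the original project's helper.
import numpy as np
import sounddevice as sd
import soundfile as sf

def record(rate, channels, wav_path):
    # Capture microphone input until the user presses Enter, then save a WAV.
    frames = []

    def callback(indata, nframes, time_info, status):
        # indata is reused by the audio backend, so copy each block.
        frames.append(indata.copy())

    with sd.InputStream(samplerate=rate, channels=channels, callback=callback):
        input('Recording... press Enter to stop.')

    sf.write(wav_path, np.concatenate(frames), rate)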
Example #14
vocoder.load_model(voc_model_fpath)

model_load_state.text("Loaded pretrained models!")

st.header("1. Record your own voice")

filename = st.text_input("Choose a filename: ")

if st.button(f"Click to Record"):
    if filename == "":
        st.warning("Choose a filename.")
    else:
        record_state = st.text("Recording...")
        duration = 5  # seconds
        fs = 48000
        myrecording = record(duration, fs)
        record_state.text(f"Saving sample as {filename}.mp3")

        path_myrecording = f"./samples/{filename}.mp3"

        save_record(path_myrecording, myrecording, fs)
        record_state.text(f"Done! Saved sample as {filename}.mp3")

        st.audio(read_audio(path_myrecording))

        fig = create_spectrogram(path_myrecording)
        st.pyplot(fig)

"## 2. Choose an audio record"

audio_folder = "samples"
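Examples #14 and #15 assume a `record(duration, fs)` that returns the captured samples and a `save_record(path, recording, fs)` that writes them out. Neither helper is shown on this page; a minimal sketch using sounddevice and soundfile might look like the following (the originals name the output ".mp3", but whether soundfile can encode MP3 depends on the installed libsndfile, so treat this purely as an interface sketch):

# Assumed helpers for illustration -- not the projects' actual code.
import sounddevice as sd
import soundfile as sf

def record(duration, fs):
    # Record `duration` seconds of mono audio at sample rate `fs`.
    recording = sd.rec(int(duration * fs), samplerate=fs, channels=1)
    sd.wait()  # block until the recording has finished
    return recording

def save_record(path, recording, fs):
    # Write the captured samples to disk at the given sample rate.
    sf.write(path, recording, fs)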
Example #15
File: ppt_voice.py  Project: muvazima/fyp
def main():

    st.title("Paper to PPT converter with custom voice delivery")
    filename = file_selector()
    st.sidebar.write('You selected `%s`' % filename)
    d=read_file(filename)
    dclean=summarize(d)

    filename=st.sidebar.text_input("Enter PPT file name")
    #filename='./PPTs/'
    #uploaded_file = st.sidebar.file_uploader("Choose a file", type=['txt'])
    if filename!='':
        #d=gen_dict(uploaded_file)
        #print(d)
        #dclean=summarize(d)
        #print(dclean)
        #filename=st.sidebar.text_input("Enter File Path to save PPT")
        create_ppt_new(dclean,filename)
        st.header("pptx file saved.")
        #inputFileName='/Users/Manam/fyp/PPTs/'+filename+'.pptx'
        #outputFileName='/Users/Manam/fyp/PDFs/'+filename+'.pdf'
        inputFileName=filename
        outputFileName=filename

        # PPTtoPDF(inputFileName, outputFileName)
        # if outputFileName[-3:] != 'pdf':
        #                 outputFileName = outputFileName + ".pdf"
        # with open(outputFileName,"rb") as f:
        #     base64_pdf = base64.b64encode(f.read()).decode('utf-8')
        # pdf_display = f'<embed src="data:application/pdf;base64,{base64_pdf}" width="700" height="1000" type="application/pdf">'

        # st.markdown(pdf_display, unsafe_allow_html=True)

        st.sidebar.title("Record your own voice")

        audiofilename = st.sidebar.text_input("Enter a filename for your voice: ")

        if st.sidebar.button(f"Click to Record"):
            if audiofilename == "":
                st.warning("Choose a filename.")
            else:
                record_state = st.text("Recording...")
                duration = 10  # seconds
                fs = 48000
                myrecording = record(duration, fs)
                record_state.text(f"Saving sample as {filename}.mp3")

                path_myrecording = f"./samples/{filename}.mp3"

                save_record(path_myrecording, myrecording, fs)
                #record_state.text(f"Done! Saved sample as {filename}.mp3")

                st.sidebar.audio(read_audio(path_myrecording))

                #fig = create_spectrogram(path_myrecording)
                #st.pyplot(fig)

        audio_folder = "samples"
        filenames = glob.glob(os.path.join(audio_folder, "*.mp3"))
        selected_filename = st.sidebar.selectbox("Select a voice", filenames)
        
        if selected_filename is not None:
            # Create embedding
            embed = create_embedding(selected_filename)
            # st.success("Created the embedding")
            # st.audio(read_audio(in_fpath))
            # fig = draw_embed(embed, "myembedding", None)
            # st.pyplot(fig)

        for i in dclean:
            text = dclean[i]
            texts = [text]
            embeds = [embed]

            # generate waveform
            # with st.spinner("Generating your speech..."):
            specs = synthesizer.synthesize_spectrograms(texts, embeds)
            spec = specs[0]

            generated_wav = vocoder.infer_waveform(spec)
            generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
            generated_wav = encoder.preprocess_wav(generated_wav)

            # Save it on the disk
            opfilename = "Output/" + filename + "-" + i + ".wav"
            sf.write(opfilename, generated_wav.astype(np.float32), synthesizer.sample_rate)
            st.text(i)
            st.audio(read_audio(opfilename))