def __init__(self):
    """Initialize the voice recognizer, music helper, and limb/TV state, then start the main loop."""
    # Speech recognizer used for all subsequent voice input.
    self.r = sr.Recognizer()
    # Music playback helper object.
    self.MUSIC = music.music()
    # State flags all start at rest / off.
    self.LeftHand = 0
    self.RightHand = 0
    self.TVcheck = 0
    # Enter the main voice-command loop immediately on construction.
    self.VMP()
def POST(self): str_xml = web.data() #获得post来的数据 xml = etree.fromstring(str_xml)#进行XML解析 content=xml.find("Content").text#获得用户所输入的内容 msgType=xml.find("MsgType").text fromUser=xml.find("FromUserName").text toUser=xml.find("ToUserName").text #增加订阅与退订事件 if msgType =='event': mscontent = xml.find("Event").text if mscontent =='subscribe': replyText = u''' 欢迎关注大T的微信平台,目前提供在线翻译,调戏小黄鸡智能聊天机器人,未来将有今日足彩推荐。输入help查看帮助命令''' return render.reply_text(fromUser,toUser,int(time.time()),replyText) if mscontent =='unsubscribe': replyText = u'''我目前功能还简单,在不断完善中,随时欢迎您回来!''' return render.reply_text(fromUser,toUser,int(time.time()),replyText) #增加help操作 if msgType =='text': if content == 'help': replyText = u'''**********************\n1.直接输入英文或者中文返回对应的中英翻译\n2.输入xhj你就可以调戏小贱鸡了\n3.今日足彩推荐\n****************** ''' return render.reply_text(fromUser,toUser,int(time.time()),replyText) if content == 'music': musicUrl,musicTitle,musicDesc = music() print musicUrl print musicTitle print musicDesc return render.reply_music(fromUser,toUser,int(time.time()),musicUrl,musicTitle,musicDesc) if content.lower() == 'bye': mc.delete(fromUser+'_xhj') return render.reply_text(fromUser,toUser,int(time.time()),u'*******************\n您已经跳出了和小贱鸡的交谈中,输入help来显示操作指令\n*******************') if content.lower() == 'xhj': mc.set(fromUser+'_xhj','xhj')#set memcache return render.reply_text(fromUser,toUser,int(time.time()),u'********************\n您已经进入与小贱鸡的交谈中,随便搞吧骚年!输入bye跳出与小黄鸡的交谈\n*********************') mcxhj = mc.get(fromUser+'_xhj')#get memcache if mcxhj == 'xhj': reply = xiaoHuangJi(content) reply_text = reply['sentence_resp'] return render.reply_text(fromUser,toUser,int(time.time()),reply_text) #翻译 #如果是unicode则对其进行utf-8转码 if type(content).__name__=="unicode": content = content.encode("utf-8") res =youDao(content) return render.reply_text(fromUser,toUser,int(time.time()),res)
def audio_in():
    """Listen on the microphone once and return the recognized phrase, lower-cased.

    Retries forever: on any recognition failure it speaks an apology and tries again.
    Reads the module-level `statement` as the prompt to echo.
    """
    while True:
        try:
            with sr.Microphone() as source:
                # BUG FIX: adjust_for_ambient_noise requires the source argument.
                initial.adjust_for_ambient_noise(source)
                com_out(statement)
                audio_input = initial.listen(source)
                y_text = initial.recognize_google(audio_input)
                # BUG FIX: original lower-cased the unrelated global `text`.
                y_text = y_text.lower()
                user_out(y_text)
                return y_text
        except Exception:
            com_out('Sorry I can\'t hear you')
            # BUG FIX: original string was 'Sorry I can\t hear you' (a literal tab).
            abc = gTTS('Sorry I can\'t hear you')
            abc.save('a.wav')
            os.system('start a.wav')
            time.sleep(2)


def com_out(message):
    """Echo a computer-side message to the console."""
    print('Computer: ', message)


def user_out(message):
    """Echo a user-side message to the console."""
    # NOTE(review): the original line was corrupted ("print('User: '******...");
    # reconstructed as the obvious mirror of com_out — confirm against history.
    print('User: ', message)


def _say(statement_text):
    """Speak `statement_text` via gTTS (fire-and-forget) and echo it to the console."""
    audio_out = gTTS(statement_text)
    audio_out.save('a.wav')
    os.system('start a.wav')
    com_out(statement_text)


# Main command loop (reconstructed around the corrupted span in the original).
while True:
    text = ''
    try:
        with sr.Microphone() as source:
            initial.adjust_for_ambient_noise(source)
            com_out('Please Command me')
            audio_input = initial.listen(source)
            text = initial.recognize_google(audio_input)
            text = text.lower()
            user_out(text)
    except Exception:
        pass

    if re.search('play', text) or re.search('music', text) or re.search('song', text):
        statement = 'Please name the song you want me to play or say random to let me play any song for you of my choice'
        audio_out = gTTS(statement)
        audio_out.save('a.wav')
        os.system('start a.wav')
        time.sleep(9)
        try:
            with sr.Microphone() as source:
                initial.adjust_for_ambient_noise(source)
                com_out(statement)
                audio_input = initial.listen(source)
                text = initial.recognize_google(audio_input)
                text = text.lower()
                user_out(text)
                music_path = music.music(text)
                os.system('start ' + music_path)
        except Exception:
            continue

    elif re.search('news', text) or re.search('headline', text):
        news_text = ''
        news.data()
        # BUG FIX: csv.reader was given the *filename*, which iterates it
        # character by character; open the file news.data() produced instead.
        with open('news.csv') as news_file:
            reader = csv.reader(news_file)
            for row in reader:
                # NOTE(review): gTTS is handed the raw csv row (a list) as in
                # the original — likely needs ' '.join(row); confirm intent.
                audio_out = gTTS(row)
                audio_out.save('a.wav')
                os.system('start a.wav')
                try:
                    with sr.Microphone() as source:
                        initial.adjust_for_ambient_noise(source)
                        com_out(statement)
                        audio_input = initial.listen(source)
                        news_text = initial.recognize_google(audio_input)
                        # BUG FIX: original lower-cased `text`, not the new phrase.
                        news_text = news_text.lower()
                        user_out(news_text)
                except Exception:
                    pass
                # BUG FIX: second test checked `text` instead of `news_text`.
                if re.search('exit', news_text) or re.search('stop', news_text):
                    break
                time.sleep(6)

    # BUG FIX: original `and ('reminder', text)` built a tuple, which is always
    # truthy — any command containing "add" entered this branch.
    elif re.search('add', text) and re.search('reminder', text):
        statement = 'Which year'
        _say(statement)
        time.sleep(1)
        year = audio_in()
        if re.search('exit', year) or re.search('close', year) or re.search('stop', year):
            break
        statement = 'Which month'
        _say(statement)
        time.sleep(1)
        month = audio_in()
        if re.search('exit', month) or re.search('close', month) or re.search('stop', month):
            break
        statement = 'Which date'
        _say(statement)
        time.sleep(1)
        date = audio_in()
        if re.search('exit', date) or re.search('close', date) or re.search('stop', date):
            break
        statement = 'Hour of the day?'
        _say(statement)
        time.sleep(1)
        hour = audio_in()
        if re.search('exit', hour) or re.search('close', hour) or re.search('stop', hour):
            break
        statement = 'Purpose'
        _say(statement)
        time.sleep(1)
        purpose = audio_in()
        try:
            year = w2n.word_to_num(year)
            month = w2n.word_to_num(month)
            date = w2n.word_to_num(date)
            hour = w2n.word_to_num(hour)
            reminder.set(year, month, date, hour, purpose)
        except Exception:
            statement = 'Sorry, unable to set reminder'
            audio_out = gTTS(statement)
            audio_out.save('a.wav')
            os.system('start a.wav')
            time.sleep(2)
def talk():
    """Capture one spoken command via PocketSphinx and map it to a one-character
    robot command; unmatched speech is handed to ChatterBot and spoken via pyttsx.

    Returns:
        str: 'f' forward, 'm' left, 'n' right, 'b' back, 't' line-follow,
             'o' object-track, 's' stop, 'g' greeting, 'q' quit, '0' nothing.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        beeping.beep()
        r.dynamic_energy_threshold = True
        # Listen for 0.5 s to calibrate the energy threshold for ambient noise.
        r.adjust_for_ambient_noise(source, 0.5)
        r.pause_threshold = 0.5
        # Speech is assumed louder than ambient noise by a factor of 2.5.
        r.dynamic_energy_adjustment_ratio = 2.5
        audio = r.listen(source)
        beeping.beep()
        # BUG FIX: recognize_sphinx() is the call that raises UnknownValueError /
        # RequestError, so it must be inside the try block; the original wrapped
        # only the print and would crash on unintelligible audio.
        ans = ''
        try:
            ans = r.recognize_sphinx(audio)  # recognize speech using Sphinx
            print("Sphinx thinks you said = " + ans)
        except sr.UnknownValueError:
            print("Sphinx could not understand audio")
        except sr.RequestError as e:
            print("Sphinx error; {0}".format(e))
        command = '0'
        if ans == '':
            return command
        if 'START' in ans:
            if 'FORWARD' in ans:
                print("Moving Forward")
                command = 'f'
            elif 'LEFT' in ans or 'NAME' in ans or 'GOOD' in ans or 'QUIT' in ans:
                # NAME/GOOD/QUIT are accepted as Sphinx mis-hearings of LEFT.
                print("Moving left")
                command = 'm'
            elif 'RIGHT' in ans or 'HEIGHT' in ans or 'BYE' in ans:
                # HEIGHT/BYE are accepted as Sphinx mis-hearings of RIGHT.
                print("Moving right")
                command = 'n'
            elif 'BACK' in ans:
                print("Moving back")
                command = 'b'
            elif 'LINE' in ans and 'FOLLOW' in ans:
                print("Going to follow a line")
                command = 't'
            elif 'OBJECT' in ans and 'TRACK' in ans:
                print("Going for object tracking")
                command = 'o'
            elif 'FOLLOW' in ans:
                print("Moving Forward")
                command = 'f'
            return command
        elif 'STRAIGHT' in ans:
            print("Moving Forward")
            return 'f'
        elif 'COME BACK' in ans:
            print("Moving back")
            return 'b'
        elif 'STOP' in ans:
            return 's'
        elif 'HELLO' in ans or 'GOOD' in ans:
            # Greeting: fall through to the chat bot below with command 'g'.
            command = 'g'
        elif 'MUSIC' in ans or 'PLAY' in ans:
            music.music()
            return command
        elif 'FINISH' in ans or 'QUIT' in ans:
            return 'q'
        # No motor command matched (or a greeting): chat instead.
        from chatterbot import ChatBot
        from chatterbot.training.trainers import ListTrainer
        # Start from a fresh conversation database on every call.
        if os.path.isfile("./database.db"):
            os.remove("./database.db")
        bot = ChatBot(
            "Terminal",
            storage_adapter="chatterbot.adapters.storage.JsonDatabaseAdapter",
            logic_adapters=[
                "chatterbot.adapters.logic.MathematicalEvaluation",
                "chatterbot.adapters.logic.TimeLogicAdapter",
                "chatterbot.adapters.logic.ClosestMatchAdapter"
            ],
            input_adapter="chatterbot.adapters.input.VariableInputTypeAdapter",
            format='text',
            database="./database.db")
        bot.set_trainer(ListTrainer)
        # Statement/response pairs for the small built-in personality.
        bot.train([
            "hello", "hi",
            "how are you", "i am fine",
            "that is good to hear",
            "what is your name", "my name is HURO",
            "bye", "bye bye",
            "see you later", "take care",
            "you too",
            "good morning", "very good morning",
            "good afternoon", "very good afternoon",
            "good evening", "charming evening",
            "good night", "sweet dreams",
            "same to you",
            "team members",
            "Ujjwal, Rijak, Abhishek, Saurabh, Mrinaal, Bhuvi, Shruti, Nitish",
            "what is ", "Ask a better question"
        ])
        response = bot.get_response(ans)
        print(response)
        # Speak the bot's reply.
        import pyttsx
        engine = pyttsx.init()
        engine.setProperty('rate', 135)
        engine.say(response)
        engine.runAndWait()
        return command
def talk():
    """Capture one spoken command via PocketSphinx and map it to a one-character
    robot command; unmatched speech is handed to ChatterBot and spoken via pyttsx.

    Returns:
        str: 'f' forward, 'm' left, 'n' right, 'b' back, 't' line-follow,
             'o' object-track, 's' stop, 'g' greeting, 'q' quit, '0' nothing.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        beeping.beep()
        r.dynamic_energy_threshold = True
        # Listen for 0.5 s to calibrate the energy threshold for ambient noise.
        r.adjust_for_ambient_noise(source, 0.5)
        r.pause_threshold = 0.5
        # Speech is assumed louder than ambient noise by a factor of 2.5.
        r.dynamic_energy_adjustment_ratio = 2.5
        audio = r.listen(source)
        beeping.beep()
        # BUG FIX: recognize_sphinx() is the call that raises UnknownValueError /
        # RequestError, so it must be inside the try block; the original wrapped
        # only the print and would crash on unintelligible audio.
        ans = ''
        try:
            ans = r.recognize_sphinx(audio)  # recognize speech using Sphinx
            print("Sphinx thinks you said = " + ans)
        except sr.UnknownValueError:
            print("Sphinx could not understand audio")
        except sr.RequestError as e:
            print("Sphinx error; {0}".format(e))
        command = '0'
        if ans == '':
            return command
        if 'START' in ans:
            if 'FORWARD' in ans:
                print("Moving Forward")
                command = 'f'
            elif 'LEFT' in ans or 'NAME' in ans or 'GOOD' in ans or 'QUIT' in ans:
                # NAME/GOOD/QUIT are accepted as Sphinx mis-hearings of LEFT.
                print("Moving left")
                command = 'm'
            elif 'RIGHT' in ans or 'HEIGHT' in ans or 'BYE' in ans:
                # HEIGHT/BYE are accepted as Sphinx mis-hearings of RIGHT.
                print("Moving right")
                command = 'n'
            elif 'BACK' in ans:
                print("Moving back")
                command = 'b'
            elif 'LINE' in ans and 'FOLLOW' in ans:
                print("Going to follow a line")
                command = 't'
            elif 'OBJECT' in ans and 'TRACK' in ans:
                print("Going for object tracking")
                command = 'o'
            elif 'FOLLOW' in ans:
                print("Moving Forward")
                command = 'f'
            return command
        elif 'STRAIGHT' in ans:
            print("Moving Forward")
            return 'f'
        elif 'COME BACK' in ans:
            print("Moving back")
            return 'b'
        elif 'STOP' in ans:
            return 's'
        elif 'HELLO' in ans or 'GOOD' in ans:
            # Greeting: fall through to the chat bot below with command 'g'.
            command = 'g'
        elif 'MUSIC' in ans or 'PLAY' in ans:
            music.music()
            return command
        elif 'FINISH' in ans or 'QUIT' in ans:
            return 'q'
        # No motor command matched (or a greeting): chat instead.
        from chatterbot import ChatBot
        from chatterbot.training.trainers import ListTrainer
        # Start from a fresh conversation database on every call.
        if os.path.isfile("./database.db"):
            os.remove("./database.db")
        bot = ChatBot(
            "Terminal",
            storage_adapter="chatterbot.adapters.storage.JsonDatabaseAdapter",
            logic_adapters=[
                "chatterbot.adapters.logic.MathematicalEvaluation",
                "chatterbot.adapters.logic.TimeLogicAdapter",
                "chatterbot.adapters.logic.ClosestMatchAdapter"
            ],
            input_adapter="chatterbot.adapters.input.VariableInputTypeAdapter",
            format='text',
            database="./database.db")
        bot.set_trainer(ListTrainer)
        # Statement/response pairs for the small built-in personality.
        bot.train([
            "hello", "hi",
            "how are you", "i am fine",
            "that is good to hear",
            "what is your name", "my name is HURO",
            "bye", "bye bye",
            "see you later", "take care",
            "you too",
            "good morning", "very good morning",
            "good afternoon", "very good afternoon",
            "good evening", "charming evening",
            "good night", "sweet dreams",
            "same to you",
            "team members",
            "Ujjwal, Rijak, Abhishek, Saurabh, Mrinaal, Bhuvi, Shruti, Nitish",
            "what is ", "Ask a better question"
        ])
        response = bot.get_response(ans)
        print(response)
        # Speak the bot's reply.
        import pyttsx
        engine = pyttsx.init()
        engine.setProperty('rate', 135)
        engine.say(response)
        engine.runAndWait()
        return command
# getting started https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html from music import music from laser import laser lasernumber = 5 device = 1 dummy = 0 instrument = 61 mc = music(lasernumber ,0,60,True, instrument); mc.test(); harfe = laser(lasernumber, device, dummy); harfe.calibration() while(1): # ak = cv2.waitKey(1) # if ak != -1: # ak = chr(ak) # if ak == 'c': # sys.exit() keylist = harfe.readInputUntilRecognition(0) for i in range(lasernumber):
def spotfi(csi):
    """Run the SpotFi pipeline on one CSI snapshot and return the MUSIC pseudo-spectrum."""
    subcarriers = get_subcarriers_index(20, 2)
    # Spatially smooth the CSI, then strip the sampling-time offset before MUSIC.
    smoothed = smooth_csi(csi)
    smoothed = remove_sto(smoothed, subcarriers)
    # 32 virtual sensors, 6.25 cm spacing, 5.327 GHz carrier, 30 snapshots, 1 source.
    spectrum, _doa = music(X=smoothed, N=32, d=6.25e-2, f=5.327e9, M=30, L=1)
    return spectrum
def main():
    """Build the game in the main-menu state, show the start screen, start music, run."""
    game = tool.Game(stateDict, "mainmenu")
    startinterface.StartInterface(game.screen)
    music.music()
    game.run()
from mail import mail
from wea import weather
from bot import bot
from wiki import wiki

# Daily briefing (mail, weather, news) followed by an interactive
# question loop; neither loop ever exits on its own.
count = 0
while True:
    tts(mail())
    tts(weather('jabalpur'))
    tts(news())
    while True:
        query = str(input("Ask -> "))
        if "weather report" in query:
            place = str(input("Enter LOC -> "))
            tts(weather(place))
        elif "news" in query:
            tts(news())
        elif "mail" in query:
            tts(mail())
        elif "song" in query:
            tts("Which song you wanna listen")
            track = str(input("Song -> "))
            music(track)
        elif "neola" in query:
            tts(bot(query.replace("neola search ", "")))
        elif "wiki" in query:
            tts(wiki(query.replace("wiki ", "")))
# -*- coding: utf-8 -*- #in this notebook, we will explore how to read radar data #the necessary routines are in pydarn.sdio import sys sys.path.append('/davitpy') import pydarn.sdio import datetime as dt import music msc = music.music() msc.params myPtr = pydarn.sdio.radDataOpen((msc.params['datetime'])[0], msc.params['radar'], eTime=(msc.params['datetime'])[1], channel=msc.params['channel'], bmnum=msc.params['bmnum'], filtered=msc.options['filtered']) #Note that the output or radDataOpen is of type radDataPtr #Let's explore its contents for key, val in myPtr.__dict__.iteritems(): print 'myPtr.' + key + ' = ' + str(val) myBeam = pydarn.sdio.radDataReadRec(myPtr) #The output is of type beamData #a beamData object can store fit data as well as rawacf and iqdat data #let's look at the contents of myBeam for key, val in myBeam.__dict__.iteritems(): print 'myBeam.' + key + ' = ' + str(val)
def menu(db, cur, form):
    """Render the musicians list page, or delegate to one musician's page.

    Args:
        db:   open database connection (passed through to music.music).
        cur:  DB-API cursor used for all queries.
        form: CGI form; may carry `mus_id` (0/absent = list view),
              `filter` (sort column) or `search` (title pattern).
    """
    mus_id = int(form.getvalue("mus_id")) if "mus_id" in form else 0
    if mus_id == 0:
        # BUG FIX: mus_search was undefined in the "no results" branch unless
        # the request actually carried a search term.
        mus_search = ''
        if "filter" in form:
            mus_filter = str(form.getvalue("filter"))
            # SECURITY FIX: a column name cannot be bound as a query parameter,
            # so whitelist it instead of formatting raw user input into SQL.
            if mus_filter not in ("title", "origin", "genres"):
                mus_filter = "title"
            cur.execute("SELECT id, title, genres FROM musicians ORDER BY {};".format(mus_filter))
            data = cur.fetchall()
        elif "search" in form:
            mus_search = str(form.getvalue("search"))
            # BUG FIX: the parameter container must be a tuple — (mus_search)
            # is just a parenthesised string and breaks DB-API binding.
            cur.execute("SELECT id, title, genres FROM musicians WHERE title LIKE %s;", (mus_search,))
            data = cur.fetchall()
        else:
            cur.execute("SELECT id, title, genres FROM musicians;")
            data = cur.fetchall()
        print('''<div class="content-wrapper">''')
        if not len(data):
            print(''' <p class="not-found">There is no <span class="request">'{}'</span> without SUCC!</p> <img class="foqu" src="../tmp/foqu.png"> '''.format(mus_search))
        else:
            # NOTE(review): HTML below reflowed from a collapsed source line;
            # markup content is unchanged, only whitespace differs.
            print(''' <div class="search-n-filter">
 <form class="search" action="engine.py" target="_self" method="get">
 <input type="hidden" name="function" value="page">
 <input type="hidden" name="page_id" value="6">
 <input class="search-input" type="text" name="search" placeholder="Type the name of an artist...">
 <input class="search-button" type="submit" value="Search">
 </form>
 <form class="filter" action="engine.py" target="_self" method="get">
 <input type="hidden" name="function" value="page">
 <input type="hidden" name="page_id" value="6">
 <button class="filter-button" type="submit" name="filter" value="title"> Title </button>
 <button class="filter-button" type="submit" name="filter" value="origin"> Location </button>
 <button class="filter-button" type="submit" name="filter" value="genres"> Genre </button>
 </form>
 </div> ''')
            for item in data:
                print('''<a class="search-item" href="http://g03u32.nn2000.info/cgi-bin/engine.py?function=page&page_id=6&mus_id={}" style="background: url('../tmp/thumb-{}.jpg') center no-repeat; background-size: auto 100%;"><p>{}<br><span>{}</span></p></a>'''.format(item[0], item[1].split(' ')[0], item[1], item[2]))
        print('''</div>''')
    elif mus_id > 0:
        # SECURITY FIX: bind the id rather than formatting it into the query.
        cur.execute("SELECT title FROM musicians WHERE id = %s;", (mus_id,))
        page_info = cur.fetchone()
        music.music(db, cur, 6, page_info[0], form, mus_id)
def webhook():
    """Route a Dialogflow webhook request to the handler for its `action` field.

    Stores the parsed request in the module-level `reqContext`, dispatches to a
    category handler, and prints "Good Bye" for any unknown action.
    """
    global reqContext
    print(request.get_json(silent=True, force=True))
    reqContext = request.get_json(silent=True, force=True)
    action = reqContext.get("result").get("action")
    print("webhook---->" + action)
    # Action name -> handler callable (one entry per original elif branch).
    handlers = {
        "input.welcome.edwin": welcome,
        "showpopularcategories": showpopularcategories,
        "professionalcourses": professional_courses.professionalcourses,
        "professionalcertificates": professional_courses.professionalcertificates,
        "careeradvancement": professional_courses.careeradvancement,
        "micromastersprograms": professional_courses.micromastersprograms,
        "advancedskillsetcourses": professional_courses.advancedskillsetcourses,
        "popularcourses": popular_course.popularcourselist,
        "computerscience": computer_science.computerscience,
        "data_and_statistics": data_and_statistics.data_and_statistics,
        "business_management": business_management.business_management,
        "language": language.language,
        "economics_and_finance": economics_and_finance.economics_and_finance,
        "engineering": engineering.engineering,
        "humanities": humanities.humanities,
        "life_sciences": life_sciences.life_sciences,
        "science_courses": science_courses.science_courses,
        "chemistry": chemistry.chemistry,
        "electronics": electronics.electronics,
        "environmental_studies": environmental_studies.environmental_studies,
        "mathematics": mathematics.mathematics,
        "medicine": medicine.medicine,
        "physics": physics.physics,
        "social_science": social_science.social_science,
        "energy_and_earth_science": energy_and_earth_science.energy_and_earth_science,
        "art_courses": art_courses.art_courses,
        "architecture": architecture.architecture,
        "art_and_culture": art_and_culture.art_and_culture,
        "communication": communication.communication,
        "design": design.design,
        "food_and_nutrition": food_and_nutrition.food_and_nutrition,
        "health_and_safety": health_and_safety.health_and_safety,
        "history": history.history,
        "music": music.music,
        "law": law.law,
        # "Back" actions reuse the corresponding category handlers.
        "BacktoWelcomeContent": showpopularcategories,
        "BackToProfessionalCourses": professional_courses.professionalcourses,
    }
    handler = handlers.get(action)
    if handler is not None:
        return handler()
    print("Good Bye")
theme.doGrid(grid) print "</div>" print "<p><h2>Recipe</h2>" from editRecipe import editRecipe r = editRecipe() r.recipeName = form['recipeName'].value r.localUser = False r.displayRecipe() # Will Wheaton inspire mashup # last.fm sql if os.path.exists("../../misc/%s.lastfm.json" % (form['brewlog'].value)): print "<p> </p><p><h2>Music</h2><blockquote>" from music import music m = music() m.brewlog = form['brewlog'].value m.show() theme.presentFoot() sys.exit(0) activity = 0 activitySteps = json.loads(brewerslabCloudApi().listActivitySteps( "*****@*****.**", process, form['activityNum'].value, form['brewlog'].value)['json'])['result'] #<iframe src="iframeTimerTemp.py" frameBorder=0 width=100% height=170px scrolling=no></iframe> visible = "hidden" if os.path.exists("/currentdata/temp-mcast-rx-on"): if len(os.listdir("/currentdata/temps")) > 0: showTemps = "visible"
import time
import os
import RPi.GPIO as GPIO
import threading
import requests
import json
from google.cloud import storage
from subprocess import call
import music

MUSIC = music.music()

# Google Cloud Storage credentials and the bucket used for audio exchange.
os.environ[
    "GOOGLE_APPLICATION_CREDENTIALS"] = "/home/pi/Downloads/robo-cc0c2-dcd11e1765d5.json"
storage_client = storage.Client()
bucket = storage_client.bucket("robo-cc0c2.appspot.com")


def RecordPlay():
    """Download RecordPlay.mp3 from the bucket and play it with VLC."""
    print("녹음 파일 다운로드 시작")  # "starting download of the recording"
    recording = bucket.blob("RecordPlay.mp3")
    local_path = '/home/pi/Desktop/robosapiens/{}'.format(recording.name)
    recording.download_to_filename(local_path)
    print("녹음 파일 재생")  # "playing the recording"
    call('cvlc /home/pi/Desktop/robosapiens/RecordPlay.mp3', shell=True)
""" from music import music from dataxml import dataxml as data mus1 = music() dat1 = data() dat1.read('old.xml', mus1) dat1.write('new.xml', mus1) for id in mus1.getGroupIds(): print(mus1.getGroupDescription(id)) """ from music import music from dataxml import dataxml as datax from datasql import datasql as data mus1 = music() dat1 = datax() dat1.read("old.xml", mus1) dat2 = data() dat2.write("new5.sqlite", mus1) dat2.read("new5.sqlite", mus1) for id in mus1.getGroupIds(): print(mus1.getGroupDescription(id))
import pygame import window import images import music import superficie import time import random imagens = images.images() sup = superficie.superficie() musica = music.music() class game(): def __init__(self): self.window = window.window(800, 600, "Jogo", imagens.back, musica.menu, 0) self.window.setBack(imagens.back, (0, 0)) self.fonte = pygame.font.SysFont("comicsans", 24, True) self.main() def main(self): RunOn = True frames = pygame.time.Clock() esta_tocando = True sound = imagens.soundOn esc_down = True desenharp = True x = 350 pos_letra_certa = [] self.window.music_on(esta_tocando, musica.menu) restart = True
def main():
    """Create the game in the main-menu state, start background music, and run it."""
    game = tool.Game(stateDict, "mainmenu")
    music.music()
    game.run()
pl = Autsave(x, y) sprite_group.add(pl) autsave_blocks.append(pl) x += 40 y += 40 x = 0 # Пишется отдельно из-за неудобных размеров изображения. informatic = End(4510, 2430) sprite_group.add(informatic) # Создание таймера. timer = pygame.time.Clock() # Создание звукового сопровождения. music() # Переменная для выхода из игры. done = True # Переменная для создания сохранялок. d = 0 # Переменная для подсчета набранных очков. score = 0 # Всеобъемлющий цикл. while done: # Считывание и выпонение запросов пользователя. for e in pygame.event.get():
def main():
    """Pick a song on the console, then show a Tkinter window of playback controls."""
    system('cls')
    playlist = music()
    playlist.print_playlist()
    # BUG FIX: stray no-op string literals (" " " ") removed from the original.
    sound = input("Enter the name of the song you want to play: or select sound according to position: ")
    playlist.sound_selection(sound)

    # WINDOW SETTINGS
    window = Tk()
    window.title("Music Player")
    window.geometry("700x50")
    topFrame = Frame(window)
    topFrame.pack()
    bottomFrame = Frame(window)
    bottomFrame.pack(side=BOTTOM)
    window.resizable(width=False, height=False)
    window["bg"] = "black"

    def quitprogram():
        """Tear the window down and exit, with short pauses around destroy()."""
        time.sleep(0.25)
        window.destroy()
        time.sleep(0.25)
        quit()

    def _control(label, command, padx=None):
        """Create one black playback button wired to `command` (padx optional)."""
        btn = Button(text=label, fg="white")
        if padx is None:
            btn.pack(side=LEFT)
        else:
            btn.pack(side=LEFT, padx=padx)
        btn.configure(command=command)
        btn["bg"] = "black"
        return btn

    # One button per playlist operation (first one packs without padding,
    # matching the original layout).
    _control("PLAY", playlist.playsong)
    _control("PAUSE", playlist.pausesong, 10)
    _control("Stop", playlist.stopsong, 10)
    _control(">>>", playlist.nextsong, 10)
    _control("<<<", playlist.prevsong, 10)
    _control("resume", playlist.resumesong, 10)
    _control("VOL -", playlist.volumedown, 10)
    _control("VOL +", playlist.volumeup, 10)

    window.protocol('WM_DELETE_WINDOW', quitprogram)
    # THE WINDOW BEING KEPT OPEN
    window.mainloop()
# -*- coding: utf-8 -*- #in this notebook, we will explore how to read radar data #the necessary routines are in pydarn.sdio import sys sys.path.append('/davitpy') import pydarn.sdio import datetime as dt import music msc = music.music() msc.params myPtr = pydarn.sdio.radDataOpen( (msc.params['datetime'])[0], msc.params['radar'], eTime=(msc.params['datetime'])[1], channel=msc.params['channel'], bmnum=msc.params['bmnum'], filtered=msc.options['filtered']) #Note that the output or radDataOpen is of type radDataPtr #Let's explore its contents for key,val in myPtr.__dict__.iteritems(): print 'myPtr.'+key+' = '+str(val) myBeam = pydarn.sdio.radDataReadRec(myPtr) #The output is of type beamData #a beamData object can store fit data as well as rawacf and iqdat data #let's look at the contents of myBeam for key,val in myBeam.__dict__.iteritems():