Ejemplo n.º 1
0
 def setUp(self) -> None:
     """Resolve test resource paths and build a Config-backed Audio helper."""
     resources = Path(os.path.dirname(os.path.abspath(__file__))) / 'resources'
     self.wav_path = resources / 'small_sample.wav'
     self.cfg = Config.load(resources / 'test_config.yaml')
     self.audio = Audio(self.cfg)
Ejemplo n.º 2
0
 def __init__(self, bucket_name, filepath, output_filepath):
     """Remember the file locations, then connect to the GCS bucket."""
     self.bucket_name = bucket_name
     self.filepath = filepath
     self.output_filepath = output_filepath
     # The bucket lookup performs an API call, so keep the client around.
     self.storage_client = storage.Client()
     self.bucket = self.storage_client.get_bucket(bucket_name)
     self.audio = Audio()
def multiple_random_tempos():
    """Render 40 marimba parts, each pulsing at its own random tempo.

    Pitches come from a slice of the harmonic series above MIDI 36; each
    part's pulse duration is drawn uniformly from 2-6 quarter notes.  The
    mix is written to 'multiple-random-tempos'.
    """
    # Parenthesized print: identical output on Python 2 and valid on Python 3
    # (the original `print '...'` statements are a SyntaxError under py3).
    print('Running multiple_random_tempos...')
    marimba = Marimba()
    audio = Audio(1200)

    quarter_duration_in_seconds = 1.0

    n_parts = 40

    lowest_harmonic = 2
    n_harmonics = n_parts

    lowest_midi_note = 36.0

    pitches = get_slice_of_harmonic_series(lowest_harmonic,
                                           n_harmonics,
                                           lowest_midi_note=lowest_midi_note)

    durations = random_from_range(2.0, 6.0, size=n_parts)

    print(durations)

    for pitch, duration in zip(pitches, durations):
        duration *= quarter_duration_in_seconds
        pulse(audio, marimba, pitch, duration, 0.75)

    audio.write_wav('multiple-random-tempos')
    print('Done running multiple_random_tempos.')
def multiple_tempos():
    """Layer hand-picked harmonic-series pitches, each at its own pulse rate.

    Comments on each line give the harmonic number and cents offset.  The
    mix is written to 'multiple-tempos'.
    """
    # Parenthesized print: works on Python 2 and 3 (the original py2-only
    # print statements fail to parse under Python 3).
    print('Running multiple_tempos...')
    marimba = Marimba()
    audio = Audio(120)

    pulse(audio, marimba, 41.0, 1.5)  # 1 -- 0
    pulse(audio, marimba, 48.02, 1.4)  # 3 -- 702

    pulse(audio, marimba, 53.0, 1.3)  # 1 -- 0
    pulse(audio, marimba, 56.86, 1.2)  # 5 -- 386
    pulse(audio, marimba, 60.02, 1.1)  # 3 -- 702
    pulse(audio, marimba, 62.69, 1.0)  # 7 -- 969

    pulse(audio, marimba, 65.0, 0.9)  # 1 -- 0
    pulse(audio, marimba, 67.04, 0.8)  # 9 -- 204
    pulse(audio, marimba, 68.86, 0.7)  # 5 -- 386
    pulse(audio, marimba, 70.51, 0.6)  # 11 - 551
    pulse(audio, marimba, 72.02, 0.5)  # 3 -- 702
    pulse(audio, marimba, 73.41, 0.4)  # 13 -- 841
    pulse(audio, marimba, 74.69, 0.3)  # 7 -- 969
    pulse(audio, marimba, 75.88, 0.2)  # 15 -- 1088

    # pulse(audio, marimba, 77.0, 0.1) # 1 -- 0

    audio.write_wav('multiple-tempos')
    print('Done running multiple_tempos.')
def multiple_tempos_refactored():
    """Refactor of multiple_tempos: 23 harmonic-series parts with linearly
    decreasing pulse durations and per-part random-mute thresholds.

    The mix is written to 'multiple-tempos-muting'.
    """
    # Parenthesized print: works on Python 2 and 3 (the original py2-only
    # print statements fail to parse under Python 3).
    print('Running multiple_tempos_refactored...')
    marimba = Marimba()
    audio = Audio(120)

    quarter_duration_in_seconds = 1.2

    n_parts = 23

    lowest_harmonic = 1
    n_harmonics = n_parts

    lowest_midi_note = 36.0

    pitches = get_slice_of_harmonic_series(lowest_harmonic,
                                           n_harmonics,
                                           lowest_midi_note=lowest_midi_note)

    durations = np.linspace(1.5, .2, n_parts)

    # Lower parts are muted less often than higher parts.
    random_mute_threshholds = [
        n / (n_parts + 1) for n in np.linspace(n_parts, 1, n_parts)
    ]

    print(random_mute_threshholds)

    for pitch, duration, random_mute_threshhold in zip(
            pitches, durations, random_mute_threshholds):
        duration *= quarter_duration_in_seconds
        pulse(audio, marimba, pitch, duration, random_mute_threshhold)

    audio.write_wav('multiple-tempos-muting')
    print('Done running multiple_tempos_refactored.')
Ejemplo n.º 6
0
    def __init__(
        self,
        mixture_path: str,
        audio_sample_rate: int,
        sample_length: int,
        stft_n_fft: int,
        stft_win_length: int,
        stft_hop: int,
        device: torch.device = torch.device("cpu")) -> None:
        """Load one mixture file and precompute its STFT amplitude/phase."""
        # Keep the sampling / STFT parameters for later slicing.
        self.sample_length: int = sample_length
        self.audio_sample_rate: int = audio_sample_rate
        self.stft_n_fft: int = stft_n_fft
        self.stft_win_length = stft_win_length
        self.stft_hop: int = stft_hop

        # Number of STFT frames the full waveform will yield.
        wave_len = Audio.calc_waveform_length(mixture_path, audio_sample_rate)
        self.samples_count = Audio.calc_stft_length(wave_len, stft_hop)

        # Load audio onto the requested device, then split into magnitude
        # and phase components.
        self.waveform = Audio.load(
            mixture_path, sample_rate=audio_sample_rate, device=device)
        self.amplitude, self.phase = Audio.compute_stft(
            self.waveform, self.stft_n_fft, self.stft_win_length,
            self.stft_hop)
Ejemplo n.º 7
0
def main():
    """Entry point: show the intro, start the music, and run the menu tree."""

    # pygame setup
    pygame.display.set_caption("Breakout")

    clock = pygame.time.Clock()
    audio = Audio()

    play_intro_sequence(clock, audio)

    audio.play_music_main()

    # Options submenu: display-mode switches plus a way back out.
    options = Menu('Options', clock)
    options.add_item('Borderless', callback=lambda: update_mode(pygame.NOFRAME))
    options.add_item('Fullscreen', callback=lambda: update_mode(pygame.FULLSCREEN))
    options.add_item('Windowed', callback=lambda: update_mode(0))
    options.add_item('Back', callback='exit')

    # Main menu: gameplay entries, the level editor, options, and quit.
    main_menu = Menu('Main Menu', clock)
    main_menu.add_item('New Game', callback=lambda: new_level(clock))
    main_menu.add_item('Level Select', callback=lambda: level_select(clock))
    main_menu.add_item('Stage Editor', callback=LevelEditor(clock).activate)
    main_menu.add_item('Options', callback=options.activate)
    main_menu.add_item('Quit', callback='exit')

    # main game loop
    main_menu.activate()
Ejemplo n.º 8
0
 def __init__(self, fifo, lock, **kw):
     """Worker thread that drains a shared fifo into a 1-second byte Audio sink."""
     threading.Thread.__init__(self, **kw)
     # Shared queue and the lock that guards it.
     self.fifo = fifo
     self.lock = lock
     # Byte-oriented output buffer, one second long.
     self.audio = Audio(byte=True, seconds=1)
     self.sleep = 1  # poll interval in seconds
     self.BS = 18    # chunks consumed per batch
Ejemplo n.º 9
0
 def __init__(self):
     """Start with an empty game world: no levels, controllers, or player."""
     self.levels = []
     self.controllers = set()
     self.currentLevel = None
     self.PC = None  # player character, assigned later
     self.audio = Audio()
     self.title = ""
Ejemplo n.º 10
0
    def __init__(self):
        """Create the FTP, audio and camera helpers, then configure GPIO pins."""
        self.ftp = FTPCtrol()
        self.audio = Audio()
        self.camera = Camera()

        self.setup_pins()
Ejemplo n.º 11
0
def audio_extract_api():
    """Flask view: extract a hidden message file from an uploaded container.

    POST: runs the extraction and returns a multipart body carrying both the
    extracted message file and the original container file.  Any other
    method returns a 400 JSON error.
    """
    if request.method == "POST":
        print("Reading data")
        data = request.form.to_dict()
        files = request.files.to_dict()

        # Process
        audio = Audio(data, files, mode='extract')
        audio.extract()

        # Response
        print("Building Response")
        mime = magic.Magic(mime=True)
        # Open the payload files explicitly so both handles can be closed
        # once the multipart body is serialized (the original leaked them).
        message_file = open(audio.output_file_path, 'rb')
        container_file = open(audio.container_file_path, 'rb')
        try:
            m = MultipartEncoder(
                fields={
                    'messageFileName':
                    audio.output_file_name,
                    'messageFileType':
                    mime.from_file(audio.output_file_path),
                    'messageFile': (audio.output_file_name,
                                    message_file,
                                    mime.from_file(audio.output_file_path)),
                    'containerFileName':
                    audio.container_file_name,
                    'containerFilePath':
                    audio.container_file_path,
                    'containerFile': (audio.container_file_name,
                                      container_file,
                                      mime.from_file(audio.container_file_path))
                })
            # to_string() reads both streams fully, so they can be closed after.
            return (m.to_string(), {'Content-Type': m.content_type})
        finally:
            message_file.close()
            container_file.close()

    else:
        return jsonify({'message': "Input file not appropriate"}), 400
def game_telas_manager():
    """Top-level screen router for the game.

    Identifiers are Portuguese: tela = screen, fechar_jogo = close game,
    decisao = decision, voltar = go back.  Runs the menu / difficulty /
    credits / gameplay screens in a loop until one of them asks to quit;
    returns "fechar_jogo" in that case.
    """

    pygame.init()

    # One object per screen; each runs its own loop and returns a decision
    # string consumed by the dispatcher below.
    tela_menu = Menu()
    tela_dificuldade = Dificuldade()
    tela_creditos = Creditos()
    tela_loop = Loop()

    # Game-start jingle and menu background music, both at half volume.
    audio_comeco_jogo = Audio("pickup_3.wav", 0.5)
    audio_menu_fundo = Audio("garotadeipanema.ogg", 0.5)
    audio_menu_fundo.audio.play()

    decisao_menu = "tomar_decisao"
    fechar_jogo = False
    in_loop = True
    while in_loop:
        # The window close button ends the loop regardless of current screen.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                fechar_jogo = True
                in_loop = False

        if decisao_menu == "tomar_decisao":
            # Show the main menu and capture the player's choice.
            decisao_menu = tela_menu.game_menu()

        elif decisao_menu == "fechar_jogo":
            fechar_jogo = True
            in_loop = False

        elif decisao_menu == "menu_iniciar":
            # Start gameplay: play the start sound, stop the menu music.
            audio_comeco_jogo.audio.play()
            audio_menu_fundo.audio.stop()
            retorno_tela_loop = tela_loop.game_loop()
            if retorno_tela_loop == "fechar_jogo":
                fechar_jogo = True
                in_loop = False

        elif decisao_menu == "menu_dificuldade":
            # Difficulty screen: back out, or start at the chosen difficulty
            # (both difficulties route to "menu_iniciar").
            retorno_tela_dificuldade = tela_dificuldade.game_dificuldade()
            if retorno_tela_dificuldade == "fechar_jogo":
                fechar_jogo = True
                in_loop = False
            elif retorno_tela_dificuldade == "voltar":
                decisao_menu = "tomar_decisao"
            elif retorno_tela_dificuldade == "dificuldade_normal":
                decisao_menu = "menu_iniciar"
            elif retorno_tela_dificuldade == "dificuldade_dificil":
                decisao_menu = "menu_iniciar"

        elif decisao_menu == "menu_creditos":
            retorno_tela_creditos = tela_creditos.game_creditos()
            if retorno_tela_creditos == "fechar_jogo":
                fechar_jogo = True
                in_loop = False
            elif retorno_tela_creditos == "voltar":
                decisao_menu = "tomar_decisao"

    if fechar_jogo:
        return "fechar_jogo"
Ejemplo n.º 13
0
Archivo: client.py Proyecto: wezu/a4p
    def __init__(self):
        """Boot the game client: window, render state, managers, and events.

        Construction order matters: the window must exist before the filter
        manager and UI are created, and the light manager before the skybox.
        NOTE(review): 'loadWindoProperites' and 'onTeamCahnge' are misspelled
        method names defined elsewhere in this class — renaming them here
        would break the references.
        """
        log.debug('Starting Client')
        #open a window... but first set all the needed props
        wp = self.loadWindoProperites()
        #open the window
        base.openMainWindow(props=wp)
        #base.setBackgroundColor(0.06, 0.1, 0.12, 1)
        base.setBackgroundColor(0.0, 0.0, 0.0, 1)
        base.disableMouse()
        base.enableParticles()

        #needed to determine what window event fired
        self.window_focused = base.win.getProperties().getForeground()
        self.window_x = base.win.getXSize()
        self.window_y = base.win.getYSize()
        self.window_minimized = base.win.getProperties().getMinimized()

        #filter manager, post process
        self.filters = Filters()

        #audio sound effects (sfx) + music
        self.audio = Audio()
        self.audio.setMusic('background')
        self.audio.playMusic()

        #light manager
        self.lights = LightManager()

        #setup the user interface (gui+key/mouse bind)
        self.ui = UserInterface()

        #skybox
        self.sun_and_sky = Skybox(self.lights)

        #player (character) droid
        self.droid = PCDroid(self.ui)

        #some vars used later
        self.map_name = None
        self.loading_status = set()
        self.level_root = render.attachNewNode('level_root')
        self.level_root.hide()
        self.is_in_game = False

        #events
        base.win.setCloseRequestEvent('exit-event')
        self.accept('exit-event', self.onClientExit)
        self.accept('window-event', self.onWindowEvent)
        self.accept('window-reset', self.onWindowReset)
        self.accept('client-mouselock', self.setMouseLock)
        self.accept('load-level', self.onLevelLoad)
        self.accept('loading-done', self.onLoadingDone)
        self.accept('reload-shaders', self.onShaderReload)
        self.accept('client-set-team', self.onTeamCahnge)
        self.accept('client-quit', self.onQuit)
        # Task
        taskMgr.add(self.update, 'client_update')

        log.debug('Client started')
Ejemplo n.º 14
0
    def __init__(self, *args, **kw):
        """Build the frame, attach the audio helper, and reset feature flags."""
        super(Example, self).__init__(*args, **kw)
        self.a = Audio()
        self.InitUI()

        # Feature toggles, all off until the user enables them.
        self.show_stt = False    # show speech-to-text output
        self.input_made = False  # True once a wav file has been recorded
        self.filter_pro = False  # filter profanity from transcripts
 def setup(self):
     """Name the piece, create the sampler, and allocate the audio buffer
     (piece duration plus 5 seconds of release padding)."""
     self.name = 'AnthonyDouglass'
     # Parenthesized print: valid on Python 2 and 3 (the original py2-only
     # print statement is a SyntaxError under Python 3).
     print('\nRunning {}...'.format(self.name))
     self.marimba = Marimba()
     self.end_padding_seconds = 5
     self.audio = Audio(self.duration_seconds + self.end_padding_seconds)
     # Usable length excludes the padding samples at the end.
     self.len_audio = len(
         self.audio) - (self.audio.sample_rate * self.end_padding_seconds)
Ejemplo n.º 16
0
 def __init__(self, cfg: Config):
     """Wire up paths, audio helper, TensorBoard writer and loss for training."""
     self.cfg = cfg
     self.paths = Paths()
     self.audio = Audio(cfg)
     # Checkpoints live under <ckpt_root>/<config_id>; TensorBoard logs
     # nest inside the checkpoint directory.
     self.ckpt_path = self.paths.ckpt / cfg.config_id
     self.writer = SummaryWriter(log_dir=self.ckpt_path / 'tensorboard',
                                 comment='v1')
     self.criterion = MaskedL1()
Ejemplo n.º 17
0
 def setup(self):
     """Name the piece and allocate audio: duration plus 5 s of tail padding."""
     self.base_name = 'Utah2018'
     self.output_parent_dir = 'output/{}'.format(self.base_name)
     self.name = '{}-{}'.format(self.base_name, self.version)
     # Parenthesized print: valid on Python 2 and 3 (the original py2-only
     # print statement is a SyntaxError under Python 3).
     print('\nRunning {}...'.format(self.name))
     self.marimba = Marimba()
     self.audio = Audio(self.duration_seconds + 5)
     # Trim the 5 s of padding (at 44100 samples/second) from the usable length.
     self.len_audio = len(self.audio) - (44100 * 5)
Ejemplo n.º 18
0
 def __init__(self):
     """Map each known person to the sound clip announced for them."""
     self.audio = Audio()
     # Person name -> notification sound file.
     self.people = {
         'Anthony Blardo': 'downfall.wav',
         'Matt Hardwick': 'fall.wav',
         'Daniel Hawkins': 'media.wav',
         'Allan Davis': 'vader.wav',
         'Tiffanie Robinson': 'girl.wav',
     }
Ejemplo n.º 19
0
	def on_client(self, conn):
		"""Stream FFT magnitude frames to a connected client.

		Records one spectrum per iteration and sends its raw bytes followed
		by an "END" delimiter.  NOTE(review): the loop never breaks, so
		conn.close() below is unreachable — presumably the thread dies when
		the socket send fails.
		"""
		#conn.setblocking(0)
		au = Audio()
		while True:
			Y_abs = au.recordHz()

			send_data = Y_abs.tostring()
			conn.send(send_data + "END")

		conn.close()
Ejemplo n.º 20
0
    def on_client(self, conn):
        """Stream FFT magnitude frames to a connected client.

        Records one spectrum per iteration and sends its raw bytes followed
        by an "END" delimiter.  NOTE(review): the loop never breaks, so
        conn.close() below is unreachable — presumably the thread dies when
        the socket send fails.
        """
        #conn.setblocking(0)
        au = Audio()
        while True:
            Y_abs = au.recordHz()

            send_data = Y_abs.tostring()
            conn.send(send_data + "END")

        conn.close()
Ejemplo n.º 21
0
def main():
    """Run the display on a background daemon thread and play audio on demand.

    NOTE(review): ``ad_flag`` and ``state`` are not defined in this function;
    presumably they are module-level globals updated elsewhere — confirm.
    """
    dp = Display()
    ad = Audio()
    thread = Thread(target=dp.start)
    # Set the daemon flag via attribute: Thread.setDaemon() is deprecated.
    thread.daemon = True
    thread.start()

    while True:
        if ad_flag > 0:
            ad.play(state)
Ejemplo n.º 22
0
	def __init__(self, ipServer, portAudio = 5000, port1 = 5001, port2 = 5002, port3 = 5003, port4 = 5004):
		"""Connect the microphone stream and the four-port video stream to the server."""
		self.ip_server = ipServer
		self.portAudio = portAudio
		self.port1 = port1
		self.port2 = port2
		self.port3 = port3
		self.port4 = port4
		print("starting service")
		# Audio uses its own dedicated port; video fans out over four ports.
		self.mic = Audio(self.ip_server, self.portAudio)
		self.vid = Video(self.ip_server, self.port1, self.port2, self.port3, self.port4)
Ejemplo n.º 23
0
Archivo: main.py Proyecto: fox-ahri/AI
def main():
    """Console chat loop: send input to the TuLing bot, speak the reply aloud.

    The chatbot client and TTS helper are created once, before the loop —
    the original rebuilt both objects on every turn.
    """
    tl = TuLing()
    au = Audio()
    while True:
        info = input('我: ')
        res = tl.get_answer(info)
        # Synthesize first, then echo the text, then play and delete the file.
        filename = au.start(res)
        print(res)
        playsound.playsound(filename, True)
        os.remove(filename)
Ejemplo n.º 24
0
def process_audio(audio_file_path):
    """Re-tag one audio file: guess its genre and write it back if it changed.

    Errors are logged rather than raised so a batch run can continue.
    (A large block of dead, commented-out progress-printing code was removed.)
    """
    try:
        track = Audio(audio_file_path)
        new_genre = guess_genre(track)
        # Nothing to do when the guess matches the existing tag.
        if new_genre == track.get_genre():
            return
        logger.info(
            "{artist} - {album} - {title}: {old_genre} -> {genre}".format(
                album=track.get_album(),
                artist=track.get_artist(),
                genre=new_genre or '[Empty]',
                old_genre=track.get_genre() or '[Empty]',
                title=track.get_title(),
            ), )
        track.set_genre(new_genre)
    except Exception as error:
        logger.error(error)
Ejemplo n.º 25
0
class Loop:
    """A fixed-length recording loop defined by tempo, bar size, and bar count.

    The loop length in seconds is derived from the bpm; recorded channels
    are kept in ``self.channels``.
    """

    def __init__(self, beats_p_minute, beats_p_bar, bars_p_loop):
        """Validate the timing parameters and compute the loop length.

        Raises:
            ValueError: for tempos outside 10-280 bpm, bars of one beat or
                fewer, or a non-positive bar count.
        """
        # Error messages fixed: the original said "to slow" / "to fast".
        if beats_p_minute < 10:
            raise ValueError("bpm is too slow")
        if beats_p_minute > 280:
            raise ValueError("bpm is too fast")
        if beats_p_bar <= 1:
            raise ValueError("Bar must contain more than one beat")
        if bars_p_loop <= 0:
            raise ValueError("You must set at least one bar per loop")

        self.beats_p_minute = beats_p_minute
        self.beats_p_bar = beats_p_bar
        self.bars_p_loop = bars_p_loop

        # Seconds between two clicks (simplified from 1 / (bpm / 60)).
        timeout = 60 / beats_p_minute
        # Length of one loop in seconds.
        self.length = bars_p_loop * beats_p_bar * timeout

        self.channels: Dict = {'main': []}

        self.audio = Audio()

    def play(self, channel: Optional[str]):
        """Play back a channel (not implemented yet)."""
        pass

    def stop(self, channel: Optional[str]):
        """Stop one channel, or every channel when ``channel`` is None."""
        if channel is None:
            for channel in self.channels:
                self.stop(channel)
        elif channel in self.channels:
            # TODO stop playback of a channel
            pass
        else:
            raise ValueError("Channel does not exist")

    def record(self, channel: str):
        """Record ``self.length`` seconds of audio into the named channel."""
        self.channels[channel] = self.audio.record(self.length, channel)

    def record_all(self, filename: str):
        """Records a running session.

        NOTE(review): this calls ``audio.record`` with only a filename while
        ``record`` above passes (length, channel) — confirm the Audio API
        accepts both call shapes.
        """
        self.audio.record(filename)

    def add_channel(self, name: str):
        """Register a new, empty channel."""
        self.channels[name] = []

    class Click:
        """Metronome click placeholder."""

        def __init__(self):
            pass

        def get_audio(self):
            pass
Ejemplo n.º 26
0
    def openFile(self):
        """Let the user pick a video file and (re)wire playback for it.

        Closes any previous audio backend and resets playback state, then
        lazily creates the Video/Audio worker objects and their QThreads on
        first use; later opens just emit the new clips to the existing
        workers via the set_video / set_audio signals.
        """
        fileName, _ = QFileDialog.getOpenFileName(
            self, "Open Video", QDir.homePath(),
            "Video(*.mp4;*.wmv;*.rmvb;*.avi;*.mkv)")

        if fileName != '':

            # Release the old audio clip before replacing it.
            if self.audio_backend is not None:
                self.audio_backend.close()

            if self.state != State.IDLE:

                self.stop.emit()
                self.state = State.IDLE
                self.fitState()
            else:
                # First load: enable the playback control buttons.
                self.pushButton_2.setEnabled(True)
                self.pushButton_3.setEnabled(True)
                self.pushButton_9.setEnabled(True)

            self.video_backend = VideoFileClip(fileName)
            self.audio_backend = self.video_backend.audio
            self.durationChanged(self.video_backend.duration)
            # self.video_backend = clip.resize(width=clip.w//2,height=clip.h//2)
            if self.video is None:

                # First open: create the video worker and move it onto its
                # own thread, wiring all control signals once.
                self.video = Video(self.video_backend)
                self.calthread = QThread()
                self.pushButton_5.clicked.connect(self.video.speed)
                self.video.moveToThread(self.calthread)
                self.video.t_changed.connect(self.positionChanged)
                self.stop.connect(self.video.stop)
                self.pause.connect(self.video.pause)
                self.resume.connect(self.video.resume)
                self.video.finish.connect(self.processFinish)
                self.calthread.started.connect(self.video.work)
                self.pushButton_9.clicked.connect(self.video.tiktok)
                self.set_video.connect(self.video.setClip)
            else:
                self.set_video.emit(self.video_backend)

            if self.audio is None:
                # First open: audio worker gets clip, sample rate, samples
                # per video frame, and channel count.
                self.audioThread = QThread()
                self.audio = Audio(
                    self.audio_backend, self.audio_backend.fps,
                    int(1.0 / self.video_backend.fps * self.audio_backend.fps),
                    2)
                self.audio.moveToThread(self.audioThread)
                self.audioThread.started.connect(self.audio.work)
                self.set_audio.connect(self.audio.setClip)
            else:
                self.set_audio.emit(
                    self.audio_backend, self.audio_backend.fps,
                    int(1.0 / self.video_backend.fps * self.audio_backend.fps),
                    2)
Ejemplo n.º 27
0
 def move_right(self):
     """Try to move the player one cell right; return a feedback Audio cue."""
     print("You tried to move right")
     allowed = self.maze.can_move(self.player.x_coordinate,
                                  self.player.y_coordinate, 'right')
     if not allowed:
         print("You failed")
         return Audio('hiting a wall')
     print("You moved right")
     self.player.move_right()
     self.print_player_status()
     return Audio('footsteps')
Ejemplo n.º 28
0
def process_audio(audio_file_path):
    """Record the directory of any track whose bitrate is below the threshold.

    Each poor-quality directory is printed once; errors are printed and
    swallowed so a batch scan keeps going.
    """
    try:
        track = Audio(audio_file_path)
        if track.get_bitrate() >= BITRATE_THRESHOLD:
            return
        # Directory path relative to the library root (drops 4 leading parts).
        directory_name = '/'.join(audio_file_path.split('/')[4:-1])
        if directory_name not in dirs_with_poor_audio:
            print(directory_name)
            dirs_with_poor_audio.add(directory_name)

    except Exception as error:
        print(error)
Ejemplo n.º 29
0
    def resetToDefault(self):
        """Recreate the A/V backends, refresh device lists, and reset the
        record button to its disabled 'off' state."""
        self.video = Video()
        self.audio = Audio()

        self.populateVideoComboBox()
        self.populateAudioComboBox()

        off_icon = os.path.join(scriptDir, 'rec_btn_off.svg')
        self.recordPushButton.setIcon(QtGui.QIcon(off_icon))
        self.recordPushButton.setChecked(False)
        self.recordPushButton.setEnabled(False)
Ejemplo n.º 30
0
 def get_samples_from_waveform(waveform: Tensor,
                               sample_length: int,
                               stft_n_fft: int,
                               stft_win_length: int,
                               stft_hop: int,
                               shuffle: bool = False,
                               seed: Any = None) -> Generator:
     """Turn a raw waveform into a generator of fixed-length spectrogram samples."""
     # STFT amplitude -> dB spectrogram -> sliced fixed-length windows.
     amplitude = Audio.compute_stft(waveform, stft_n_fft, stft_win_length,
                                    stft_hop)[0]
     spectrogram = Audio.get_spectrogram(amplitude, top_db=80)
     return IterableAudioDataset.get_samples_from_spectrogram(
         spectrogram, sample_length, shuffle=shuffle, seed=seed)
Ejemplo n.º 31
0
class Speech(object):
    """Text-to-speech with on-disk caching backed by AWS Polly.

    Synthesized phrases are written as <sha1>.mp3 inside the audio folder so
    a repeated sentence is played from disk instead of re-synthesized.
    """

    _YOUR_AWS_KEY = "" # Enter your aws key here
    _YOUR_AWS_SECRET = "" # Enter your aws secret here

    def __init__(self, audioFolder):
        """Index existing .ogg files by base name and create the Polly client."""
        self._audio = Audio()
        self._audioFolder = audioFolder
        audioFilePathList = FileSearch.collectFilesEndsWithNameRecursively(".ogg", audioFolder)

        self._cache = {}
        for path in audioFilePathList:
            fileName = path.split("/")[-1].split(".")[0]
            self._cache[fileName] = path

        self._pollyClient = boto3.Session(
                        aws_access_key_id=self._YOUR_AWS_KEY,
                        aws_secret_access_key=self._YOUR_AWS_SECRET,
                        region_name='ap-southeast-2').client('polly')

    def speak(self, text):
        """Play ``text``, synthesizing and caching it first when unseen.

        Silently does nothing when AWS credentials are not configured.
        """
        if len(self._YOUR_AWS_KEY) == 0 or len(self._YOUR_AWS_SECRET) == 0:
            return

        # Cache key is the SHA-1 of the exact text.  Renamed from `hash`,
        # which shadowed the builtin.
        digest = hashlib.sha1(text.encode()).hexdigest()

        if digest in self._cache:
            audioFilePath = self._cache[digest]
        else:
            audioFilePath = self._tts(text, digest)

        self._cache[digest] = audioFilePath

        self._audio.play(audioFilePath)


    def isSpeaking(self):
        """True while playback is in progress."""
        return self._audio.isPlaying()

    def isAfterSpeaking(self):
        """True once playback has finished."""
        return self._audio.isAfterPlaying()

    def _tts(self, text, digest):
        """Synthesize ``text`` with Polly and write it to <digest>.mp3."""
        response = self._pollyClient.synthesize_speech(VoiceId='Ivy',
                        OutputFormat='mp3',
                        Text = text)
        filePath = os.path.join(self._audioFolder, "{0}.mp3".format(digest))
        # `with` closes the handle deterministically (the original left it
        # to the garbage collector).
        with open(filePath, 'wb') as audioFile:
            audioFile.write(response['AudioStream'].read())

        return filePath
Ejemplo n.º 32
0
def init():
    """Play a test sound, count six seconds on the main thread to show the
    audio runs on a separate thread, then shut the controller down."""
    # Initiate the audio controller class, and play a test sound.  (The
    # original mixed a tab-indented comment with space-indented code, which
    # is a TabError on Python 3.)
    s = Audio()
    s.play_test_sound()

    # The counter demonstrates that audio does not block the main thread.
    counter = 0
    while counter < 6:
        # Parenthesized print: valid on Python 2 and 3.
        print(counter)
        counter += 1
        time.sleep(1)

    s.kill()
Ejemplo n.º 33
0
    def __init__(self, network, parent=None):
        """Build the player main window: track model, UI wiring, audio signals.

        NOTE(review): ``parent`` is accepted but not forwarded to the
        superclass constructor — confirm whether that is intentional.
        """
        super(MainWindow, self).__init__()

        self.logger = logging.getLogger('MainWindow')

        # Track list model; the network server is optional.
        self.tracklist_model = TracklistModel()
        if network:
            self.network = network.server

        # Generated Qt UI plus signal wiring for list and transport buttons.
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.trackListView.setModel(self.tracklist_model)
        self.ui.trackListView.doubleClicked.connect(self.listDoubleClicked)
        self.ui.trackListView.clicked.connect(self.listClicked)
        self.ui.searchBtn.clicked.connect(self.searchBtnClick)
        self.ui.playBtn.clicked.connect(self.playBtnClick)
        self.ui.stopBtn.clicked.connect(self.stopBtnClick)
        self.ui.pauseBtn.clicked.connect(self.pauseBtnClick)
        self.setWindowTitle(u'Сетевой Проигрыватель')

        # Playback state and audio backend signals (tick/state/finished),
        # with the seek and volume sliders bound to the audio objects.
        self.current_stream_id = None
        self.current_track = None
        self.audio = Audio()
        self.audio.tick.connect(self.tick)
        self.audio.state_changed.connect(self.stateChanged)
        self.audio.finished.connect(self.playingFinished)
        self.ui.seekSlider.setMediaObject(self.audio.get_media_object())
        self.ui.volumeSlider.setAudioOutput(self.audio.get_audio_output())

        # Initial button state: only Play is available.
        self.ui.pauseBtn.setEnabled(False)
        self.ui.playBtn.setEnabled(True)
        self.ui.stopBtn.setEnabled(False)
Ejemplo n.º 34
0
def run_server():
    """Boot the robot services and run the web app; clean up hardware on exit.

    Populates the module-level ``bot``/``cam``/``motion``/``audio`` singletons.
    NOTE(review): if construction raises before ``cam``/``bot`` are ever
    assigned, the ``finally`` block would hit a NameError — confirm these
    globals are pre-initialized elsewhere in the module.
    """
    global bot
    global cam
    global motion
    global audio
    try:
        try:
            app.bot_config = Config.read()
            # Motor mode and trim come from the saved configuration.
            bot = CoderBot.get_instance(
                servo=(app.bot_config.get("move_motor_mode") == "servo"),
                motor_trim_factor=float(app.bot_config.get("move_motor_trim", 1.0)),
            )
            audio = Audio.get_instance()
            audio.say(app.bot_config.get("sound_start"))
            # Camera/motion are optional: boot continues without them.
            try:
                cam = Camera.get_instance()
                motion = Motion.get_instance()
            except picamera.exc.PiCameraError:
                logging.error("Camera not present")

            # Optionally auto-run a saved program at startup.
            if app.bot_config.get("load_at_start") and len(app.bot_config.get("load_at_start")):
                app.prog = app.prog_engine.load(app.bot_config.get("load_at_start"))
                app.prog.execute()
        except ValueError as e:
            app.bot_config = {}
            logging.error(e)

        bot.set_callback(PIN_PUSHBUTTON, button_pushed, 100)
        app.run(host="0.0.0.0", port=8080, debug=True, use_reloader=False, threaded=True)
    finally:
        # Release hardware even if the web app crashes.
        if cam:
            cam.exit()
        if bot:
            bot.exit()
        app.shutdown_requested = True
Ejemplo n.º 35
0
 def __init__(self,fifo,lock,**kw):
     """Thread that drains the shared fifo and plays it as 1-second byte audio."""
     threading.Thread.__init__(self, **kw)
     # Shared queue and its guarding lock.
     self.fifo = fifo
     self.lock = lock
     self.audio = Audio(byte=True, seconds=1)
     self.sleep = 1  # poll interval (seconds)
     self.BS = 18    # chunks per playback batch
Ejemplo n.º 36
0
Archivo: client.py Proyecto: wezu/a4p
    def __init__(self):
        """Boot the game client: window, render state, managers, and events.

        Construction order matters: the window must exist before the filter
        manager and UI, and the light manager before the skybox.
        NOTE(review): 'loadWindoProperites' and 'onTeamCahnge' are misspelled
        method names defined elsewhere in this class — renaming them here
        would break the references.
        """
        log.debug('Starting Client')
        #open a window... but first set all the needed props
        wp=self.loadWindoProperites()
        #open the window
        base.openMainWindow(props = wp)
        #base.setBackgroundColor(0.06, 0.1, 0.12, 1)
        base.setBackgroundColor(0.0, 0.0, 0.0, 1)
        base.disableMouse()
        base.enableParticles()

        #needed to determine what window event fired
        self.window_focused=base.win.getProperties().getForeground()
        self.window_x=base.win.getXSize()
        self.window_y=base.win.getYSize()
        self.window_minimized=base.win.getProperties().getMinimized()

        #filter manager, post process
        self.filters=Filters()

        #audio sound effects (sfx) + music
        self.audio=Audio()
        self.audio.setMusic('background')
        self.audio.playMusic()

        #light manager
        self.lights=LightManager()

        #setup the user interface (gui+key/mouse bind)
        self.ui=UserInterface()

        #skybox
        self.sun_and_sky=Skybox(self.lights)

        #player (character) droid
        self.droid=PCDroid(self.ui)

        #some vars used later
        self.map_name=None
        self.loading_status=set()
        self.level_root=render.attachNewNode('level_root')
        self.level_root.hide()
        self.is_in_game=False

        #events
        base.win.setCloseRequestEvent('exit-event')
        self.accept('exit-event',self.onClientExit)
        self.accept( 'window-event', self.onWindowEvent)
        self.accept( 'window-reset', self.onWindowReset)
        self.accept( 'client-mouselock', self.setMouseLock)
        self.accept( 'load-level', self.onLevelLoad)
        self.accept( 'loading-done', self.onLoadingDone)
        self.accept( 'reload-shaders', self.onShaderReload)
        self.accept( 'client-set-team', self.onTeamCahnge)
        self.accept( 'client-quit', self.onQuit)
        # Task
        taskMgr.add(self.update, 'client_update')

        log.debug('Client started')
Ejemplo n.º 37
0
class StreamParser(Vad):
    """Voice-activity stream parser: records from the microphone on a daemon
    thread and feeds the captured chunks into the Vad base class."""

    def __init__(self):
        """Create separate Audio handles for capture and playback."""
        self.record = Audio()
        self.active = False
        self.play = Audio()
        Vad.__init__(self)

    def open_mic(self):
        """Start recording in a background daemon thread."""
        # Parenthesized print: valid on Python 2 and 3 (the original py2-only
        # print statements fail to parse under Python 3).
        print("start recording")
        t = threading.Thread(target=self.mic_record)
        # Daemon flag set via attribute: Thread.setDaemon() is deprecated.
        t.daemon = True
        t.start()

    def mic_record(self):
        """Pump microphone chunks into the VAD until close_mic() clears
        ``self.active``."""
        self.record.record_stream_start()
        self.active = True
        print("The microphone has opened")
        while self.active:
            data = self.record.record_read()
            self.add(data)
        self.record.record_stream_end()
        print("exit mic")

    def close_mic(self):
        """Signal the recording loop to stop after its current read."""
        print("stop recording")
        if self.record:
            print(self.active)
            self.active = False

    def play_stream(self, data):
        """Play raw audio data through the playback handle."""
        self.play.play_stream(data)
Ejemplo n.º 38
0
 def __init__(self):
     """Map each known person to the WAV clip announced for them."""
     self.audio = Audio()
     # Person name -> notification sound file.
     self.people = {
         'Anthony Blardo': 'downfall.wav',
         'Matt Hardwick': 'fall.wav',
         'Daniel Hawkins': 'media.wav',
         'Allan Davis': 'vader.wav',
         'Tiffanie Robinson': 'girl.wav',
     }
class Scene(object):
    """One scene of an ffmpeg composition: paired video and audio tracks plus
    helpers that emit the command-line fragments needed to render it."""

    def __init__(self, scenecfg, basepath='./', length=1):
        """Build the video/audio track objects from a scene config dict."""
        self.videofiles = []
        self.video = Video(scenecfg['video'], basepath, length)
        self.audio = Audio(scenecfg['sound'], basepath, self.video)
        self.length = scenecfg['video']['length']

    def __len__(self):
        # BUG FIX: the original returned len(self.files), but no such
        # attribute exists (AttributeError).  NOTE(review): confirm whether
        # the intent was self.videofiles (set in __init__) or
        # self.video.files (used in generate_complex_video).
        return len(self.videofiles)

    def get_video_input_list(self, fixedlength=1):
        """ffmpeg -i arguments for the video inputs."""
        return self.video.get_input_list(fixedlength)

    def get_audio_input_list(self):
        """ffmpeg -i arguments for the audio inputs."""
        return self.audio.get_input_list()

    def generate_complex_video(self, scr):
        """Build the -filter_complex clause: a black base canvas sized to the
        screen plus the video and audio layer chains."""
        cofilter = []
        cofilter.append(' -filter_complex ')
        #generate total screen
        cofilter.append('"color=c=black:size={0}x{1} [base]'.format(
            scr.get_total_width(), scr.get_total_height()))

        v, videocount = self.video.get_video_layers(scr)
        cofilter += v
        cofilter += self.audio.get_audio_layers(self.video.files)
        cofilter.append(' " ')
        return cofilter


    def get_finish_lines(self):
        """Trailing ffmpeg options: finish/distort lines, duration handling,
        and the x264 encoder settings."""
        finishlines = []
        finishlines.append(self.audio.get_finish_line())
        # 'shortest' stops at the shortest input; otherwise use a fixed time.
        if self.length == 'shortest':
            finishlines.append(' -shortest')
        else:
            finishlines.append(' -t ' + str(self.length))

        finishlines.append(self.audio.get_distort_line())
        finishlines.append(self.video.get_distort_line())

        finishlines.append(' -c:v libx264 -threads 4')

        return finishlines
Ejemplo n.º 40
0
    def resetToDefault(self):
        """Reset capture state: fresh Video/Audio backends, repopulated device
        combo boxes, record button switched off and disabled."""
        self.video = Video()
        self.audio = Audio()

        self.populateVideoComboBox()
        self.populateAudioComboBox()

        icon_path = os.path.join(scriptDir, 'rec_btn_off.svg')
        self.recordPushButton.setIcon(QtGui.QIcon(icon_path))
        self.recordPushButton.setChecked(False)
        self.recordPushButton.setEnabled(False)
Ejemplo n.º 41
0
    def __init__(self):
        """Construct the processing pipeline, creating each component only
        when the configuration enables a feature that needs it."""
        # config
        self.config_provider = ConfigProvider()
        cfg = self.config_provider

        # disk
        self.disk = Disk()

        # display: needed by the edit, effects, audio and screen features
        self.display = None
        if (cfg.edit_enabled or cfg.effects_enabled or
                cfg.audio_enabled or cfg.screen_enabled):
            self.display = Display()

        # replay: needed by the edit, audio and screen features
        self.replay = None
        if cfg.edit_enabled or cfg.audio_enabled or cfg.screen_enabled:
            self.replay = Replay()

        # graphics: needed by the effects, audio and screen features
        self.graphics = None
        if cfg.effects_enabled or cfg.audio_enabled or cfg.screen_enabled:
            self.graphics = Graphics()

        # record
        self.record = None
        if cfg.record_enabled:
            self.record = Record(cfg, self.disk)

        # edit
        self.edit = None
        if cfg.edit_enabled:
            self.edit = Edit(cfg, self.disk, self.display, self.replay)

        # effects
        self.effects = None
        if cfg.effects_enabled:
            self.effects = Effects(cfg, self.disk, self.display, self.graphics)

        # audio
        self.audio = None
        if cfg.audio_enabled:
            self.audio = Audio(cfg, self.disk, self.display, self.replay,
                               self.graphics)

        # screen
        self.screen = None
        if cfg.screen_enabled:
            self.screen = Screen(cfg, self.disk, self.display, self.replay,
                                 self.graphics)

        # frame number
        self.frame_number = 0
Ejemplo n.º 42
0
 def __init__(self, origin, duration=None, get_audio=True, path="/tmp"):
     """Wrap a video file: probe its geometry/duration, optionally pull audio.

     NOTE(review): when ``get_audio`` is falsy, ``self.audio`` is never set;
     ``heigth`` keeps the original (misspelled) attribute name for callers.
     """
     self.assets = {}
     self.path = origin
     self.temp = path
     if duration is not None:
         self.duration = duration
     else:
         # Probe the file for width, height and length.
         self.width, self.heigth, self.duration = self._get_video_info()
     # The freeze window defaults to the whole clip.
     self.freeze_in_time = 0
     self.freeze_out_time = self.duration
     if get_audio is True:
         self.audio = Audio(self._get_audio_from_video(), path=self.temp)
Ejemplo n.º 43
0
class Client2(threading.Thread):
    """Worker thread that drains a shared FIFO in fixed-size batches and
    hands each batch to the audio output."""

    def __init__(self, fifo, lock, **kw):
        threading.Thread.__init__(self, **kw)
        self.fifo = fifo
        self.lock = lock
        self.audio = Audio(byte=True, seconds=1)
        self.sleep = 1
        self.BS = 18

    def run(self):
        while True:
            chunk = []
            # Only drain when a full batch is available; the lock guards
            # concurrent producers appending to the FIFO.
            with self.lock:
                if len(self.fifo) >= self.BS:
                    for _ in range(self.BS):
                        chunk += self.fifo.pop(0)
            if len(chunk):
                self.audio.out_chunck(chunk)
Ejemplo n.º 44
0
 def set_track(self, file):
     """Load *file* into the Audio backend and run the external beat
     analyser, whose first stdout line names the next track to queue."""
     self.current_time = 0
     self.file = file
     self.filename = file.split("/")[-1]
     self.audio = Audio(file)
     #cmd = ['python', 'data_analizer_routine.py', self.directory, file, str(self.audio.real_duration)]
     cmd = ['java', '-Xmx2g', '-d64', 'data_analizer_routine', self.directory, file, str(self.audio.real_duration)]
     # 'locked' guards against launching two analyses concurrently.
     self.locked = True
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
     # Blocks until the analyser finishes.
     p.wait()
     self.locked = False
     data = p.communicate()
     # First stdout line is the next track's path.
     self.new_track = data[0].split("\n")[0]
     self.filedata = []
Ejemplo n.º 45
0
    def __init__(self, drive_autonomous=False, loop=None):
        """Initialise Robot: bring up the Firmata board and every attached
        peripheral, then choose the event loop to run on."""
        self.board = PyMata3()

        # Peripherals all share the one board connection.
        self.dc_motors = DcMotors(self.board)
        self.sonar = Sonar(self.board)
        self.controller = RemoteControl()
        self.audio = Audio()

        # Fall back to the board's own event loop when none is supplied.
        self.loop = loop if loop else self.board.loop
        self.drive_autonomous = drive_autonomous
Ejemplo n.º 46
0
def main():
    """Wire up GPIO, audio and the APRS websocket, then block in the
    websocket loop until it exits; tears everything down afterwards."""
    # NOTE(review): this guard makes main() a no-op when the module is
    # imported rather than run as a script — confirm that is intended.
    if __name__ == "__main__":
        #GPIO part
        GPIO.init(radio_on_cb=radio_cor_on_callback,radio_off_cb=radio_cor_off_callback)

        #Audio part
        global audio
        player = config_get_audio_player() #Could be None/aplay/sox
        audio = Audio(player=player,record_callback=recorder_callback)
        audio.open()

        global WS_DEBUG, ws
        if WS_DEBUG:
            websocket.enableTrace(True)
        else:
            websocket.enableTrace(False)
        callsign = config_get_callsign()
        print "CALLSIGN: %s" % callsign
        # Subscribe to the talk channel under our own callsign.
        url = "ws://aprs.hamclub.net:20880/mtracker/talkbox0?type=sub&udid=" + callsign
        ws = websocket.WebSocketApp(url,
        #ws = websocket.WebSocketApp("ws://localhost:8080/mclub/talk0?type=sub&udid=bg5hxe",
                                   #on_message = ws_on_message,
                                    on_data = ws_on_data,
                                    on_error = ws_on_error,
                                    on_close = ws_on_close)
        ws.on_open = ws_on_open

        # Keep-alive pings run on a separate timer thread.
        stopFlag = Event()
        timer = KeepAliveThread(stopFlag,ws)
        timer.start()

        # Blocks here until the websocket closes.
        ws.run_forever()
        stopFlag.set() #stop the timer

        audio.close()
        GPIO.cleanup()
Ejemplo n.º 47
0
    def __init__(self):
        gobject.GObject.__init__(self)

        # Players and scoring.
        self.myself = None
        self.current_player = None
        self.players = []
        self.waiting_players = []
        self.players_score = {}
        self.messenger = None

        # Card / turn state.
        self.last_flipped = -1
        self.last_highlight = 1
        self._flop_card_timeout = -1
        self._flop_cards = None
        self.flip_block = False
        self.sentitive = True

        self.model = Model()
        self.audio = Audio()
Ejemplo n.º 48
0
class Recognize(object):
    """Identifies a face via the remote recognition API and plays the
    walk-out track associated with the recognised person."""

    def __init__(self):
        self.audio = Audio()
        # Known names mapped to their walk-out tracks.
        self.people = {
            'Anthony Blardo': 'downfall.wav',
            'Matt Hardwick': 'fall.wav',
            'Daniel Hawkins': 'media.wav',
            'Allan Davis': 'vader.wav',
            'Tiffanie Robinson': 'girl.wav',
        }

    def is_person(self, image_file):
        path = 'media/image/face.png'
        image_file.save(path)

        face_image = File(path)
        api = API(API_KEY, API_SECRET, SERVER)

        print("Sending face to api")
        response = api.recognition.identify(group_name='Hackathoners', img=face_image)
        print(response)

        faces = response['face']
        if not faces:
            return

        # Best candidate for the first detected face.
        name = faces[0]['candidate'][0]['person_name']
        if name in self.people:
            print("Found " + name)
            self.audio.add_to_playlist(self.people[name])
            self.audio.play()
        else:
            self.audio.add_to_playlist('run.wav')
            self.audio.play()
            print("Did not match a person.")
Ejemplo n.º 49
0
	def __init__(self, **kwargs):
		"""Initialize the keycode bindings and all controllers, create output dir.

		Keyword args override the per-controller default settings dicts below;
		each controller is then constructed from its (possibly overridden)
		settings.
		"""
		settings = {
			'experimentName' : 'someNameHere',
			'outputRootDir' : os.path.normpath(os.path.expanduser("~/kauferdata")),
			'keyBindings' : {
				'quit' : 'q',
				'toggleStimulator' : 't',
				'triggerStimulator' : 's'
				},
			'audio' : {},
			'gui' : {},
			'stimulator' : { 'activeProtocolName' : 'nucleusAccumbensExample' },
			'tracker' : {},
			'videoIn' : {},
			'videoOut' : {},
			'writer' : {},
			}
		settings.update(kwargs)
		trialDir = "{0}_{1}_{2}".format(time.strftime("%y%m%d%H%M%S"),
				settings['experimentName'], settings['stimulator']['activeProtocolName'])
		settings['outputDataDir'] = os.path.join(settings['outputRootDir'], trialDir)
		self.keyBindings = settings['keyBindings']
		self.keycodeBindings = {k: getattr(opencvgui.keycodes, v)
				for k,v in self.keyBindings.items() }
		os.makedirs(os.path.join(settings['outputDataDir'], 'audio'))
		self.writeExperimentData(settings)
		# These controllers must know where to write their output.  Set it on
		# the dict each controller actually receives (settings[x]); the old
		# code mutated kwargs[x] instead, which lost the value whenever the
		# caller had not supplied that key, and used the Python-2-only
		# dict.has_key().
		for x in ['audio', 'videoOut', 'writer']:
			settings[x]['outputDataDir'] = settings['outputDataDir']

		# gui and writer both need a reference to the app instance because they
		# draw on the state of so many other controllers
		self.audio = Audio(**settings['audio'])
		self.gui = Gui(self, **settings['gui'])
		self.stimulator = Stimulator(**settings['stimulator'])
		self.tracker = Tracker(**settings['tracker'])
		self.videoIn = VideoIn(**settings['videoIn'])
		self.videoOut = VideoOut(**settings['videoOut'])
		self.writer = Writer(self, **settings['writer'])
Ejemplo n.º 50
0
def run_server():
    """Boot the CoderBot hardware singletons and run the Flask app.

    Configuration errors are logged and the server still starts; the camera
    and robot are shut down in the ``finally`` block regardless of how the
    server exits.
    """
    bot = None
    cam = None
    try:
        try:
            app.bot_config = Config.read()
            bot = CoderBot.get_instance(servo=(app.bot_config.get("move_motor_mode") == "servo"),
                                        motor_trim_factor=float(app.bot_config.get('move_motor_trim', 1.0)))
            audio = Audio.get_instance()
            audio.say(app.bot_config.get("sound_start"))
            try:
                cam = Camera.get_instance()
                Motion.get_instance()
            except picamera.exc.PiCameraError:
                logging.error("Camera not present")

            CNNManager.get_instance()
            EventManager.get_instance("coderbot")

            # Fixed: the original tested get('load_at_start') twice with 'and'
            # (a duplicated condition); one lookup is enough.
            startup_prog = app.bot_config.get('load_at_start')
            if startup_prog:
                app.prog = app.prog_engine.load(startup_prog)
                app.prog.execute()
        except ValueError as e:
            app.bot_config = {}
            logging.error(e)

        # bot is still None when CoderBot initialisation failed above; skip
        # the callback instead of raising AttributeError.
        if bot:
            bot.set_callback(PIN_PUSHBUTTON, button_pushed, 100)

        remove_doreset_file()

        app.run(host="0.0.0.0", port=5000, debug=True, use_reloader=False, threaded=True)
    finally:
        if cam:
            cam.exit()
        if bot:
            bot.exit()
        app.shutdown_requested = True
Ejemplo n.º 51
0
    def __init__(self):
        """
        Constructor.

        Delegates entirely to the Audio base class; this subclass adds no
        state of its own here.
        """
        Audio.__init__(self)
Ejemplo n.º 52
0
class MemorizeGame(GObject.GObject):
    """Game-state controller for the Memorize activity.

    Tracks players, scores, turn order and the card grid, and talks to the
    UI and collaboration layers exclusively through the GObject signals
    declared in ``__gsignals__``.
    """

    __gsignals__ = {
        'reset_scoreboard': (GObject.SignalFlags.RUN_FIRST, None, []),
        'reset_table': (GObject.SignalFlags.RUN_FIRST, None, []),
        'load_mode': (GObject.SignalFlags.RUN_FIRST, None,
                      [GObject.TYPE_PYOBJECT]),
        'load_game': (GObject.SignalFlags.RUN_FIRST, None,
                      2 * [GObject.TYPE_PYOBJECT]),
        'change_game': (GObject.SignalFlags.RUN_FIRST, None,
                        2 * [GObject.TYPE_PYOBJECT]),
        'change_game_signal': (GObject.SignalFlags.RUN_FIRST, None,
                               5 * [GObject.TYPE_PYOBJECT]),
        'set-border': (GObject.SignalFlags.RUN_FIRST, None,
                       3 * [GObject.TYPE_PYOBJECT]),
        'flip-card': (GObject.SignalFlags.RUN_FIRST, None, [int, bool]),
        'flip-card-signal': (GObject.SignalFlags.RUN_FIRST, None, [int]),
        'cement-card': (GObject.SignalFlags.RUN_FIRST, None, [int]),
        'flop-card': (GObject.SignalFlags.RUN_FIRST, None, [int]),
        'highlight-card': (GObject.SignalFlags.RUN_FIRST, None,
                           2 * [GObject.TYPE_PYOBJECT]),
        'add_buddy': (GObject.SignalFlags.RUN_FIRST, None,
                      2 * [GObject.TYPE_PYOBJECT]),
        'rem_buddy': (GObject.SignalFlags.RUN_FIRST, None,
                      [GObject.TYPE_PYOBJECT]),
        'increase-score': (GObject.SignalFlags.RUN_FIRST, None,
                           [GObject.TYPE_PYOBJECT]),
        'wait_mode_buddy': (GObject.SignalFlags.RUN_FIRST, None,
                            2 * [GObject.TYPE_PYOBJECT]),
        'msg_buddy': (GObject.SignalFlags.RUN_FIRST, None,
                      2 * [GObject.TYPE_PYOBJECT]),
        'change-turn': (GObject.SignalFlags.RUN_FIRST, None,
                        [GObject.TYPE_PYOBJECT]), }

    def __init__(self):
        GObject.GObject.__init__(self)
        self.myself = None
        self.players_score = {}
        self.players = []
        self.waiting_players = []
        self.current_player = None
        self.last_flipped = -1
        self.last_highlight = 1
        self._flop_card_timeout = -1
        self.messenger = None
        # Historical misspelling of 'sensitive'; kept because other code
        # reads the attribute under this name.
        self.sentitive = True

        self.model = Model()
        self.flip_block = False
        self._flop_cards = None

        self.audio = Audio()
        self._audio_play_finished_id = 0

    def load_game(self, game_name, size, mode):
        """Read a local game definition and emit 'load_game' on success."""
        self.set_load_mode('Loading game')
        if self.model.read(game_name) == 0:
            logging.debug('load_game set is_demo mode %s', mode)
            self.model.is_demo = (mode == 'demo')
            self.model.def_grid(size)
            self.model.data['running'] = 'False'
            self.model.data['mode'] = mode
            logging.debug(' Read setup file %r: %r ',
                          game_name, self.model.grid)
            self.emit('load_game', self.model.data, self.model.grid)
        else:
            logging.error(' Reading setup file %s', game_name)

    def load_remote(self, grid, data, mode, signal=False):
        """Install a grid/data received from a peer and restart the round."""
        self.set_load_mode(_('Loading game...'))
        self.model.grid = grid
        self.model.data = data
        self.model.data['mode'] = mode
        self.emit('reset_scoreboard')
        if not signal:
            self.emit('change_game_signal', mode, self.get_grid(),
                      self.model.data, self.waiting_players,
                      self.model.data['game_file'])
        self.emit('change_game', self.model.data, self.get_grid())
        for buddy in self.players:
            self.players_score[buddy] = 0
        self.current_player = None
        self.last_flipped = -1
        self.last_highlight = 1
        self.change_turn()
        self.model.data['running'] = 'False'

        # Re-apply persisted card states: '1' = face up, a colour pair =
        # cemented (matched) by the player with that colour.
        for card in self.model.grid:
            if card['state'] == '1':
                self.emit('flip-card', self.model.grid.index(card), False)
                self.last_flipped = self.model.grid.index(card)
            elif card['state'] != '0':
                stroke_color, fill_color = card['state'].split(',')
                self.emit('flip-card', self.model.grid.index(card), False)
                self.emit('set-border', self.model.grid.index(card),
                          stroke_color, fill_color)
        logging.debug('load_remote set is_demo mode %s', mode)
        if mode != 'reset':
            self.model.is_demo = (mode == 'demo')

    def add_buddy(self, buddy, score=0):
        """Register a player, keeping the roster sorted by nick."""
        logging.debug('Buddy %r was added to game', buddy.props.nick)
        self.players.append(buddy)
        # Fixed: the original used sort(lambda a, b: cmp(...)), which is
        # Python-2-only; a key function is equivalent and portable.
        self.players.sort(key=lambda player: player.props.nick)
        self.players_score[buddy] = score
        self.emit('add_buddy', buddy, score)
        logging.debug(str(buddy))

        if self.current_player is None:
            self.current_player = buddy
            self.change_turn()

    def rem_buddy(self, buddy):
        """Remove a player; flop their pending card and pass the turn."""
        logging.debug('Buddy %r was removed from game', buddy.props.nick)
        if self.current_player == buddy and len(self.players) >= 2:
            if self.last_flipped != -1:
                self.emit('flop-card', self.last_flipped)
                self.model.grid[self.last_flipped]['state'] = '0'
                self.last_flipped = -1
            self.change_turn()
        index = self.players.index(buddy)
        del self.players[index]
        del self.players_score[buddy]
        self.emit('rem_buddy', buddy)

    def buddy_message(self, buddy, text):
        self.emit('msg_buddy', buddy, text)

    def update_turn(self):
        self.set_sensitive(self.current_player == self.myself)
        self.emit('change-turn', self.current_player)

    def change_turn(self):
        """Advance current_player round-robin through the roster."""
        if len(self.players) <= 1:
            self.current_player = self.players[0]
        if self.current_player is None:
            self.current_player = self.players[0]
        elif self.current_player == self.players[-1]:
            self.current_player = self.players[0]
        else:
            next_player = self.players.index(self.current_player) + 1
            self.current_player = self.players[next_player]
        self.update_turn()

    def card_flipped(self, widget, identifier, signal=False):
        """Handle a card flip: first flip is remembered, second resolves a
        match (cement) or mismatch (timed flop back)."""
        self.model.count = self.model.count + 1

        # Check if is my turn
        if (not self.sentitive and not signal) or \
                self.last_flipped == identifier:
            return

        # Handle groups if needed
        if self.model.data.get('divided') == '1':
            if self.last_flipped == -1 and identifier \
                    >= (len(self.model.grid) / 2):
                return
            if self.last_flipped != -1 and identifier \
                    < (len(self.model.grid) / 2):
                return

        # do not process flips when flipping back
        if self.flip_block:
            return
        else:
            self.flip_block = True

        self.model.data['running'] = 'True'

        def flip_card(full_animation):
            self.emit('flip-card', identifier, full_animation)
            if not signal:
                self.emit('flip-card-signal', identifier)

        snd = self.model.grid[identifier].get('snd', None)
        if snd is not None:
            sound_file = join(self.model.data.get('pathsnd'), snd)

            # NOTE(review): the handler id is never reset after disconnect,
            # and nothing in this class sets it non-zero — confirm the
            # connect side lives elsewhere.
            if self._audio_play_finished_id != 0:
                self.audio.disconnect(self._audio_play_finished_id)

            self.audio.play(sound_file)

        # First card case
        if self.last_flipped == -1:
            flip_card(full_animation=True)

            self.last_flipped = identifier
            self.model.grid[identifier]['state'] = '1'
            self.flip_block = False

        # Second card case
        else:
            # Pair matched
            pair_key_1 = self.model.grid[self.last_flipped]['pairkey']
            pair_key_2 = self.model.grid[identifier]['pairkey']

            if pair_key_1 == pair_key_2:
                if not signal:
                    self.emit('flip-card-signal', identifier)

                stroke_color, fill_color = \
                    self.current_player.props.color.split(',')
                self.emit('set-border', identifier, stroke_color, fill_color)
                self.emit('set-border', self.last_flipped,
                          stroke_color, fill_color)

                self.increase_point(self.current_player)
                self.model.grid[identifier]['state'] = \
                    self.current_player.props.color
                self.model.grid[self.last_flipped]['state'] = \
                    self.current_player.props.color
                self.flip_block = False

                self.emit('cement-card', identifier)
                self.emit('cement-card', self.last_flipped)

            # Pair didn't match
            else:
                flip_card(full_animation=True)

                self.model.grid[identifier]['state'] = '1'
                self.set_sensitive(False)
                self._flop_cards = (identifier, self.last_flipped)
                self._flop_card_timeout = GLib.timeout_add(
                    FLOP_BACK_TIMEOUT,
                    self.flop_card, identifier, self.last_flipped)
            self.last_flipped = -1

    def flop_card(self, identifier, identifier2):
        """Timed callback: turn both mismatched cards face down again."""
        self._flop_card_timeout = -1
        self._flop_cards = None

        self.emit('flop-card', identifier)
        self.model.grid[identifier]['state'] = '0'
        self.emit('flop-card', identifier2)
        self.model.grid[identifier2]['state'] = '0'

        # if self.model.data['divided'] == '1':
        #    self.card_highlighted(widget, -1, False)
        self.set_sensitive(True)
        self.flip_block = False
        self.change_turn()

    def card_highlighted(self, widget, identifier, mouse):
        self.emit('highlight-card', self.last_highlight, False)
        self.last_highlight = identifier

        if identifier == -1 or not self.sentitive:
            return

        if self.model.data['divided'] == '1':
            if self.last_flipped == -1 and identifier \
                    >= (len(self.model.grid) / 2):
                return
            if self.last_flipped != -1 and identifier \
                    < (len(self.model.grid) / 2):
                return

        if mouse and self.model.grid[identifier]['state'] == '0' or not mouse:
            self.emit('highlight-card', identifier, True)

    def increase_point(self, buddy, inc=1):
        """Add *inc* points to *buddy*, emitting one signal per point."""
        self.players_score[buddy] += inc
        for i_ in range(inc):
            self.emit('increase-score', buddy)

    def get_grid(self):
        return self.model.grid

    def collect_data(self):
        """Serialise per-player scores into the model data, keyed by index."""
        for player, score in self.players_score.items():
            index = self.players.index(player)
            self.model.data[str(index)] = str(score)
        return self.model.data

    def change_game(self, widget, game_name, size, mode,
                    title=None, color=None):
        """Switch to another game definition (file, demo or art4apps)."""
        if mode in ['file', 'demo']:
            logging.debug('change_game set is_demo mode %s', mode)
            self.model.is_demo = (mode == 'demo')
            if self.model.read(game_name) != 0:
                logging.error(' Reading setup file %s', game_name)
                return
        if mode == 'art4apps':
            # NOTE: i am using the same variables from the signal
            # to avoid addding more code
            category = game_name
            language = title
            color = None
            title = None
            self.model.is_demo = True
            self.model.read_art4apps(category, language)

        if size is None:
            size = int(self.model.data['size'])
        self.model.def_grid(size)

        if title is not None:
            self.model.data['title'] = title
        if color is not None:
            self.model.data['color'] = color
        self.load_remote(self.model.grid, self.model.data, mode, False)

    def reset_game(self, size=None):
        if size is None:
            size = int(self.model.data['size'])
        self.model.count = 0
        self.model.def_grid(size)
        self.load_remote(self.model.grid, self.model.data,
                         self.model.data['mode'], False)

    def set_load_mode(self, msg):
        self.emit('load_mode', msg)

    def set_messenger(self, messenger):
        self.messenger = messenger

    def set_sensitive(self, status):
        self.sentitive = status
        if not status:
            self.emit('highlight-card', self.last_highlight, False)

    def get_sensitive(self):
        return self.sentitive

    def get_current_player(self):
        return self.current_player

    def get_players_data(self):
        data = []
        for player, score in self.players_score.items():
            data.append([player.props.key, player.props.nick,
                         player.props.color, score])
        return data

    def set_wait_list(self, wait_list):
        """Adopt a shared wait list, crediting points that joined players
        accumulated while waiting."""
        self.waiting_players = wait_list
        # Iterate over a copy: matched entries are removed from wait_list.
        # (Fixed: the original called ``list.remove(w)`` on the builtin
        # type, which raised TypeError instead of removing the entry.)
        for w in list(wait_list):
            for p in self.players:
                if w[0] == p.props.key:
                    wait_list.remove(w)
                    for i_ in range(w[3]):
                        self.increase_point(p)

    def set_myself(self, buddy):
        self.myself = buddy

    def add_to_waiting_list(self, buddy):
        self.players.remove(buddy)
        self.waiting_players.append(buddy)
        self.emit('wait_mode_buddy', buddy, True)

    def rem_to_waiting_list(self, buddy):
        self.waiting_players.remove(buddy)
        self.players.append(buddy)
        self.emit('wait_mode_buddy', buddy, False)

    def load_waiting_list(self, wait_list):
        for buddy in wait_list:
            self.add_to_waiting_list(buddy)

    def empty_waiting_list(self):
        # Iterate over a copy: rem_to_waiting_list() mutates the list while
        # we loop (the original skipped every other buddy).
        for buddy in list(self.waiting_players):
            self.rem_to_waiting_list(buddy)
Ejemplo n.º 53
0
class compClient():
	"""Client that streams recorded spectrum frames to a local computation
	server on one socket and receives configuration on a second one."""

	def __init__(self):
		self.PORT_NUMBER = 5555
		self.au = Audio()
		self.conn = None

	def connect(self):
		# One connection carries spectrum data, the other configuration.
		self.comp_conn = socket.create_connection(('localhost', self.PORT_NUMBER))
		self.conf_conn = socket.create_connection(('localhost', self.PORT_NUMBER))

	def send_comp_data(self):
		"""Record one FFT frame and push it over the data connection."""
		Y_abs = self.au.recordHz()
		send_data = Y_abs.tostring()
		self.comp_conn.send(send_data + END_SIGN)

	def recv_config(self):
		"""Read one settings message and parse it into a dict."""
		# TODO: make it nonblocking
		# TODO: better mapping of the settings -> this and apply_config
		recv_data = self.conf_conn.recv(100)
		recv_data = recv_data.split(END_SIGN)[0]
		recv_data = recv_data.split(SEP)

		new_conf = {}
		for conf in recv_data:
			conf = conf.split(EQUAL)
			setting = conf[0]
			value = conf[1]
			new_conf[setting] = value

		return new_conf

	def apply_recv_config(self):
		config_dict = self.recv_config()
		self.au.set_threshold(config_dict['TH'])

	def run(self):
		recv_data = ""

		# auth compnode
		self.comp_conn.send("compnode" + END_SIGN)
		while recv_data != "ready":
			recv_data = self.comp_conn.recv(50)
			recv_data = recv_data.split(END_SIGN)[0]

		recv_data = ""

		# auth settingconn
		self.conf_conn.send("settnode" + END_SIGN)
		# NOTE(review): the reply is read from comp_conn although "settnode"
		# was sent on conf_conn — confirm against the server protocol.
		while recv_data != "ready":
			recv_data = self.comp_conn.recv(50)
			recv_data = recv_data.split(END_SIGN)[0]

		# computational loop
		while True:
			self.send_comp_data()
			# Fixed: the source was truncated here to a dangling "self.";
			# applying the received configuration is the only remaining
			# step this class provides — TODO confirm intent.
			self.apply_recv_config()
			
Ejemplo n.º 54
0
class Player:
    """Plays .wav tracks from a directory while running an FFT-based beat
    analysis, updating attached Tk variables with time/filename info.

    NOTE(review): this is Python 2 code (str PCM buffers, np.fromstring);
    the int divisions below use // so the slicing also works under
    Python 3.
    """

    def __init__(self, directory):
        self.directory = directory
        self.files = glob.glob(os.path.join(directory, '*.wav'))
        self.times = 0
        self.size = 8
        self.locked = False
        self.beat_counter = [0, 0]
        self.beat_buffer = [0, 0]
        self.beat_time = time.time()
        self.my_values = [0 for i in range(self.size)]
        self.current_time = 0
        self.frame = 0
        self.filedata = []
        # Pick a random starting track; analysis runs on a worker thread.
        Thread(target=self.set_track,
               args=(random.choice(self.files), )).start()

    def transition(self, old_track, new_track):
        # Placeholder for cross-fading between tracks.
        # (Fixed: the original definition was missing ``self``.)
        pass

    def stop(self):
        self.current_time = self.audio.current_time
        self.event = False

    def set_track(self, file):
        """Load *file* into the Audio backend and run the external beat
        analyser, whose first stdout line names the next track to queue."""
        self.current_time = 0
        self.file = file
        self.filename = file.split("/")[-1]
        self.audio = Audio(file)
        cmd = ['java', '-Xmx2g', '-d64', 'data_analizer_routine',
               self.directory, file, str(self.audio.real_duration)]
        # 'locked' guards against launching two analyses concurrently.
        self.locked = True
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        p.wait()
        self.locked = False
        data = p.communicate()
        self.new_track = data[0].split("\n")[0]
        self.filedata = []

    def play(self):
        """Start streaming the current track on a worker thread."""
        self.file_label.set(self.filename)
        self.time = time.time()
        self.audio_thread = Thread(target=self.update_audio)
        self.event = True
        self.audio_thread.start()

    def set_gui(self, _time, _filename):
        """Attach the Tk variables used to show elapsed time and filename."""
        self.t = time.time()
        self.play_time = _time
        self.file_label = _filename

    def freq_analizer(self, pcm):
        """Band-average the spectrum of one PCM chunk and record beat
        timestamps when enough bands jump at once."""
        fft = np.fft.fft(pcm)
        half = len(pcm) // 2
        freq = np.fft.fftfreq(np.arange(len(pcm)).shape[-1])[:half]
        freq = freq * self.audio.file.getframerate() / 1000
        current_time = datetime.timedelta(
            seconds=int(self.audio.duration) - int(self.audio.current_time))
        # Throttle GUI updates to roughly 3 per second.
        if time.time() - self.t >= 0.3:
            self.play_time.set(str(current_time))
            self.t = time.time()
        # NOTE(review): ``real`` is a module-level flag defined elsewhere in
        # the original file — confirm it exists before reuse.
        if real:
            fftr = 10 * np.log10(np.sqrt(fft.imag ** 2 + fft.real ** 2))[:half]
            band = len(fftr) // self.size
            deff = 0
            defl = band
            bpm = [False for i in range(self.size)]
            for i in range(self.size):
                fftr[deff:defl] = np.average(fftr[deff:defl])
                value = (math.ceil(np.average(fftr[deff:defl]) * 100) / 100)
                mult = 5
                # A band "beats" when its level jumps by more than `mult` dB.
                if self.my_values[i] + mult < value:
                    bpm[i] = True
                else:
                    bpm[i] = False
                self.my_values[i] = value
                if np.isinf(value):
                    value = 0
                deff += band
                defl += band
            if bpm.count(True) >= bpm.count(False):
                self.times += 1
                if self.times == 1:
                    self.beat_counter[0] = self.beat_counter[1]
                    self.beat_counter[1] = self.audio.current_time
                    self.filedata.append(
                        str(self.audio.current_time) + ", " +
                        str(60 / (self.beat_counter[1] - self.beat_counter[0])) + "\n")
            else:
                self.times = 0
            return (freq, fftr)

    def update_audio(self):
        """Stream the track to the output device, analysing each chunk; at
        end of file, dump beat data and start the next track."""
        self.data = self.audio.get_data()
        while self.data != "" and self.event != False:
            pcm = np.fromstring(self.data, "Int16")
            self.freq_analizer(pcm)
            self.audio.stream.write(self.data)
            self.data = self.audio.get_data()
        if self.data == "":
            if len(self.filedata) > 0:
                with open(self.file + ".dat", "w") as f:
                    f.writelines(self.filedata)
            self.filedata = []
            # (Fixed: this branch mixed tabs and spaces in the original.)
            if self.locked == False:
                Thread(target=self.set_track, args=(self.new_track, )).start()
                print("New process")
            else:
                print("Another process is running")
            self.time_counter = 0
            self.current_time = 0
            self.play()
Ejemplo n.º 55
0
	def __init__(self):
		"""Set up client state; presumably the actual socket (self.conn) is
		opened later by a connect method — confirm against the full class."""
		# Port of the local computation/config server.
		self.PORT_NUMBER = 5555
		# Audio capture backend.
		self.au = Audio()
		self.conn = None
Ejemplo n.º 56
0
from flask import Flask
from flask import request

# Flask app plus module-level singletons shared by the request handlers.
app = Flask('walkout')


from audio import Audio
from recognition import Recognize

# Shared audio backend, used below to pre-convert the playlist.
a = Audio()

# Walk-out tracks shipped as mp3; the Audio backend plays wav files, so
# every entry is converted once at startup.
playlist = (
    'run.mp3',
    'media.mp3',
    'fall.mp3',
    'downfall.mp3',
    'vader.mp3',
    'girl.mp3',
)

for mp3 in playlist:
    a.convert_mp3_to_wav(mp3)

# Face-recognition helper shared by the request handlers.
recog = Recognize()

# Image extensions accepted for upload (set literal instead of set([...])).
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}


def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-sensitive (e.g. 'face.PNG' is rejected) and a
    name without any dot is rejected outright.
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
Ejemplo n.º 57
0
import sys, os
from audio import Audio

# Demo script: load an audio file, write it back out, then convolve it with
# an impulse response.
fname = 'samples/avril.aif'
if len(sys.argv) > 2:
    print('Usage: python3 example.py [file type .mp3, .wav, .aif, .ogg, etc.]')
    sys.exit()
elif len(sys.argv) == 2:
    fname = sys.argv[1]

print('\nExample script for the audio class.\n\n{}'.format('~'*40))
print('Loading in {}...'.format(fname))

# Fixed: the path was hard-coded to 'samples/avril.aif', silently ignoring
# the file the user passed on the command line.
a = Audio(fname)

print('\n{} is now inside an Audio object - you can read and write its time-series data.'.format(a.title))
print('\nBy default, it writes to both ./download_files/{}.wav and ../Web_Interface/static/temp_wet.wav.'.format(a.title))
print('\nYou can also write to a directory of your choice with the optional "custom" arg in write_to_wav().')
print('\nOn a mac, you can play audio from the command line with afplay. On ubuntu, you\'ll have to download vlc or another audio player.')
print('\nWriting now...')

a.write_to_wav()

print('\nNow let\'s load another Audio instance and convolve the two...')

b = Audio('impulses/Booth_atrium.wav')
c = a.convolve(b)
c.write_to_wav()

print('\nThe convolution has been written to ./download_files and its title has been formatted to reflect the transformation. If you use pitch shifting, ring modulation, or delay, these steps are the same except for the method you call and the args you give.\n')
Ejemplo n.º 58
0
class App(object):
	"""Manage application controllers, respond to user input, run master loop.

	This class wraps together the various components of the application. It is
	the application entry point. On startup, it creates an instance of each
	controller. The run() method starts each controller and runs the master loop
	until the user quits.

	Settings:
		experimentName: used to label the output directory and in output metadata
		keyBindings: a dictionary where the keys are 'quit', 'toggleStimulator',
			and 'triggerStimulator', and the values are one-character strings
			representing the button that triggers the action specified by the key
		outputRootDir: directory where the folder containing trial data will be
			created
	"""

	def __init__(self, **kwargs):
		"""Initialize the keycode bindings and all controllers, create output dir."""
		# Defaults for every controller; caller-supplied kwargs override them.
		settings = {
			'experimentName' : 'someNameHere',
			'outputRootDir' : os.path.normpath(os.path.expanduser("~/kauferdata")),
			'keyBindings' : {
				'quit' : 'q',
				'toggleStimulator' : 't',
				'triggerStimulator' : 's'
				},
			'audio' : {},
			'gui' : {},
			'stimulator' : { 'activeProtocolName' : 'nucleusAccumbensExample' },
			'tracker' : {},
			'videoIn' : {},
			'videoOut' : {},
			'writer' : {},
			}
		settings.update(kwargs)
		# Output directory name: <timestamp>_<experiment>_<protocol>.
		trialDir = "{0}_{1}_{2}".format(time.strftime("%y%m%d%H%M%S"),
				settings['experimentName'], settings['stimulator']['activeProtocolName'])
		settings['outputDataDir'] = os.path.join(settings['outputRootDir'], trialDir)
		self.keyBindings = settings['keyBindings']
		# Translate action names into OpenCV keycodes so they can be compared
		# against cv2.waitKey() results in run().
		self.keycodeBindings = {k: getattr(opencvgui.keycodes, v)
				for k,v in self.keyBindings.items() }
		os.makedirs(os.path.join(settings['outputDataDir'], 'audio'))
		self.writeExperimentData(settings)
		# NOTE(review): this loop mutates kwargs, not settings. If the caller
		# did not pass one of these keys, settings[x] still holds the default
		# empty dict, which never receives 'outputDataDir' -- confirm this is
		# intended. (dict.has_key is Python 2 only.)
		for x in ['audio', 'videoOut', 'writer']:
			kwargs[x] = {} if not kwargs.has_key(x) else kwargs[x]
			kwargs[x]['outputDataDir'] = settings['outputDataDir']

		# gui and writer both need a reference to the app instance because they
		# draw on the state of so many other controllers
		self.audio = Audio(**settings['audio'])
		self.gui = Gui(self, **settings['gui'])
		self.stimulator = Stimulator(**settings['stimulator'])
		self.tracker = Tracker(**settings['tracker'])
		self.videoIn = VideoIn(**settings['videoIn'])
		self.videoOut = VideoOut(**settings['videoOut'])
		self.writer = Writer(self, **settings['writer'])

	def run(self):
		"""Respond to user input and run the master application loop.

		This method has three basic sections. Before the loop starts, the start()
		method is called on all controllers. The central loop calls update() on
		each controller until it is terminated by a user-issued quit keystroke.
		Keystrokes are listened for at the start of each loop iteration.
		"""
		self.printKeybindings()
		# NOTE: time.clock() is Python-2 era (removed in Python 3.8) and its
		# wall-clock vs CPU-time semantics are platform-dependent.
		self.startTime = self.lastTime = time.clock()
		self.audio.start()
		self.gui.start()
		self.stimulator.start()
		self.tracker.start()
		self.videoIn.start()
		self.videoOut.start()
		self.writer.start()

		while True:

			# respond to user input
			lastKeyStroke = cv2.waitKey(20)  # 20 is the number of ms to wait for key
			if lastKeyStroke != -1: # -1 means there was no keystroke
				if lastKeyStroke == self.keycodeBindings['quit']:
					break
				if lastKeyStroke == self.keycodeBindings['triggerStimulator']:
					self.stimulator.trigger()
				if lastKeyStroke == self.keycodeBindings['toggleStimulator']:
					self.stimulator.toggle()

			# update state
			self.currTime = time.clock()
			self.totalTimeElapsed = self.currTime - self.startTime
			# Update order matters: tracker consumes the fresh videoIn frame,
			# the stimulator reads the tracker, and videoOut composites all
			# three before the gui refreshes.
			self.audio.update()
			self.videoIn.update()
			self.tracker.update(self.videoIn, self.currTime)
			self.stimulator.update(self.currTime, self.tracker)
			self.videoOut.update(self.videoIn, self.tracker, self.stimulator)
			self.gui.update()
			self.lastTime = self.currTime
			self.writer.update()

		# closing
		self.audio.stop()
		self.videoIn.stop()
		self.videoOut.stop()
		self.gui.stop()
		self.stimulator.stop()
		self.tracker.stop()
		self.writer.stop()

	def writeExperimentData(self, settings, console=True):
		"""Write experiment metadata to a file in the output directory.
		
		Args:
			settings: The full settings dictionary passed to the App's __init__
			console: Optional. Boolean. Also print experiment data to console.
		"""
		# NOTE(review): strftime "%m/%y/%d" yields month/year/day ordering --
		# confirm this is intentional and not meant to be "%m/%d/%y".
		with open(os.path.join(settings['outputDataDir'], 'experiment.txt'), 'w') as f:
			lines = [
					"name: " + settings['experimentName'],
					"date: " + time.strftime("%m/%y/%d"),
					"time: " + time.strftime("%H:%M"),
					"protocol: "  + settings['stimulator']['activeProtocolName'],
					str(settings['stimulator']['protocols'][settings['stimulator']['activeProtocolName']])
					]
			f.write("\n".join(lines))
			f.close()  # redundant inside `with` (which closes on exit), but harmless
		if console:
			print "\n".join(lines)
	
	def printKeybindings(self):
		"""Print the available keyboard commands to the terminal."""
		print "Key Bindings:"
		for k,v in self.keyBindings.items():
			print "{0}: {1}".format(k,v)
		print ""
Ejemplo n.º 59
0
#!/usr/bin/python

from audio import Audio
import time
import sys
import wave

if len(sys.argv) < 2:
    print "Usage: %s file_name player_type" % (sys.argv[0])
    sys.exit()

wf = wave.open(sys.argv[1], 'rb')

a = None
if len(sys.argv) >= 3:
    a = Audio(player=sys.argv[2])
else:
    a = Audio(player=None)

a.open()

chunk = 640
data = wf.readframes(chunk)
# play stream (looping from beginning of file to the end)
while data != '':
    # writing to the stream is what *actually* plays the sound.
    a.play(data)
    data = wf.readframes(chunk)

print "Play done."
i = 0
Ejemplo n.º 60
0
 def __init__(self):
     """Create the record/playback audio channels and initialise the Vad base class."""
     self.record = Audio()  # audio channel used for recording
     self.active = False  # activity flag; starts off
     self.play =Audio()  # audio channel used for playback
     # Explicit base-class initialisation (Vad is presumably an old-style
     # base here -- TODO confirm why super() is not used).
     Vad.__init__(self)