Code Example #1
File: QVoiceRecorder.py  Project: nano13/tambi
class QVoiceRecorder(object):
    def __init__(self):
        pass

    def initAudioInput(self, filepath):

        self.recorder = QAudioRecorder()

        self.settings = QAudioEncoderSettings()
        self.settings.setCodec("audio/vorbis")
        self.recorder.setContainerFormat("ogg")
        #self.settings.setQuality(QtMultimedia.HighQuality)

        self.recorder.setEncodingSettings(self.settings)

        url = QtCore.QUrl.fromLocalFile(
            QtCore.QFileInfo(filepath).absoluteFilePath())
        self.recorder.setOutputLocation(url)

    def start(self):
        self.recorder.record()

    def stop(self):
        self.recorder.stop()
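A minimal sketch of how the class above might be driven. This is not part of the original project: the imports, the output file name, and the use of a QCoreApplication event loop with timers are assumptions.

# Hypothetical driver for QVoiceRecorder; imports and file name are assumed.
from PyQt5 import QtCore
from PyQt5.QtMultimedia import QAudioRecorder, QAudioEncoderSettings

app = QtCore.QCoreApplication([])            # an event loop is needed for recording
recorder = QVoiceRecorder()
recorder.initAudioInput("take_01.ogg")       # hypothetical output path
recorder.start()                             # starts writing the ogg/vorbis file
QtCore.QTimer.singleShot(3000, recorder.stop)   # stop after roughly 3 seconds
QtCore.QTimer.singleShot(3500, app.quit)
app.exec_()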
Code Example #2
    def __setRecordParams(self):  ## set audio recording parameters
        selectedFile = self.ui.editOutputFile.text().strip()
        if selectedFile == "":
            QMessageBox.critical(self, "Error", "Please set the recording output file first")
            return False

        if os.path.exists(selectedFile):
            os.remove(selectedFile)  # remove any existing file
        ##         QMessageBox.critical(self, "Error", "The recording output file is in use and cannot be deleted")
        ##         return False

        recordFile = QUrl.fromLocalFile(selectedFile)
        self.recorder.setOutputLocation(recordFile)  # set the output file

        recordDevice = self.ui.comboDevices.currentText()
        self.recorder.setAudioInput(recordDevice)  # set the recording input device

        settings = QAudioEncoderSettings()  # audio encoder settings
        settings.setCodec(self.ui.comboCodec.currentText())  # codec

        sampRate = int(self.ui.comboSampleRate.currentText())
        settings.setSampleRate(sampRate)  # sample rate

        channelCount = int(self.ui.comboChannels.currentText())
        settings.setChannelCount(channelCount)  # channel count

        settings.setEncodingMode(QMultimedia.ConstantBitRateEncoding)  # constant bit rate

        self.recorder.setAudioSettings(settings)  # apply the audio settings
        return True
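The combo boxes read in __setRecordParams() are presumably filled elsewhere in the UI setup. A hedged sketch of how that could be done from the recorder's own capability queries follows; the widget names are reused from the example, while the helper itself and the tuple unpacking of supportedAudioSampleRates() (PyQt5 maps the continuous-range flag into the return value) are assumptions.

    def __fillRecorderOptions(self):
        # Hypothetical helper (not in the original): assumed to run after
        # self.recorder = QAudioRecorder() exists, it populates the combo
        # boxes that __setRecordParams() later reads.
        self.ui.comboDevices.addItems(self.recorder.audioInputs())
        self.ui.comboCodec.addItems(self.recorder.supportedAudioCodecs())
        rates, _continuous = self.recorder.supportedAudioSampleRates()
        self.ui.comboSampleRate.addItems([str(r) for r in rates])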
Code Example #3
    def __init__(self, data: ConverterData, transcription: Transcription,
                 app_settings: AppSettings) -> None:
        super().__init__()
        self.app_settings = app_settings
        self.temp = data.get_temp_file()
        self.transcription = transcription
        self.file_path = os.path.join(self.temp,
                                      f'{self.transcription.id}.wav')
        self.settings = QAudioEncoderSettings()
Code Example #4
File: QVoiceRecorder.py  Project: nano13/tambi
    def initAudioInput(self, filepath):

        self.recorder = QAudioRecorder()

        self.settings = QAudioEncoderSettings()
        self.settings.setCodec("audio/vorbis")
        self.recorder.setContainerFormat("ogg")
        #self.settings.setQuality(QtMultimedia.HighQuality)

        self.recorder.setEncodingSettings(self.settings)

        url = QtCore.QUrl.fromLocalFile(
            QtCore.QFileInfo(filepath).absoluteFilePath())
        self.recorder.setOutputLocation(url)
Code Example #5
    def __init__(self, parent=None):
        super(Camera, self).__init__(parent)

        self.ui = Ui_AgeGender()
        self.ui.setupUi(self)

        self.camera = None
        self.imageCapture = None
        self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False

        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.videoSettings = QVideoEncoderSettings()
        self.videoContainerFormat = ''

        # self.ui.setupUi(self)

        self.face_cascade = cv.CascadeClassifier(
            'haarcascade_frontalface_alt.xml')
        # OpenCV face cascade classifier
        self.faceProto = "AgeGender/opencv_face_detector.pbtxt"
        self.faceModel = "AgeGender/opencv_face_detector_uint8.pb"

        self.ageProto = "AgeGender/age_deploy.prototxt"
        self.ageModel = "AgeGender/age_net.caffemodel"

        self.genderProto = "AgeGender/gender_deploy.prototxt"
        self.genderModel = "AgeGender/gender_net.caffemodel"

        # Load network
        self.ageNet = cv.dnn.readNet(self.ageModel, self.ageProto)
        self.genderNet = cv.dnn.readNet(self.genderModel, self.genderProto)
        self.faceNet = cv.dnn.readNet(self.faceModel, self.faceProto)

        self.MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
        self.ageList = [
            '(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)',
            '(48-53)', '(60+)'
        ]
        self.genderList = ['Male', 'Female']
        self.addToolBar(NavigationToolbar(self.ui.genderWidget.canvas, self))
        self.addToolBar(NavigationToolbar(self.ui.ageWidget.canvas, self))

        # create a timer
        self.timer = QTimer()

        # set timer timeout callback function
        self.timer.timeout.connect(self.detectFace)

        # When the start camera button is clicked, invoke controlTimer()
        self.ui.startCamera.clicked.connect(self.controlTimer)

        # When the upload image button is clicked, invoke detectInImage()
        self.ui.uploadImage.clicked.connect(self.detectInImage)

        cameraDevice = QByteArray()
Code Example #6
    def __init__(self, parent=None, standalone=False):
        super(Camera, self).__init__(parent)

        # This prevents doing unneeded initialization
        # when Qt Designer loads the plugin.
        if parent is None and not standalone:
            return

        if not multimedia_available:
            return

        self.ui = uic.loadUi(os.path.join(WIDGET_PATH, "camera.ui"), self)

        self.camera = None
        self.imageCapture = None
        self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False

        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.videoSettings = QVideoEncoderSettings()
        self.videoContainerFormat = ''

        camera_device = QByteArray()

        videoDevicesGroup = QActionGroup(self)

        videoDevicesGroup.setExclusive(True)

        if not QCamera.availableDevices():
            self.ui.devicesCombo.addItem("No Device")
        else:
            for deviceName in QCamera.availableDevices():
                description = QCamera.deviceDescription(deviceName)
                self.ui.devicesCombo.addItem(description)

                videoDeviceAction = QAction(description, videoDevicesGroup)
                videoDeviceAction.setCheckable(True)
                videoDeviceAction.setData(deviceName)

                if camera_device.isEmpty():
                    camera_device = deviceName
                    videoDeviceAction.setChecked(True)

                self.ui.devicesCombo.addAction(videoDeviceAction)

        videoDevicesGroup.triggered.connect(self.updateCameraDevice)

        self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)

        self.ui.devicesCombo.currentIndexChanged.connect(
            self.get_device_action)

        self.ui.lockButton.hide()

        # Start camera 2s after the UI has loaded
        QTimer.singleShot(2000, lambda: self.setCamera(camera_device))
Code Example #7
File: audioRecording.py  Project: nano13/tambi
    def record(self, filename):
        url = QtCore.QUrl.fromLocalFile(
            QtCore.QFileInfo(filename).absoluteFilePath())
        #content = QMediaObject(url)

        #self.recorder = QAudioRecorder()
        #source = QAudioInput()
        #source = QMediaObject()
        self.recorder = QAudioRecorder()

        settings = QAudioEncoderSettings()
        settings.setChannelCount(1)
        settings.setSampleRate(44100)
        settings.setBitRate(32)
        settings.setCodec("audio/vorbis")
        #settings.setEncodingMode(QtMultimedia.ConstantQualityEnconding)

        self.recorder.setContainerFormat("ogg")
        self.recorder.setEncodingSettings(settings)
        self.recorder.setOutputLocation(url)
Code Example #8
    def initAudioListWidget(self, dbAdapter, deckpath, current_rowid):
        self.audioItemsDict = []

        self.dbAdapter = dbAdapter
        self.deckpath = deckpath
        self.current_rowid = current_rowid

        self.audioPlayer = QMediaPlayer()
        self.audioPlayer.mediaStatusChanged.connect(self.mediaStatusChanged)

        os_name = platform.uname()[0]
        if os_name == "Windows" or os_name == "Darwin":
            self.audioRecorder = QAudioRecorder()
        else:
            from modules.deck.gstAudioRecorder import GstAudioRecorder
            self.audioRecorder = GstAudioRecorder()

        settings = QAudioEncoderSettings()

        audioformat = self.config.readVar('vocable', 'audioformat')
        if audioformat == 'ogg':
            settings.setCodec("audio/vorbis")
            self.audioRecorder.setContainerFormat("ogg")
        elif audioformat == 'mp3':
            settings.setCodec("audio/mpeg")
            self.audioRecorder.setContainerFormat("mp3")
        elif audioformat == 'amr':
            settings.setCodec("audio/amr")
        else:
            settings.setCodec("audio/PCM")
            self.audioRecorder.setContainerFormat("wav")

        self.audioRecorder.setEncodingSettings(settings)

        self.setColumnCount(6)
        self.setHorizontalHeaderLabels(
            ["description", "", "", "", "", "filename"])
        self.setRowCount(0)

        self.itemChanged.connect(self.onItemChanged)
Code Example #9
    def __init__(self, parent=None):
        super(Camera, self).__init__(parent)
        global API
        API = AlgorithmAPIs(template_dir="templates",
                            threshold=0.5,
                            use_multiprocessing=False)

        self.ui = Ui_Camera()

        self.camera = None
        self.imageCapture = None
        # self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False

        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.ui.setupUi(self)
        cameraDevice = QByteArray()
        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)

        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)

            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)

            self.ui.menuDevices.addAction(videoDeviceAction)

        videoDevicesGroup.triggered.connect(self.updateCameraDevice)

        self.ui.lcdNumber_2.display(0)

        self.ui.dial.valueChanged.connect(self.dial_display)

        global dial_value
        dial_value = 3
        self.ui.lcdNumber_2.display(dial_value)
        self.setCamera(cameraDevice)

        # Create and load model
        path_pretrained = "apis/models/facenet/20180402-114759.pb"
        path_SVM = "apis/models/SVM/SVM.pkl"
        self.recognizer = Recognizer()
        self.recognizer.create_graph(path_pretrained, path_SVM)
Code Example #10
    def __init__(self, arg, parent=None):
        super(Camera, self).__init__(parent)
        # self.arg = arg
        # Attributes variables
        self.ui = Ui_Camera()
        self.camera = None
        self.imageCapture = None
        self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False

        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.videoSettings = QVideoEncoderSettings()
        self.videoContainerFormat = ''

        self.ui.setupUi(self)

        # get the camera device
        cameraDevice = QByteArray()

        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)

        # Get information about available cameras
        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)

            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)

            self.ui.menuDevices.addAction(videoDeviceAction)

        videoDevicesGroup.triggered.connect(self.updateCameraDevice)
        self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)

        self.ui.lockButton.hide()

        self.setCamera(cameraDevice)
Code Example #11
File: camera.py  Project: SJPMechaEagles/Capture
    def startVid(self):
        self.cam.load()
        # self.camvfind.show()
        self.cam.setViewfinder(self.camvfind)
        self.cam.setCaptureMode(QCamera.CaptureVideo)
        self.cam.start()

        audio = QAudioEncoderSettings()
        audio.setCodec("audio/amr")
        audio.setQuality(QtMultimedia.QMultimedia.NormalQuality)
        video = QVideoEncoderSettings()
        # video.setCodec("video/mp4")
        video.setQuality(QtMultimedia.QMultimedia.NormalQuality)
        video.setResolution(1920, 1080)
        video.setFrameRate(30.0)
        # self.recorder.setAudioSettings(audio)
        self.recorder.setVideoSettings(video)
        self.recorder.setContainerFormat("mp4")
Code Example #12
File: app.py  Project: greenhandatsjtu/PyQt5-IM
    def __init__(self):
        super(App, self).__init__()
        self.loginButton.clicked.connect(self.showLoginDialog)
        self.client_thread.text_signal.connect(self.showText)  # show text messages
        self.client_thread.usr_signal.connect(self.showUserList)  # update the online-user list
        self.client_thread.file_signal.connect(self.showFile)  # show file messages
        self.emojis.emoji_signal.connect(self.addEmoji)

        # Switch the QStackedWidget page index when the QListWidget's current item changes
        self.userListWidget.currentRowChanged.connect(self.dialogChanged)

        self.usrList = []  # last known list of online users
        self.groupList = []  # group list

        self.md5 = hashlib.md5()  # used to hash passwords

        # audio recorder
        self.recorder = QAudioRecorder(self)
        settings = QAudioEncoderSettings()
        settings.setChannelCount(2)
        settings.setSampleRate(16000)
        self.recorder.setEncodingSettings(settings)
Code Example #13
class SimpleAudioRecorder(QAudioRecorder):
    def __init__(self, data: ConverterData, transcription: Transcription,
                 app_settings: AppSettings) -> None:
        super().__init__()
        self.app_settings = app_settings
        self.temp = data.get_temp_file()
        self.transcription = transcription
        self.file_path = os.path.join(self.temp,
                                      f'{self.transcription.id}.wav')
        self.settings = QAudioEncoderSettings()

    def start_recording(self) -> None:
        LOG_RECORDER.info("Audio recording started.")
        self.settings.setCodec('audio/pcm')
        self.settings.setChannelCount(1)
        self.settings.setBitRate(96000)
        self.settings.setSampleRate(44100)
        self.settings.setQuality(self.app_settings.audio_quality)
        self.settings.setEncodingMode(QMultimedia.ConstantQualityEncoding)
        container = 'audio/x-wav'
        self.setEncodingSettings(self.settings, QVideoEncoderSettings(),
                                 container)
        self.setOutputLocation(QUrl.fromLocalFile(self.file_path))
        self.record()

    def stop_recording(self) -> Union[str, None]:
        LOG_RECORDER.info("Audio recording finished.")
        LOG_RECORDER.info(f"Audio file: {self.file_path}")
        self.stop()
        return self.file_path
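A minimal usage sketch for SimpleAudioRecorder; the data, transcription, and app_settings objects are assumed to be valid instances constructed elsewhere in the host application.

# Hypothetical driver; data, transcription and app_settings are assumed instances.
recorder = SimpleAudioRecorder(data, transcription, app_settings)
recorder.start_recording()               # writes <transcription.id>.wav into the temp dir
# ... capture some audio ...
wav_path = recorder.stop_recording()     # returns the path of the finished file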
Code Example #14
File: Recorder.py  Project: r2-lee/learn-git
    def __setRecordParams(self):
        selectedFile = self.ui.editOutputFile.text().strip()
        if selectedFile == "":
            QMessageBox.critical(self, "Error", "Please set the recording output file first")
            return False
        
        if os.path.exists(selectedFile):
            os.remove(selectedFile)  # remove any existing file

        recordFile = QUrl.fromLocalFile(selectedFile)
        self.recorder.setOutputLocation(recordFile)  # set the output file

        recordDevice = self.ui.comboDevices.currentText()
        self.recorder.setAudioInput(recordDevice)  # set the recording input device

        settings = QAudioEncoderSettings()  # audio encoder settings
        settings.setCodec(self.ui.comboCodec.currentText())  # codec

        sampRate = int(self.ui.comboSampleRate.currentText())
        settings.setSampleRate(sampRate)  # sample rate

        bitRate = int(self.ui.comboBitrate.currentText())
        settings.setBitRate(bitRate)  # bit rate

        channelCount = int(self.ui.comboChannels.currentText())
        settings.setChannelCount(channelCount)  # channel count

        quality = QMultimedia.EncodingQuality(self.ui.sliderQuality.value())
        settings.setQuality(quality)  # quality

        # Constant-quality mode lets the backend pick the sample rate and sample size automatically
        if self.ui.radioQuality.isChecked():
            settings.setEncodingMode(QMultimedia.ConstantQualityEncoding)
        else:
            settings.setEncodingMode(QMultimedia.ConstantBitRateEncoding)  # constant bit rate

        self.recorder.setAudioSettings(settings)  # apply the audio settings
        return True
Code Example #15
class MainWindow_EXEC():
    def __init__(self):

        #-------------------Init QT Setup---------------------------

        app = QtWidgets.QApplication(sys.argv)

        self.MainWindow = QtWidgets.QMainWindow()
        self.ui = Ui_MainWindow()

        self.ui.setupUi(self.MainWindow)
        app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())

        #------------------Exporting Setup------------------------------

        self.ui.export_midi.clicked.connect(self.openDirectory_midi)
        self.ui.export_midi.setFocusPolicy(QtCore.Qt.NoFocus)
        self.ui.export_audio.clicked.connect(self.openDirectory_audio)
        self.ui.export_audio.setFocusPolicy(QtCore.Qt.NoFocus)

        #------------------Metronome Setup------------------------------

        self.ui.metronome_button.clicked.connect(self.metro_thread)

        #------------------Recording Setup------------------------------

        self.ui.start_stop_rec.clicked.connect(self.start_stop_recording)
        self.ui.play_gui.clicked.connect(self.play)

        # QAudio setup
        self.settings = QAudioEncoderSettings()
        self.settings.setBitRate(16)
        self.settings.setChannelCount(1)
        self.audioRecorder = QAudioRecorder()
        self.audioRecorder.setEncodingSettings(self.settings)
        self.file_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         resource_path("resources/output.wav")))

        self.url = QUrl.fromLocalFile(self.file_path)
        self.audioRecorder.setOutputLocation(self.url)

        #------------------Audio Terrain Gui Setup------------------------------

        self.terrain = Terrain()
        self.terrain.update()
        self.terrain_widget = self.terrain.getwidget()
        self.layout = QtGui.QGridLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.ui.t_widget.setLayout(self.layout)
        self.layout.addWidget(self.terrain_widget, 0, 0, 1, 1)

        #------------------Audio Trimmer Setup------------------------------

        self.ui.audio_trimmer.clicked.connect(self.trim_audio)

        if os.path.isfile("resources/output.wav"):
            self.y, self.sr = librosa.load(
                resource_path("resources/output.wav"), sr=44100)
        else:
            new_wav = AudioSegment.empty()
            new_wav.export("resources/output.wav", format="wav")
            self.y, self.sr = librosa.load(
                resource_path("resources/output.wav"), sr=44100)

        self.duration = round(
            librosa.core.get_duration(y=self.y, sr=self.sr) * self.sr)
        self.maxv = np.iinfo(np.int16).max

        self.win = pg.GraphicsLayoutWidget()
        self.p = self.win.addPlot()

        # remove the X & Y axes and disable mouse interaction
        self.p.showAxis('bottom', show=False)
        self.p.showAxis('left', show=False)
        self.p.setMouseEnabled(x=False, y=False)

        self.region = pg.LinearRegionItem(brush=(100, 100, 100, 60),
                                          bounds=(0, self.duration))
        self.region.setRegion([0, self.duration])

        self.p.addItem(self.region, ignoreBounds=True)
        self.p.plot(self.y, pen="w")

        self.layout.addWidget(self.win)
        self.win.hide()

        #------------------Midi Setup------------------------------

        self.ui.convert_midi.clicked.connect(self.convertMidi)
        self.ui.midi_play.clicked.connect(self.midiplayer_thread)
        self.ui.tempo_slider.valueChanged[int].connect(self.tempo_value)
        self.ui.midi_loop.toggle()

        # default bpm is 120
        self.current_tempo = 120
        self.detected_tempo = 120

        #------------------Drum Kit Selector Setup----------------------

        self.ui.drum_kits.clicked.connect(self.select_drumkit)
        self.drum_number = 0
        self.drum_folders = [
            'Drum_Kit_1', 'Drum_Kit_2', 'Drum_Kit_3', 'Drum_Kit_4'
        ]
        self.drum_current = self.drum_folders[self.drum_number]

        #------------------EXEC Window---------------------------------
        self.MainWindow.show()
        sys.exit(app.exec_())
#---------------------------------------------------------------

#------------------Functions----------------------------------

#------------------Drum Kit Selector------------------------------

    def select_drumkit(self):
        if self.drum_number < 3:
            self.drum_number += 1
            self.drum_current = self.drum_folders[self.drum_number]
            self.ui.drum_kits.setText(self.drum_current.replace("_", " "))
        else:
            self.drum_number = 0
            self.drum_current = self.drum_folders[self.drum_number]
            self.ui.drum_kits.setText(self.drum_current.replace("_", " "))

        #------------------Audio Trimmer------------------------------

    def trim_audio(self):
        # Switch to Trimmer widget
        self.layout.removeWidget(self.terrain_widget)
        self.terrain_widget.hide()
        self.win.show()
        self.trim_values = self.region.getRegion()
        self.updateaudio()
        # Trims signal array with region values
        self.y = self.y[round(self.trim_values[0]):round(self.trim_values[1])]

        # save the new signal values to wav
        librosa.output.write_wav(resource_path("resources/output.wav"),
                                 (self.y * self.maxv).astype(np.int16),
                                 self.sr)
        self.updateplot()

    def updateplot(self):
        # Replot the trimmed wav and update region bounds
        self.duration = round(
            librosa.core.get_duration(y=self.y, sr=self.sr) * self.sr)
        self.p.plot(clear=True)
        self.p.plot(self.y, pen="w")
        self.region = pg.LinearRegionItem(brush=(100, 100, 100, 50),
                                          bounds=(0, self.duration))
        self.p.addItem(self.region, ignoreBounds=True)
        self.region.setRegion([0, self.duration])

    def updateaudio(self):
        self.y, self.sr = librosa.load(resource_path("resources/output.wav"),
                                       sr=44100)

        #------------------Metronome Threading------------------------------

    def metro_thread(self):

        if self.ui.metronome_button.isChecked():
            print('metronome is On')
            self.thread = QThread(
            )  # a new thread to run our background tasks in
            self.worker = Worker(
                self.current_tempo)  # a new worker to perform those tasks
            self.worker.moveToThread(
                self.thread
            )  # move the worker into the thread, do this first before connecting the signals
            self.thread.started.connect(
                self.worker.work
            )  # begin our worker object's loop when the thread starts running
            self.thread.start()

        else:
            print('metronome is Off')
            self.stop_loop()
            self.worker.finished.connect(
                self.loop_finished
            )  # do something in the gui when the worker loop ends
            self.worker.finished.connect(
                self.thread.quit)  # tell the thread it's time to stop running
            self.worker.finished.connect(self.thread.wait)
            self.worker.finished.connect(
                self.worker.deleteLater
            )  # have worker mark itself for deletion
            self.thread.finished.connect(self.thread.deleteLater)

    def stop_loop(self):
        self.worker.working = False

    def loop_finished(self):
        # print('Worker Finished')
        pass

    #---------------------------------------------------------

    #------------------ MIDI ------------------------------

    def tempo_value(self, value):
        self.current_tempo = value

    def convertMidi(self):
        self.ui.convert_midi.setEnabled(False)
        self.thread2 = QThread()
        self.worker2 = ConvertMidi_Worker()
        self.worker2.moveToThread(self.thread2)
        self.thread2.started.connect(self.worker2.work)
        self.thread2.start()
        self.worker2.finished.connect(self.convert_finished)
        self.worker2.finished.connect(self.thread2.quit)
        self.worker2.finished.connect(self.thread2.wait)
        self.worker2.finished.connect(self.worker2.deleteLater)
        self.thread2.finished.connect(self.thread2.deleteLater)

    def convert_finished(self, tempo):
        self.detected_tempo = tempo
        self.ui.tempo_slider.setValue(self.detected_tempo)
        self.ui.convert_midi.clearFocus()
        self.ui.convert_midi.setEnabled(True)
        print('Midi Conversion finished')

    def midiplayer_thread(self):

        if self.ui.midi_play.isChecked() and not self.ui.midi_loop.isChecked():

            self.ui.midi_play.setEnabled(False)
            self.win.hide()
            self.terrain_widget.show()
            self.terrain.animate()
            self.thread3 = QThread()
            self.worker3 = MidiPlayer_Worker(self.current_tempo,
                                             self.drum_current)
            self.worker3.moveToThread(self.thread3)
            self.thread3.started.connect(self.worker3.workonce)

            self.thread3.start()
            self.worker3.finished2.connect(self.midi_loop_finished2)
            self.worker3.finished2.connect(self.thread3.quit)
            self.worker3.finished2.connect(self.thread3.wait)
            self.worker3.finished2.connect(self.worker3.deleteLater)
            self.thread3.finished.connect(self.thread3.deleteLater)

        elif self.ui.midi_play.isChecked() and self.ui.midi_loop.isChecked():
            self.win.hide()
            self.terrain_widget.show()
            self.start_Midi_Thread()
            self.terrain.animate()

        elif not self.ui.midi_play.isChecked():
            self.terrain.stop_animate()
            self.stop_Midi_Thread()

    def start_Midi_Thread(self):
        self.thread3 = QThread()
        self.worker3 = MidiPlayer_Worker(self.current_tempo, self.drum_current)
        self.worker3.moveToThread(self.thread3)
        self.thread3.started.connect(self.worker3.work)
        self.thread3.start()

    def stop_Midi_Thread(self):
        self.worker3.working = False

        self.worker3.stop()
        self.worker3.finished.connect(self.midi_loop_finished)
        self.worker3.finished.connect(self.thread3.quit)
        self.worker3.finished.connect(self.thread3.wait)
        self.worker3.finished.connect(self.worker3.deleteLater)
        self.thread3.finished.connect(self.thread3.deleteLater)
        print('done')

    def midi_loop_finished(self):
        print('Midi loop Finished')

    def midi_loop_finished2(self):
        print('Midi Player Finished')
        self.ui.midi_play.toggle()
        self.ui.midi_play.setEnabled(True)
        self.terrain.stop_animate()

    #---------------------------------------------------------

    #------------------ Recorder & Player ------------------------------

    def start_stop_recording(self):
        if self.ui.start_stop_rec.isChecked():
            self.ui.play_gui.setEnabled(False)
            self.ui.audio_trimmer.setEnabled(False)

            self.win.hide()
            self.terrain_widget.show()

            self.layout.addWidget(self.terrain_widget)
            self.audioRecorder.record()
            self.terrain.update()

            self.terrain.animate()
            print('Recording...')

        else:
            self.ui.play_gui.setEnabled(True)
            self.ui.audio_trimmer.setEnabled(True)

            self.terrain.stop_animate()
            self.audioRecorder.stop()
            self.layout.removeWidget(self.terrain_widget)
            self.terrain_widget.hide()

            self.updateaudio()
            self.win.show()
            self.updateplot()
            print('Stop Recording')

    def play(self):
        if self.ui.play_gui.isChecked():
            self.win.hide()
            self.terrain_widget.show()

            self.player = QSound(resource_path("resources/output.wav"))
            self.terrain.animate()
            self.player.play()
            # if self.player.isFinished():
            # 	self.ui.play_gui.toggle()
            # 	print('done')

        else:
            self.terrain.stop_animate()
            self.player.stop()
            self.player.deleteLater()

    #------------------ Exporting ------------------------------

    def openDirectory_midi(self):
        self.openDirectoryDialog = QtGui.QFileDialog.getExistingDirectory(
            self.MainWindow, "Save Midi File")
        if self.openDirectoryDialog:
            self.saveMidi(self.openDirectoryDialog)
        else:
            pass

    def openDirectory_audio(self):
        self.openDirectoryDialog = QtGui.QFileDialog.getExistingDirectory(
            self.MainWindow, "Save Audio File")
        if self.openDirectoryDialog:
            self.saveAudio(self.openDirectoryDialog)
        else:
            pass

    def saveMidi(self, directory):
        shutil.copy("resources/beatbox.mid", directory)

    def saveAudio(self, directory):
        shutil.copy("resources/output.wav", directory)
Code Example #16
    def setupWindow(self):
        """Set up widgets in the main window and the QAudioRecorder instance."""
        # Set up two push buttons (the app's first "screen")
        self.select_path_button = QPushButton("Select Audio Path")
        self.select_path_button.setObjectName("SelectFile")
        self.select_path_button.setFixedWidth(140)
        self.select_path_button.clicked.connect(self.selectAudioPath)

        self.start_button = QPushButton()
        self.start_button.setObjectName("StartButton")
        self.start_button.setEnabled(False)
        self.start_button.setFixedSize(105, 105)
        self.start_button.clicked.connect(self.startRecording)

        # Set up the labels and stop button (the app's second "screen")
        self.recording_label = QLabel("Recording...")
        self.recording_label.setFont(QFont("Helvetica [Cronyx]", 32))
        self.recording_label.setVisible(False)
        self.recording_label.setAlignment(Qt.AlignHCenter)
        self.time_label = QLabel("00:00")
        self.time_label.setFont(QFont("Helvetica [Cronyx]", 18))
        self.time_label.setObjectName("Time")
        self.time_label.setVisible(False)
        self.time_label.setAlignment(Qt.AlignHCenter)

        self.stop_button = QPushButton()
        self.stop_button.setObjectName("StopButton")
        self.stop_button.setFixedSize(65, 65)
        self.stop_button.setVisible(False)
        self.stop_button.clicked.connect(self.stopRecording)

        # Set up the main layout
        self.main_v_box = QVBoxLayout()
        self.main_v_box.setAlignment(Qt.AlignHCenter)
        self.main_v_box.addWidget(self.select_path_button)
        # Force select_path_button to be centered in the window
        self.main_v_box.setAlignment(self.select_path_button, Qt.AlignCenter)
        self.main_v_box.addStretch(3)
        self.main_v_box.addWidget(self.start_button)
        self.main_v_box.setAlignment(self.start_button, Qt.AlignCenter)
        self.main_v_box.addWidget(self.recording_label)
        self.main_v_box.addWidget(self.time_label)
        self.main_v_box.addStretch(3)
        self.main_v_box.addWidget(self.stop_button)
        self.main_v_box.setAlignment(self.stop_button, Qt.AlignCenter)
        self.main_v_box.addStretch(1)

        self.setLayout(self.main_v_box)  # Set the beginning layout

        # Specify audio encoder settings
        audio_settings = QAudioEncoderSettings()
        # Depending upon your platform and the codecs you have available, you may
        # need to change the codec. On Linux, if you run into issues, use
        # "audio/x-vorbis" and then select the .ogg extension when saving
        # the file.
        audio_settings.setCodec("audio/wav")
        audio_settings.setQuality(QMultimedia.HighQuality)

        # Create instance of QAudioRecorder for recording audio
        self.audio_recorder = QAudioRecorder()
        # Uncomment to discover possible codecs supported on your platform
        #print(self.audio_recorder.supportedAudioCodecs())
        self.audio_recorder.setEncodingSettings(audio_settings)
        self.audio_recorder.durationChanged.connect(self.displayTime)
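The selectAudioPath slot connected above is not shown in this example. A hedged sketch of what it might do follows, reusing the widget names from setupWindow; the QFileDialog call, dialog caption, file filter, and default name are assumptions.

    def selectAudioPath(self):
        """Hypothetical slot: choose the output file and enable the start button."""
        path, _ = QFileDialog.getSaveFileName(
            self, "Save Recording As", "recording.wav",
            "Audio files (*.wav *.ogg)")   # assumed filter; match it to your codec
        if path:
            self.audio_recorder.setOutputLocation(QUrl.fromLocalFile(path))
            self.start_button.setEnabled(True)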
Code Example #17
    def __init__(self):

        #-------------------Init QT Setup---------------------------

        app = QtWidgets.QApplication(sys.argv)

        self.MainWindow = QtWidgets.QMainWindow()
        self.ui = Ui_MainWindow()

        self.ui.setupUi(self.MainWindow)
        app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())

        #------------------Exporting Setup------------------------------

        self.ui.export_midi.clicked.connect(self.openDirectory_midi)
        self.ui.export_midi.setFocusPolicy(QtCore.Qt.NoFocus)
        self.ui.export_audio.clicked.connect(self.openDirectory_audio)
        self.ui.export_audio.setFocusPolicy(QtCore.Qt.NoFocus)

        #------------------Metronome Setup------------------------------

        self.ui.metronome_button.clicked.connect(self.metro_thread)

        #------------------Recording Setup------------------------------

        self.ui.start_stop_rec.clicked.connect(self.start_stop_recording)
        self.ui.play_gui.clicked.connect(self.play)

        # QAudio setup
        self.settings = QAudioEncoderSettings()
        self.settings.setBitRate(16)
        self.settings.setChannelCount(1)
        self.audioRecorder = QAudioRecorder()
        self.audioRecorder.setEncodingSettings(self.settings)
        self.file_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__),
                         resource_path("resources/output.wav")))

        self.url = QUrl.fromLocalFile(self.file_path)
        self.audioRecorder.setOutputLocation(self.url)

        #------------------Audio Terrain Gui Setup------------------------------

        self.terrain = Terrain()
        self.terrain.update()
        self.terrain_widget = self.terrain.getwidget()
        self.layout = QtGui.QGridLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.ui.t_widget.setLayout(self.layout)
        self.layout.addWidget(self.terrain_widget, 0, 0, 1, 1)

        #------------------Audio Trimmer Setup------------------------------

        self.ui.audio_trimmer.clicked.connect(self.trim_audio)

        if os.path.isfile("resources/output.wav"):
            self.y, self.sr = librosa.load(
                resource_path("resources/output.wav"), sr=44100)
        else:
            new_wav = AudioSegment.empty()
            new_wav.export("resources/output.wav", format="wav")
            self.y, self.sr = librosa.load(
                resource_path("resources/output.wav"), sr=44100)

        self.duration = round(
            librosa.core.get_duration(y=self.y, sr=self.sr) * self.sr)
        self.maxv = np.iinfo(np.int16).max

        self.win = pg.GraphicsLayoutWidget()
        self.p = self.win.addPlot()

        # remove the X & Y axes and disable mouse interaction
        self.p.showAxis('bottom', show=False)
        self.p.showAxis('left', show=False)
        self.p.setMouseEnabled(x=False, y=False)

        self.region = pg.LinearRegionItem(brush=(100, 100, 100, 60),
                                          bounds=(0, self.duration))
        self.region.setRegion([0, self.duration])

        self.p.addItem(self.region, ignoreBounds=True)
        self.p.plot(self.y, pen="w")

        self.layout.addWidget(self.win)
        self.win.hide()

        #------------------Midi Setup------------------------------

        self.ui.convert_midi.clicked.connect(self.convertMidi)
        self.ui.midi_play.clicked.connect(self.midiplayer_thread)
        self.ui.tempo_slider.valueChanged[int].connect(self.tempo_value)
        self.ui.midi_loop.toggle()

        # default bpm is 120
        self.current_tempo = 120
        self.detected_tempo = 120

        #------------------Drum Kit Selector Setup----------------------

        self.ui.drum_kits.clicked.connect(self.select_drumkit)
        self.drum_number = 0
        self.drum_folders = [
            'Drum_Kit_1', 'Drum_Kit_2', 'Drum_Kit_3', 'Drum_Kit_4'
        ]
        self.drum_current = self.drum_folders[self.drum_number]

        #------------------EXEC Window---------------------------------
        self.MainWindow.show()
        sys.exit(app.exec_())