Example #1
 def print(self):
     self.idx_playback.print()
     self.gated_playback.print()
     all_files = []
     if self.settings.random_sounds is not None:
         for file in self.settings.random_sounds:
             fileID = 0
             fp = self.settings.source_path + os.sep + file["filename"]
             af = AudioFile(fp, fileID, 0)
             all_files.append(af)
     print("[bold white]Random Sounds")
     print(AudioFile.file_table(all_files))
     all_files = []
     for k, fid in zip(
         [
             "set_off_sound",
             "rapid_accel_loop",
             "brake_stop_sound",
             "rapid_decel_loop",
         ],
         [0xFB, 0xFC, 0xFE, 0xFD],
     ):
         if self.settings.__dict__[k] is not None:
             for file in self.settings.__dict__[k]:
                 fileID = fid
                 fp = self.settings.source_path + os.sep + file
                 af = AudioFile(fp, fileID, 0)
                 all_files.append(af)
     print("[bold white]Special Files")
     print(AudioFile.file_table(all_files))
Example #2
def shift(sigin, pitch):
    if np.isnan(pitch):
        return sigin
    input_filepath = "./.shift_input.wav"
    output_filepath = "./.shift_output.wav"

    shift_input = AudioFile.gen_default_wav(
        input_filepath,
        overwrite_existing=True,
        mode='w',
        channels=1,
    )
    # Write grain to be shifted to file
    shift_input.write_frames(sigin)
    # Close file
    del shift_input

    cents = 1200. * np.log2(pitch)
    p_shift_args = [
        "sox", input_filepath, output_filepath, "pitch",
        str(cents)
    ]

    p = subprocess.Popen(p_shift_args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    (output, err) = p.communicate()

    with AudioFile(output_filepath, mode='r') as shift_output:
        # Read result
        result = shift_output.read_grain()
    return result
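Note that `pitch` is a frequency ratio: the function converts it to cents with `1200 * log2(pitch)` before handing the grain to the external `sox` binary via temporary WAV files. A minimal, hypothetical driver for this function, assuming `numpy`, `sox` on the PATH, and the `shift()` defined above:

import numpy as np

# Hypothetical usage: build a 100 ms, 440 Hz test grain and shift it up a
# perfect fifth (ratio 1.5). sox must be installed and on the PATH.
fs = 44100
t = np.arange(int(0.1 * fs)) / fs
grain = 0.5 * np.sin(2 * np.pi * 440.0 * t)

shifted = shift(grain, 1.5)             # ratio > 1 shifts the pitch up
unchanged = shift(grain, float("nan"))  # NaN pitch returns the input unchanged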
Example #3
    def __init__(self):

        super(WidgetAudioPlayer, self).__init__()

        # Global Objects
        self.afile = AudioFile()

        # setup
        self.timerrate = 10  # ms
        self.guirate = 3  # xtimerrate

        self.setWindowTitle("MP3-USB Audio Player")

        self.label_title = QLabel("USB CDC MP3 Audio Player")
        self.customize_ui()

        self.wdgAudioStat = WidgetAudioStat()
        self.wdgSelectFile = WidgetSelectFile(self.on_file_selected)
        self.wdgPosition = WidgetPosition()
        self.wdgSerialPort = WidgetSelectSerialPort()
        self.wdgBuffer = WidgetBuffer()
        self.wdgOscLR = WidgetStereoOsc()

        self.wdgChunks = WidgetMP3Frames()

        self.setLayout(self.genlayout())

        self.timer10ms = QtCore.QTimer(self)
        self.timer10ms.timeout.connect(self.timerevent)
        self.timer10ms.setSingleShot(False)
        self.timer10ms.start(10)
Example #4
 def GetKeyFromFile(file):
     musf = AudioFile()
     musf.Open(file)
     buf = musf.ReadAudioStream(100*1024)	# 100KB from audio data
     musf.Close()
     # calculate hashkey
     m = hashlib.md5()
     m.update(buf)
     return m.hexdigest()
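The key is simply the MD5 digest of the first 100 KB of audio data returned by `ReadAudioStream()`. For comparison, a standard-library-only sketch (hypothetical helper name) that hashes the first 100 KB of the raw file instead, which therefore also hashes any container or tag headers rather than skipping them:

import hashlib

def raw_key_from_file(path, nbytes=100 * 1024):
    # Hash the first 100 KB of the raw file; unlike ReadAudioStream(),
    # this includes container/tag headers, so the key changes whenever
    # metadata changes even if the audio payload is identical.
    with open(path, "rb") as f:
        buf = f.read(nbytes)
    m = hashlib.md5()
    m.update(buf)
    return m.hexdigest()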
Example #5
 def print(self):
     print("[bold white]Idle Loop Files")
     print(AudioFile.file_table(self.idle_loops.loops))
     print("[bold white]Accel Loop Files")
     print(AudioFile.file_table(self.accel_loops.loops))
     print("[bold white]Decel Loop Files")
     print(AudioFile.file_table(self.decel_loops.loops))
     if self.startup is not None or self.shutdown is not None:
         print("[bold white]Startup/Shutdown Files")
         print(AudioFile.file_table([self.startup, self.shutdown]))
Example #6
 def GetKeyFromFile(file):
     musf = AudioFile()
     musf.Open(file)
     buf = musf.ReadAudioStream(100*1024)	# 100KB from audio data
     musf.Close()
     # buffer will be empty for streaming audio
     if not buf:
         return
     # calculate hashkey
     m = hashlib.md5()
     m.update(buf)
     return m.hexdigest()
Example #7
    def fetch_from_brick(self, brick):
        self.levels = brick.config.settings.notchCount
        self.level_bounds = [0] * (self.levels - 1)
        for level in range(self.levels - 1):
            self.level_bounds[level] = brick.config.settings.notchBounds[level]

        # find files assigned to loops
        self.startup = None
        self.shutdown = None
        self.idle_loops = None
        self.accel_loops = None
        self.decel_loops = None
        idle_loops = {}
        accel_loops = {}
        decel_loops = {}
        for f in brick.filedir.files:
            idx = (f.attributes & 0x1C) >> 2
            role = f.attributes & 0xF0
            if f.attributes == 0x5C:
                self.startup = AudioFile(f.name,
                                         fileid=0xEF,
                                         attr=0x5C,
                                         virtual=True)
            elif f.attributes == 0x7C:
                self.shutdown = AudioFile(f.name,
                                          fileid=0xF7,
                                          attr=0x7C,
                                          virtual=True)
            elif role == 0x20 or role == 0x30:
                idle_loops[idx] = f.name
            elif role == 0x40 or role == 0x50:
                accel_loops[idx] = f.name
            elif role == 0x60 or role == 0x70:
                decel_loops[idx] = f.name
        if len(idle_loops) > 0:
            group = [idle_loops[x] for x in range(len(idle_loops))]
            self.idle_loops = IdleLoops("", group, virtual=True)
        if len(accel_loops) > 0:
            group = [accel_loops[x] for x in range(len(accel_loops))]
            self.accel_loops = AccelLoops("", group, virtual=True)
        if len(decel_loops) > 0:
            group = [decel_loops[x] for x in range(len(decel_loops))]
            self.decel_loops = DecelLoops("", group, virtual=True)

        # find any configured startup actions
        action = find_startup_action(brick, EVT_SOUND_PLAY_IDX_MOTOR)
        if action is not None:
            self.motor_speed = "current" if action.soundParam1 & 0x04 else "target"
            self.skip_startup = bool(action.soundParam2 & 0x08)
            self.motor_ch = action.soundParam1 & 0x03
            if not action.soundParam2 & 0x04:
                self.startup = None
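The attribute byte is decoded into a loop slot index (bits 2-4) and a role (high nibble); 0x5C and 0x7C are exact matches for the startup and shutdown sounds. A short worked decode with an assumed attribute value:

# Worked decode of the bit fields above for an assumed attribute byte.
attr = 0x24                   # hypothetical file attribute
idx = (attr & 0x1C) >> 2      # bits 2-4 -> loop slot index 1
role = attr & 0xF0            # high nibble -> 0x20, i.e. an idle loop
print(idx, hex(role))         # 1 0x20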
Example #8
def main():
    af1 = AudioFile("~/Desktop/GMDLoop1.wav", 0xE0)
    afg1 = AudioFileGui(title="Gate Loop 1",
                        fileid=0xE0,
                        filename="abc123.wav",
                        highlighted=True)
    afg1.set_audiofile(af1)

    afg2 = AudioFileGui(title="Gate Loop 2", fileid=0xD1, filename="")
    layout = [[afg1.get_layout(), afg2.get_layout()]]

    window = sg.Window("Widget Test", layout, finalize=True)
    afiles = [afg1, afg2]
    for f in afiles:
        f.set_graph_el(window)
    x = 0
    while True:  # Event Loop
        event, values = window.read()
        if event == sg.WIN_CLOSED or event == "Exit":
            break
        afg1.update(progress=x)
        afg2.update(progress=x)
        x += 0.1
        for f in afiles:
            f.process_event(event, values)

    window.close()
Example #9
def shift(sigin, pitch):
    if np.isnan(pitch):
        return sigin
    input_filepath = "./.shift_input.wav"
    output_filepath = "./.shift_output.wav"

    shift_input = AudioFile.gen_default_wav(
        input_filepath,
        overwrite_existing=True,
        mode='w',
        channels=1,
    )
    # Write grain to be shifted to file
    shift_input.write_frames(sigin)
    # Close file
    del shift_input

    cents = 1200. * np.log2(pitch)
    p_shift_args = ["sox", input_filepath, output_filepath, "pitch", str(cents)]

    p = subprocess.Popen(p_shift_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, err) = p.communicate()

    with AudioFile(output_filepath, mode='r') as shift_output:
        # Read result
        result = shift_output.read_grain()
    return result
Example #10
    def __init__(self, path, files, fileid_base, attr_base=None, **kwargs):
        self.loops = []
        prefix = None
        virtual = False
        if "prefix" in kwargs:
            prefix = kwargs["prefix"]
        if "virtual" in kwargs:
            virtual = kwargs["virtual"]

        for idx, file in enumerate(files):
            fileid = fileid_base + idx
            if attr_base is not None:
                attr = attr_base + idx * 4
            else:
                attr = 0
            if prefix is not None:
                new_name = prefix + file
            else:
                new_name = None
            if virtual:
                fn = file
            else:
                fn = path + os.sep + file
            loop = AudioFile(fn,
                             fileid=fileid,
                             attr=attr,
                             new_name=new_name,
                             virtual=virtual)
            self.loops.append(loop)
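Each loop in the group gets a sequential file ID (`fileid_base + idx`) and, when `attr_base` is given, an attribute spaced by 4 (`attr_base + idx * 4`). A quick illustration with assumed bases and hypothetical filenames:

# Assumed bases: 0xE0/0x20 match the 0xE0-0xE7 ID range used elsewhere
# in these examples for idle loops.
files = ["idle1.wav", "idle2.wav", "idle3.wav"]   # hypothetical filenames
fileid_base, attr_base = 0xE0, 0x20

for idx, name in enumerate(files):
    fileid = fileid_base + idx     # 0xE0, 0xE1, 0xE2
    attr = attr_base + idx * 4     # 0x20, 0x24, 0x28
    print(name, hex(fileid), hex(attr))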
Example #11
	def load_audio(self, filename):
		"""Load wav audio data from filename into class

		Reads audio file into the raw_data attribute. 

		The raw_data persists and remains unmutated for the lifecycle
		of the class.

		The raw_data attribute contains an AudioFile() object, so we are
		able to easily access attributes such as sampling rate, bit depth, etc.
		"""
		logger.info("Loading audio file.")
		audio = AudioFile()
		audio.load(filename)
		self.raw_data = audio
		self.N = self.raw_data.num_samples()
		self.fs = self.raw_data.fs
		self.num_channels = self.raw_data.num_channels()
Example #12
 def process_event(self, event, values, profile=None):
     if event == self.ekey:
         self.disabled = not self.disabled
         if self.disabled and self.audiofile is not None:
             self.audiofile.stop()
         self.update()
     elif event == self.pkey:
         if self.audiofile is not None and not self.disabled:
             self.audiofile.playpause()
     elif event == self.fkey:
         fs = FileOps()
         if fs.verify_dir_not_file(values[self.fkey]):
             return
         self.audiofile = AudioFile(values[self.fkey], self.fileid,
                                    self.attr)
         self.filename = self.audiofile.name
         self.disabled = False
         if profile is not None:
             profile.update_audiofile_with_id(self.audiofile, self.fileid)
         self.update()
Example #13
    def get_audio_entries(self, force=False, thread=None):
        if self.AUDIOS and not force:
            return

        new_audios = []
        for file in os.listdir(self.MUSIC_PATH):
            if file.lower().endswith(".mp3"):
                audio_file = AudioFile(os.path.join(self.MUSIC_PATH, file))
                new_audios.append(Audio(audio_file))
        self.AUDIOS = new_audios

        if thread:
            thread.join()
Example #14
 def set_with_dict(self, d):
     for k, v in d.items():
         if k == "startup":
             fn = d["source"] + os.sep + v
             self.startup = AudioFile(fn, fileid=0xEF, attr=0x5C)
         elif k == "shutdown":
             fn = d["source"] + os.sep + v
             self.shutdown = AudioFile(fn, fileid=0xF7, attr=0x7C)
         elif k == "notch_levels":
             self.levels = v
         elif k == "notch_bounds":
             new_bounds = [0] * 8
             for i, e in enumerate(v):
                 new_bounds[i] = e
             self.level_bounds = new_bounds
         elif k == "notch_loops":
             self.idle_loops = IdleLoops(d["source"], v)
         elif k == "accel_loops":
             self.accel_loops = AccelLoops(d["source"], v)
         elif k == "decel_loops":
             self.decel_loops = DecelLoops(d["source"], v)
         elif k == "skip_startup":
             self.skip_startup = v
         elif k == "motor_channel":
             if str(v).lower() in ["a", "0"]:
                 self.motor_ch = 0
             elif str(v).lower() in ["b", "1"]:
                 self.motor_ch = 1
         elif k == "motor_speed":
             if v.lower() not in ["target", "current"]:
                 raise ValueError(
                     "motor_speed key must either be 'target' or 'current' not %s"
                     % v.lower())
             else:
                 self.motor_speed = v.lower()
     if not "notch_bounds" in d:
         new_bounds = IndexedPlayback.bounds_from_notchcount(self.levels)
         self.level_bounds = new_bounds
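A hypothetical settings dictionary exercising a few of the keys handled above; `source` must point at the directory holding the referenced files, and `obj` stands in for an instance of this class:

# Hypothetical input; keys and value formats follow the branches above.
settings = {
    "source": "./sounds",
    "startup": "startup.wav",       # becomes AudioFile(..., fileid=0xEF, attr=0x5C)
    "notch_levels": 4,
    "notch_bounds": [40, 90, 160],  # padded out to 8 entries
    "motor_channel": "a",           # mapped to channel 0
    "motor_speed": "target",
}
obj.set_with_dict(settings)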
Example #15
 def GetKeyFromFile(file):
     musf = AudioFile()
     musf.Open(file)
     ext = file[file.rfind('.'):].lower()
     if ext == '.ogg':
         buf = musf.ReadAudioStream(160 * 1024,
                                    11)  # 160KB excluding header
     elif ext == '.wma':
         buf = musf.ReadAudioStream(160 * 1024,
                                    24)  # 160KB excluding header
     else:
         buf = musf.ReadAudioStream(160 * 1024)  # 160KB from audio data
     musf.Close()
     # calculate hashkey
     m = hashlib.md5()
     m.update(buf)
     return m.hexdigest()
Example #16
 def append(self, path, files, fileid_base=None, attr_base=None, **kwargs):
     prefix = None
     virtual = False
     if fileid_base is not None:
         self.fileid_base = fileid_base
     if "prefix" in kwargs:
         prefix = kwargs["prefix"]
     if "virtual" in kwargs:
         virtual = kwargs["virtual"]
     for idx, file in enumerate(files):
         fileid = self.fileid_base + idx
         vs = file.split()
         if len(vs) > 1:
             vf = vs[0]
             vn = float(vs[1])
         else:
             vf = file
             vn = None
         if attr_base is not None:
             attr = attr_base + idx * 4
         else:
             attr = 0
         if prefix is not None:
             f0 = full_path(path + os.sep + vf)
             p, f1 = split_path(f0)
             name, ext = split_filename(f1)
             new_name = prefix + name + ext
         else:
             new_name = None
         if virtual:
             fn = vf
         else:
             fn = path + os.sep + vf
         loop = AudioFile(
             fn,
             fileid=fileid,
             attr=attr,
             new_name=new_name,
             virtual=virtual,
             norm=vn,
         )
         self.loops.append(loop)
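Each entry in `files` is a filename optionally followed by a normalization level, split on whitespace as shown above. A one-off sketch of that parsing step with a hypothetical entry:

# Hypothetical list entry: "<filename> [normalization]"
entry = "loop1.wav 0.8"
vs = entry.split()
vf, vn = (vs[0], float(vs[1])) if len(vs) > 1 else (entry, None)
print(vf, vn)   # loop1.wav 0.8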
Example #17
 def copy_to_brick(self, brick):
     self.idx_playback.copy_to_brick(brick)
     self.gated_playback.copy_to_brick(brick)
     if self.settings.random_sounds is not None:
         for file in self.settings.random_sounds:
             fileID = brick.filedir.find_available_file_id()
             fp = self.settings.source_path + os.sep + file["filename"]
             af = AudioFile(fp, fileID, 0)
             af.copy_to_brick(brick)
     for k, fid in zip(
         [
             "set_off_sound",
             "rapid_accel_loop",
             "brake_stop_sound",
             "rapid_decel_loop",
         ],
         [0xFB, 0xFC, 0xFE, 0xFD],
     ):
         if self.settings.__dict__[k] is not None:
             for file in self.settings.__dict__[k]:
                 fileID = fid
                 fp = self.settings.source_path + os.sep + file
                 af = AudioFile(fp, fileID, 0)
                 af.copy_to_brick(brick, use_fileid=True)
Example #18
class WidgetAudioPlayer(QWidget):

    updateuicnt = 0

    def __init__(self):

        super(WidgetAudioPlayer, self).__init__()

        # Global Objects
        self.afile = AudioFile()

        # setup
        self.timerrate = 10  # ms
        self.guirate = 3  # xtimerrate

        self.setWindowTitle("MP3-USB Audio Player")

        self.label_title = QLabel("USB CDC MP3 Audio Player")
        self.customize_ui()

        self.wdgAudioStat = WidgetAudioStat()
        self.wdgSelectFile = WidgetSelectFile(self.on_file_selected)
        self.wdgPosition = WidgetPosition()
        self.wdgSerialPort = WidgetSelectSerialPort()
        self.wdgBuffer = WidgetBuffer()
        self.wdgOscLR = WidgetStereoOsc()

        self.wdgChunks = WidgetMP3Frames()

        self.setLayout(self.genlayout())

        self.timer10ms = QtCore.QTimer(self)
        self.timer10ms.timeout.connect(self.timerevent)
        self.timer10ms.setSingleShot(False)
        self.timer10ms.start(10)

    def customize_ui(self):
        self.label_title.setAlignment(Qt.AlignTop | Qt.AlignCenter)
        self.label_title.setFixedHeight(42)
        self.label_title.setFrameShape(QFrame.Box)
        self.label_title.setFrameShadow(QFrame.Raised)
        self.label_title.setFont(QFont('Arial', 14, 200, True))

    def genlayout(self):
        lay = QHBoxLayout()
        lay.addLayout(self.genlayout_main())
        lay.addWidget(self.wdgChunks)
        return lay

    def genlayout_serial_buff(self):
        lay = QHBoxLayout()
        lay.addWidget(self.wdgSerialPort)
        lay.addWidget(self.wdgBuffer)
        return lay

    def genlayout_main(self):

        lay = QVBoxLayout()
        lay.setAlignment(Qt.AlignTop)

        lay.addWidget(self.label_title)
        lay.addWidget(self.wdgAudioStat)
        lay.addWidget(self.wdgSelectFile)
        lay.addWidget(self.wdgPosition)
        lay.addLayout(self.genlayout_serial_buff())
        lay.addWidget(self.wdgOscLR)

        return lay

    def timerevent(self):

        if self.wdgSerialPort.isSerialPortOpen():
            self.ioproc()
        else:
            self.wdgSerialPort.ReopenPort()

    def stat_update(self, inbufval: int, buff: bytes) -> None:
        self.wdgAudioStat.setParam({'bsent': len(buff)})
        self.wdgBuffer.setValue(inbufval)
        self.wdgOscLR.update_osc_data(buff)

    def ioproc(self):

        inbuff = self.wdgSerialPort.ReadSerial()

        if not inbuff:
            return

        bufval = int(inbuff[-1])
        if bufval < 2:
            return

        outbuff = self.afile.read1k()

        self.wdgSerialPort.WriteSerial(outbuff)

        # UI update 1/5 sec
        self.updateuicnt = self.updateuicnt + 1
        if self.updateuicnt < self.guirate:
            return

        self.updateuicnt = 0
        self.stat_update(bufval, outbuff)

    def on_file_selected(self, jcmd: Dict):

        print("File Selected", jcmd)

        filename = jcmd['open-file']

        self.afile.set_file(filename)
        self.wdgChunks.scanChunks(filename)

        self.wdgAudioStat.setParam({'sample-rate': 33000})
Example #19
File: main.py Project: bitgeeky/audiomixer
def recordplay():
    recorder = RecordWave()
    recorder.record_to_file('demo.wav')
    a = AudioFile("demo.wav")
    a.play()
Example #20
    def set_with_dict(self, d):
        fid = 1
        self.other_sounds = None
        for k, v in d.items():
            if k == "source":
                self.source_path = v
            elif k in ["accel", "acceleration"]:
                self.acceleration = v
            elif k in ["decel", "deceleration"]:
                self.deceleration = v
            elif k in ["default_volume", "volume"]:
                self.default_volume = v
            elif k in ["rapid_accel_thr"]:
                self.rapid_accel_thr = v
            elif k in ["rapid_decel_thr"]:
                self.rapid_decel_thr = v
            elif k in ["brake_decel_thr"]:
                self.brake_decel_thr = v
            elif k in ["brake_speed_thr"]:
                self.brake_speed_thr = v

            elif k in fileid_dict:
                fk = fileid_dict[k]
                if k in attr_dict:
                    attr = attr_dict[k]
                else:
                    attr = 0
                vs = v.split()
                if len(vs) > 1:
                    vf = vs[0]
                    vn = float(vs[1])
                else:
                    vf = v
                    vn = None
                fn = d["source"] + os.sep + vf
                self.__dict__[k] = AudioFile(fn, fileid=fk, attr=attr, norm=vn)
            elif k in ["random_sounds", "random", "other_sounds"]:
                dd = []
                for vv in v:
                    dk = {}
                    for kk, vk in vv.items():
                        if vk is None:
                            fn = d["source"] + os.sep + kk
                            dk["audiofile"] = AudioFile(fn, fileid=fid, attr=0)
                        else:
                            dk[kk] = vk
                    dd.append(dk)
                    fid += 1
                if self.other_sounds is None:
                    self.other_sounds = dd
                else:
                    self.other_sounds.extend(dd)
            elif k in ["notch_levels", "notch_count"]:
                self.notch_count = v
            elif k == "notch_bounds":
                new_bounds = [0] * 8
                for i, e in enumerate(v):
                    new_bounds[i] = e
                self.notch_bounds = new_bounds
            elif k == "notch_loops":
                self.idle_loops = IdleLoops(d["source"], v)
            elif k == "accel_loops":
                self.accel_loops = AccelLoops(d["source"], v)
            elif k == "decel_loops":
                self.decel_loops = DecelLoops(d["source"], v)
            elif k in [
                "gated_notch1",
                "gated_notch2",
                "gated_notch3",
                "gated_notch4",
                "gated_loops",
            ]:
                if k in ["gated_notch1", "gated_loops"]:
                    base = 0xD0
                    prefix = "L1"
                elif k == "gated_notch2":
                    base = 0xD4
                    prefix = "L2"
                elif k == "gated_notch3":
                    base = 0xD8
                    prefix = "L3"
                elif k == "gated_notch4":
                    base = 0xDC
                    prefix = "L4"
                if self.gated_loops is not None:
                    self.gated_loops.append(
                        d["source"], v, fileid_base=base, prefix=prefix
                    )
                else:
                    self.gated_loops = GatedLoops(
                        d["source"], v, fileid_base=base, prefix=prefix
                    )
            elif k == "skip_startup":
                self.skip_startup = v
            elif k == "vmin":
                self.vmin = int(v)
            elif k == "vmid":
                self.vmid = int(v)
            elif k == "vmax":
                self.vmax = int(v)
            elif k in ["increase_speed", "decrease_speed", "stop", "change_dir"]:
                self.__dict__[k] = v
            elif k == "motor_channel":
                if str(v).lower() in ["a", "0"]:
                    self.motor_ch = 0
                elif str(v).lower() in ["b", "1"]:
                    self.motor_ch = 1
            elif k == "motor_speed":
                if v.lower() not in ["target", "current"]:
                    raise ValueError(
                        "motor_speed key must either be 'target' or 'current' not %s"
                        % v.lower()
                    )
                else:
                    self.motor_speed = v.lower()
        if not "notch_bounds" in d:
            new_bounds = self.bounds_from_notchcount(self.notch_count)
            self.notch_bounds = new_bounds
Example #21
    def synthesize(self, grain_size=None, overlap=None):
        """
        Synthesize audio from the match data in the output database and write
        the results into the output database's audio directory.
        """
        if not grain_size:
            grain_size = self.config.synthesizer["grain_size"]
        if not overlap:
            overlap = self.config.synthesizer["overlap"]
        jobs = [(i, self.output_db.data["match"][i])
                for i in self.output_db.data["match"]]
        # TODO: insert error here if there are no jobs.
        if not jobs:
            raise RuntimeError(
                "There is no match data to synthesize. The match program may need to be run first."
            )

        for job_ind, (name, job) in enumerate(jobs):
            # Generate output file name/path
            filename, extension = os.path.splitext(name)
            output_name = ''.join((filename, '_output', extension))
            output_path = os.path.join(self.output_db.subdirs["audio"],
                                       output_name)
            # Create audio file to save output to.
            output_config = self.config.output_file
            grain_matches = self.output_db.data["match"][name]
            # Get the grain size and overlap used for analysis.
            match_grain_size = grain_matches.attrs["grain_size"]
            match_overlap = grain_matches.attrs["overlap"]

            _grain_size = grain_size
            with AudioFile(output_path,
                           "w",
                           samplerate=output_config["samplerate"],
                           format=output_config["format"],
                           channels=output_config["channels"]) as output:
                hop_size = (grain_size / overlap) * output.samplerate / 1000
                _grain_size *= int(output.samplerate / 1000)
                output_frames = np.zeros(_grain_size * 2 +
                                         (int(hop_size * len(grain_matches))))
                offset = 0
                for target_grain_ind, matches in enumerate(grain_matches):
                    # If there are multiple matches, choose a match at random
                    # from available matches.
                    match_index = np.random.randint(matches.shape[0])
                    match_db_ind, match_grain_ind = matches[match_index]
                    with self.match_db.analysed_audio[
                            match_db_ind] as match_sample:
                        self.logger.info(
                            "Synthesizing grain:\n"
                            "Source sample: {0}\n"
                            "Source grain index: {1}\n"
                            "Target output: {2}\n"
                            "Target grain index: {3} out of {4}".format(
                                match_sample, match_grain_ind, output_name,
                                target_grain_ind, len(grain_matches)))
                        match_sample.generate_grain_times(match_grain_size,
                                                          match_overlap,
                                                          save_times=True)

                        # TODO: Make proper fix for grain index offset of 1
                        try:
                            match_grain = match_sample[match_grain_ind - 1]
                        except:
                            pdb.set_trace()

                        if self.enforce_intensity_bool:
                            # Get the target sample from the database
                            target_sample = self.target_db[job_ind]

                            # Calculate grain times for sample to allow for
                            # indexing.
                            target_sample.generate_grain_times(
                                match_grain_size,
                                match_overlap,
                                save_times=True)

                            match_grain = self.enforce_intensity(
                                match_grain, match_sample, match_grain_ind,
                                target_sample, target_grain_ind)

                        if self.enforce_f0_bool:
                            # Get the target sample from the database
                            target_sample = self.target_db[job_ind]

                            # Calculate grain times for sample to allow for
                            # indexing.
                            target_sample.generate_grain_times(
                                match_grain_size,
                                match_overlap,
                                save_times=True)

                            match_grain = self.enforce_pitch(
                                match_grain, match_sample, match_grain_ind,
                                target_sample, target_grain_ind)

                        # Apply hanning window to grain
                        match_grain *= np.hanning(match_grain.size)
                        try:
                            output_frames[offset:offset +
                                          match_grain.size] += match_grain
                        except:
                            pass
                    offset += hop_size
                # If output normalization is active, normalize output.
                if self.config.synthesizer["normalize"]:
                    output_frames = (output_frames /
                                     np.max(np.abs(output_frames))) * 0.9
                output.write_frames(output_frames)
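The hop arithmetic above mixes units: `grain_size` comes from the config in milliseconds and `overlap` is an overlap factor, so `grain_size / overlap` is the hop in milliseconds and the `samplerate / 1000` factor converts it to samples, while `_grain_size` is converted to samples separately with a truncated ms-to-samples factor. A worked example with assumed values (100 ms grains, overlap 2, 44.1 kHz output):

# Assumed example values; the real ones come from self.config.synthesizer
# and the output file configuration.
grain_size = 100     # ms
overlap = 2
samplerate = 44100   # Hz

hop_size = (grain_size / overlap) * samplerate / 1000    # 50 ms -> 2205.0 samples
grain_samples = grain_size * int(samplerate / 1000)      # 100 * 44 -> 4400 samples
print(hop_size, grain_samples)                           # 2205.0 4400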
Example #22
 def __init__(self, path):
     AudioFile.__init__(self, path)
             
     self.instance = (self.extension_to_class(self.path))(self.path)
Example #23
        chunk_timestamps.append(time_for_chunk)
        srt_idx += 1

    # get the basename of the file so each transcript.txt file is unique when generated
    audio_fname = os.path.splitext(os.path.basename(gcs_uri))[0]

    # modify transcript to include the timestamp at the start of each chunk of transcription.
    transcript = "\n\n".join([
        f"{x}" + y
        for (x, y) in zip(chunk_timestamps, transcript.split("\n\n"))
    ])  # here we add timestamps to each chunk of speech

    # write the stream of chunk to a txt file once the whole audio is transcribed.
    with open(f"FINAL_TRANSCRIPT_{audio_fname}.txt", "w") as f:
        f.write(transcript)

    print("Transcripted the audio")


if __name__ == "__main__":

    # the same file is needed locally (and in a GCS bucket if it is over 10 MB and longer than 1 minute) to get its duration and transcribe it.
    file_path = r"C:\Users\Dell\Desktop\speech\audiofile.wav"

    audio = AudioFile(file_path)
    audio.stereo_to_mono()
    duration = audio.get_duration()

    # this is the uri of the bucket in GCS
    gcs_uri = "gs://audiofiles_bucket/audiofile.wav"
    transcribe_gcs(gcs_uri, duration)
Example #24
 def print(self):
     for i, group in enumerate(self.grouped_loops):
         if group is not None:
             print("[bold white]Gated Loops Group %d" % (i + 1))
             print(AudioFile.file_table(group.loops))
Example #25
 def play(self):
         self.write("out.wav")
         a = AudioFile("out.wav")
         a.play()
         a.close()
Example #26
class AudioFileGui:
    def __init__(self, **kwargs):
        self.size = (125, 20)
        self.graph_el = None
        self.parent = None
        self.title = ""
        self.fileid = 0xFF
        self.attr = None
        self.filename = ""
        self.disabled = False
        self.highlighted = False
        self.show_progress = False
        self.looped = False
        self.show_attr = False
        self.fileid_color = "#800000"
        self.attr_color = "#000080"
        self.filename_color = "#000000"
        self.progress_color = "#00A0E0"
        self.disabled_color = "#404040"
        self.highlight_color = "#C0FFC0"
        self.fileid_size = (5, 1)
        self.attr_size = (5, 1)
        self.filename_size = (15, 1)
        for k, v in kwargs.items():
            if k in self.__dict__:
                self.__dict__[k] = v
        self.gkey = "0x%02Xbar" % (self.fileid)
        self.pkey = "0x%02Xplay" % (self.fileid)
        self.ekey = "0x%02Xerase" % (self.fileid)
        self.fkey = "0x%02Xfile" % (self.fileid)
        self.bkey = "0x%02Xbrowse" % (self.fileid)
        self.audiofile = None
        if self.attr is None:
            if self.fileid >= 0xE0 and self.fileid <= 0xE7:
                self.attr = 0x20 + (self.fileid & 0x07) * 4
            elif self.fileid >= 0xF0 and self.fileid <= 0xF6:
                self.attr = 0x60 + (self.fileid & 0x07) * 4
            elif self.fileid >= 0xE8 and self.fileid <= 0xEE:
                self.attr = 0x40 + (self.fileid & 0x07) * 4
            else:
                self.attr = 0

    def set_graph_el(self, window):
        self.graph_el = window[self.gkey]

    def set_audiofile(self, audiofile):
        self.audiofile = audiofile
        self.audiofile.looped = self.looped
        self.fileid = audiofile.fileid
        self.attr = audiofile.attr
        self.filename = audiofile.filename

    def is_valid(self):
        if self.fileid == 0xFF:
            return False
        if self.filename == "":
            return False
        if self.disabled:
            return False
        return True

    def clear(self):
        self.attr = 0
        self.filename = ""
        self.audiofile = None

    def bg_color(self, base_color=False):
        if self.fileid in [0xEF, 0xF7, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE]:
            bg = "#80E0C0"
        elif self.fileid in range(0xD0, 0xE0):
            bg = "#E0B0C0"
        else:
            bg = "#FFFFFF"
        if base_color:
            return bg
        if self.audiofile is not None:
            if self.audiofile.playback_active():
                bg = "#60C0FF"
        if self.highlighted:
            bg = "#005030"
        if self.filename == "" or self.fileid == 0xFF or self.disabled:
            bg = self.disabled_color
        return bg

    def get_layout(self):
        layout = [
            [
                sg.Text(
                    "0x%02X" % (self.fileid),
                    size=self.fileid_size,
                    background_color=self.bg_color(base_color=True),
                    text_color=self.fileid_color,
                    border_width=0,
                    font="Any 10",
                    visible=True,
                ),
                sg.Text(
                    "0x%02X" % (self.attr),
                    size=self.attr_size,
                    background_color="#FFFFFF",
                    text_color=self.attr_color,
                    border_width=0,
                    font="Any 10",
                    visible=bool(self.attr > 0 and self.show_attr),
                ),
                sg.Button("✕",
                          font="Any 10",
                          key=self.ekey,
                          enable_events=True),
                sg.Button("▶︎",
                          font="Any 10",
                          key=self.pkey,
                          enable_events=True),
                sg.Input(visible=False, enable_events=True, key=self.fkey),
                sg.FileBrowse("...", font="Any 10"),
            ],
            [sg.Graph(self.size, (0, 0), self.size, key=self.gkey)],
        ]
        return [sg.Frame(self.title, layout)]

    def set_notch_count(self, count):
        if self.fileid >= 0xE0 and self.fileid <= 0xE7:
            idx = self.fileid - 0xE0
        elif self.fileid >= 0xF0 and self.fileid <= 0xF6:
            idx = self.fileid - 0xF0 + 1
        elif self.fileid >= 0xE8 and self.fileid <= 0xEE:
            idx = self.fileid - 0xE8 + 1
        else:
            return
        if idx >= count:
            self.disabled = True
        else:
            self.disabled = False

    def process_event(self, event, values, profile=None):
        if event == self.ekey:
            self.disabled = not self.disabled
            if self.disabled and self.audiofile is not None:
                self.audiofile.stop()
            self.update()
        elif event == self.pkey:
            if self.audiofile is not None and not self.disabled:
                self.audiofile.playpause()
        elif event == self.fkey:
            fs = FileOps()
            if fs.verify_dir_not_file(values[self.fkey]):
                return
            self.audiofile = AudioFile(values[self.fkey], self.fileid,
                                       self.attr)
            self.filename = self.audiofile.name
            self.disabled = False
            if profile is not None:
                profile.update_audiofile_with_id(self.audiofile, self.fileid)
            self.update()

    def copy_to_brick(self, brick):
        if not self.is_valid():
            return
        self.show_progress = True
        if self.audiofile.is_on_brick(brick):
            if self.audiofile.same_crc_as_on_brick(brick):
                self.update(progress=1.0)
                return
        nBytes = os.path.getsize(self.audiofile.exportpath)
        if nBytes > 0:
            msg = [PFX_CMD_FILE_OPEN]
            msg.append(self.fileid)
            msg.append(0x06)  # CREATE | WRITE mode
            msg.extend(uint32_to_bytes(nBytes))
            name = os.path.basename(self.audiofile.exportpath)
            nd = bytes(name, "utf-8")
            for b in nd:
                msg.append(b)
            for i in range(32 - len(nd)):
                msg.append(0)
            res = usb_transaction(brick.dev, msg)
            if not res:
                return
            if fs_error_check(res[1]):
                return
            f = open(self.audiofile.exportpath, "rb")
            nCount = 0
            err = False
            interval = 0
            while (nCount < nBytes) and not err:
                buf = f.read(61)
                nRead = len(buf)
                nCount += nRead
                if nRead > 0:
                    msg = [PFX_CMD_FILE_WRITE]
                    msg.append(self.fileid)
                    msg.append(nRead)
                    for b in buf:
                        msg.append(b)
                    res = usb_transaction(brick.dev, msg)
                    err = fs_error_check(res[1])
                if (interval % 256) == 0 or abs(nBytes - nCount) < 61:
                    sg.one_line_progress_meter(
                        "Copying File to PFx Brick",
                        nCount,
                        nBytes,
                        "Copying %s..." % (self.filename),
                        orientation="horizontal",
                        size=(50, 10),
                    )
                interval += 1
            self.update(progress=1.0)
            f.close()
            msg = [PFX_CMD_FILE_CLOSE]
            msg.append(self.fileid)
            res = usb_transaction(brick.dev, msg)
            fs_error_check(res[1])

    def update(self, progress=None):
        if self.disabled:
            self.show_progress = False
        sp = (progress is not None and not self.filename == ""
              and not self.disabled and not self.highlighted)
        if sp or self.show_progress:
            if progress is None:
                width = 0
            else:
                width = progress * self.size[0]
            self.graph_el.draw_rectangle(
                (1, self.size[1] - 1),
                (width, 1),
                line_color=self.progress_color,
                fill_color=self.progress_color,
            )
        else:
            self.graph_el.draw_rectangle(
                (0, self.size[1]),
                (self.size[0], 0),
                line_color="black",
                fill_color=self.bg_color(),
            )
        tc = self.highlight_color if self.highlighted else self.filename_color
        tc = tc if not self.disabled else self.disabled_color
        self.graph_el.draw_text(self.filename,
                                (self.size[0] / 2, self.size[1] / 2),
                                color=tc,
                                font="Any 12")