def read_sound(fp):
    """
    Create a normalized float array and sample rate from any audio file.
    """
    if fp.endswith('mp3'):
        try:
            oname = 'temp.wav'
            #cmd = 'lame --decode "{0}" {1}'.format( fp ,oname )
            result = subprocess.call(['lame', '--decode', fp, oname])
            assert result == 0
            samplerate, data = wav.read(oname)
        except:
            print "couldn't run lame"
            try:
                import moviepy.editor as mpy
                aud_clip = mpy.AudioFileClip(fp)
                samplerate = aud_clip.fps
                data = aud_clip.to_soundarray()
            except:
                print "moviepy not installed?"
    if fp.endswith('aif'):
        #sf = aifc.open(fp)
        oname = fp
        sf = Sndfile(fp, 'r')
        sf.seek(0)
        data = sf.read_frames(sf.nframes)
        samplerate = sf.samplerate
    if fp.endswith('wav'):
        samplerate, data = wav.read(fp)

    if len(data.shape)>1: data = data[:,0]
    data = data.astype('float64')
    data /= abs(data).max()  # normalize by the absolute peak so values lie in [-1, 1]
    return data, samplerate
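
A minimal usage sketch for read_sound, assuming the module-level imports the snippet relies on (subprocess, scipy.io.wavfile as wav, scikits.audiolab's Sndfile) are in scope; 'speech.wav' is a hypothetical path:

# Hedged usage sketch; 'speech.wav' is a hypothetical file.
data, rate = read_sound('speech.wav')
print("read %d samples at %d Hz" % (len(data), rate))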
Example #2
    def test_bigframes(self):
        """ Try to seek really far."""
        rawname = join(TEST_DATA_DIR, 'test.wav')
        a = Sndfile(rawname, 'r')
        try:
            try:
                a.seek(2 ** 60)
                raise Exception("Seek really succeeded! This should not happen")
            except IOError:
                pass
        finally:
            a.close()
Example #4
def load_soundfile(inwavpath, startpossecs, maxdursecs=None):
    """Loads audio data, optionally limiting to a specified start position and duration.
    The file must be SINGLE-CHANNEL and must match our desired sample rate."""
    framelen = 4096
    hopspls = framelen
    unhopspls = framelen - hopspls
    if (framelen % wavdownsample) != 0:
        raise ValueError("framelen needs to be a multiple of wavdownsample: %i, %i" % (
            framelen, wavdownsample))
    if (hopspls % wavdownsample) != 0:
        raise ValueError("hopspls  needs to be a multiple of wavdownsample: %i, %i" % (
            hopspls, wavdownsample))
    if maxdursecs == None:
        maxdursecs = 9999
    sf = Sndfile(inwavpath, "r")
    splsread = 0
    framesread = 0
    if sf.channels != 1:
        raise ValueError(
            "Sound file %s has multiple channels (%i) - mono required." % (inwavpath, sf.channels))
    timemax_spls = int(maxdursecs * sf.samplerate)
    if sf.samplerate != (srate * wavdownsample):
        raise ValueError(
            "Sample rate mismatch: we expect %g, file has %g" % (srate, sf.samplerate))
    if startpossecs > 0:
        # note: returns IOError if beyond the end
        sf.seek(startpossecs * sf.samplerate)
    audiodata = np.array([], dtype=np.float32)
    while(True):
        try:
            if splsread == 0:
                chunk = sf.read_frames(framelen)[::wavdownsample]
                splsread += framelen
            else:
                chunk = np.hstack(
                    (chunk[:unhopspls], sf.read_frames(hopspls)[::wavdownsample]))
                splsread += hopspls
            framesread += 1
            if framesread % 25000 == 0:
                print("Read %i frames" % framesread)
            if len(chunk) != (framelen / wavdownsample):
                print("Not read sufficient samples - returning")
                break
            chunk = np.array(chunk, dtype=np.float32)
            audiodata = np.hstack((audiodata, chunk))
            if splsread >= timemax_spls:
                break
        except RuntimeError:
            break
    sf.close()
    return audiodata
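
A minimal usage sketch for load_soundfile, assuming the module-level globals wavdownsample and srate are defined as in the original project; the path is hypothetical:

# Hedged usage sketch; 'dawn_chorus.wav' is a hypothetical single-channel file.
audio = load_soundfile('dawn_chorus.wav', startpossecs=0, maxdursecs=10)
print("loaded %d samples (%.1f s at %d Hz)" % (len(audio), len(audio) / float(srate), srate))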
    def test_rw(self):
        """Test read/write pointers for seek."""
        ofilename = join(TEST_DATA_DIR, 'test.wav')
        rfd, fd, cfilename   = open_tmp_file('rwseektest.wav')
        try:
            ref = Sndfile(ofilename, 'r')
            test = Sndfile(fd, 'rw', format=ref.format,
                           channels=ref.channels, samplerate=ref.samplerate)
            n = 1024

            rbuff = ref.read_frames(n, dtype = np.int16)
            test.write_frames(rbuff)
            tbuff = test.read_frames(n, dtype = np.int16)

            assert_array_equal(rbuff, tbuff)

            # Test seeking both read and write pointers
            test.seek(0, 0)
            test.write_frames(rbuff)
            tbuff = test.read_frames(n, dtype = np.int16)
            assert_array_equal(rbuff, tbuff)

            # Test seeking only read pointer
            rbuff1 = rbuff.copy()
            rbuff2 = rbuff1 * 2 + 1
            rbuff2.clip(-30000, 30000)
            test.seek(0, 0, 'r')
            test.write_frames(rbuff2)
            tbuff1 = test.read_frames(n, dtype = np.int16)
            try:
                tbuff2 = test.read_frames(n, dtype = np.int16)
            except IOError as e:
                msg = "write pointer was updated in read seek !"
                msg += "\n(msg is %s)" % e
                raise AssertionError(msg)

            assert_array_equal(rbuff1, tbuff1)
            assert_array_equal(rbuff2, tbuff2)
            if np.all(rbuff2 == tbuff1):
                raise AssertionError("write pointer was updated"\
                        " in read seek !")

            # Test seeking only write pointer
            rbuff3 = rbuff1 * 2 - 1
            rbuff3.clip(-30000, 30000)
            test.seek(0, 0, 'rw')
            test.seek(n, 0, 'w')
            test.write_frames(rbuff3)
            tbuff1 = test.read_frames(n, np.int16)
            try:
                assert_array_equal(tbuff1, rbuff1)
            except AssertionError:
                raise AssertionError("read pointer was updated in write seek !")

            try:
                tbuff3 = test.read_frames(n, np.int16)
            except IOError as e:
                msg = "read pointer was updated in write seek !"
                msg += "\n(msg is %s)" % e
                raise AssertionError(msg)

        finally:
            close_tmp_file(rfd, cfilename)
    def test_float_frames(self):
        """ Check nframes can be a float"""
        rfd, fd, cfilename   = open_tmp_file('pysndfiletest.wav')
        try:
            # Open the file for writing
            format = Format('wav', 'pcm16')
            a = Sndfile(fd, 'rw', format, channels=1, samplerate=22050)
            tmp = np.random.random_integers(-100, 100, 1000)
            tmp = tmp.astype(np.short)
            a.write_frames(tmp)
            a.seek(0)
            a.sync()
            ctmp = a.read_frames(1e2, dtype=np.short)
            a.close()

        finally:
            close_tmp_file(rfd, cfilename)
Example #7
    def test_float_frames(self):
        """ Check nframes can be a float"""
        rfd, fd, cfilename = open_tmp_file('pysndfiletest.wav')
        try:
            # Open the file for writing
            format = Format('wav', 'pcm16')
            a = Sndfile(fd, 'rw', format, channels=1, samplerate=22050)
            tmp = np.random.random_integers(-100, 100, 1000)
            tmp = tmp.astype(np.short)
            a.write_frames(tmp)
            a.seek(0)
            a.sync()
            ctmp = a.read_frames(1e2, dtype=np.short)
            a.close()

        finally:
            close_tmp_file(rfd, cfilename)
    def file_to_features(self, wavpath):
        "Reads through a mono WAV file, converting each frame to the required features. Returns a 2D array."
        if verbose:
            self.count = self.count + 1
            print("Reading %s :" % wavpath)
        # print self.count
        if not os.path.isfile(wavpath):
            raise ValueError("path %s not found" % wavpath)
        sf = Sndfile(wavpath, "r")
        #if (sf.channels != 1) and verbose: print(" Sound file has multiple channels (%i) - channels will be mixed to mono." % sf.channels)
        if sf.samplerate != fs:
            raise ValueError("wanted sample rate %g - got %g." %
                             (fs, sf.samplerate))
        window = np.hamming(framelen / 2)  #check here
        features = []
        while (True):
            try:
                chunk = sf.read_frames(
                    framelen / 2, dtype=np.float32
                )  #read each window sized value from the audio
                sf.seek(-framelen / 4,
                        1)  #take the current pointer backward for overlap
                if len(chunk) != framelen / 2:
                    print("Not read sufficient samples - returning")
                    break
                if sf.channels != 1:
                    chunk = np.mean(chunk, 1)  # mixdown
                framespectrum = np.fft.fft(
                    window * chunk, framelen / 2
                )  # first the window type is implemented then the padding is done here
                magspec = abs(framespectrum[:framelen / 2])

                # do the frequency warping and MFCC computation
                melSpectrum = self.mfccMaker.warpSpectrum(magspec)
                melCepstrum = self.mfccMaker.getMFCCs(melSpectrum, cn=True)
                melCepstrum = melCepstrum[1:]  # exclude zeroth coefficient
                melCepstrum = melCepstrum[:13]  # limit to lower MFCCs

                framefeatures = melCepstrum

                features.append(framefeatures)
            except RuntimeError:
                break
        sf.close()
        return np.array(features)
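
A minimal usage sketch for file_to_features; 'extractor' stands in for an instance of the enclosing class (which supplies mfccMaker), and the module globals verbose, fs and framelen are assumed to be defined:

# Hedged usage sketch; 'extractor' and 'clip_mono.wav' are hypothetical.
feats = extractor.file_to_features('clip_mono.wav')
print("feature matrix shape: %s" % (feats.shape,))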
    def test_simple(self):
        ofilename = join(TEST_DATA_DIR, 'test.wav')
        # Open the test file for reading
        a = Sndfile(ofilename, 'r')
        nframes = a.nframes

        buffsize = 1024
        buffsize = min(nframes, buffsize)

        # First, read some frames, go back, and compare buffers
        buff = a.read_frames(buffsize)
        a.seek(0)
        buff2 = a.read_frames(buffsize)
        assert_array_equal(buff, buff2)

        a.close()

        # Now, read some frames, go back, and compare buffers
        # (check whence == 1 == SEEK_CUR)
        a = Sndfile(ofilename, 'r')
        a.read_frames(buffsize)
        buff = a.read_frames(buffsize)
        a.seek(-buffsize, 1)
        buff2 = a.read_frames(buffsize)
        assert_array_equal(buff, buff2)

        a.close()

        # Now, read some frames, go back, and compare buffers
        # (check whence == 2 == SEEK_END)
        a = Sndfile(ofilename, 'r')
        buff = a.read_frames(nframes)
        a.seek(-buffsize, 2)
        buff2 = a.read_frames(buffsize)
        assert_array_equal(buff[-buffsize:], buff2)
Example #10
    def test_simple(self):
        ofilename = join(TEST_DATA_DIR, 'test.wav')
        # Open the test file for reading
        a = Sndfile(ofilename, 'r')
        nframes = a.nframes

        buffsize = 1024
        buffsize = min(nframes, buffsize)

        # First, read some frames, go back, and compare buffers
        buff = a.read_frames(buffsize)
        a.seek(0)
        buff2 = a.read_frames(buffsize)
        assert_array_equal(buff, buff2)

        a.close()

        # Now, read some frames, go back, and compare buffers
        # (check whence == 1 == SEEK_CUR)
        a = Sndfile(ofilename, 'r')
        a.read_frames(buffsize)
        buff = a.read_frames(buffsize)
        a.seek(-buffsize, 1)
        buff2 = a.read_frames(buffsize)
        assert_array_equal(buff, buff2)

        a.close()

        # Now, read some frames, go back, and compare buffers
        # (check whence == 2 == SEEK_END)
        a = Sndfile(ofilename, 'r')
        buff = a.read_frames(nframes)
        a.seek(-buffsize, 2)
        buff2 = a.read_frames(buffsize)
        assert_array_equal(buff[-buffsize:], buff2)
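
The test above exercises the three whence modes of Sndfile.seek; a compact sketch of that convention, assuming a readable 'test.wav' with at least a few hundred frames:

# Hedged sketch of the Sndfile.seek whence values.
snd = Sndfile('test.wav', 'r')
snd.seek(100, 0)     # whence=0: absolute position (frame 100)
snd.seek(-50, 1)     # whence=1: relative to the current position (frame 50)
snd.seek(-100, 2)    # whence=2: relative to the end of the file
snd.close()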
Example #11
class Track(object):
    """Represents a wrapped .wav file."""
    def __init__(self, fn, name="No name", labels=None, labels_in_file=False):
        """Create a Track object

        :param str. fn: Path to audio file (wav preferred, mp3 ok)
        :param str. name: Name of track
        """
        self.filename = fn
        self.name = name

        (base, extension) = os.path.splitext(self.filename)
        if extension == ".mp3":
            try:
                print "Creating wav from {}".format(self.filename)
                new_fn = base + '.wav'
                subprocess.check_output("lame --decode \"{}\" \"{}\"".format(
                    self.filename, new_fn),
                                        shell=True)
                self.filename = new_fn
            except:
                print "Could not create wav from mp3"
                raise

        self.sound = Sndfile(self.filename, 'r')
        self.current_frame = 0
        self.channels = self.sound.channels

        if labels is not None and labels_in_file:
            raise Exception(
                "Must only define one of labels and labels_in_file")
        if labels_in_file and not LIBXMP:
            raise Exception(
                "Cannot use labels_in_file without python-xmp-toolkit")
        if labels_in_file and LIBXMP:
            self.labels = self._extract_labels(fn)
        else:
            self.labels = labels

    def read_frames(self, n, channels=None):
        """Read ``n`` frames from the track, starting
        with the current frame

        :param integer n: Number of frames to read
        :param integer channels: Number of channels to return (default
            is number of channels in track)
        :returns: Next ``n`` frames from the track, starting with ``current_frame``
        :rtype: numpy array
        """
        if channels is None:
            channels = self.channels

        if channels == 1:
            out = np.zeros(n)
        elif channels == 2:
            out = np.zeros((n, 2))
        else:
            print "Input needs to be 1 or 2 channels"
            return
        if n > self.remaining_frames():
            print "Trying to retrieve too many frames!"
            print "Asked for", n
            n = self.remaining_frames()
            print "Returning", n

        if self.channels == 1 and channels == 1:
            out = self.sound.read_frames(n)
        elif self.channels == 1 and channels == 2:
            frames = self.sound.read_frames(n)
            out = np.vstack((frames.copy(), frames.copy())).T
        elif self.channels == 2 and channels == 1:
            frames = self.sound.read_frames(n)
            out = np.mean(frames, axis=1)
        elif self.channels == 2 and channels == 2:
            out[:n, :] = self.sound.read_frames(n)

        self.current_frame += n

        return out

    @property
    def current_frame(self):
        """Get and set the current frame of the track"""
        return self._current_frame

    @current_frame.setter
    def current_frame(self, n):
        """Sets current frame to ``n``

        :param integer n: Frame to set to ``current_frame``
        """
        self.sound.seek(n)
        self._current_frame = n

    def reset(self):
        """Sets current frame to 0
        """
        self.current_frame = 0

    def all_as_mono(self):
        """Get the entire track as 1 combined channel

        :returns: Track frames as 1 combined track
        :rtype: 1d numpy array
        """
        return self.range_as_mono(0, self.duration)

    def range_as_mono(self, start_sample, end_sample):
        """Get a range of frames as 1 combined channel

        :param integer start_sample: First frame in range
        :param integer end_sample: Last frame in range (exclusive)
        :returns: Track frames in range as 1 combined channel
        :rtype: 1d numpy array of length ``end_sample - start_sample``
        """
        tmp_current = self.current_frame
        self.current_frame = start_sample
        tmp_frames = self.read_frames(end_sample - start_sample)
        if self.channels == 2:
            frames = np.mean(tmp_frames, axis=1)
        elif self.channels == 1:
            frames = tmp_frames
        else:
            raise IOError("Input audio must have either 1 or 2 channels")
        self.current_frame = tmp_current
        return frames

    @property
    def samplerate(self):
        """Get the sample rate of the track"""
        return self.sound.samplerate

    def remaining_frames(self):
        """Get the number of frames remaining in the track"""
        return self.sound.nframes - self.current_frame

    @property
    def duration(self):
        """Get the duration of total frames in the track"""
        return self.sound.nframes

    @property
    def duration_in_seconds(self):
        """Get the duration of the track in seconds"""
        "Should not set track length"
        return self.duration / float(self.samplerate)

    def loudest_time(self, start=0, duration=0):
        """Find the loudest time in the window given by start and duration
        Returns frame number in context of entire track, not just the window.

        :param integer start: Start frame
        :param integer duration: Number of frames to consider from start
        :returns: Frame number of loudest frame
        :rtype: integer
        """
        if duration == 0:
            duration = self.sound.nframes
        self.current_frame = start
        arr = self.read_frames(duration)
        # get the frame of the maximum amplitude
        # different names for the same thing...
        # max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]
        max_amp_sample = int(np.floor(arr.argmax() / 2)) + start
        return max_amp_sample

    def refine_cut(self, cut_point, window_size=1):
        return cut_point

    def zero_crossing_before(self, n):
        """Find nearest zero crossing in waveform before frame ``n``"""
        n_in_samples = int(n * self.samplerate)

        search_start = n_in_samples - self.samplerate
        if search_start < 0:
            search_start = 0

        frame = zero_crossing_last(
            self.range_as_mono(search_start, n_in_samples)) + search_start

        return frame / float(self.samplerate)

    def zero_crossing_after(self, n):
        """Find nearest zero crossing in waveform after frame ``n``"""
        n_in_samples = int(n * self.samplerate)
        search_end = n_in_samples + self.samplerate
        if search_end > self.duration:
            search_end = self.duration

        frame = zero_crossing_first(
            self.range_as_mono(n_in_samples, search_end)) + n_in_samples

        return frame / float(self.samplerate)

    @property
    def labels(self):
        return self._labels

    @labels.setter
    def labels(self, labels):
        if labels is None:
            self._labels = None
        else:
            self._labels = sorted(labels, key=lambda x: x.time)

    def label(self, t):
        """Get the label of the song at a given time in seconds
        """
        if self.labels is None:
            return None
        prev_label = None
        for l in self.labels:
            if l.time > t:
                break
            prev_label = l
        if prev_label is None:
            return None
        return prev_label.name

    def _extract_labels(self, filename):
        if not LIBXMP:
            return None

        xmp = libxmp.utils.file_to_dict(filename)
        meta = libxmp.XMPMeta()
        ns = libxmp.consts.XMP_NS_DM
        p = meta.get_prefix_for_namespace(ns)

        #track_re = re.compile("^" + p + r"Tracks\[(\d+)\]$")
        #n_tracks = 0
        cp_track = None
        new_xmp = {}
        for prop in xmp[ns]:
            new_xmp[prop[0]] = prop[1:]

        # find the cuepoint markers track
        name_re = re.compile("^" + p + r"Tracks\[(\d+)\]/" + p + "trackName$")
        for prop, val in new_xmp.iteritems():
            match = name_re.match(prop)
            if match:
                if val[0] == "CuePoint Markers":
                    cp_track = match.group(1)

        # get all the markers from it
        cp_path = re.compile(r"^%sTracks\[%s\]/%smarkers\[(\d+)\]$" %
                             (p, cp_track, p))
        markers = []
        #sr = float(new_xmp["%sTracks[%s]/%sframeRate" %
        #           (p, cp_track, p)][0].replace('f', ''))
        sr = float(self.samplerate)

        for prop, val in new_xmp.iteritems():
            match = cp_path.match(prop)
            if match:
                markers.append(
                    Label(new_xmp[prop + '/' + p + 'name'][0],
                          float(new_xmp[prop + '/' + p + 'startTime'][0]) /
                          sr))

        if len(markers) == 0:
            return None
        return markers
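
A minimal usage sketch for the Track class above; 'song.wav' is a hypothetical file and numpy/scikits.audiolab are assumed importable:

# Hedged usage sketch; 'song.wav' is a hypothetical file of at least five seconds.
track = Track('song.wav', name="Demo")
print("%s: %.1f s, %d channels" % (track.name, track.duration_in_seconds, track.channels))
intro = track.range_as_mono(0, track.samplerate * 5)  # first five seconds, mono
track.reset()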
Example #12
#frame step between each fft
fourier_step = np.multiply(np.reciprocal(fouriers_per_second), f.samplerate)

#print "samplerate:",f.samplerate
#print "step:",fourier_step
#print "#:", np.true_divide(f.nframes,fourier_step)
#print "window_size:",fourier_window
#print "n frames:",f.nframes
#
one_channel = 1 if f.channels == 1 else 0

i = 0
spectrum_data = []
for window in range(int(np.floor_divide(f.nframes, fourier_step))):
    f.seek(i)
    if one_channel:
        chunk = f.read_frames(fourier_window)

    else:

        #average the channels
        chunk = []
        for frame in f.read_frames(fourier_window):
            avg = np.true_divide(np.sum(frame), f.channels)  # mean across channels
            chunk.append(avg)
    
    # FFT the samples in this window
    spectrum_data.append(fft(chunk))
    i += fourier_step
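
A possible follow-up step, not part of the original script: stack the per-window FFTs into a magnitude array suitable for plotting (fourier_window and numpy as np are taken from the surrounding context):

# Hedged follow-up: keep only the magnitudes of the positive-frequency half of each
# window, giving a (num_windows, fourier_window/2) array.
spectrogram = np.abs(np.array(spectrum_data))[:, :fourier_window // 2]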
Example #13
    def test_rw(self):
        """Test read/write pointers for seek."""
        ofilename = join(TEST_DATA_DIR, 'test.wav')
        rfd, fd, cfilename = open_tmp_file('rwseektest.wav')
        try:
            ref = Sndfile(ofilename, 'r')
            test = Sndfile(fd,
                           'rw',
                           format=ref.format,
                           channels=ref.channels,
                           samplerate=ref.samplerate)
            n = 1024

            rbuff = ref.read_frames(n, dtype=np.int16)
            test.write_frames(rbuff)
            tbuff = test.read_frames(n, dtype=np.int16)

            assert_array_equal(rbuff, tbuff)

            # Test seeking both read and write pointers
            test.seek(0, 0)
            test.write_frames(rbuff)
            tbuff = test.read_frames(n, dtype=np.int16)
            assert_array_equal(rbuff, tbuff)

            # Test seeking only read pointer
            rbuff1 = rbuff.copy()
            rbuff2 = rbuff1 * 2 + 1
            rbuff2.clip(-30000, 30000)
            test.seek(0, 0, 'r')
            test.write_frames(rbuff2)
            tbuff1 = test.read_frames(n, dtype=np.int16)
            try:
                tbuff2 = test.read_frames(n, dtype=np.int16)
            except IOError as e:
                msg = "write pointer was updated in read seek !"
                msg += "\n(msg is %s)" % e
                raise AssertionError(msg)

            assert_array_equal(rbuff1, tbuff1)
            assert_array_equal(rbuff2, tbuff2)
            if np.all(rbuff2 == tbuff1):
                raise AssertionError("write pointer was updated"\
                        " in read seek !")

            # Test seeking only write pointer
            rbuff3 = rbuff1 * 2 - 1
            rbuff3.clip(-30000, 30000)
            test.seek(0, 0, 'rw')
            test.seek(n, 0, 'w')
            test.write_frames(rbuff3)
            tbuff1 = test.read_frames(n, np.int16)
            try:
                assert_array_equal(tbuff1, rbuff1)
            except AssertionError:
                raise AssertionError(
                    "read pointer was updated in write seek !")

            try:
                tbuff3 = test.read_frames(n, np.int16)
            except IOError as e:
                msg = "read pointer was updated in write seek !"
                msg += "\n(msg is %s)" % e
                raise AssertionError(msg)

        finally:
            close_tmp_file(rfd, cfilename)
    def execute(self, index_daw, index_ref, q_action=None):
        """
        Execute (wavefile first_wave_file, wavefile second_wave_file, directory d, QAction qa)
        The heart of interstitial - performs a null test on two wav files and returns the first difference
        """

        # initialize useful variables

        values = ''
        file_count = 0

        test_done_for_files = []
        targeted_done = []

        # Ensures That We Have Legitimate Directories To Walk Down
        # And Populates The List Of Files To Test

        if not path.isdir(path.abspath(self.getDawDirsCore(index_daw).getCoreDawText())) or not path.isdir(path.abspath(self.getRefDirsCore(index_ref).getCoreRefText())):
            print self.Interstitial.messages['illegalPaths']
            return

        self.daw_directories = []
        self.ref_directories = []

        self.daw_directories = self.populate(self.getDawDirsCore(index_daw).getCoreDawText())
        print str(len(self.daw_directories)) + self.Interstitial.messages['WAV_found'] + path.abspath(self.getDawDirsCore(index_daw).getCoreDawText())

        self.ref_directories = self.populate(self.getRefDirsCore(index_ref).getCoreRefText())
        print str(len(self.ref_directories)) + self.Interstitial.messages['WAV_found'] + path.abspath(self.getRefDirsCore(index_ref).getCoreRefText())

        try:
            q_action.processEvents()
        except:
            pass


        self.unmatched_flag = False
        for index in xrange(len(self.daw_directories)):
            self.all_daw_files.append(self.daw_directories[index])

        for index in xrange(len(self.ref_directories)):
            self.all_ref_files.append(self.ref_directories[index])

        # Process Each File In The Tester Array
        for index in xrange(len(self.daw_directories)):
            found = False
            if self.daw_directories[index] in self.scanned_daw_files:
                continue


            for e in xrange(len(self.ref_directories)):
                if self.ref_directories[e] in self.scanned_ref_files:
                    continue

                try:
                    q_action.processEvents()
                except:
                    pass

                # If We Haven't Already Processed This File, Process It

                if self.ref_directories[e] not in targeted_done:

                    # find the offset and align the waveforms
                    toff = self.offs(self.daw_directories[index], self.ref_directories[e])

                    try:
                        tester_file_obj = Sndfile(self.daw_directories[index], 'r')
                    except:
                        print('Corrupted File : ' + self.daw_directories[index])
                        return

                    try:
                        target_file_obj = Sndfile(self.ref_directories[e], 'r')
                    except:
                        print('Corrupted File : ' + self.ref_directories[e])
                        return

                    if toff > 0:
                        tester_file_obj.seek(toff)
                    else:
                        target_file_obj.seek(fabs(toff))

                    # Read The First 1000 Samples Of Each File
                    # If The Two Chunks Are Identical, We Have A Match And Can Begin Processing
                    numpy_matrix_of_track1 = self.mono(tester_file_obj.read_frames(1000))
                    numpy_matrix_of_track2 = self.mono(target_file_obj.read_frames(1000))

                    if np.array_equal(numpy_matrix_of_track1, numpy_matrix_of_track2):
                        print('')
                        print "MATCH: " + self.daw_directories[index] + " matches " + self.ref_directories[e]

                        try:
                            q_action.processEvents()
                        except:
                            pass

                        # mark files as done
                        test_done_for_files.append(self.daw_directories[index])
                        targeted_done.append(self.ref_directories[e])

                        # we can't read the entire file into RAM at once
                        # so instead we're breaking it into one-second parts
                        l = min((tester_file_obj.nframes - toff), (target_file_obj.nframes - toff)) / tester_file_obj.samplerate
                        for n in xrange(0, l, 1):
                            errs = 0
                            try:
                                # drop all but the first channel
                                track_one_response = self.mono(tester_file_obj.read_frames(tester_file_obj.samplerate))
                                track_two_response = self.mono(target_file_obj.read_frames(target_file_obj.samplerate))

                                # are these arrays equivalent? if not, there's an error
                                if not np.array_equal(track_one_response, track_two_response):
                                    file_count += 1
                                    # where's the error?
                                    # we find it by comparing sample by sample across this second of audio
                                    for m in xrange(len(track_one_response)):
                                        if not np.array_equal(track_one_response[m], track_two_response[m]):

                                            # We found it! print a message and we're done with these files
                                            errs = (n * tester_file_obj.samplerate) + m + 1000
                                            print self.Interstitial.messages['errorFoundBw'] +self.daw_directories[index] + " and " + self.ref_directories[e] + " at sample " + str(errs)

                                            try:
                                                q_action.processEvents()
                                            except:
                                                pass

                                            break

                                if errs != 0:
                                    break

                            except RuntimeError:
                                break

                        # Append Metadata For Output
                        values += path.abspath(self.daw_directories[index]) + "," + path.abspath(self.ref_directories[e]) + ","
                        values += datetime.datetime.fromtimestamp(stat(self.daw_directories[index]).st_ctime).strftime("%Y-%m-%d %H:%M:%S") + ","
                        values += str(stat(self.daw_directories[index]).st_size) + "," + str(tester_file_obj.channels) + "," + str(tester_file_obj.samplerate) + ","
                        values += str(datetime.timedelta(seconds=int(tester_file_obj.nframes / tester_file_obj.samplerate))) + "," + str(errs) + ","
                        values += str(datetime.timedelta(seconds=int(errs/tester_file_obj.samplerate)))

                        values += "\n"
                        found = True
                        unmatched_flag = False

                        self.scanned_daw_files.append(self.daw_directories[index])
                        self.scanned_ref_files.append(self.ref_directories[e])

                    else:
                        unmatched_flag = True
                        pass

                    if found:
                        break
             # if found:
            #     break




        # Create Header Information For Manifest
        manifest_info = {'testers': self.daw_directories, 'file_count': file_count, 'values': values}
        return {'manifest_info': manifest_info}
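
The method above leans on a mono() helper that is not shown in this excerpt; a minimal sketch consistent with the "drop all but the first channel" comment might be the following (an assumption about the real implementation; on the class it would also take self):

# Hedged sketch of the mono() helper assumed above: keep only the first channel of a
# frame array returned by Sndfile.read_frames (1-D input is passed through unchanged).
def mono(frames):
    if frames.ndim == 1:
        return frames
    return frames[:, 0]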
Example #15
class Track(object):
    """Represents a wrapped .wav file."""

    def __init__(self, fn, name="No name", labels=None, labels_in_file=False):
        """Create a Track object

        :param str. fn: Path to audio file (wav preferred, mp3 ok)
        :param str. name: Name of track
        """
        self.filename = fn
        self.name = name

        (base, extension) = os.path.splitext(self.filename)
        if extension == ".mp3":
            try:
                print "Creating wav from {}".format(self.filename)
                new_fn = base + '.wav'
                subprocess.check_output("lame --decode \"{}\" \"{}\"".format(
                    self.filename, new_fn), shell=True)
                self.filename = new_fn
            except:
                print "Could not create wav from mp3"
                raise

        self.sound = Sndfile(self.filename, 'r')
        self.current_frame = 0
        self.channels = self.sound.channels

        if labels is not None and labels_in_file:
            raise Exception(
                "Must only define one of labels and labels_in_file")
        if labels_in_file and not LIBXMP:
            raise Exception(
                "Cannot use labels_in_file without python-xmp-toolkit")
        if labels_in_file and LIBXMP:
            self.labels = self._extract_labels(fn)
        else:
            self.labels = labels

    def read_frames(self, n, channels=None):
        """Read ``n`` frames from the track, starting
        with the current frame

        :param integer n: Number of frames to read
        :param integer channels: Number of channels to return (default
            is number of channels in track)
        :returns: Next ``n`` frames from the track, starting with ``current_frame``
        :rtype: numpy array
        """
        if channels is None:
            channels = self.channels

        if channels == 1:
            out = np.zeros(n)
        elif channels == 2:
            out = np.zeros((n, 2))
        else:
            print "Input needs to be 1 or 2 channels"
            return
        if n > self.remaining_frames():
            print "Trying to retrieve too many frames!"
            print "Asked for", n
            n = self.remaining_frames()
            print "Returning", n

        if self.channels == 1 and channels == 1:
            out = self.sound.read_frames(n)
        elif self.channels == 1 and channels == 2:
            frames = self.sound.read_frames(n)
            out = np.vstack((frames.copy(), frames.copy())).T
        elif self.channels == 2 and channels == 1:
            frames = self.sound.read_frames(n)
            out = np.mean(frames, axis=1)
        elif self.channels == 2 and channels == 2:
            out[:n, :] = self.sound.read_frames(n)

        self.current_frame += n

        return out

    @property
    def current_frame(self):
        """Get and set the current frame of the track"""
        return self._current_frame

    @current_frame.setter
    def current_frame(self, n):
        """Sets current frame to ``n``

        :param integer n: Frame to set to ``current_frame``
        """
        self.sound.seek(n)
        self._current_frame = n

    def reset(self):
        """Sets current frame to 0
        """
        self.current_frame = 0

    def all_as_mono(self):
        """Get the entire track as 1 combined channel

        :returns: Track frames as 1 combined track
        :rtype: 1d numpy array
        """
        return self.range_as_mono(0, self.duration)

    def range_as_mono(self, start_sample, end_sample):
        """Get a range of frames as 1 combined channel

        :param integer start_sample: First frame in range
        :param integer end_sample: Last frame in range (exclusive)
        :returns: Track frames in range as 1 combined channel
        :rtype: 1d numpy array of length ``end_sample - start_sample``
        """
        tmp_current = self.current_frame
        self.current_frame = start_sample
        tmp_frames = self.read_frames(end_sample - start_sample)
        if self.channels == 2:
            frames = np.mean(tmp_frames, axis=1)
        elif self.channels == 1:
            frames = tmp_frames
        else:
            raise IOError("Input audio must have either 1 or 2 channels")
        self.current_frame = tmp_current
        return frames

    @property
    def samplerate(self):
        """Get the sample rate of the track"""
        return self.sound.samplerate

    def remaining_frames(self):
        """Get the number of frames remaining in the track"""
        return self.sound.nframes - self.current_frame

    @property
    def duration(self):
        """Get the duration of total frames in the track"""
        return self.sound.nframes

    @property
    def duration_in_seconds(self):
        """Get the duration of the track in seconds"""
        "Should not set track length"
        return self.duration / float(self.samplerate)

    def loudest_time(self, start=0, duration=0):
        """Find the loudest time in the window given by start and duration
        Returns frame number in context of entire track, not just the window.

        :param integer start: Start frame
        :param integer duration: Number of frames to consider from start
        :returns: Frame number of loudest frame
        :rtype: integer
        """
        if duration == 0:
            duration = self.sound.nframes
        self.current_frame = start
        arr = self.read_frames(duration)
        # get the frame of the maximum amplitude
        # different names for the same thing...
        # max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]
        max_amp_sample = int(np.floor(arr.argmax()/2)) + start
        return max_amp_sample

    def refine_cut(self, cut_point, window_size=1):
        return cut_point

    def zero_crossing_before(self, n):
        """Find nearest zero crossing in waveform before frame ``n``"""
        n_in_samples = int(n * self.samplerate)

        search_start = n_in_samples - self.samplerate
        if search_start < 0:
            search_start = 0

        frame = zero_crossing_last(
            self.range_as_mono(search_start, n_in_samples)) + search_start

        return frame / float(self.samplerate)

    def zero_crossing_after(self, n):
        """Find nearest zero crossing in waveform after frame ``n``"""
        n_in_samples = int(n * self.samplerate)
        search_end = n_in_samples + self.samplerate
        if search_end > self.duration:
            search_end = self.duration

        frame = zero_crossing_first(
            self.range_as_mono(n_in_samples, search_end)) + n_in_samples

        return frame / float(self.samplerate)

    @property
    def labels(self):
        return self._labels

    @labels.setter
    def labels(self, labels):
        if labels is None:
            self._labels = None
        else:
            self._labels = sorted(labels, key=lambda x: x.time)

    def label(self, t):
        """Get the label of the song at a given time in seconds
        """
        if self.labels is None:
            return None
        prev_label = None
        for l in self.labels:
            if l.time > t:
                break
            prev_label = l
        if prev_label is None:
            return None
        return prev_label.name

    def _extract_labels(self, filename):
        if not LIBXMP:
            return None

        xmp = libxmp.utils.file_to_dict(filename)
        meta = libxmp.XMPMeta()
        ns = libxmp.consts.XMP_NS_DM
        p = meta.get_prefix_for_namespace(ns)

        #track_re = re.compile("^" + p + r"Tracks\[(\d+)\]$")
        #n_tracks = 0
        cp_track = None
        new_xmp = {}
        for prop in xmp[ns]:
            new_xmp[prop[0]] = prop[1:]

        # find the cuepoint markers track
        name_re = re.compile("^" + p + r"Tracks\[(\d+)\]/" + p + "trackName$")
        for prop, val in new_xmp.iteritems():
            match = name_re.match(prop)
            if match:
                if val[0] == "CuePoint Markers":
                    cp_track = match.group(1)

        # get all the markers from it
        cp_path = re.compile(r"^%sTracks\[%s\]/%smarkers\[(\d+)\]$" %
                             (p, cp_track, p))
        markers = []
        sr = float(new_xmp["%sTracks[%s]/%sframeRate" %
                   (p, cp_track, p)][0].replace('f', ''))

        for prop, val in new_xmp.iteritems():
            match = cp_path.match(prop)
            if match:
                markers.append(Label(
                    new_xmp[prop + '/' + p + 'name'][0],
                    float(new_xmp[prop + '/' + p + 'startTime'][0]) / sr))

        if len(markers) == 0:
            return None
        return markers
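
The Track class above relies on a Label type with name and time attributes that is not defined in this excerpt; a minimal stand-in could be (an assumption):

# Hedged stand-in for the Label objects used by Track.labels and _extract_labels;
# only the 'name' and 'time' attributes are relied on above.
import collections
Label = collections.namedtuple('Label', ['name', 'time'])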
from math import floor, log

from scikits.audiolab import Sndfile
import numpy as np
from matplotlib import pyplot as plt

soundfile = Sndfile("test.wav")


samplerate = soundfile.samplerate
start_sec = 0
stop_sec = 5
start_frame = start_sec * soundfile.samplerate
stop_frame = stop_sec * soundfile.samplerate

soundfile.seek(start_frame)

delta_frames = stop_frame - start_frame
sample = soundfile.read_frames(delta_frames)

map = "CMRmap"

fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(111)

NFFT = 128
noverlap = 65

pxx, freq, t, cax = ax.specgram(sample, Fs=soundfile.samplerate, NFFT=NFFT, noverlap=noverlap, cmap=plt.get_cmap(map))
plt.colorbar(cax)
plt.xlabel("Time [sec]")
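
To actually render the figure, one would typically finish with something like the following (not part of the original snippet):

plt.ylabel("Frequency [Hz]")
plt.show()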
Example #17
def execute(a, b, d, qa):
    # initialize useful variables
    filename = "manifest_" + strftime("%Y%m%d%H%M%S") + ".csv"
    testdone = []
    targdone = []
    table = "Test File,Reference File,Creation Date,Size,Channels,Sample Rate,Length,First Error Sample,Error At"
    meta = "Interstitial Error Report\n"
    timer = time()
    initiated = strftime("%H:%M:%S")
    count = 0

    # ensures that we have legitimate directories to walk down
    # and populates the list of files to test
    if not path.isdir(path.abspath(a)) or not path.isdir(path.abspath(b)):
        print "Illegal paths given - exiting..."
        return
    testers = populate(a)
    print str(len(testers)) + " WAV files found: " + path.abspath(a)
    targets = populate(b)
    print str(len(targets)) + " WAV files found: " + path.abspath(b)
    qa.processEvents()

    # process each file in the tester array
    for t in xrange(len(testers)):
        found = False
        for e in xrange(len(targets)):
            qa.processEvents()
            # if we haven't already processed this file, process it
            if str(targets[e]) not in targdone:
                # find the offset and align the waveforms
                toff = offs(testers[t], targets[e])
                tX = Sndfile(testers[t], 'r')
                tY = Sndfile(targets[e], 'r')
                if toff > 0:
                    tX.seek(toff)
                else:
                    tY.seek(fabs(toff))
                # read the first 1000 samples of each file
                # if the two chunks are identical, we have a match and can begin processing
                t1 = mono(tX.read_frames(1000))
                t2 = mono(tY.read_frames(1000))
                if np.array_equal(t1, t2):
                    print "MATCH: " + str(testers[t]) + " matches " + str(
                        targets[e])
                    qa.processEvents()

                    # mark files as done
                    testdone.append(str(testers[t]))
                    targdone.append(str(targets[e]))

                    # we can't read the entire file into RAM at once
                    # so instead we're breaking it into one-second parts
                    l = min((tX.nframes - toff),
                            (tY.nframes - toff)) / tX.samplerate
                    for n in xrange(0, l, 1):
                        errs = 0
                        try:
                            # drop all but the first channel
                            a1 = mono(tX.read_frames(tX.samplerate))
                            a2 = mono(tY.read_frames(tY.samplerate))

                            # are these arrays equivalent? if not, there's an error
                            if not np.array_equal(a1, a2):
                                count += 1
                                # where's the error?
                                # we find it by comparing sample by sample across this second of audio
                                for m in xrange(len(a1)):
                                    if not np.array_equal(a1[m], a2[m]):
                                        # we found it! print a message and we're done with these files
                                        errs = (n * tX.samplerate) + m + 1000
                                        print "ERROR: Interstitial error found between " + str(
                                            testers[t]) + " and " + str(
                                                targets[e]
                                            ) + " at sample " + str(errs)
                                        qa.processEvents()
                                        break
                            if errs != 0:
                                break
                        except RuntimeError:
                            break
                    # append metadata for output
                    table += "\n" + path.abspath(
                        testers[t]) + "," + path.abspath(str(targets[e])) + ","
                    table += datetime.datetime.fromtimestamp(
                        stat(testers[t]).st_ctime).strftime(
                            "%Y-%m-%d %H:%M:%S") + ","
                    table += str(stat(testers[t]).st_size) + "," + str(
                        tX.channels) + "," + str(tX.samplerate) + ","
                    table += str(
                        datetime.timedelta(seconds=int(
                            tX.nframes /
                            tX.samplerate))) + "," + str(errs) + ","
                    table += str(
                        datetime.timedelta(seconds=int(errs / tX.samplerate)))
                    found = True
                if found:
                    break

    # create header information for manifest
    meta += "Date," + strftime("%Y-%m-%d") + "\n"
    meta += "Time initiated," + initiated + "\n"
    meta += "Duration (seconds)," + str(floor(time() - timer)) + "\n"
    meta += "Files analyzed," + str(len(testers)) + "\n"
    meta += "Bad files found," + str(count) + "\n"

    # do we have metadata? if so, write a manifest
    if len(table) > 110:
        try:
            f = open(d + "/" + filename, 'w')
            f.write(meta + table)
            print "Wrote manifest to " + path.abspath(f.name)
            f.close()
        except:
            print "Illegal path: " + d + filename
Example #18
class Track(object):
    """Represents a wrapped .wav file."""

    def __init__(self, fn, name="No name"):
        """Create a Track object

        :param str. fn: Path to wav file
        :param str. name: Name of track
        """
        self.filename = fn
        self.name = name

        self.sound = Sndfile(self.filename, 'r')
        self.current_frame = 0
        self.channels = self.sound.channels


    def read_frames(self, n, channels=None):
        """Read ``n`` frames from the track, starting
        with the current frame

        :param integer n: Number of frames to read
        :param integer channels: Number of channels to return (default
            is number of channels in track)
        :returns: Next ``n`` frames from the track, starting with ``current_frame``
        :rtype: numpy array
        """
        if channels is None:
            channels = self.channels

        if channels == 1:
            out = N.zeros(n)
        elif channels == 2:
            out = N.zeros((n, 2))
        else:
            print "Input needs to be 1 or 2 channels"
            return
        if n > self.remaining_frames():
            print "Trying to retrieve too many frames!"
            print "Asked for", n
            n = self.remaining_frames()

        if self.channels == 1 and channels == 1:
            out = self.sound.read_frames(n)
        elif self.channels == 1 and channels == 2:
            print "here!"
            frames = self.sound.read_frames(n)
            out = N.vstack((frames.copy(), frames.copy())).T
        elif self.channels == 2 and channels == 1:
            frames = self.sound.read_frames(n)
            out = N.mean(frames, axis=1)
        elif self.channels == 2 and channels == 2:
            out[:n, :] = self.sound.read_frames(n)

        self.current_frame += n

        return out

    @property
    def current_frame(self):
        """Get and set the current frame of the track"""
        return self._current_frame

    @current_frame.setter
    def current_frame(self, n):
        """Sets current frame to ``n``

        :param integer n: Frame to set to ``current_frame``
        """
        self.sound.seek(n)
        self._current_frame = n 

    def reset(self):
        """Sets current frame to 0
        """
        self.current_frame = 0

    def all_as_mono(self):
        """Get the entire track as 1 combined channel

        :returns: Track frames as 1 combined track
        :rtype: 1d numpy array
        """
        return self.range_as_mono(0, self.duration)

    def range_as_mono(self, start_sample, end_sample):
        """Get a range of frames as 1 combined channel

        :param integer start_sample: First frame in range
        :param integer end_sample: Last frame in range (exclusive)
        :returns: Track frames in range as 1 combined channel
        :rtype: 1d numpy array of length ``end_sample - start_sample``
        """
        tmp_current = self.current_frame
        self.current_frame = start_sample
        tmp_frames = self.read_frames(end_sample - start_sample)
        if self.channels == 2:
            frames = N.mean(tmp_frames, axis=1)
        elif self.channels == 1:
            frames = tmp_frames
        else:
            raise IOError("Input audio must have either 1 or 2 channels")
        self.current_frame = tmp_current
        return frames

    @property
    def samplerate(self):
        """Get the sample rate of the track"""
        return self.sound.samplerate

    def remaining_frames(self):
        """Get the number of frames remaining in the track"""
        return self.sound.nframes - self.current_frame
        
    @property
    def duration(self):
        """Get the duration of total frames in the track"""
        return self.sound.nframes
    
    @property
    def duration_in_seconds(self):
        """Get the duration of the track in seconds"""
        return self.duration / float(self.samplerate)
        
    def loudest_time(self, start=0, duration=0):
        """Find the loudest time in the window given by start and duration
        Returns frame number in context of entire track, not just the window.

        :param integer start: Start frame
        :param integer duration: Number of frames to consider from start
        :returns: Frame number of loudest frame
        :rtype: integer
        """
        if duration == 0:
            duration = self.sound.nframes
        self.current_frame = start
        arr = self.read_frames(duration)
        # get the frame of the maximum amplitude
        # different names for the same thing...
        # max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]
        max_amp_sample = int(N.floor(arr.argmax()/2)) + start
        return max_amp_sample
    
    def refine_cut(self, cut_point, window_size=1):
        return cut_point
        
    def zero_crossing_before(self, n):
        """Find nearest zero crossing in waveform before frame ``n``"""
        n_in_samples = int(n * self.samplerate)

        search_start = n_in_samples - self.samplerate
        if search_start < 0:
            search_start = 0

        frame = zero_crossing_last(
            self.range_as_mono(search_start, n_in_samples)) + search_start

        return frame / float(self.samplerate)

    def zero_crossing_after(self, n):
        """Find nearest zero crossing in waveform after frame ``n``"""
        n_in_samples = int(n * self.samplerate)
        search_end = n_in_samples + self.samplerate
        if search_end > self.duration:
            search_end = self.duration

        frame = zero_crossing_first(
            self.range_as_mono(n_in_samples, search_end)) + n_in_samples

        return frame / float(self.samplerate)
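
A minimal usage sketch for this Track variant; 'loop.wav' is a hypothetical file, and the zero_crossing_* helpers the class relies on are assumed to be defined elsewhere:

# Hedged usage sketch; 'loop.wav' is a hypothetical file.
t = Track('loop.wav')
peak_frame = t.loudest_time(0, t.samplerate * 10)  # loudest frame in the first 10 s
print("peak amplitude near frame %d (%.2f s)" % (peak_frame, peak_frame / float(t.samplerate)))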
Example #19
import os
from math import floor, log

from scikits.audiolab import Sndfile
import numpy as np
from matplotlib import pyplot as plt

soundfile = Sndfile("test.wav")

samplerate = soundfile.samplerate
start_sec = 0
stop_sec = 5
start_frame = start_sec * soundfile.samplerate
stop_frame = stop_sec * soundfile.samplerate

soundfile.seek(start_frame)

delta_frames = stop_frame - start_frame
sample = soundfile.read_frames(delta_frames)

# maps = [m for m in plt.cm.datad if not m.endswith("_r")]
map = 'CMRmap'

fig = plt.figure(figsize=(10, 6), )
ax = fig.add_subplot(111)

NFFT = 128
noverlap = 65

pxx, freq, t, cax = ax.specgram(sample,
                                Fs=soundfile.samplerate,
                                NFFT=NFFT,
                                noverlap=noverlap,
                                cmap=plt.get_cmap(map))

def execute(a, b, d, qa):
	# initialize useful variables
	filename = "manifest_" + strftime("%Y%m%d%H%M%S") + ".csv"
	testdone = []
	targdone = []
	table = "Test File,Reference File,Creation Date,Size,Channels,Sample Rate,Length,First Error Sample,Error At"
	meta = "Interstitial Error Report\n"
	timer = time()
	initiated = strftime("%H:%M:%S")
	count = 0

	# ensures that we have legitimate directories to walk down
	# and populates the list of files to test
	if not path.isdir(path.abspath(a)) or not path.isdir(path.abspath(b)):
		print "Illegal paths given - exiting..."
		return
	testers = populate(a)
	print str(len(testers)) + " WAV files found: " + path.abspath(a)
	targets = populate(b)
	print str(len(targets)) + " WAV files found: " + path.abspath(b)
	qa.processEvents()
	
	# process each file in the tester array
	for t in xrange(len(testers)):
		found = False
		for e in xrange(len(targets)):
			qa.processEvents()
			# if we haven't already processed this file, process it
			if str(targets[e]) not in targdone:
				# find the offset and align the waveforms
				toff = offs(testers[t], targets[e])
				tX = Sndfile(testers[t], 'r')
				tY = Sndfile(targets[e], 'r')
				if toff > 0:
					tX.seek(toff)
				else:
					tY.seek(fabs(toff))
				# read the first 1000 samples of each file
				# if the two chunks are identical, we have a match and can begin processing
				t1 = mono(tX.read_frames(1000))
				t2 = mono(tY.read_frames(1000))
				if np.array_equal(t1, t2):
					print "MATCH: " + str(testers[t]) + " matches " + str(targets[e])
					qa.processEvents()
					
					# mark files as done
					testdone.append(str(testers[t]))
					targdone.append(str(targets[e]))
					
					# we can't read the entire file into RAM at once
					# so instead we're breaking it into one-second parts
					l = min((tX.nframes - toff), (tY.nframes - toff)) / tX.samplerate
					for n in xrange(0, l, 1):
						errs = 0
						try:
							# drop all but the first channel
							a1 = mono(tX.read_frames(tX.samplerate))
							a2 = mono(tY.read_frames(tY.samplerate))
							
							# are these arrays equivalent? if not, there's an error
							if not np.array_equal(a1, a2):
								count += 1
								# where's the error?
								# we find it by comparing sample by sample across this second of audio
								for m in xrange(len(a1)):
									if not np.array_equal(a1[m], a2[m]):
										# we found it! print a message and we're done with these files
										errs = (n * tX.samplerate) + m + 1000
										print "ERROR: Interstitial error found between " + str(testers[t]) + " and " + str(targets[e]) + " at sample " + str(errs)
										qa.processEvents()
										break
							if errs != 0:
								break
						except RuntimeError:
							break
					# append metadata for output
					table += "\n" + path.abspath(testers[t]) + "," + path.abspath(str(targets[e])) + ","
					table += datetime.datetime.fromtimestamp(stat(testers[t]).st_ctime).strftime("%Y-%m-%d %H:%M:%S") + ","
					table += str(stat(testers[t]).st_size) + "," + str(tX.channels) + "," + str(tX.samplerate) + ","
					table += str(datetime.timedelta(seconds=int(tX.nframes / tX.samplerate))) + "," + str(errs) + ","
					table += str(datetime.timedelta(seconds=int(errs/tX.samplerate)))
					found = True
				if found:
					break

	# create header information for manifest
	meta += "Date," + strftime("%Y-%m-%d") + "\n"
	meta += "Time initiated," + initiated + "\n"
	meta += "Duration (seconds)," + str(floor(time() - timer)) + "\n"
	meta += "Files analyzed," + str(len(testers)) + "\n"
	meta += "Bad files found," + str(count) + "\n"

	# do we have metadata? if so, write a manifest
	if len(table) > 110:
		try:
			f = open(d + "/" + filename, 'w')
			f.write(meta + table)
			print "Wrote manifest to " + path.abspath(f.name)
			f.close()
		except:
			print "Illegal path: " + d + filename