Example #1
    def passthrough(self,
                    input_file: Union[BytesIO, str],
                    output_file: str = None,
                    suffix: str = "") -> Union[BytesIO, str]:
        if isinstance(input_file, BytesIO):
            with NamedTemporaryFile() as file:
                file.write(input_file.read())
                file.seek(0)
                audio, *_ = AudioLoader(filename=file.name)()
        else:
            audio, *_ = AudioLoader(filename=input_file)()

        audio_obj = AudioSequence(audio)

        for part in self.parts:
            audio_obj = part.handle(audio_obj)

        if output_file is not None:
            audio_obj.save(output_file)
            return output_file

        with NamedTemporaryFile(suffix=suffix) as file:
            audio_obj.save(file.name)
            file.seek(0)
            return BytesIO(file.read())
Example #2
    def passthrough(self, input_file: Union[BytesIO, str],
                    output_file: str = None) -> Optional[BytesIO]:
        if isinstance(input_file, BytesIO):
            with NamedTemporaryFile() as file:
                file.write(input_file.read())
                file.seek(0)
                audio, *_ = AudioLoader(filename=file.name)()
        else:
            audio, *_ = AudioLoader(filename=input_file)()

        for part in self.parts:
            audio = part.handle(audio)

        if output_file is not None:
            AudioWriter(filename=output_file)(audio)
            return
        else:
            with NamedTemporaryFile() as file:
                AudioWriter(filename=file.name)(audio)
                file.seek(0)
                return BytesIO(file.read())
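Both snippets above spill the in-memory bytes to a temporary file before loading, because Essentia's AudioLoader only reads from a filename. A minimal standalone sketch of that pattern, assuming the buffer holds WAV data (the function name and the ".wav" suffix are illustrative, not part of the original):

from io import BytesIO
from tempfile import NamedTemporaryFile
from essentia.standard import AudioLoader

def load_from_bytes(buf: BytesIO):
    # AudioLoader only reads from disk, so flush the buffer to a temp file first
    with NamedTemporaryFile(suffix=".wav") as tmp:  # suffix assumed to match the data
        tmp.write(buf.read())
        tmp.flush()
        audio, sample_rate, channels, *_ = AudioLoader(filename=tmp.name)()
    return audio, sample_rate, channels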
Example #3
def get(inputDir, descExt):
    exception = {}
    output = {}

    for path, dname, fnames in os.walk(inputDir):
        # dname: directory names in the current directory,
        # fnames: file names in the current directory.
        for fname in fnames:
            if descExt in fname.lower():
                new_pid = os.fork()
                if new_pid == 0:  # new_pid == 0 means we are in the forked child process.
                    try:  # try..except..finally ensures the child _always_ exits
                        file_name = path + "/" + fname
                        [Sound, Fs, nChannels, md5, bit_rate,
                         codec] = AudioLoader(filename=file_name)()  # load the file
                        Sound = Sound[:, 0]  # the loader always returns two channels

                        print(file_name)

                        impulse, FsImpulse = LoadImpulse.get_impulse()
                        impulse = impulse.astype('float32',
                                                 casting='same_kind')

                        if Fs != FsImpulse:
                            Rs = Resample(inputSampleRate=FsImpulse,
                                          outputSampleRate=Fs)
                            impulse = Rs(impulse)

                        final = np.convolve(Sound, impulse)

                        if descExt == '.aif': descExt = '.aiff'

                        mw = MonoWriter(filename=path + '/R1_' + fname,
                                        sampleRate=Fs,
                                        format=descExt.split('.')[1])
                        mw(final)
                        print('Done!')

                    except Exception:
                        # record the failure so it can be checked afterwards
                        # (note: this dict is modified in the child, so the parent's copy is unchanged)
                        exception[fname] = ['oops']
                    finally:
                        os._exit(0)  # terminate the fork and return control to the parent process
                else:
                    child = new_pid
                # wait for the child: avoids parallel processes, at the cost of throughput
                os.waitpid(child, 0)

    return exception
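The get() function above isolates each file in its own fork and waits for it right away; a minimal sketch of just that fork/waitpid pattern (run_in_child is an illustrative name, not from the original):

import os

def run_in_child(task, *task_args):
    pid = os.fork()
    if pid == 0:
        try:  # child: run the work, then always _exit so we never fall back
            task(*task_args)  # into the parent's control flow
        finally:
            os._exit(0)
    os.waitpid(pid, 0)  # parent: wait, so only one child runs at a time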
Example #4
def oopeval(filename):
    return False  # NB: short-circuits here, so the interactive check below never runs
    audio, SR, channels, _, br, _ = AudioLoader(filename=filename)()
    if len(audio.shape) == 1:
        return False
    cov = np.cov(audio[:, 0], audio[:, 1])[0][1]
    stdL = np.std(audio[:, 0])
    stdR = np.std(audio[:, 1])
    if (stdR * stdL == 0):
        print("Pearson Correlation = 0")
    else:
        print("Pearson Correlation = ", str(np.clip(cov / (stdR * stdL), -1,
                                                    1)))
    inp = input("Does this file have OUT OF PHASE problems? [y/n]")
    return u.str2bool(inp)
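The covariance/standard-deviation arithmetic above is simply the Pearson correlation between the two channels; a quick sanity check on synthetic data (np.corrcoef is used here only to illustrate the equivalence):

import numpy as np

rng = np.random.default_rng(0)
left = rng.standard_normal(1000).astype("float32")
right = -left  # perfectly out of phase
# same quantity the snippet computes by hand: cov(L, R) / (std(L) * std(R))
print(np.corrcoef(left, right)[0, 1])  # ≈ -1.0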
Example #5
def main(path, frameSize=1024, hopSize=512):
    mu, sigma = 0, 1  # mean and standard deviation
    whiteNoise = np.random.normal(mu, sigma, 1024)
    noiseCorrelation = autocorr(whiteNoise, mode="centered")

    audio, _, channels, _, _, _ = AudioLoader(filename=path)()
    audio = np.sum(audio, axis=1) / channels
    frames = [
        audio[n * hopSize:min(len(audio), n * hopSize + frameSize)]
        for n in range(int(len(audio) / hopSize + 1))
    ]
    bestFrame = np.argmax([np.sqrt(sum(frame**2)) for frame in frames])
    audio = frames[bestFrame]
    signalCorrelation = autocorr(audio, mode="centered")

    fig, ax = plt.subplots(2, 1, figsize=(16, 9))
    ax[0].stem(noiseCorrelation / max(abs(noiseCorrelation)))
    ax[1].stem(signalCorrelation / max(abs(signalCorrelation)))
    plt.savefig("autocorrelation_comparison.png")
    plt.show()
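autocorr() is a helper that is not shown in this example; assuming "centered" means mean-removed autocorrelation over the non-negative lags, a minimal sketch could look like this:

import numpy as np

def autocorr(x, mode="centered"):
    # remove the mean, correlate the signal with itself, keep lags >= 0
    x = np.asarray(x, dtype=float)
    if mode == "centered":
        x = x - x.mean()
    full = np.correlate(x, x, mode="full")
    return full[full.size // 2:]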
Example #6
    def testEmpty(self):
        filename = join(testdata.audio_dir, 'generated', 'empty', 'empty.aiff')
        audio, _, _, _, _, _ = AudioLoader(filename=filename)()
        left, right = StereoDemuxer()(audio)
        self.assertEqualVector(left, [])
        self.assertEqualVector(right, [])
Example #7
def main(args, options):

    stereoEstimation = True

    # Median filtering in spectrogram
    HPS = False

    displayEvolution = options.displayEvolution
    if displayEvolution:
        import matplotlib.pyplot as plt
        import imageMatlab

        ## plt.rc('text', usetex=True)
        plt.rc('image', cmap='jet')  ## gray_r
        plt.ion()

    # Compulsory option: name of the input file:
    inputAudioFile = ''
    if len(args) >= 2:
        inputAudioFile = args[0]
        options.pitch_output_file = args[1]
    if len(args) == 1:
        inputAudioFile = args[0]
    if len(args) == 0:
        inputAudioFile = options.input_file

    if inputAudioFile[-4:] != ".wav":
        raise ValueError(
            "Input is not a WAV file. Only the WAV format is supported, for now...")

    #print "Writing the different following output files:"
    if options.vit_pitch_output_file is not None:
        print("    estimated pitches in", options.vit_pitch_output_file)
    if options.sal_output_file is not None:
        print("    salience file in ", options.sal_output_file)

    if options.pitch_output_file is None:
        options.pitch_output_file = inputAudioFile[:-4] + '_pitches.txt'

    try:
        from essentia.standard import AudioLoader
        loaded = AudioLoader(filename=inputAudioFile)()
        audio = loaded[0]
        Fs = loaded[1]
        nchan = loaded[2]
        if nchan == 1:
            data = audio[:, 0].transpose()
        else:
            data = audio.transpose()

        data = np.double(data) / (1.2 * abs(data).max())
    except:
        # Using scipy to import wav
        import scipy.io.wavfile as wav
        Fs, data = wav.read(inputAudioFile)
        # data = np.double(data) /  32768.0 # makes data vary from -1 to 1
        scaleData = 1.2 * data.max()  # to rescale the data.
        data = np.double(data) / scaleData  # makes data vary from -1 to 1
    options.Fs = Fs
    is_stereo = True
    if data.shape[0] == data.size:  # data is mono (one-dimensional)
        #print "The audio file is not stereo."
        #print "The audio file is not stereo. Making stereo out of mono."
        #print "(You could also try the older separateLead.py...)"
        is_stereo = False
        # data = np.vstack([data,data]).T
        # raise ValueError("number of dimensions of the input not 2")
    if is_stereo and data.shape[1] != 2:
        print "The data is multichannel, but not stereo... \n"
        print "Unfortunately this program does not scale well. Data is \n"
        print "reduced to its 2 first channels.\n"
        data = data[:, 0:2]

    # Processing the options:
    windowSizeInSamples = nextpow2(np.round(options.windowSize * Fs))

    hopsize = np.round(options.hopsize * Fs)
    #if hopsize != windowSizeInSamples/8:
    #    #print "Overriding given hopsize to use 1/8th of window size"
    #    #hopsize = windowSizeInSamples/8
    #    warnings.warn("Chosen hopsize: "+str(hopsize)+\
    #                  ", while windowsize: "+str(windowSizeInSamples))

    options.hopsizeInSamples = hopsize
    if options.fourierSize is None:
        NFT = windowSizeInSamples
    else:
        NFT = options.fourierSize

    # number of iterations for each parameter estimation step:
    niter = options.nbiter
    # number of spectral shapes for the accompaniment
    R = int(options.R)

    eps = 10**-9

    if options.verbose:
        print "Some parameter settings:"
        print "    Size of analysis windows: ", windowSizeInSamples
        print "    Hopsize: ", hopsize
        print "    Size of Fourier transforms: ", NFT
        print "    Number of iterations to be done: ", niter
        print "    Number of elements in WM: ", R

    if is_stereo:
        XR, F, N = stft(data[:, 0],
                        fs=Fs,
                        hopsize=hopsize,
                        window=sinebell(windowSizeInSamples),
                        nfft=NFT)
        XL, F, N = stft(data[:, 1],
                        fs=Fs,
                        hopsize=hopsize,
                        window=sinebell(windowSizeInSamples),
                        nfft=NFT)
        # SX is the power spectrogram:
        ## SXR = np.maximum(np.abs(XR) ** 2, 10 ** -8)
        ## SXL = np.maximum(np.abs(XL) ** 2, 10 ** -8)
        #SXR = np.abs(XR) ** 2
        #SXL = np.abs(XL) ** 2
        SX = np.maximum((0.5 * np.abs(XR + XL))**2, eps)
    else:  # data is mono
        X, F, N = stft(data,
                       fs=Fs,
                       hopsize=hopsize,
                       window=sinebell(windowSizeInSamples),
                       nfft=NFT)
        SX = np.maximum(np.abs(X)**2, eps)

    del data, F, N

    # minimum and maximum F0 in glottal source spectra dictionary
    minF0 = options.minF0
    maxF0 = options.maxF0
    F, N = SX.shape
    stepNotes = options.stepNotes  # this is the number of F0s within one semitone

    K = int(options.K_numFilters)  # number of spectral shapes for the filter part
    P = int(options.P_numAtomFilters)  # number of elements in the dictionary of smooth filters
    chirpPerF0 = 1  # number of chirped spectral shapes between each F0
    # this feature should be further studied before
    # we find a good way of doing that.

    # Create the harmonic combs, for each F0 between minF0 and maxF0:
    F0Table, WF0 = \
             generate_WF0_chirped(minF0, maxF0, Fs, Nfft=NFT, \
                                  stepNotes=stepNotes, \
                                  lengthWindow=windowSizeInSamples, Ot=0.25, \
                                  perF0=chirpPerF0, \
                                  depthChirpInSemiTone=.15, loadWF0=True,\
                                  analysisWindow='sinebell')
    WF0 = WF0[0:F, :]  # ensure same size as SX
    NF0 = F0Table.size  # number of harmonic combs
    # Normalization:
    WF0 = WF0 / np.outer(np.ones(F), np.amax(WF0, axis=0))

    # Create the dictionary of smooth filters, for the filter part of
    # the lead instrument:
    WGAMMA = generateHannBasis(F, NFT, Fs=Fs, frequencyScale='linear', \
                               numberOfBasis=P, overlap=.75)

    if options.sal_output_file is None or not os.path.exists(
            options.sal_output_file):
        if displayEvolution:
            plt.figure(1)
            plt.clf()
            plt.xticks(fontsize=16)
            plt.yticks(fontsize=16)
            plt.xlabel(r'Frame number $n$', fontsize=16)
            plt.ylabel(r'Leading source number $u$', fontsize=16)
            plt.ion()
            # plt.show()
            ## the following seems superfluous if mpl's backend is macosx...
            ##        raw_input("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"\
            ##                  "!! Press Return to resume the program. !!\n"\
            ##                  "!! Be sure that the figure has been    !!\n"\
            ##                  "!! already displayed, so that the      !!\n"\
            ##                  "!! evolution of HF0 will be visible.   !!\n"\
            ##                  "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")

        ## section to estimate the melody, on monophonic algo:
        # First round of parameter estimation:
        if (HPS):
            from scipy.signal import medfilt
        if (is_stereo & stereoEstimation):
            SXR = np.maximum(np.abs(XR)**2, eps)
            SXL = np.maximum(np.abs(XL)**2, eps)
            if (HPS):
                SXR = medfilt(SXR, 3)
                SXL = medfilt(SXL, 3)

            alphaR, alphaL, HGAMMA, HPHI, HF0, betaR, betaL, HM, WM, recoError1 = SIMM.Stereo_SIMM(
                # the data to be fitted to:
                SXR,
                SXL,
                # the basis matrices for the spectral combs
                WF0,
                # and for the elementary filters:
                WGAMMA,
                # number of desired filters, accompaniment spectra:
                numberOfFilters=K,
                numberOfAccompanimentSpectralShapes=R,
                # putting only 2 elements in accompaniment for a start...
                # if any, initial amplitude matrices for
                HGAMMA0=None,
                HPHI0=None,
                HF00=None,
                WM0=None,
                HM0=None,
                # Some more optional arguments, to control the "convergence"
                # of the algo
                numberOfIterations=niter,
                updateRulePower=1.,
                stepNotes=stepNotes,
                lambdaHF0=0.0 / (1.0 * SX.max()),
                alphaHF0=0.9,
                verbose=options.verbose,
                displayEvolution=displayEvolution)
        else:
            if (HPS):
                SX = medfilt(SX, 3)

            HGAMMA, HPHI, HF0, HM, WM, recoError1 = SIMM.SIMM(
                # the data to be fitted to:
                SX,
                # the basis matrices for the spectral combs
                WF0,
                # and for the elementary filters:
                WGAMMA,
                # number of desired filters, accompaniment spectra:
                numberOfFilters=K,
                numberOfAccompanimentSpectralShapes=R,
                # putting only 2 elements in accompaniment for a start...
                # if any, initial amplitude matrices for
                HGAMMA0=None,
                HPHI0=None,
                HF00=None,
                WM0=None,
                HM0=None,
                # Some more optional arguments, to control the "convergence"
                # of the algo
                numberOfIterations=niter,
                updateRulePower=1.,
                stepNotes=stepNotes,
                lambdaHF0=0.0 / (1.0 * SX.max()),
                alphaHF0=0.9,
                verbose=options.verbose,
                displayEvolution=displayEvolution)
        if displayEvolution:
            h2 = plt.figure(2)
            plt.clf()
            imageMatlab.imageM(20 * np.log10(HF0))
            matMax = (20 * np.log10(HF0)).max()
            matMed = np.median(20 * np.log10(HF0))
            plt.clim([matMed - 100, matMax])

    else:
        print "Loading Salience from file to calculate Melody: " + options.sal_output_file
        loaded = np.loadtxt(options.sal_output_file).T
        times = [loaded[0, :]]
        HF0 = loaded[1:, :]

    # If vit_pitch_output_file is not null, do melody extraction with Viterbi
    if options.vit_pitch_output_file is not None:
        print("Viterbi decoding")
        # Viterbi decoding to estimate the predominant fundamental
        # frequency line
        # create transition probability matrix - adhoc parameter 'scale'
        # TODO: use "learned" parameter scale (NB: after many trials,
        # provided scale and parameterization seems robust)
        scale = 1.0
        transitions = np.exp(-np.floor(np.arange(0, NF0) / stepNotes) * scale)
        cutoffnote = 2 * 5 * stepNotes
        transitions[cutoffnote:] = transitions[cutoffnote - 1]

        transitionMatrixF0 = np.zeros([NF0 + 1, NF0 + 1])  # toeplitz matrix
        b = np.arange(NF0)
        transitionMatrixF0[0:NF0, 0:NF0] = \
                                  transitions[\
            np.array(np.abs(np.outer(np.ones(NF0), b) \
                            - np.outer(b, np.ones(NF0))), dtype=int)]
        pf_0 = transitions[cutoffnote - 1] * 10**(-90)
        p0_0 = transitions[cutoffnote - 1] * 10**(-100)
        p0_f = transitions[cutoffnote - 1] * 10**(-80)
        transitionMatrixF0[0:NF0, NF0] = pf_0
        transitionMatrixF0[NF0, 0:NF0] = p0_f
        transitionMatrixF0[NF0, NF0] = p0_0

        sumTransitionMatrixF0 = np.sum(transitionMatrixF0, axis=1)
        transitionMatrixF0 = transitionMatrixF0 \
                             / np.outer(sumTransitionMatrixF0, \
                                        np.ones(NF0 + 1))

        # prior probabilities, and setting the array for Viterbi tracking:
        priorProbabilities = 1 / (NF0 + 1.0) * np.ones([NF0 + 1])
        logHF0 = np.zeros([NF0 + 1, N])
        normHF0 = np.amax(HF0, axis=0)
        barHF0 = np.array(HF0)

        logHF0[0:NF0, :] = np.log(barHF0)
        logHF0[0:NF0, normHF0 == 0] = np.amin(logHF0[logHF0 > -np.Inf])
        logHF0[NF0, :] = np.maximum(np.amin(logHF0[logHF0 > -np.Inf]), -100)

        indexBestPath = viterbiTrackingArray(\
            logHF0, np.log(priorProbabilities),
            np.log(transitionMatrixF0), verbose=options.verbose)

        if displayEvolution:
            h2.hold(True)
            plt.plot(indexBestPath, '-b')
            h2.hold(False)
            plt.axis('tight')

        del logHF0

        # detection of silences:
        # computing the melody restricted F0 amplitude matrix HF00
        # (which will be used as initial HF0 for further algo):
        HF00 = np.zeros([NF0 * chirpPerF0, N])
        scopeAllowedHF0 = 2.0 / 1.0
        # computing indices for and around the melody indices,
        # dim1index are indices along axis 0, and dim2index along axis 1
        # of HF0:
        #     TODO: use numpy broadcasting to make this "clearer" (if possible...)
        dim1index = np.array(\
            np.maximum(\
                np.minimum(\
                    np.outer(chirpPerF0 * indexBestPath,
                             np.ones(chirpPerF0 \
                                     * (2 \
                                        * int(np.floor(stepNotes / scopeAllowedHF0)) \
                                        + 1))) \
                    + np.outer(np.ones(N),
                               np.arange(-chirpPerF0 \
                                         * int(np.floor(stepNotes / scopeAllowedHF0)),
                                         chirpPerF0 \
                                         * int((np.floor(stepNotes / scopeAllowedHF0))) \
                                            + 1)),
                    chirpPerF0 * NF0 - 1),
                0),
            dtype=int).reshape(1, N * chirpPerF0 \
                               * (2 * int(np.floor(stepNotes / scopeAllowedHF0)) \
                                  + 1))
        dim2index = np.outer(np.arange(N),
                             np.ones(chirpPerF0 \
                                     * (2 * int(np.floor(stepNotes \
                                                     / scopeAllowedHF0)) + 1), \
                                     dtype=int)\
                             ).reshape(1, N * chirpPerF0 \
                                       * (2 * int(np.floor(stepNotes \
                                                       / scopeAllowedHF0)) \
                                          + 1))
        HF00[dim1index, dim2index] = HF0[dim1index, dim2index]  # HF0.max()

        HF00[:, indexBestPath == (NF0 - 1)] = 0.0
        HF00[:, indexBestPath == 0] = 0.0

        # remove frames carrying less than (100 * thres_energy) % of the total energy.
        thres_energy = 0.000584
        SF0 = np.maximum(np.dot(WF0, HF00), eps)
        SPHI = np.maximum(np.dot(WGAMMA, np.dot(HGAMMA, HPHI)), eps)
        SM = np.maximum(np.dot(WM, HM), eps)
        hatSX = np.maximum(SPHI * SF0 + SM, eps)

        energyMel = np.sum((((SPHI * SF0) / hatSX)**2) * SX, axis=0)
        energyMelSorted = np.sort(energyMel)
        energyMelCumul = np.cumsum(energyMelSorted)
        energyMelCumulNorm = energyMelCumul / max(energyMelCumul[-1], eps)
        # normalized to the maximum of energy:
        # expressed in 0.01 times the percentage
        ind_999 = np.nonzero(energyMelCumulNorm > thres_energy)[0][0]
        if ind_999 is None:
            ind_999 = N
        if not os.path.isdir(os.path.dirname((options.vit_pitch_output_file))):
            os.mkdir(os.path.dirname((options.vit_pitch_output_file)))

        np.savetxt(options.vit_pitch_output_file + '.egy',
                   np.array(
                       [np.arange(N) * hopsize / np.double(Fs), energyMel]).T,
                   fmt='%10.5f')

        # energyMel <= energyMelCumul[ind_999]?

        melNotPresent = (energyMel <= energyMelCumulNorm[ind_999])

        # edit: frames predicted as unvoiced will be given negative values
        # indexBestPath[melNotPresent] = 0

        freqMelody = F0Table[np.array(np.minimum(indexBestPath,
                                                 len(F0Table) - 1),
                                      dtype=int)]
        freqMelody[melNotPresent] = -freqMelody[melNotPresent]

        if not os.path.exists(os.path.dirname(options.vit_pitch_output_file)):
            os.makedirs(os.path.dirname(options.vit_pitch_output_file))

        np.savetxt(options.vit_pitch_output_file,
                   np.array(
                       [np.arange(N) * hopsize / np.double(Fs), freqMelody]).T,
                   fmt='%10.7f')

    times = np.array([np.arange(N) * hopsize / np.double(Fs)])

    # Save salience file:
    if options.sal_output_file is not None:
        if not os.path.exists(os.path.dirname(options.sal_output_file)):
            os.makedirs(os.path.dirname(options.sal_output_file))
        np.savetxt(options.sal_output_file,
                   np.concatenate((times, HF0), axis=0).T,
                   fmt='%10.6f')
        # saveSPHI (timbre related)
        saveSPHI = 0
        if saveSPHI:
            if not os.path.exists(
                    os.path.dirname(options.sal_output_file + '.SPHI')):
                os.makedirs(os.path.dirname(options.sal_output_file))
            WPHI = np.dot(WGAMMA, HGAMMA)
            SPHI = np.dot(WPHI, HPHI)
            np.savetxt(options.sal_output_file + '.SPHI',
                       np.concatenate((times, SPHI), axis=0).T,
                       fmt='%10.4f')
        #np.savetxt(options.sal_output_file+'.WGAMMA',np.concatenate((times,WGAMMA),axis=0).T,fmt='%10.4f')

    # return times[0],freqMelody,HF0
    print "Done!"
    return times[0], HF0, options
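The Viterbi section above builds its transition matrix as a Toeplitz block of exponentially decaying probabilities plus one extra "unvoiced" state; a small standalone sketch of that construction with toy sizes (NF0, stepNotes and scale are illustrative values, not the ones used above):

import numpy as np

NF0, stepNotes, scale = 8, 2, 1.0  # toy sizes, just to show the structure
transitions = np.exp(-np.floor(np.arange(NF0) / stepNotes) * scale)
cutoffnote = min(2 * 5 * stepNotes, NF0)  # clamped so the toy example stays in range
transitions[cutoffnote:] = transitions[cutoffnote - 1]

T = np.zeros((NF0 + 1, NF0 + 1))
b = np.arange(NF0)
# Toeplitz block: the probability depends only on the distance between F0 bins
T[:NF0, :NF0] = transitions[np.abs(np.subtract.outer(b, b))]
# last row/column: tiny probabilities for entering and leaving the unvoiced state
T[:NF0, NF0] = transitions[cutoffnote - 1] * 1e-90
T[NF0, :NF0] = transitions[cutoffnote - 1] * 1e-80
T[NF0, NF0] = transitions[cutoffnote - 1] * 1e-100
T /= T.sum(axis=1, keepdims=True)  # normalise each row into a probability distribution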
Example #8
def load_audio_file(path: str) -> AudioSequence:
    audio, sample_rate, *_ = AudioLoader(filename=path)()
    return AudioSequence(audio, freq=sample_rate)
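A possible call site for load_audio_file, assuming the surrounding project's AudioSequence class and a placeholder path:

seq = load_audio_file("sounds/example.wav")  # hypothetical path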
Example #9
                         startFromZero=True)

    problematicFrames = 0
    for frameL, frameR in zip(lfg, rfg):
        _, corr = falseStereoDetector(mux(frameL, frameR))
        problematicFrames += corr < correlationThreshold
    falseStereoDetector.reset()

    conf = problematicFrames / lfg.num_frames()

    return conf, conf > percentageThreshold / 100, False


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="calculate correlation for all the sounds in a folder")
    parser.add_argument("path", help="path")
    args = parser.parse_args()
    for file in os.listdir(args.path):
        name = file
        file = os.path.join(args.path, file)
        if not os.path.isfile(file):
            continue
        if os.path.splitext(file)[1] != ".wav":
            continue
        print(name)
        audio, sr, channels, _, _, _ = AudioLoader(filename=file)()
        print("False Stereo: ", essFalsestereoDetector(audio,
                                                       channels=channels))
        print("OutofPhase: ", outofPhaseDetector(audio, channels=channels))
Example #10
def single_json_compute(audioPath, jsonFolder, printFlag=False):
    """Calls the audio_problems_detection algorithms and stores the result in a json file

    Args:
        audioPath: string containing the relative path for the audio file
        jsonFolder: string containing the relative path for the json folder
        printFlag: (boolean) True if a preview of the json file is desired, False otherwise (default = False)

    Returns:
        json_dict: (dict) dictionary with the audio's features
    """
    # print(audioPath)
    if not os.path.exists(audioPath):
        raise ValueError("Audio File does not exist")
    if not os.path.exists(jsonFolder):
        print(jsonFolder + " does not exist, defaulting to audio folder: " + os.path.dirname(audioPath))
        jsonFolder = os.path.dirname(audioPath)
    
    # print("Essentia Modules installed:")
    # print(dir(estd))

    audio, sr, channels, _, br, _ = AudioLoader(filename=audioPath)()

    monoAudio = np.sum(audio, axis=1)/channels

    frameSize = int(1024)
    if len(monoAudio)/frameSize < 10:
        frameSize = int(2 ** np.ceil(np.log2(len(monoAudio)/10)))

    hopSize = int(frameSize/2)
    bitDepthContainer = int(br / sr / channels)

    filename = os.path.splitext(os.path.basename(audioPath))[0]

    snr = LowSnrDetector(frameSize=frameSize, hopSize=hopSize)
    bit = BitDepthDetector(bitDepth=bitDepthContainer, chunkLen=100, numberOfChunks=100)
    bandWidth = BwDetection()
    
    satStarts, satEnds, satPercentage, satBool = essSaturationDetector(monoAudio, frameSize=frameSize, hopSize=hopSize)
    humPercentage, humBool = essHumDetector(monoAudio, sr=sr)
    clkStarts, clkEnds, clkPercentage, clkBool = essClickDetector(monoAudio, frameSize=frameSize, hopSize=hopSize)
    nbIndexes, nbPercentage, nbBool = essNoiseburstDetector(monoAudio, frameSize=frameSize, hopSize=hopSize)
    if len(monoAudio) > 1465:
        silPercentage, silBool = essStartstopDetector(monoAudio, frameSize=frameSize, hopSize=hopSize)
    else:
        silPercentage, silBool = "Audio file too short", False
    fsPercentage, fsBool, fsMonoBool = essFalsestereoDetector(audio, frameSize=frameSize, hopSize=hopSize, channels=channels)
    oopPercentage, oopBool, oopMonoBool = outofPhaseDetector(audio, frameSize=frameSize, hopSize=hopSize, channels=channels)

    snr, snrBool = snr(audio)
    extrBitDepth, bitDepthBool = bit(audio)
    bwCutFrequency, bwConfidence, bwBool = bandWidth(audio, sr)

    info = {
        "Saturation": {"Start indexes": len(satStarts), "End indexes": len(satEnds),
                       "Percentage": satPercentage, "Bool": satBool},
        "Hum": {"Percentage": humPercentage, "Bool": humBool},
        "Clicks": {"Start indexes": len(clkStarts), "End indexes": len(clkEnds),
                   "Percentage": clkPercentage, "Bool": clkBool},
        "Silence": {"Percentage": silPercentage, "Bool": silBool},
        "FalseStereo": {"Percentage": fsPercentage, "Bool": fsBool, "monoBool": fsMonoBool},
        "OutofPhase": {"Percentage": oopPercentage, "Bool": oopBool, "monoBool": oopMonoBool},
        "NoiseBursts": {"Indexes": len(nbIndexes), "Percentage": nbPercentage, "Bool": nbBool},
        # "BitDepth": { "BitDepth": bitDepthBool, "extracted": extrBitDepth},
        # "Bandwidth": { "Bandwidth": bwBool, "cutfrequency": bwCutFrequency, "confidence": bwConfidence},
        # "lowSNR": { "lowSNR": snrBool, "SNR": snr}
        # "Saturation": {"Bool": satBool},
        # "Hum": {"Bool": humBool},
        # "Clicks": {"Bool": clkBool},
        # "Silence": {"Bool": silBool},
        # "FalseStereo": {"Bool": fsBool},
        # "OutofPhase": {"Bool": oopBool},
        # "NoiseBursts": {"Bool": silBool},
        "BitDepth": {"Bool": bitDepthBool},
        "Bandwidth": {"Bool": bwBool},
        "lowSNR": {"Bool": snrBool}
    }

    if printFlag:
        print("{0} data: \n \tfilename params: \n \tSample Rate:{1}Hz \n \tNumber of channels:{2} \
              \n \tBit Rate:{3}".format(filename, sr, channels, br))
        print("\n \tLength of the audio file: {0} \n \tFrame Size: {1} \n \tHop Size: {2}".format(
            len(monoAudio), frameSize, hopSize))

        for problem in info:
            string = "{}: \n".format(problem)
            for feature in info[problem]:
                string = "{}\t{}: {} \n".format(string, feature, info[problem][feature])
            print(string)

    jsonpath = os.path.join(jsonFolder, filename + ".json")
    with open(jsonpath, "w") as jsonfile:

        json_dict = info.copy()
        for problem in json_dict:
            if isinstance(json_dict[problem], dict):
                for feature in json_dict[problem]:
                    if feature == "Bool":
                        json_dict[problem]["Bool"] = str(json_dict[problem]["Bool"])

        json.dump(json_dict, jsonfile)
    
    return info
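A hypothetical invocation of single_json_compute with placeholder paths; it writes <jsonFolder>/<basename>.json and returns the dictionary of detected problems:

report = single_json_compute("sounds/example.wav", "reports", printFlag=True)  # placeholder paths
print(report["FalseStereo"]["Bool"])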