Example #1
    def runTest(self):
        pySig = Signal(op.join(audio_filepath, "glocs.wav"), mono=True)
        pySig.crop(0, 5 * pySig.fs)
        pySig.pad(2048)

        scale = 1024
        parallelProjections.initialize_plans(np.array([scale]), np.array([2]))

        classicBlock = mdct_block.Block(scale, pySig, 0, debug_level=3)

        spreadBlock = mdct_block.SpreadBlock(scale, pySig, 0, debug_level=3, penalty=0, maskSize=5)

        # compute the projections, should be equivalent
        classicBlock.update(pySig, 0, -1)
        spreadBlock.update(pySig, 0, -1)

        maxClassicAtom1 = classicBlock.get_max_atom()
        print maxClassicAtom1.length, maxClassicAtom1.frame,
        print maxClassicAtom1.freq_bin, maxClassicAtom1.mdct_value
        maxSpreadcAtom1 = spreadBlock.get_max_atom()
        print maxSpreadcAtom1.length, maxSpreadcAtom1.frame,
        print maxSpreadcAtom1.freq_bin, maxSpreadcAtom1.mdct_value
        # assert equality using the inner comparison method of MDCT atoms
        self.assertEqual(maxClassicAtom1, maxSpreadcAtom1)

        # verifying the masking index construction
        mask_frame_width = 2
        mask_bin_width = 1
        spreadBlock.compute_mask(maxSpreadcAtom1, mask_bin_width, mask_frame_width, 0.5)

        c_frame = int(np.ceil(maxSpreadcAtom1.time_position / (scale / 2)))
        c_bin = int(maxSpreadcAtom1.reduced_frequency * scale)

        z1 = np.arange(int(c_frame - mask_frame_width), int(c_frame + mask_frame_width) + 1)
        z2 = np.arange(int(c_bin - mask_bin_width), int(c_bin + mask_bin_width) + 1)
        #        x, y = np.meshgrid(z1, z2)
        #        print spreadBlock.mask_index_x
        #        np.testing.assert_array_equal(spreadBlock.mask_index_x, z1)
        #        np.testing.assert_array_equal(spreadBlock.mask_index_y, z2)

        pySig.subtract(maxSpreadcAtom1)

        # recompute the projections
        classicBlock.update(pySig, 0, -1)
        spreadBlock.update(pySig, 0, -1)

        #        plt.show()
        maxClassicAtom2 = classicBlock.get_max_atom()
        print maxClassicAtom2.length, maxClassicAtom2.frame, maxClassicAtom2.freq_bin, maxClassicAtom2.mdct_value
        maxSpreadcAtom2 = spreadBlock.get_max_atom()
        print maxSpreadcAtom2.length, maxSpreadcAtom2.frame, maxSpreadcAtom2.freq_bin, maxSpreadcAtom2.mdct_value
        self.assertNotEqual(maxClassicAtom2, maxSpreadcAtom2)

        parallelProjections.clean_plans()
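These tests bracket every projection call between initialize_plans and clean_plans. Below is a minimal sketch of that lifecycle, assuming parallelProjections is importable the way the tests import it (the exact import path is not shown on this page); the try/finally wrapping is an addition for illustration, not part of the test suite.

import numpy as np
import parallelProjections  # import path assumed; adjust to your install

# One FFTW plan per MDCT scale; the tolerance array mirrors the calls above.
sizes = np.array([1024])
tolerances = np.array([2])

if parallelProjections.initialize_plans(sizes, tolerances) != 1:
    raise ValueError("FFTW initialization failed")
try:
    pass  # ... run projections / matching pursuit here ...
finally:
    # Always release the FFTW plans, as Example #3 stresses.
    if parallelProjections.clean_plans() != 1:
        raise ValueError("FFTW cleaning failed")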
Example #2
def _clean_fftw():
    if parproj.clean_plans() != 1:
        raise ValueError("Something failed during FFTW cleaning stage ")
Example #3
def mp_joint(orig_sig_list,
             dictionary,
             target_srr,
             max_it_num,
             debug=0,
             pad=True,
             escape=False,
             escape_threshold=0.4,
             escape_thr_map=None,
             bss_eval=False,
             sources=None,
             interval=100,
             clean=True,
             waitbar=True,
             no_adapt=False,
             silent_fail=False):
    """ Joint Matching Pursuit : Takes a bunch of signals in entry and decomposes the common part out of them
        Gives The common model and the sparse residual for each signal in return """

    # backward compatibility: debug levels are used now
    if debug is not None:
        _Logger.set_level(debug)
    _Logger.info("Call to mp.mp_joint")
    # We now work on a list of signals: we have a list of approx and residuals
    res_sig_list = []
    current_approx_list = []
    res_energy_list = []
    current_srr_list = []

    if escape:
        esc_approx_list = []
        esc_it_list = []
        criterions = []

#    if bss_eval:
#        SDRs = [];
#        SIRs = [];
#        SARs = [];
#    dictionaryList = []
    threshold_dict = {}
    if escape_thr_map is None:
        for size in dictionary.sizes:
            threshold_dict[size] = escape_threshold
    else:
        for size, value in zip(dictionary.sizes, escape_thr_map):
            threshold_dict[size] = value

    # create a mean approx of the background
    mean_approx = Approx.Approx(
        dictionary, [], orig_sig_list[0], debug_level=debug)
    k = 1
    for orig_signal in orig_sig_list:

        _Logger.debug("Initializing Signal Number " + str(k))

#        if pad:
#            orig_signal.pad(dictionary.get_pad())
        res_sig_list.append(orig_signal.copy())

        # initialize approximant
        current_approx_list.append(
            Approx.Approx(dictionary, [], orig_signal, debug_level=debug))

        if escape:
            esc_approx_list.append(Approx.Approx(dictionary, [
            ], orig_signal, debug_level=debug))
            esc_it_list.append([])
        # residualEnergy
        res_energy_list.append([])
        current_srr_list.append(current_approx_list[-1].compute_srr())
        k += 1

    # initialize blocks using the first signal: they should all have the same
    # length
    _Logger.debug("Initializing Dictionary")
    dictionary.initialize(res_sig_list)

    # FFTW Optimization for C code: initialize module global variables
    try:
        if parproj.initialize_plans(np.array(dictionary.sizes), np.array(dictionary.tolerances)) != 1:
            raise ValueError(
                "Something failed during FFTW initialization step ")
    except:
        _Logger.error("Initialization step failed")
        raise

    iterationNumber = 0

    approxSRR = max(current_srr_list)

    # Decomposition loop: stop when either the target SRR or the iteration limit is reached
    while (approxSRR < target_srr) & (iterationNumber < max_it_num):

        _Logger.info("mp LOOP : iteration " + str(iterationNumber + 1))
        # Compute inner products and select the best atom
        dictionary.update(res_sig_list, iterationNumber)

        if debug > 0:
            maxScale = dictionary.best_current_block.scale
            maxFrameIdx = math.floor(
                dictionary.best_current_block.maxIdx / (0.5 * maxScale))
            maxBinIdx = dictionary.best_current_block.maxIdx - maxFrameIdx * (
                0.5 * maxScale)

            _Logger.debug("It: " + str(iterationNumber) + " Selected atom "
                          + str(dictionary.best_current_block.maxIdx)
                          + " of scale " + str(maxScale) + " frequency bin "
                          + str(maxBinIdx)
                          + " value : " + str(
                          dictionary.max_block_score)
                          + " Frame : " + str(maxFrameIdx))

        # retrieve the best correlated atoms, locally adapted to the signal
        best_atom_list = dictionary.get_best_atom(debug, noAdapt=no_adapt)

        if best_atom_list is None:
            print 'No atom selected anymore'
            raise ValueError('Failed to select an atom')

        escape_current_atom = False

        if escape:
            # Escape mechanism: if the spread of amplitudes across signals is
            # too large, assign the atom to the strongest signal only
            abs_values = [abs(atom.get_value()) for atom in best_atom_list]
            mean = np.mean(abs_values)
            std = np.std(abs_values)
            maxValue = np.max(abs_values)
            _Logger.debug("Mean : " + str(mean) + " - STD : " + str(std))

            criterions.append(std / mean)
            # all locally adapted atoms come from the same block, hence share a length
            if (std / mean) > threshold_dict[best_atom_list[0].length]:
                escape_current_atom = True
                _Logger.debug("Escaping!!")

        for sigIdx in range(len(res_sig_list)):

            if not escape_current_atom:
                # add atom to current regular approx
                current_approx_list[sigIdx].add(
                    best_atom_list[sigIdx], clean=False)

                dictionary.compute_touched_zone(sigIdx, best_atom_list[sigIdx])

                # subtract atom from residual
                try:
                    res_sig_list[
                        sigIdx].subtract(best_atom_list[sigIdx], debug)
                except ValueError:
                    if silent_fail:
                        continue
                    else:
                        raise ValueError("Subtraction of atom failed")
            else:
                # Add this atom to the escape approx only if this signal
                # carries the maximum amplitude
                if abs(best_atom_list[sigIdx].get_value()) == maxValue:
#                if True:
#                    print "Added Atom to signal " + str(sigIdx)
                    esc_approx_list[sigIdx].add(best_atom_list[sigIdx])
                    current_approx_list[sigIdx].add(best_atom_list[sigIdx])
                    esc_it_list[sigIdx].append(iterationNumber)

                    # subtract atom from residual
                    res_sig_list[
                        sigIdx].subtract(best_atom_list[sigIdx], debug)

                    dictionary.compute_touched_zone(
                        sigIdx, best_atom_list[sigIdx])

                else:
                    _Logger.debug("Atom not subtracted in this signal")
                    dictionary.compute_touched_zone(
                        sigIdx, best_atom_list[sigIdx])

            # update energy decay curves
            res_energy_list[sigIdx].append(res_sig_list[sigIdx].energy)

            if debug > 0 or (iterationNumber % interval == 0):
                current_srr_list[sigIdx] = current_approx_list[
                    sigIdx].compute_srr(res_sig_list[sigIdx])

                _Logger.debug("Local adaptation of atom " + str(sigIdx) +
                              " - Position : " + str(best_atom_list[sigIdx].time_position) +
                              " Amplitude : " + str(best_atom_list[sigIdx].proj_score) +
                              " TimeShift : " + str(best_atom_list[sigIdx].time_shift))
#            if clean and sigIdx>0:
#                del best_atom_list[sigIdx].waveform;

        # also add the mean atom to the background model UNLESS this is an
        # escaped atom
        if not escape_current_atom:
#            mean_approx.add(best_atom_list[0] )
            mean_approx.add(
                dictionary.get_mean_atom(getFirstAtom=False), clean=clean)
            _Logger.debug("Atom added to common rep ")

        if clean:
            for sigIdx in range(len(res_sig_list)):
                del best_atom_list[sigIdx].waveform

#        dictionary.compute_touched_zone()

#        approxSRR = currentApprox.compute_srr();

        _Logger.debug("SRRs reached of " + str(current_srr_list) +
                      " at iteration " + str(iterationNumber))

#        if bss_eval and ( (iterationNumber+1) % interval ==0):
#            estimSources = np.zeros(sources.shape)
#            # first estim the source for the common part
#            estimSources[0, :,0] = mean_approx.recomposedSignal.dataVec
#
#            for sigIdx in range(len(res_sig_list)):
# estimSources[sigIdx+1, :,0] =
# current_approx_list[sigIdx].recomposedSignal.dataVec +
# esc_approx_list[sigIdx].recomposedSignal.dataVec;
# estimSources[sigIdx+1, :,0] =
# esc_approx_list[sigIdx].recomposedSignal.dataVec;
#            [SDR,ISR,SIR,SAR] = bss_eval_images_nosort(estimSources,sources)
#
##            print SDR , SIR
#            SDRs.append(SDR)
#            SIRs.append(SIR)
#            SARs.append(SAR)
#
##            print current_srr_list

        iterationNumber += 1
        if (iterationNumber % interval == 0):
            print iterationNumber
            print [resEnergy[-1] for resEnergy in res_energy_list]
    # VERY IMPORTANT CLEANING STAGE!
    if parproj.clean_plans(np.array(dictionary.sizes)) != 1:
        raise ValueError("Something failed during FFTW cleaning stage ")
#    if waitbar and (iterationNumber %(max_it_num/100) ==0):
#        print float(iterationNumber)/float(max_it_num/100) , "%",

    if not escape:
        return mean_approx, current_approx_list, res_energy_list, res_sig_list
    else:
        # time to add the escaped atoms to the corresponding residuals:
#        for sigIdx in range(len(res_sig_list)):
# res_sig_list[sigIdx].dataVec +=
# esc_approx_list[sigIdx].recomposedSignal.dataVec;
#        if bss_eval:
# return mean_approx, current_approx_list, res_energy_list , res_sig_list ,
# esc_approx_list , criterions , SDRs, SIRs , SARs
        return mean_approx, current_approx_list, res_energy_list, res_sig_list, esc_approx_list, esc_it_list
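A hypothetical calling sketch for mp_joint: the wav file names are placeholders, and the signals and dico module names are borrowed from Example #4 below. The unpacking matches the non-escape return just above.

# Hypothetical usage sketch for mp_joint (file names are placeholders; the
# signals and dico modules are assumed imported as in Example #4).
sig_list = [signals.Signal(path, mono=True, normalize=True)
            for path in ("mix1.wav", "mix2.wav", "mix3.wav")]
joint_dico = dico.Dico([128, 1024, 8192])

mean_approx, approx_list, energy_decays, residuals = mp_joint(
    sig_list, joint_dico, target_srr=10, max_it_num=500, debug=0)

With escape=True the call returns two extra lists (esc_approx_list and esc_it_list), as the final return statement above shows.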
Example #4
 def runTest(self):
     print "-----Test mp sur multi-echelle MDCT"
     mdctDico = [32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384]
     tol = [2 for i in mdctDico]
     
     print "test the initialization function"
     if parallelProjections.initialize_plans(np.array(mdctDico), np.array(tol)) != 1:
         print "Initializing Stage Failed"
     if parallelProjections.clean_plans() != 1:
         print "Cleaning Stage Failed"
     
     
     pySigOriginal = signals.Signal(op.join(audio_filepath, "ClocheB.wav"),
                                    normalize=True, mono=True)
     pyDico2 = dico.Dico(mdctDico)
     
     pyDico_Lomp = dico.LODico(mdctDico)
     residualSignal = pySigOriginal.copy()
     
     print " profiling test with C integration"
     cProfile.runctx(
         'mp.mp(pySigOriginal, pyDico2, 20, 200 ,0)', globals(), locals())
     
     cProfile.runctx(
         'mp.mp(pySigOriginal, pyDico_Lomp, 20, 200 ,0)', globals(), locals())
     
     
     ################" C binding tests ########
     N = 64
     L = 16
     if parallelProjections.initialize_plans(np.array([L]), np.array([2])) != 1:
         print "Initiliazing Stage Failed"
     
     P = N / (L / 2)
     input_data = 0.42 * np.random.random((N, 1))
     projectionMatrix_real = np.zeros((N, 1))
     projectionMatrix_comp = np.zeros((N, 1), complex)
     scoreTree = np.zeros((P, 1))
     pre_twidVec = np.array(
         [exp(n * (-1j) * pi / L) for n in range(L)]).reshape(L, 1)
     post_twidVec = np.array([exp((
         float(n) + 0.5) * -1j * pi * (L / 2 + 1) / L) for n in range(L / 2)]).reshape(L / 2, 1)
     
     print scoreTree.shape, pre_twidVec.shape, post_twidVec.shape
     
     i = 1
     j = 10
     takeReal = 1
     
     print "Testing Masked Gabor dictionary projections"
     print " ---Testing good call"
     input_data = 0.42 * np.random.random((N, 1))
     projectionMatrix_real = np.zeros((N, 1))
     projectionMatrix_comp = np.zeros((N, 1), complex)
     
     
     projectionMatrix_comp.real = np.random.random((N, 1))
     
     mask_data = np.ones(projectionMatrix_comp.shape)
     maskedindexes = [12, 35, 20]
     mask_data[maskedindexes] = 0
     
     res = parallelProjections.project_masked_gabor(input_data, scoreTree,
                         projectionMatrix_comp,
                         mask_data, i, j, L)
     
     
     
     if res is not None:
         print "Survived.."
         if (np.sum(projectionMatrix_comp[maskedindexes]) != 0):
             print".. but these should be zeroes..."
             print projectionMatrix_comp[maskedindexes]
             print mask_data
         else:
             print np.sum(projectionMatrix_comp)
             print ".. and Succeed"
     else:
         print "Died!"
     
     print " --- Testing MDCT projections with penalized masking"
     print " ---Testing good call"
     input_data = 0.42 * np.random.random((N, 1))
     projectionMatrix = np.zeros((N, 1))                            
     penprojectionMatrix = np.zeros((N, 1))  
     # test with an overall penalty
     pen_mask = np.ones(projectionMatrix.shape)
     lamb = 0.01
     res = parallelProjections.project_penalized_mdct(input_data, scoreTree,
                         projectionMatrix, penprojectionMatrix,
                         pen_mask, pre_twidVec, post_twidVec, i, j, L,lamb,0)
     print np.max(scoreTree), np.max(np.abs(projectionMatrix))
     assert(np.max(scoreTree) == np.max(np.abs(penprojectionMatrix)) )
     
     if res is not None:
         print "Survived.."
     else:
         print "Died!"
     
     #print "Testing Bad call:"
     #computeMCLT.project(input_data )
     
     print " ---Testing good call"
     parallelProjections.project(input_data, scoreTree,
                         projectionMatrix_real,
                         pre_twidVec, post_twidVec, i, j, L, 0)
     
     #if parallelFFT.clean_plans() != 1:
     #    print "Cleaning Stage Failed"
     ###
     ##print  projectionMatrix_real
     print scoreTree
     print "--- OK"
     #
     #
     #if computeMCLT.initialize_plans(np.array([L])) != 1:
     #    print "Initiliazing Stage Failed"
     
     print "---Testing good call: complex"
     res = parallelProjections.project_mclt(input_data, scoreTree,
                              projectionMatrix_comp,
                              pre_twidVec, post_twidVec, i, j, L)
     print scoreTree
     if res is not None:
         print "--- Ok"
     else:
         print "ERROR"
     #
     print "---Testing good call: complex set"
     
     res = parallelProjections.project_mclt_set(input_data, scoreTree,
                                  projectionMatrix_comp,
                                  pre_twidVec, post_twidVec, i, j, L, 1)
     if res is not None:
         print "--- Ok"
     else:
         print "ERRRORRRR"
         raise TypeError("ARf")
     
     
     print "---Testing good call: complex Non Linear set with Median"
     # Feed it with numpy matrices
     sigNumber = 3
     NLinput_data = np.concatenate(
         (input_data, 0.42 * np.random.randn(N, sigNumber - 1)), axis=1)
     
     NLinput_data = NLinput_data.T
     
     print NLinput_data.shape
     NLprojectionMatrix_comp = np.zeros(NLinput_data.shape)
     projResult = np.zeros((N, 1))
     res = parallelProjections.project_mclt_NLset(NLinput_data, scoreTree,
                                  NLprojectionMatrix_comp,
                                  projResult,
                                  pre_twidVec, post_twidVec, i, j, L, 0)
     
     A = np.median((NLprojectionMatrix_comp) ** 2, axis=0)
     #plt.figure()
     #plt.plot(A)
     #plt.plot(projResult,'r:')
     #plt.draw()
     #plt.draw()
     assert np.sum((A.reshape(projResult.shape) - projResult) ** 2) == 0
     if res is not None:
         print "--- Ok", scoreTree
     else:
         print "ERRRORRRR"
         raise TypeError("ARf")
     
     print "---Testing good call: complex Non Linear set with Penalized"
     projResult = np.zeros((N, 1))
     res = parallelProjections.project_mclt_NLset(NLinput_data, scoreTree,
                                  NLprojectionMatrix_comp,
                                  projResult,
                                  pre_twidVec, post_twidVec, i, j, L, 1)
     
     A = (NLprojectionMatrix_comp) ** 2
     B = np.sum(A, axis=0)
     for l in range(sigNumber):
         for m in range(l + 1, sigNumber):
     #                    print i,j
             diff = (abs(NLprojectionMatrix_comp[l, :]) - abs(
                 NLprojectionMatrix_comp[m, :])) ** 2
     #                    print diff
             B[:] += diff
     
     #plt.figure()
     #plt.plot(B)
     #plt.plot(projResult,'r:')
     #plt.draw()
     #plt.draw()
     assert np.sum((B.reshape(projResult.shape) - projResult) ** 2) < 0.000000000001
     if res is not None:
         print "--- Ok", scoreTree
     else:
         print "ERRRORRRR"
         raise TypeError("ARf")
     print "---Testing good call: complex Non Linear set with Weighted"
     projResult = np.zeros((N, 1))
     scoreTree = np.zeros((P, 1))
     res = parallelProjections.project_mclt_NLset(NLinput_data, scoreTree,
                                  NLprojectionMatrix_comp,
                                  projResult,
                                  pre_twidVec, post_twidVec, i, j, L, 2)
     
     A = abs(NLprojectionMatrix_comp)
     flatness = (
         np.exp((1.0 / sigNumber) * np.sum(np.log(A), axis=0)) / np.mean(A, axis=0))
     
     #print flatness
     
     
     B = np.multiply(np.nan_to_num(flatness), np.sum(A ** 2, axis=0))
     #plt.figure()
     #plt.plot(B)
     #plt.plot(np.sum(A**2,axis=0),'g')
     #plt.plot(projResult,'r:')
     #plt.show()
     assert np.sum((B.reshape(projResult.shape) - projResult) ** 2) < 0.000000000001
     
     
     if res is not None:
         print "--- Ok", scoreTree
     else:
         print "ERRRORRRR"
         raise TypeError("ARf")
     
     print "---Testing good call: subprojection"
     # TODO pass this in the library
     res = parallelProjections.subproject(input_data, scoreTree,
          projectionMatrix_real, pre_twidVec, post_twidVec, i, j, L, 0, 4)
     if res is not None:
         print "--- Ok"
     else:
         print "ERRRORRRR"
         raise TypeError("ARf")
     
     #print "---Testing atom projection"
     #scoreVec = np.array([0.0])
     #
     # input2 = np.concatenate( (np.concatenate((np.zeros(L/2) , Atom.waveform) ) ,
     # zeros(self.scale/2) ) )
     #
     #print parallelFFT.project_atom(input_data , input_data , scoreVec )
     #
     #print "--- Ok"
     
     
     print "Cleaning"
     if parallelProjections.clean_plans() != 1:
         print "Cleaning Stage Failed"
     
     
     print "---testing atom projection and creation"
     scale = 128
     k = 14
     if parallelProjections.initialize_plans(np.array([scale]), np.array([2])) != 1:
         print "Initialization Stage Failed"
     
     Atom_test = atom.Atom(scale, 1, 1200, k, 8000)
     #Atom_test.mdct_value = 0.57
     Atom_test.synthesize(value=1)
     #
     ref = Atom_test.waveform.copy()
     ts = 45
     #
     input2 = np.concatenate((np.concatenate(
         (np.zeros(scale / 2), Atom_test.waveform)), np.zeros(scale / 2)))
     input1 = 0.01 * np.random.randn(2 * scale) + np.concatenate((np.concatenate(
         (np.zeros(scale / 2 - ts), Atom_test.waveform)), np.zeros(scale / 2 + ts)))
     
     input3 = np.array(input2)
     input4 = np.array(input1)
     score = np.array([0.0])
     
     import time
     nbIt = 10000
     t = time.clock()
     for j in range(nbIt):
         timeShift = parallelProjections.project_atom(input1, input2, score, scale)
     print "C code Took",  time.clock() - t
     
     t = time.clock()
     for j in range(nbIt):
         Xcor = np.correlate(input4,  input3, "full")
     #    maxI = abs(Xcor).argmax()
     #    max = abs(Xcor).max()
     print "Numpy took ",  time.clock() - t
     
     
     #print "See if numpy correlate is efficient"
     #print score , abs(Xcor).max()
     #print timeShift , abs(Xcor).argmax() - 255
     
     #scoreOld = np.array([0.0])
     #timeShift2 = computeMCLT.project_atom(input4,input3 ,scoreOld)
     
     score3 = np.array([0.0])
     timeShift3 = parallelProjections.project_atom(input1, input2, score3, scale)
     
     #scoreOld2 = np.array([0.0])
     #timeShift4 = computeMCLT.project_atom(input4,input3 ,scoreOld2)
     
     #if not(scoreOld == score):
     #    print "ERROR: new score calculus isn't consistent with old one"
     #    print scoreOld , score
     #    print timeShift , timeShift2
     #    raise TypeError("ARf")
     #print score3 , scoreOld2
     #
     if ts == -timeShift:
         print "---- cross-correlation works!"
     else:
         print "--- ERROR : cross correlation did not pass!"
         print timeShift
         raise TypeError("ARf")
     print timeShift, score
     
     print sum(
         Atom_test.waveform * input1[scale / 2 - ts:scale / 2 - ts + Atom_test.length])
     
     
     
     
     #plt.figure()
     # plt.plot(np.concatenate( (np.concatenate((np.zeros(scale/2) , ref ) )
     # ,np.zeros(scale/2) ) ))
     #plt.plot(input1)
     #plt.plot(input2)
     #plt.legend(('origAtom','signal','newAtom'))
     #plt.show()
     #
     #k = 0
     #wf = parallelProjections.get_atom(scale ,  k)
     #wf_gab = parallelProjections.get_real_gabor_atom(scale ,  k , 0.45)
     #
     #gabAtom = pymp_GaborAtom.py_pursuit_GaborAtom(scale, 1, 1, k, 1, 0.45)
     #wf_gab_test = gabAtom.synthesize()
     
     #plt.figure()
     #plt.plot(wf_gab)
     #plt.plot(wf_gab_test , 'r:')
     #print sum((wf_gab_test - wf_gab)**2)
     #
     #print (sum(wf_gab**2)) , (sum(wf**2)) , (sum(wf_gab_test**2))
     #
     #if sum((wf_gab_test - wf_gab)**2) < 0.0000000001:
     #    print "--- atom construction test OK"
     
     
     
     
     print "Cleaning"
     if parallelProjections.clean_plans() != 1:
         print "Cleaning Stage Failed"
         raise TypeError("ARf")