Code example #1
File: dataloader.py  Project: lonce/MTCRNN.Fork
    def __getitem__(self, index):
        whole_sequence = self.rand_sample(index)
        if self.is_seeded:
            sequence = whole_sequence
            target = whole_sequence
        else:
            assert len(whole_sequence) == self.seqLen + 1, str(
                len(whole_sequence))
            sequence = whole_sequence[:-1]
            target = whole_sequence[1:]

        if self.transform is not None:
            input = self.transform(sequence)
        else:
            input = sequence  # fall back to the raw sequence so `input` is always defined

        if self.target_transform is not None:
            target = self.target_transform(target)

        if self.paramdir is not None and len(self.prop) > 0:
            pm = paramManager.paramManager(self.datadir, self.paramdir)
            params = pm.getParams(self.filelist[self.chooseFileIndex])
            paramdict = pm.resampleAllParams(params,
                                             self.seqLen,
                                             self.startoffset,
                                             self.startoffset +
                                             self.seqLenInSec,
                                             self.prop,
                                             verbose=False)
            paramtensor = self.param_transform(paramdict)
            input = torch.cat(
                (input, paramtensor), 1
            )  # input dim: (seq, feature); the DataLoader wraps in the batch dimension later

        return input, target
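
A minimal sketch (not from the project) of how a Dataset with a __getitem__ like this plugs into torch.utils.data.DataLoader; the toy class and its sizes are invented, but it shows the DataLoader wrapping in the batch dimension mentioned in the comment above:

import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    """Stand-in for the loader above: each item is a (seq, feature) pair."""
    def __len__(self):
        return 8

    def __getitem__(self, index):
        whole = torch.arange(11, dtype=torch.float32).reshape(-1, 1)  # seqLen + 1 = 11
        return whole[:-1], whole[1:]  # one-step-shifted (sequence, target)

loader = DataLoader(ToyDataset(), batch_size=4, shuffle=True)
inp, tgt = next(iter(loader))
print(inp.shape, tgt.shape)  # torch.Size([4, 10, 1]) for both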
Code example #2
    def __getitem__(self, index):
        chooseFileIndex, startoffset = choose_sequence_notsame(
            index + 1, self.fileDuration, self.srInSec, self.stride)
        whole_sequence = load_sequence(self.filelist, chooseFileIndex,
                                       startoffset, self.seqLen, self.sr)
        while whole_sequence is None:  #if len(whole_sequence) < self.seqLen+1, pick another random section
            index = np.random.randint(self.indexLen)
            chooseFileIndex, startoffset = choose_sequence_notsame(
                index + 1, self.fileDuration, self.srInSec, self.stride)
            whole_sequence = load_sequence(self.filelist, chooseFileIndex,
                                           startoffset, self.seqLen, self.sr)
        assert len(whole_sequence) == self.seqLen + 1, str(len(whole_sequence))
        whole_sequence = whole_sequence.reshape(-1, 1)
        sequence = whole_sequence[:-1]
        target = whole_sequence[1:]
        if self.transform is not None:
            input = self.transform(sequence)
        else:
            input = sequence  # keep `input` defined even without a transform
        if self.target_transform is not None:
            target = self.target_transform(target)
        if self.paramdir is not None:
            pm = paramManager.paramManager(self.datadir, self.paramdir)
            params = pm.getParams(self.filelist[chooseFileIndex])
            paramdict = pm.resampleAllParams(params,
                                             self.seqLen,
                                             startoffset,
                                             startoffset + self.seqLenInSec,
                                             self.prop,
                                             verbose=False)
            if self.param_transform is not None:
                paramtensor = self.param_transform(paramdict)
                #print("param",paramtensor.shape)
                #input = {**input,**paramtensor}  #combine audio samples and parameters here
                input = torch.cat((input, paramtensor),
                                  1)  # input dim: (seq, feature); batched later by the DataLoader

        return input, target
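
The one-sample shift between sequence and target is what sets up next-sample prediction: at every step the model sees sample t and is asked for sample t + 1. A toy NumPy check (mine, not from the repo):

import numpy as np

x = np.arange(6)                    # toy "audio": seqLen + 1 = 6 samples
whole = x.reshape(-1, 1)            # (seqLen + 1, 1), as in the loader above
seq, tgt = whole[:-1], whole[1:]    # shapes (5, 1) each
assert (tgt[:-1] == seq[1:]).all()  # the target at step t is the input at step t + 1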
Code example #3
File: dataloader.py  Project: lonce/MTCRNN.Fork
    def rand_sample(self, index=None, verbose=False):
        if index is None:
            index = np.random.randint(self.indexLen)
        self.chooseFileIndex, self.startoffset = choose_sequence_notsame(
            index + 1, self.fileDuration, self.srInSec, self.stride)
        if self.is_seeded:
            self.startoffset = self.load_start / self.sr
            self.load_length = self.seqLen
        else:
            self.load_length = self.seqLen + 1
            while self.fileDuration[self.chooseFileIndex] < (
                    self.startoffset + self.seqLenInSec + 1 / self.sr):
                index = np.random.randint(self.indexLen)
                self.chooseFileIndex, self.startoffset = choose_sequence_notsame(
                    index + 1, self.fileDuration, self.srInSec, self.stride)
        if verbose:
            print('loading part of file:', self.filelist[self.chooseFileIndex],
                  'starting at', self.startoffset)

        pm = paramManager.paramManager(self.datadir, self.paramdir)
        self.params = pm.getParams(self.filelist[self.chooseFileIndex])
        generatedict = pm.resampleAllParams(self.params,
                                            self.load_length,
                                            self.startoffset,
                                            self.startoffset +
                                            self.seqLenInSec,
                                            self.generate,
                                            verbose=False)
        paramdict = {}  # keep the return well-defined when no conditioning params are requested
        if len(self.prop) > 0:
            paramdict = pm.resampleAllParams(self.params,
                                             self.load_length,
                                             self.startoffset,
                                             self.startoffset +
                                             self.seqLenInSec,
                                             self.prop,
                                             verbose=False)

        return generatedict, paramdict
Code example #4
File: spectset2snd.py  Project: lonce/sonyGanFork
def generate(parser):
    args = parser.parse_args()

    argsObj = vars(args)
    print(f"generate args: {argsObj}")

    model, config, model_name = load_model_checkp(**vars(args))
    latentDim = model.config.categoryVectorDim_G

    # We load a dummy data loader for post-processing
    postprocess = AudioPreprocessor(
        **config['transformConfig']).get_postprocessor()
    #### I WANT TO ADD NORMALIZATION HERE  ######################
    print(f"postprocess: {postprocess}")

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, f"generation_tests")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "2D_spectset")
    output_dir = mkdir_in_path(output_dir,
                               datetime.now().strftime('%Y-%m-%d_%H.%M'))

    gen_batch, latents = torch.load(argsObj["gen_batch"])

    interp_steps0 = int(argsObj["d0"])
    interp_steps0norm = interp_steps0 - 1  # because the batch generator will spread the steps out to include both endpoints

    interp_steps1 = int(argsObj["d1"])
    interp_steps1norm = interp_steps1 - 1  # because the batch generator will spread the steps out to include both endpoints

    usePM = argsObj["pm"]

    g = list(gen_batch)

    assert interp_steps0 * interp_steps1 == len(
        g
    ), f"product of d0, d1 interpolation steps({interp_steps0},{interp_steps1}) != batch length ({len(g)})"

    audio_out = map(postprocess, gen_batch)

    if not usePM:  #then just output as usual, including option to write latents if provided
        saveAudioBatch(audio_out,
                       path=output_dir,
                       basename='test_2D4pt',
                       sr=config["transformConfig"]["sample_rate"],
                       latents=latents)

    else:  # save paramManager files, (and don't write latents separately)
        data = list(audio_out)  #LW it was a map, make it a list
        zdata = zip(
            data,
            latents)  #zip so we can enumerate through pairs of data/latents

        vstep = -1  # gets incremented in loop

        #d1nvar=argsObj["d1nvar"]
        d1nvar = 1  # no variations for this spectset generation

        rowlength = interp_steps0 * d1nvar
        print(f'rowlength is {rowlength}')

        for k, (audio, params) in enumerate(zdata):
            istep = int(
                k / rowlength
            )  #the outer counter, orthogonal to the two lines defining the submanifold

            j = k % rowlength
            jstep = int(j / d1nvar)
            vstep = (vstep + 1) % d1nvar

            #print(f'doing row {istep}, col {jstep}, and variation {vstep}')

            if not isinstance(audio, np.ndarray):
                audio = np.array(audio, float)

            path = output_dir
            basename = 'test_spectset2snd'
            sr = config["transformConfig"]["sample_rate"]

            #foo=f'{basename}_{jstep}_{vstep}.wav'

            out_path = os.path.join(
                path, f'{basename}_d1.{istep}_d0.{jstep}_v.{vstep}.wav')
            # paramManager, create
            pm = paramManager.paramManager(
                out_path, output_dir
            )  ##-----------   paramManager  interface ------------------##
            #param_out_path = os.path.join(path, f'{basename}_{i}.params')
            pm.initParamFiles(overwrite=True)

            if not os.path.exists(out_path):
                #write_wav(out_path, audio.astype(float), sr)
                sf.write(out_path, audio.astype(float), sr)

                duration = len(audio.astype(float)) / float(sr)
                #print(f"duration is {duration}")
                if latents is not None:
                    #pm.addParam(out_path, "dim1", [0.0,duration], [(p-minpitch)/pitchrange,(p-minpitch)/pitchrange], units="norm, midip in[58,70]", nvals=0, minval='null', maxval='null')
                    pm.addParam(
                        out_path,
                        "dim0", [0.0, duration],
                        [jstep / interp_steps0norm, jstep / interp_steps0norm],
                        units=f'norm, interp steps in[0,{interp_steps0}]',
                        nvals=interp_steps0,
                        minval='null',
                        maxval='null')
                    if interp_steps1norm > 0:  #else just doing 1D interpolation
                        pm.addParam(
                            out_path,
                            "dim1", [0.0, duration], [
                                istep / interp_steps1norm,
                                istep / interp_steps1norm
                            ],
                            units=f'norm, interp steps in[0,{interp_steps1}]',
                            nvals=interp_steps1,
                            minval='null',
                            maxval='null')

                    segments = 11  # to include a full segment for each value including endpoints
                    envTimes, envVals = makesteps(
                        np.linspace(0, duration, segments + 1, True),
                        np.linspace(
                            0, 1, segments,
                            True))  #need one extra time to flank each value
                    pm.addParam(out_path,
                                "envPt",
                                envTimes,
                                envVals,
                                units=f"norm, duration in[0,{duration}]",
                                nvals=0,
                                minval='null',
                                maxval='null')

                    # write paramfile
                    #torch.save(params, param_out_path)
                    #np.savetxt(txt_param_out_path, params.cpu().numpy())
            else:
                print(f"saveAudioBatch: File {out_path} exists. Skipping...")
                continue

    print(f"GRID data output path/pattern: {out_path}\n")
Code example #5
def generate(parser):
    args = parser.parse_args()

    argsObj = vars(args)
    print(f"generate args: {argsObj}")

    model, config, model_name = load_model_checkp(**vars(args))
    latentDim = model.config.categoryVectorDim_G

    # We load a dummy data loader for post-processing
    postprocess = AudioPreprocessor(**config['transformConfig']).get_postprocessor()
    #### I WANT TO ADD NORMALIZATION HERE  ######################
    print(f"postprocess: {postprocess}")

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, f"generation_tests")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "2D")
    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d %H:%M'))

    # Create evaluation manager
    eval_manager = StyleGEvaluationManager(model, n_gen=2)

    z0 = torch.load(argsObj["z0"])
    z1 = torch.load(argsObj["z1"])

    minpitch = int(argsObj["p0"])
    maxpitch = int(argsObj["p1"])
    pitchrange = maxpitch - minpitch
    if pitchrange < 1:
        pitchrange = 1

    interp_steps1 = int(argsObj["d1"])
    interp_steps1norm = interp_steps1 - 1  # because the batch generator will spread the steps out to include both endpoints

    usePM = argsObj["pm"]
    print(f"interp_steps1 is {interp_steps1}, and usePM (use ParamManager) is {usePM}")

    for p in range(minpitch, maxpitch + 1):

        #######   ---- with conditioned pitch
        # linear:
        #gen_batch, latents = eval_manager.test_single_pitch_latent_interpolation(p_val=p, z0=z0, z1=z1, steps=10)
        # spherical:
        #gen_batch, latents = eval_manager.qslerp(pitch=p, z0=z0, z1=z1, steps=10)
        # staggered:
        gen_batch, latents = eval_manager.test_single_pitch_latent_staggered_interpolation(p_val=p, z0=z0, z1=z1, steps=interp_steps1, d1nvar=argsObj["d1nvar"], d1var=argsObj["d1var"])

        audio_out = map(postprocess, gen_batch)

        if not usePM:  # just output as usual, including the option to write latents if provided
            saveAudioBatch(audio_out,
                           path=output_dir,
                           basename='test_pitch_sweep' + "_" + str(p),
                           sr=config["transformConfig"]["sample_rate"],
                           latents=latents)

        else:  # save paramManager files (and don't write latents separately)
            data = list(audio_out)  #LW it was a map, make it a list
            zdata = zip(data, latents)  # zip so we can enumerate through pairs of data/latents

            istep = 0
            vstep = 0

            for i, (audio, params) in enumerate(zdata):
                istep = int(i / argsObj["d1nvar"])
                vstep = (vstep + 1) % argsObj["d1nvar"]

                if not isinstance(audio, np.ndarray):
                    audio = np.array(audio, float)

                path = output_dir
                basename = 'test_pitch_sweep' + "_" + str(p)
                sr = config["transformConfig"]["sample_rate"]

                #foo=f'{basename}_{istep}_{vstep}.wav'

                out_path = os.path.join(path, f'{basename}_{istep}_{vstep}.wav')
                # paramManager, create
                pm = paramManager.paramManager(out_path, output_dir)  ##-----------   paramManager  interface ------------------##
                #param_out_path = os.path.join(path, f'{basename}_{i}.params')
                pm.initParamFiles(overwrite=True)

                if not os.path.exists(out_path):
                    #write_wav(out_path, audio.astype(float), sr)
                    sf.write(out_path, audio.astype(float), sr)

                    duration = len(audio.astype(float)) / float(sr)
                    #print(f"duration is {duration}")
                    if latents is not None:
                        pm.addParam(out_path, "pitch", [0.0, duration], [(p - minpitch) / pitchrange, (p - minpitch) / pitchrange], units="norm, midip in[58,70]", nvals=0, minval='null', maxval='null')
                        pm.addParam(out_path, "instID", [0.0, duration], [istep / interp_steps1norm, istep / interp_steps1norm], units="norm, interp steps in[0,10]", nvals=10, minval='null', maxval='null')
                        #pm.addParam(out_path, "envPt", [0.0,duration], [0,1.0], units=f"norm, duration in[0,{duration}]", nvals=0, minval='null', maxval='null')

                        segments = 11  # to include a full segment for each value including endpoints
                        envTimes, envVals = makesteps(np.linspace(0, duration, segments + 1, True), np.linspace(0, 1, segments, True))  # need one extra time to flank each value
                        pm.addParam(out_path, "envPt", envTimes, envVals, units=f"norm, duration in[0,{duration}]", nvals=0, minval='null', maxval='null')

                        # write paramfile
                        #torch.save(params, param_out_path)
                        #np.savetxt(txt_param_out_path, params.cpu().numpy())
                else:
                    print(f"saveAudioBatch: File {out_path} exists. Skipping...")
                    continue

    print("FINISHED!\n")
Code example #6
def generate(parser):
    args = parser.parse_args()

    argsObj = vars(args)
    print(f"generate args: {argsObj}")

    model, config, model_name = load_model_checkp(**vars(args))
    latentDim = model.config.categoryVectorDim_G

    # We load a dummy data loader for post-processing
    postprocess = AudioPreprocessor(**config['transformConfig']).get_postprocessor()
    #### I WANT TO ADD NORMALIZATION HERE  ######################
    print(f"postprocess: {postprocess}")

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, f"generation_tests")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "2D")
    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d %H:%M'))

    # Create evaluation manager
    eval_manager = StyleGEvaluationManager(model, n_gen=2)

    z0 = torch.load(argsObj["z0"])
    z1 = torch.load(argsObj["z1"])
    if argsObj["z2"] == None :
        z2=0
    else :
        z2=torch.load(argsObj["z2"])
    if argsObj["z3"] == None : 
        z3 = 0
    else :
        z3=torch.load(argsObj["z3"])


    interp_steps0 = int(argsObj["d0"])
    interp_steps0norm = interp_steps0 - 1  # because the batch generator will spread the steps out to include both endpoints

    interp_steps1 = int(argsObj["d1"])
    interp_steps1norm = interp_steps1 - 1  # because the batch generator will spread the steps out to include both endpoints

    usePM = argsObj["pm"]
    print(f"interp_steps0 is {interp_steps0}, interp_steps1 is {interp_steps1}, and usePM (use ParamManager) is {usePM}")

    #######   ---- unconditioned
    gen_batch, latents = eval_manager.unconditioned_linear_interpolation(line0z0=z0, line0z1=z1, line1z0=z2, line1z1=z3, d0steps=interp_steps0, d1steps=interp_steps1, d1nvar=argsObj["d1nvar"], d1var=argsObj["d1var"])

    g = list(gen_batch)

    audio_out = map(postprocess, gen_batch)

    # save the .pt files no matter what, since we may want to use them to zoom in, resample, or whatever
    if not usePM:
        saveAudioBatch(audio_out,
                       path=output_dir,
                       basename='test_2D4pt',
                       sr=config["transformConfig"]["sample_rate"],
                       latents=latents)

    else:  # save paramManager files (and don't write latents separately)
        data = list(audio_out)  #LW it was a map, make it a list
        zdata = zip(data, latents)  # zip so we can enumerate through pairs of data/latents

        vstep = -1  # gets incremented in loop

        rowlength = interp_steps0 * argsObj["d1nvar"]
        print(f'rowlength is {rowlength}')

        for k, (audio, params) in enumerate(zdata):
            istep = int(k / rowlength)  # the outer counter, orthogonal to the two lines defining the submanifold

            j = k % rowlength
            jstep = int(j / argsObj["d1nvar"])
            vstep = (vstep + 1) % argsObj["d1nvar"]

            print(f'doing row {istep}, col {jstep}, and variation {vstep}')

            if not isinstance(audio, np.ndarray):
                audio = np.array(audio, float)

            path = output_dir
            basename = 'test_2D4pt'
            sr = config["transformConfig"]["sample_rate"]

            #foo=f'{basename}_{jstep}_{vstep}.wav'

            out_path = os.path.join(path, f'{basename}_d1.{istep}_d0.{jstep}_v.{vstep}.wav')
            # paramManager, create
            pm = paramManager.paramManager(out_path, output_dir)  ##-----------   paramManager  interface ------------------##
            #param_out_path = os.path.join(path, f'{basename}_{i}.params')
            pm.initParamFiles(overwrite=True)

            # also save .pt files in case we want to use them to create other grids, scales, etc.
            pt_param_out_path = os.path.join(path, f'{basename}_d1.{istep}_d0.{jstep}_v.{vstep}.pt')
            torch.save(params, pt_param_out_path)

            if not os.path.exists(out_path):
                #write_wav(out_path, audio.astype(float), sr)
                sf.write(out_path, audio.astype(float), sr)

                duration = len(audio.astype(float)) / float(sr)
                #print(f"duration is {duration}")
                if latents is not None:
                    #pm.addParam(out_path, "dim1", [0.0,duration], [(p-minpitch)/pitchrange,(p-minpitch)/pitchrange], units="norm, midip in[58,70]", nvals=0, minval='null', maxval='null')
                    pm.addParam(out_path, "dim0", [0.0, duration], [jstep / interp_steps0norm, jstep / interp_steps0norm], units=f'norm, interp steps in[0,{interp_steps0}]', nvals=interp_steps0, minval='null', maxval='null')
                    if interp_steps1norm > 0:  # else just doing 1D interpolation
                        pm.addParam(out_path, "dim1", [0.0, duration], [istep / interp_steps1norm, istep / interp_steps1norm], units=f'norm, interp steps in[0,{interp_steps1}]', nvals=interp_steps1, minval='null', maxval='null')

                    segments = 11  # to include a full segment for each value including endpoints
                    envTimes, envVals = makesteps(np.linspace(0, duration, segments + 1, True), np.linspace(0, 1, segments, True))  # need one extra time to flank each value
                    pm.addParam(out_path, "envPt", envTimes, envVals, units=f"norm, duration in[0,{duration}]", nvals=0, minval='null', maxval='null')

                    # write paramfile
                    #torch.save(params, param_out_path)
                    #np.savetxt(txt_param_out_path, params.cpu().numpy())
            else:
                print(f"saveAudioBatch: File {out_path} exists. Skipping...")
                continue
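
For completeness, a hypothetical driver for these generate(parser) functions. The flag names come straight from the argsObj[...] lookups above, but the defaults and help text are guesses, and the real CLI presumably also defines whatever checkpoint flags load_model_checkp(**vars(args)) expects:

import argparse

parser = argparse.ArgumentParser(description="2D latent-grid generation (hypothetical driver)")
parser.add_argument("--dir", default="output")           # root dir for generation_tests
parser.add_argument("--z0", required=True)               # paths to saved latent .pt files
parser.add_argument("--z1", required=True)
parser.add_argument("--z2", default=None)
parser.add_argument("--z3", default=None)
parser.add_argument("--d0", default=10)                  # interpolation steps along dim0
parser.add_argument("--d1", default=10)                  # interpolation steps along dim1
parser.add_argument("--d1nvar", type=int, default=1)     # variations per d1 step
parser.add_argument("--d1var", type=float, default=0.01) # variation magnitude
parser.add_argument("--pm", action="store_true")         # write paramManager files
generate(parser)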