def generate(parser):
    """Dump real (non-generated) samples from the dataset for listening tests.

    Adds ``--val`` / ``-c`` options, loads the data config, pulls either the
    validation set or a random subset of the training data, post-processes it
    back to audio and writes the result under ``args.outdir``.

    Args:
        parser: an argparse.ArgumentParser; ``outdir`` and ``n_gen`` are
            assumed to be registered by the caller — TODO confirm.
    """
    parser.add_argument("--val", dest="val", action='store_true')
    parser.add_argument("-c", dest="config", type=str)
    args = parser.parse_args()
    config = read_json(args.config)

    # We load a dummy data loader for post-processing
    transform_config = config['transform_config']
    loader_config = config['loader_config']
    processor = AudioProcessor(**transform_config)
    postprocess = processor.get_postprocessor()

    # Validate the output path explicitly: an assert would be silently
    # stripped when Python runs with -O, hiding the misconfiguration.
    if not os.path.exists(args.outdir):
        raise FileNotFoundError("Output path does not exist")

    # Create output evaluation dir, stamped with split name, count and time.
    trval = 'val' if args.val else 'train'
    output_dir = mkdir_in_path(args.outdir, f"true_sample_{config['name']}")
    output_dir = mkdir_in_path(
        output_dir,
        f"{trval}_{args.n_gen}_{datetime.now().strftime('%Y-%m-%d_%H_%M')}")

    dbname = loader_config['dbname']
    loader = get_data_loader(dbname)(
        name=dbname + '_' + transform_config['transform'],
        preprocessing=processor,
        **loader_config)

    if args.val:
        data, _ = loader.get_validation_set(args.n_gen)
    else:
        data = random.sample(loader.data, k=args.n_gen)

    # Lazily map the inverse transform over the selected samples.
    audio_out = map(postprocess, data)
    saveAudioBatch(audio_out,
                   path=output_dir,
                   basename='true_sample',
                   sr=config["transform_config"]["sample_rate"])
    print("FINISHED!\n")
def generate(parser):
    """Generate samples with random z vectors at one fixed conditioning value.

    Loads a model checkpoint, picks the first conditioning attribute declared
    in the loader config, generates ``n_gen`` samples for a single condition
    value and writes the post-processed audio to a timestamped directory.
    Exits with a non-zero status when the model has no conditioning attribute.
    """
    args = parser.parse_args()
    model, config, model_name = load_model_checkp(**vars(args))

    # Check that a conditioning attribute is present; without one this
    # sweep is meaningless. Exit with status 1 so callers/scripts can
    # detect the failure (the old code exited 0 on this error path).
    if 'attribute_list' in config['loaderConfig']:
        condition_parameter = config['loaderConfig']['attribute_list'][0]
    else:
        print("There is no conditioning parameter ('attribute_list' is empty). Exiting!")
        sys.exit(1)

    # We load a dummy data loader for post-processing
    postprocess = AudioPreprocessor(**config['transformConfig']).get_postprocessor()

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, "generation_tests")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "randz_constc")
    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d %H:%M'))

    # Fixed normalized condition value; 1/9 — presumably maps onto a
    # specific pitch step (the old "#75" note) — TODO confirm.
    p_val = 0.1111111111111111  # 75
    eval_manager = StyleGEvaluationManager(model, n_gen=20)
    gen_batch, latents = eval_manager.test_single_pitch_random_z(condition_parameter, p_val)

    output_path = mkdir_in_path(output_dir, "one_z_pitch_sweep_" + str(p_val))
    audio_out = map(postprocess, gen_batch)
    saveAudioBatch(audio_out,
                   path=output_path,
                   basename='test_pitch_sweep',
                   sr=config["transformConfig"]["sample_rate"],
                   latents=latents)
    print("FINISHED!\n")
def generate(parser):
    """Render a single-pitch latent interpolation from a trained checkpoint.

    Loads the model named on the command line, interpolates in latent space
    at a fixed pitch, converts the generated batch back to audio and stores
    it under a timestamped ``generation_tests/interpolation`` directory.
    """
    args = parser.parse_args()
    model, config, model_name = load_model_checkp(**vars(args))
    latent_dim = model.config.categoryVectorDim_G

    # A dummy pre-processor gives us the inverse (audio) transform.
    postprocess = AudioPreprocessor(**config['transformConfig']).get_postprocessor()

    # Build the nested output directory one component at a time.
    out_dir = args.dir
    for part in ("generation_tests",
                 model_name,
                 "interpolation",
                 datetime.now().strftime('%Y-%m-%d %H:%M')):
        out_dir = mkdir_in_path(out_dir, part)

    # Run the interpolation through the evaluation manager.
    manager = StyleGEvaluationManager(model, n_gen=100)
    batch = manager.test_single_pitch_latent_interpolation()

    sweep_dir = mkdir_in_path(out_dir, "one_z_pitch_sweep")
    rendered = (postprocess(sample) for sample in batch)
    saveAudioBatch(rendered,
                   path=sweep_dir,
                   basename='test_pitch_sweep',
                   sr=config["transformConfig"]["sample_rate"])
    print("FINISHED!\n")
def run_tests_evaluation_and_visualization(self, scale):
    """Run the per-scale GAN evaluation and publish reports/audio/plots.

    Creates ``scale_<scale>/iter_<iter>`` output folders, runs
    ``self.test_GAN()``, then (depending on config flags) writes an
    attribute-classification report, saves generated audio, and publishes
    waveform plots through the visualization manager.

    Args:
        scale: index of the progressive-growing scale being evaluated.
    """
    scale_output_dir = mkdir_in_path(self.output_dir, f'scale_{scale}')
    iter_output_dir = mkdir_in_path(scale_output_dir, f'iter_{self.iter}')
    # Local import — presumably avoids a circular import at module load time.
    from utils.utils import saveAudioBatch

    # test_GAN() returns discriminator outputs, embeddings and raw batches
    # for true / fake / averaged-G fake data — exact shapes depend on the
    # project's GAN class (not visible here).
    D_true, true_emb, \
    D_fake, fake_emb, \
    D_fake_avg, fake_avg_emb, \
    true, fake, fake_avg = self.test_GAN()

    if self.modelConfig.ac_gan:
        # Classification report for the AC-GAN attribute heads.
        output_dir = mkdir_in_path(iter_output_dir, 'classification_report')
        if not hasattr(self, 'cls_vis'):
            from visualization.visualization import AttClassifVisualizer
            self.cls_vis = AttClassifVisualizer(
                output_path=output_dir,
                env=self.modelLabel,
                save_figs=True,
                attributes=self.loader.header['attributes'].keys(),
                att_val_dict=self.loader.header['attributes'])
        # Re-point the (cached) visualizer at this iteration's folder.
        self.cls_vis.output_path = output_dir
        self.cls_vis.publish(
            self.ref_labels_str,
            D_true,
            name=f'{scale}_true',
            title=f'scale {scale} True data')
        self.cls_vis.publish(
            self.ref_labels_str,
            D_fake,
            name=f'{scale}_fake',
            title=f'scale {scale} Fake data')

    if self.save_gen:
        # Persist the generated batch as audio files.
        output_dir = mkdir_in_path(iter_output_dir, 'generation')
        saveAudioBatch(
            self.loader.postprocess(fake),
            path=output_dir,
            basename=f'gen_audio_scale_{scale}')

    if self.vis_manager != None:
        output_dir = mkdir_in_path(iter_output_dir, 'audio_plots')
        # Only render audio at the last two scales — earlier scales are
        # presumably too low-resolution to be worth listening to.
        if scale >= self.n_scales - 2:
            self.vis_manager.renderAudio = True
        self.vis_manager.set_postprocessing(
            self.loader.get_postprocessor())
        # Publish only the first 5 examples of each batch.
        self.vis_manager.publish(
            true[:5],
            # labels=D_true[:][:5],
            name=f'real_scale_{scale}',
            output_dir=output_dir)
        self.vis_manager.publish(
            fake[:5],
            # labels=D_fake[0][:5],
            name=f'gen_scale_{scale}',
            output_dir=output_dir)
def generate(parser):
    """Generate audio along a radial interpolation of the noise vector.

    Keeps one randomly sampled conditioning label fixed, sweeps the noise
    part of the latent code along a radial path of ``n_gen`` steps, runs
    the generator, and saves the post-processed audio.
    """
    args = parser.parse_args()
    device = get_device()
    model, config, model_name = load_model_checkp(**vars(args))
    noise_dim = model.config.noiseVectorDim

    transform_config = config['transform_config']
    loader_config = config['loader_config']

    # Dummy loader: only needed for its post-processing (inverse transform).
    processor = AudioProcessor(**transform_config)
    dbname = loader_config['dbname']
    loader_config["criteria"]["size"] = 1000
    loader = get_data_loader(dbname)(
        name=dbname + '_' + transform_config['transform'],
        preprocessing=processor,
        **loader_config)

    # One random label, replicated across the batch; only the noise part
    # of the latent code varies along the radial path.
    picked = torch.Tensor(random.sample(loader.metadata, k=1))
    seed_latent, _ = model.buildNoiseData(1, inputLabels=picked, skipAtts=True)
    latent_batch = seed_latent.repeat(args.n_gen, 1)
    latent_batch[:, :noise_dim] = radial_interpolation(noise_dim, args.n_gen)

    generator = model.getOriginalG()
    generator.eval()
    with torch.no_grad():
        generated = generator(latent_batch.to(device)).detach().cpu()
    audio_out = loader.postprocess(generated)

    # Timestamped output directory, built component by component.
    out_dir = args.dir
    for part in ("generation_tests",
                 model_name,
                 "radial_interpolation",
                 datetime.now().strftime('%Y-%m-%d %H:%M')):
        out_dir = mkdir_in_path(out_dir, part)

    saveAudioBatch(audio_out,
                   path=out_dir,
                   basename='test_radial_interpolation',
                   sr=config["transform_config"]["sample_rate"])
    print("FINISHED!\n")
def test(parser):
    """Score data with the discriminator and save extreme-W-distance samples.

    Runs either real data or generator output (``--gen``) through the
    discriminator, tracks the Wasserstein head output per sample, then saves
    the 20 lowest- and 20 highest-|W| samples as audio for inspection.
    """
    parser.add_argument('--size', dest='size', default=1000, type=int)
    parser.add_argument('--gen', dest='gen', action='store_true')
    args = parser.parse_args()
    kargs = vars(args)
    device = get_device()
    model, config, model_name = load_model_checkp(**kargs)

    transform_config = config['transform_config']
    loader_config = config['loader_config']

    d_net = model.getOriginalD().to(device)
    g_net = model.netG.to(device).eval()
    d_net.eval()

    # We load a dummy data loader for post-processing
    processor = AudioProcessor(**transform_config)
    dbname = loader_config['dbname']
    loader_config["criteria"]["size"] = args.size
    loader = get_data_loader(dbname)(
        name=dbname + '_' + transform_config['transform'],
        preprocessing=processor,
        **loader_config)
    att_dict = loader.header['attributes']
    criterion = ACGANCriterion(att_dict)

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, "tests_D")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d %H:%M'))

    batch_size = min(args.batch_size, len(loader))
    data_loader = DataLoader(loader,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=2)
    data_iter = iter(data_loader)
    iter_bar = trange(len(data_iter), desc='epoch-loop')

    D_loss = []
    data = []
    for j in iter_bar:
        with torch.no_grad():
            # BUGFIX: `data_iter.next()` is the removed Python-2 iterator
            # protocol; modern DataLoader iterators only support the
            # built-in next().
            batch, target = next(data_iter)
            if args.gen:
                # Replace the real batch with generator output conditioned
                # on the same labels.
                z, _ = model.buildNoiseData(target.size(0),
                                            inputLabels=target,
                                            skipAtts=True)
                batch = g_net(z)
            pred = d_net(batch.float().to(device)).cpu()
            clf_loss = criterion.getCriterion(pred, target.cpu())
            # Last output column is the Wasserstein (critic) score.
            D_loss.append(pred[:, -1])
            data.append(batch.cpu())
            state_msg = (f'Iter: {j}; avg D_nloss: '
                         f'{sum(pred[:, -1])/len(pred[:, -1]):0.3f}, '
                         f'classif_loss: {clf_loss:0.3f}')
            iter_bar.set_description(state_msg)

    # Sort all samples by |W-distance| and save the two extremes.
    D_loss = torch.cat(D_loss)
    data = torch.cat(data)
    D_loss, idx = abs(D_loss).sort()
    audio_out = loader.postprocess(data[idx[:20]])
    saveAudioBatch(audio_out,
                   path=output_dir,
                   basename='low_W-distance',
                   sr=config["transform_config"]["sample_rate"])
    audio_out = loader.postprocess(data[idx[-20:]])
    saveAudioBatch(audio_out,
                   path=output_dir,
                   basename='high_W-distance',
                   sr=config["transform_config"]["sample_rate"])
    print("FINISHED!\n")
def generate(parser):
    """Write a pre-generated 2-D interpolation batch as a WAV grid.

    Loads a saved ``(gen_batch, latents)`` pair from ``--gen_batch``,
    checks it matches the ``d0`` x ``d1`` grid dimensions, post-processes
    to audio and either saves it in one call (default) or, with ``--pm``,
    writes one WAV per grid cell plus paramManager parameter files
    describing each cell's normalized grid coordinates.
    """
    args = parser.parse_args()
    argsObj = vars(args)
    print(f"generate args: {argsObj}")

    model, config, model_name = load_model_checkp(**vars(args))
    latentDim = model.config.categoryVectorDim_G

    # We load a dummy data loader for post-processing
    postprocess = AudioPreprocessor(
        **config['transformConfig']).get_postprocessor()
    #### I WANT TO ADD NORMALIZATION HERE ######################
    print(f"postprocess: {postprocess}")

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, f"generation_tests")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "2D_spectset")
    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d_%H.%M'))

    # Saved batch: a (generated tensors, latent vectors) pair.
    gen_batch, latents = torch.load(argsObj["gen_batch"])

    interp_steps0 = int(argsObj["d0"])
    interp_steps0norm = interp_steps0 - 1  # because the batch generater will spread the steps out to include both endpoints
    interp_steps1 = int(argsObj["d1"])
    interp_steps1norm = interp_steps1 - 1  # because the batch generater will spread the steps out to include both endpoints

    usePM = argsObj["pm"]

    # Sanity check: batch length must equal the full d0 x d1 grid.
    g = list(gen_batch)
    assert interp_steps0 * interp_steps1 == len(
        g
    ), f"product of d0, d1 interpolation steps({interp_steps0},{interp_steps1}) != batch length ({len(g)})"

    audio_out = map(postprocess, gen_batch)

    if not usePM:  # then just output as usual, including option to write latents if provided
        saveAudioBatch(audio_out,
                       path=output_dir,
                       basename='test_2D4pt',
                       sr=config["transformConfig"]["sample_rate"],
                       latents=latents)
    else:  # save paramManager files, (and don't write latents separately)
        data = list(audio_out)  # LW it was a map, make it a list
        zdata = zip(data, latents)  # zip so we can enumerate through pairs of data/latents

        vstep = -1  # gets incremented in loop
        # d1nvar=argsObj["d1nvar"]
        d1nvar = 1  # no variations for this spectset generation

        rowlength = interp_steps0 * d1nvar
        print(f'rowlength is {rowlength}')

        for k, (audio, params) in enumerate(zdata):
            # Decompose the flat batch index k into grid coordinates:
            # istep = row (dim1), jstep = column (dim0), vstep = variation.
            istep = int(
                k / rowlength
            )  # the outer counter, orthogonal to the two lines defining the submanifold
            j = k % rowlength
            jstep = int(j / d1nvar)
            vstep = (vstep + 1) % d1nvar
            # print(f'doing row {istep}, col {jstep}, and variation {vstep}')

            if type(audio) != np.ndarray:
                audio = np.array(audio, float)

            path = output_dir
            basename = 'test_spectset2snd'
            sr = config["transformConfig"]["sample_rate"]

            # foo=f'{basename}_{jstep}_{vstep}.wav'
            out_path = os.path.join(
                path, f'{basename}_d1.{istep}_d0.{jstep}_v.{vstep}.wav')

            # paramManager, create
            pm = paramManager.paramManager(
                out_path, output_dir
            )  ##----------- paramManager interface ------------------##
            # param_out_path = os.path.join(path, f'{basename}_{i}.params')
            pm.initParamFiles(overwrite=True)

            if not os.path.exists(out_path):
                # write_wav(out_path, audio.astype(float), sr)
                sf.write(out_path, audio.astype(float), sr)
                duration = len(audio.astype(float)) / float(sr)
                # print(f"duration is {duration}")

                # NOTE(review): `latents != None` — if latents is a torch
                # tensor this is an elementwise comparison, not a None
                # check; `is not None` would be safer. Confirm before
                # changing.
                if latents != None:
                    # pm.addParam(out_path, "dim1", [0.0,duration], [(p-minpitch)/pitchrange,(p-minpitch)/pitchrange], units="norm, midip in[58,70]", nvals=0, minval='null', maxval='null')
                    # Constant-valued params over the clip: normalized grid
                    # coordinates of this cell.
                    pm.addParam(
                        out_path,
                        "dim0", [0.0, duration],
                        [jstep / interp_steps0norm, jstep / interp_steps0norm],
                        units=f'norm, interp steps in[0,{interp_steps0}]',
                        nvals=interp_steps0,
                        minval='null',
                        maxval='null')
                    if interp_steps1norm > 0:  # else just doing 1D interpolation
                        pm.addParam(
                            out_path,
                            "dim1", [0.0, duration],
                            [
                                istep / interp_steps1norm,
                                istep / interp_steps1norm
                            ],
                            units=f'norm, interp steps in[0,{interp_steps1}]',
                            nvals=interp_steps1,
                            minval='null',
                            maxval='null')

                    # Stepped 0..1 envelope over the clip duration.
                    segments = 11  # to include a full segment for each value including endpoints
                    envTimes, envVals = makesteps(
                        np.linspace(0, duration, segments + 1, True),
                        np.linspace(
                            0, 1, segments,
                            True))  # need one extra time to flank each value
                    pm.addParam(out_path,
                                "envPt",
                                envTimes,
                                envVals,
                                units=f"norm, duration in[0,{duration}]",
                                nvals=0,
                                minval='null',
                                maxval='null')

                    # write paramfile
                    # torch.save(params, param_out_path)
                    # np.savetxt(txt_param_out_path, params.cpu().numpy())
            else:
                print(f"saveAudioBatch: File {out_path} exists. Skipping...")
                continue

        print(f"GRID data output path/pattern: {out_path}\n")
def generate(parser):
    """Generate random samples from a checkpoint, optionally label-conditioned.

    Labels come from the validation set (``--val``), the training metadata
    (``--train``) or the loader's random-label sampler; generation runs in
    mini-batches of ``args.batch_size`` and the audio is saved under a
    timestamped directory. ``--dump-labels`` additionally writes the input
    labels to ``params_in.txt``.
    """
    parser.add_argument("--val", dest="val", action='store_true')
    parser.add_argument("--train", dest="train", action='store_true')
    parser.add_argument("--avg-net", dest="avg_net", action='store_true')
    parser.add_argument("--name", dest="name", default="")
    parser.add_argument("--dump-labels", dest="dump_labels", action="store_true")
    args = parser.parse_args()
    model, config, model_name = load_model_checkp(**vars(args))
    latentDim = model.config.categoryVectorDim_G

    # We load a dummy data loader for post-processing
    transform_config = config['transform_config']
    loader_config = config['loader_config']
    processor = AudioProcessor(**transform_config)
    postprocess = processor.get_postprocessor()

    # Create output evaluation dir, tagged by the label source.
    if args.val:
        name = args.name + '_val_labels'
    elif args.train:
        name = args.name + '_train_labels'
    else:
        name = args.name + '_rand_labels'
    if args.outdir == "":
        args.outdir = args.dir
    output_dir = mkdir_in_path(args.outdir, "generation_samples")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "random")
    output_dir = mkdir_in_path(
        output_dir, name + '_' + datetime.now().strftime('%Y-%m-%d_%H_%M'))

    dbname = loader_config['dbname']
    loader = get_data_loader(dbname)(
        name=dbname + '_' + transform_config['transform'],
        preprocessing=processor,
        **loader_config)

    # Choose conditioning labels only for AC-GAN models; otherwise the
    # noise is unconditioned and labels stays None.
    labels = None
    if model.config.ac_gan:
        if args.val:
            val_set = loader.get_validation_set()[1]
            perm = torch.randperm(val_set.size(0))
            idx = perm[:args.n_gen]
            labels = val_set[idx]
        elif args.train:
            labels = torch.Tensor(random.sample(loader.metadata, k=args.n_gen))
        else:
            labels = loader.get_random_labels(args.n_gen)

    z, _ = model.buildNoiseData(args.n_gen, inputLabels=labels, skipAtts=True)

    # Generate in mini-batches to bound memory use.
    data_batch = []
    with torch.no_grad():
        for i in range(int(np.ceil(args.n_gen / args.batch_size))):
            data_batch.append(
                model.test(z[i * args.batch_size:args.batch_size * (i + 1)],
                           toCPU=True,
                           getAvG=args.avg_net).cpu())
    data_batch = torch.cat(data_batch, dim=0)
    audio_out = map(postprocess, data_batch)
    saveAudioBatch(audio_out,
                   path=output_dir,
                   basename='sample',
                   sr=config["transform_config"]["sample_rate"])

    if args.dump_labels:
        # BUGFIX: labels is None for non-AC-GAN models; indexing it would
        # raise TypeError. Warn instead of crashing.
        if labels is None:
            print("--dump-labels requested but model has no labels; skipping dump.")
        else:
            with open(f"{output_dir}/params_in.txt", "a") as f:
                for i in tqdm(range(args.n_gen), desc='Creating Samples'):
                    params = labels[i, :-1].tolist()
                    f.write(f"{i}, {list(params)}\n")
    print("FINISHED!\n")
def generate(parser):
    """Pitch-conditioned latent interpolation sweep, optionally via paramManager.

    For each MIDI pitch in ``[p0, p1]`` runs a staggered latent
    interpolation between two saved z vectors (``--z0``/``--z1``), renders
    the batch to audio and either saves it in one call (default) or, with
    ``--pm``, writes one WAV per interpolation step/variation plus
    paramManager parameter files (normalized pitch, instrument-interp
    position and a stepped envelope).
    """
    args = parser.parse_args()
    argsObj = vars(args)
    print(f"generate args: {argsObj}")

    model, config, model_name = load_model_checkp(**vars(args))
    latentDim = model.config.categoryVectorDim_G

    # We load a dummy data loader for post-processing
    postprocess = AudioPreprocessor(**config['transformConfig']).get_postprocessor()
    #### I WANT TO ADD NORMALIZATION HERE ######################
    print(f"postprocess: {postprocess}")

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, f"generation_tests")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "2D")
    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d %H:%M'))

    # Create evaluation manager
    eval_manager = StyleGEvaluationManager(model, n_gen=2)

    # Endpoint latent vectors, loaded from files given on the command line.
    z0 = torch.load(argsObj["z0"])
    z1 = torch.load(argsObj["z1"])

    minpitch = int(argsObj["p0"])
    maxpitch = int(argsObj["p1"])
    pitchrange = maxpitch - minpitch
    if pitchrange < 1:
        # Avoid division by zero when normalizing a single-pitch sweep.
        pitchrange = 1

    interp_steps1 = int(argsObj["d1"])
    interp_steps1norm = interp_steps1 - 1  # because the batch generater will spread the steps out to include both endpoints

    usePM = argsObj["pm"]
    print(f"interp_steps1 is {interp_steps1}, and usePM (use ParamManager) is {usePM}")

    for p in range(minpitch, maxpitch + 1):
        ####### ---- with conditioned pitch
        # linear
        # gen_batch, latents = eval_manager.test_single_pitch_latent_interpolation(p_val=p, z0=z0, z1=z1, steps=10)
        # sperical
        # gen_batch, latents = eval_manager.qslerp(pitch=p, z0=z0, z1=z1, steps=10)
        # staggred
        gen_batch, latents = eval_manager.test_single_pitch_latent_staggered_interpolation(
            p_val=p,
            z0=z0,
            z1=z1,
            steps=interp_steps1,
            d1nvar=argsObj["d1nvar"],
            d1var=argsObj["d1var"])

        audio_out = map(postprocess, gen_batch)

        if not usePM:  # then just output as usual, including option to write latents if provided
            saveAudioBatch(audio_out,
                           path=output_dir,
                           basename='test_pitch_sweep' + "_" + str(p),
                           sr=config["transformConfig"]["sample_rate"],
                           latents=latents)
        else:  # save paramManager files, (and don't write latents separately)
            data = list(audio_out)  # LW it was a map, make it a list
            zdata = zip(data, latents)  # zip so we can enumerate through pairs of data/latents

            istep = 0
            vstep = 0
            for i, (audio, params) in enumerate(zdata):
                # istep = interpolation step index, vstep = variation index
                # within that step.
                istep = int(i / argsObj["d1nvar"])
                vstep = (vstep + 1) % argsObj["d1nvar"]

                if type(audio) != np.ndarray:
                    audio = np.array(audio, float)

                path = output_dir
                basename = 'test_pitch_sweep' + "_" + str(p)
                sr = config["transformConfig"]["sample_rate"]

                # foo=f'{basename}_{istep}_{vstep}.wav'
                out_path = os.path.join(path, f'{basename}_{istep}_{vstep}.wav')

                # paramManager, create
                pm = paramManager.paramManager(out_path, output_dir)
                ##----------- paramManager interface ------------------##
                # param_out_path = os.path.join(path, f'{basename}_{i}.params')
                pm.initParamFiles(overwrite=True)

                if not os.path.exists(out_path):
                    # write_wav(out_path, audio.astype(float), sr)
                    sf.write(out_path, audio.astype(float), sr)
                    duration = len(audio.astype(float)) / float(sr)
                    # print(f"duration is {duration}")

                    # NOTE(review): `latents != None` — elementwise if
                    # latents is a tensor; `is not None` would be safer.
                    if latents != None:
                        # Constant params over the clip: normalized pitch
                        # and interpolation position.
                        pm.addParam(out_path, "pitch", [0.0, duration],
                                    [(p - minpitch) / pitchrange,
                                     (p - minpitch) / pitchrange],
                                    units="norm, midip in[58,70]",
                                    nvals=0,
                                    minval='null',
                                    maxval='null')
                        pm.addParam(out_path, "instID", [0.0, duration],
                                    [istep / interp_steps1norm,
                                     istep / interp_steps1norm],
                                    units="norm, interp steps in[0,10]",
                                    nvals=10,
                                    minval='null',
                                    maxval='null')
                        # pm.addParam(out_path, "envPt", [0.0,duration], [0,1.0], units=f"norm, duration in[0,{duration}]", nvals=0, minval='null', maxval='null')
                        # Stepped 0..1 envelope across the clip duration.
                        segments = 11  # to include a full segment for each value including endpoints
                        envTimes, envVals = makesteps(
                            np.linspace(0, duration, segments + 1, True),
                            np.linspace(0, 1, segments, True))  # need one extra time to flank each value
                        pm.addParam(out_path, "envPt", envTimes, envVals,
                                    units=f"norm, duration in[0,{duration}]",
                                    nvals=0,
                                    minval='null',
                                    maxval='null')

                        # write paramfile
                        # torch.save(params, param_out_path)
                        # np.savetxt(txt_param_out_path, params.cpu().numpy())
                else:
                    print(f"saveAudioBatch: File {out_path} exists. Skipping...")
                    continue

    print("FINISHED!\n")
def generate(parser):
    """Unconditioned 2-D (4-point) latent interpolation grid, optional paramManager.

    Interpolates between up to four saved z vectors (``--z0``..``--z3``)
    along two axes (``d0`` x ``d1`` steps, with ``d1nvar`` variations per
    cell), renders the grid to audio, and either saves it in one call
    (default) or, with ``--pm``, writes one WAV + one ``.pt`` latent file
    per cell plus paramManager parameter files with the normalized grid
    coordinates and a stepped envelope.
    """
    args = parser.parse_args()
    argsObj = vars(args)
    print(f"generate args: {argsObj}")

    model, config, model_name = load_model_checkp(**vars(args))
    latentDim = model.config.categoryVectorDim_G

    # We load a dummy data loader for post-processing
    postprocess = AudioPreprocessor(**config['transformConfig']).get_postprocessor()
    #### I WANT TO ADD NORMALIZATION HERE ######################
    print(f"postprocess: {postprocess}")

    # Create output evaluation dir
    output_dir = mkdir_in_path(args.dir, f"generation_tests")
    output_dir = mkdir_in_path(output_dir, model_name)
    output_dir = mkdir_in_path(output_dir, "2D")
    output_dir = mkdir_in_path(output_dir, datetime.now().strftime('%Y-%m-%d %H:%M'))

    # Create evaluation manager
    eval_manager = StyleGEvaluationManager(model, n_gen=2)

    # Corner latent vectors; z2/z3 default to 0 when not supplied
    # (presumably the eval manager treats 0 as "unused" — TODO confirm).
    z0 = torch.load(argsObj["z0"])
    z1 = torch.load(argsObj["z1"])
    if argsObj["z2"] == None:
        z2 = 0
    else:
        z2 = torch.load(argsObj["z2"])
    if argsObj["z3"] == None:
        z3 = 0
    else:
        z3 = torch.load(argsObj["z3"])

    interp_steps0 = int(argsObj["d0"])
    interp_steps0norm = interp_steps0 - 1  # because the batch generater will spread the steps out to include both endpoints
    interp_steps1 = int(argsObj["d1"])
    interp_steps1norm = interp_steps1 - 1  # because the batch generater will spread the steps out to include both endpoints

    usePM = argsObj["pm"]
    print(f"interp_steps0 is {interp_steps0}, interp_steps1 is {interp_steps1}, and usePM (use ParamManager) is {usePM}")

    ####### ---- unconditioned
    gen_batch, latents = eval_manager.unconditioned_linear_interpolation(
        line0z0=z0,
        line0z1=z1,
        line1z0=z2,
        line1z1=z3,
        d0steps=interp_steps0,
        d1steps=interp_steps1,
        d1nvar=argsObj["d1nvar"],
        d1var=argsObj["d1var"])

    g = list(gen_batch)
    # for k in length

    audio_out = map(postprocess, gen_batch)
    # save the .pt file no matter what since we may want to use them to zoom in, resample, or whatever.

    if not usePM:
        saveAudioBatch(audio_out,
                       path=output_dir,
                       basename='test_2D4pt',
                       sr=config["transformConfig"]["sample_rate"],
                       latents=latents)
    else:  # save paramManager files, (and don't write latents separately)
        data = list(audio_out)  # LW it was a map, make it a list
        zdata = zip(data, latents)  # zip so we can enumerate through pairs of data/latents

        vstep = -1  # gets incremented in loop
        rowlength = interp_steps0 * argsObj["d1nvar"]
        print(f'rowlength is {rowlength}')

        for k, (audio, params) in enumerate(zdata):
            # Decompose the flat index k into grid coordinates:
            # istep = row (dim1), jstep = column (dim0), vstep = variation.
            istep = int(k / rowlength)  # the outer counter, orthogonal to the two lines defining the submanifold
            j = k % rowlength
            jstep = int(j / argsObj["d1nvar"])
            vstep = (vstep + 1) % argsObj["d1nvar"]
            print(f'doing row {istep}, col {jstep}, and variation {vstep}')

            if type(audio) != np.ndarray:
                audio = np.array(audio, float)

            path = output_dir
            basename = 'test_2D4pt'
            sr = config["transformConfig"]["sample_rate"]

            # foo=f'{basename}_{jstep}_{vstep}.wav'
            out_path = os.path.join(path, f'{basename}_d1.{istep}_d0.{jstep}_v.{vstep}.wav')

            # paramManager, create
            pm = paramManager.paramManager(out_path, output_dir)
            ##----------- paramManager interface ------------------##
            # param_out_path = os.path.join(path, f'{basename}_{i}.params')
            pm.initParamFiles(overwrite=True)

            # also save pt files in case we want to use them to create other grids, scales, etc.
            pt_param_out_path = os.path.join(path, f'{basename}_d1.{istep}_d0.{jstep}_v.{vstep}.pt')
            torch.save(params, pt_param_out_path)

            if not os.path.exists(out_path):
                # write_wav(out_path, audio.astype(float), sr)
                sf.write(out_path, audio.astype(float), sr)
                duration = len(audio.astype(float)) / float(sr)
                # print(f"duration is {duration}")

                # NOTE(review): `latents != None` — elementwise if latents
                # is a tensor; `is not None` would be safer. Confirm.
                if latents != None:
                    # pm.addParam(out_path, "dim1", [0.0,duration], [(p-minpitch)/pitchrange,(p-minpitch)/pitchrange], units="norm, midip in[58,70]", nvals=0, minval='null', maxval='null')
                    # Constant params over the clip: normalized grid
                    # coordinates of this cell.
                    pm.addParam(out_path, "dim0", [0.0, duration],
                                [jstep / interp_steps0norm,
                                 jstep / interp_steps0norm],
                                units=f'norm, interp steps in[0,{interp_steps0}]',
                                nvals=interp_steps0,
                                minval='null',
                                maxval='null')
                    if interp_steps1norm > 0:  # else just doing 1D interpolation
                        pm.addParam(out_path, "dim1", [0.0, duration],
                                    [istep / interp_steps1norm,
                                     istep / interp_steps1norm],
                                    units=f'norm, interp steps in[0,{interp_steps1}]',
                                    nvals=interp_steps1,
                                    minval='null',
                                    maxval='null')

                    # Stepped 0..1 envelope across the clip duration.
                    segments = 11  # to include a full segment for each value including endpoints
                    envTimes, envVals = makesteps(
                        np.linspace(0, duration, segments + 1, True),
                        np.linspace(0, 1, segments, True))  # need one extra time to flank each value
                    pm.addParam(out_path, "envPt", envTimes, envVals,
                                units=f"norm, duration in[0,{duration}]",
                                nvals=0,
                                minval='null',
                                maxval='null')

                    # write paramfile
                    # torch.save(params, param_out_path)
                    # np.savetxt(txt_param_out_path, params.cpu().numpy())
            else:
                print(f"saveAudioBatch: File {out_path} exists. Skipping...")
                continue