import os
import shutil
import pathlib as PH  # assumed alias, given the PH.Path(...) usage below

from tqdm import tqdm
# Config, dataset, and TDDD_Net are project-local imports.


def main():
    # init parameters from the config object
    config = Config()
    batch_size = config.batch_size
    epoch = config.epoch
    optimizer = config.optimizer
    from_scratch = config.from_scratch
    random_seed = config.random_seed
    num_match = config.num_match
    num_non_match = config.num_non_match
    non_match_margin = config.non_match_margin
    non_match_distance_clip = config.non_match_distance_clip
    model = config.model

    data = dataset()
    data.x_y_split(random_seed=random_seed)
    steps_per_epoch = data.train_size // batch_size + 1  # integer division; `/` would yield a float

    BASE_DIR = PH.Path(__file__).absolute().parent.parent
    MODEL_WEIGHTS_PATH = BASE_DIR.joinpath('Model', 'Model_Weights')

    # when training from scratch, discard any previous checkpoints
    if from_scratch:
        shutil.rmtree(str(MODEL_WEIGHTS_PATH), ignore_errors=True)
    if not os.path.exists(str(MODEL_WEIGHTS_PATH)):
        os.makedirs(str(MODEL_WEIGHTS_PATH))
    weights_path = str(MODEL_WEIGHTS_PATH.joinpath('ckpt'))

    # define Matching Net
    Model = TDDD_Net(model, from_scratch, weights_path, optimizer)

    for i in tqdm(range(epoch), desc="Epoch", position=0, leave=True):
        for j in tqdm(range(steps_per_epoch), desc="Step", position=0, leave=True):
            # load correspondences and TSDF volumes for one training batch
            (tsdf_volume_object_batch_train, tsdf_volume_package_batch_train,
             correspondence_batch_train, non_correspondence_train, _) = data.generate_train_data_batch(
                num_match, num_non_match, batch_size, non_match_distance_clip)
            Model.train_and_checkpoint(tsdf_volume_object_batch_train,
                                       tsdf_volume_package_batch_train,
                                       correspondence_batch_train,
                                       non_match=non_correspondence_train,
                                       Non_Match_Margin=non_match_margin)
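# For context: a minimal sketch of the Config object that main() reads above.
# The field names mirror the attribute accesses in main(); every default value
# below is an illustrative assumption, not taken from the project.
from dataclasses import dataclass, field

import tensorflow as tf


@dataclass
class Config:
    batch_size: int = 2
    epoch: int = 50                      # placeholder
    optimizer: object = field(default_factory=tf.keras.optimizers.Adam)
    from_scratch: bool = True
    random_seed: int = 0
    num_match: int = 10                  # matches sampled per object/package pair
    num_non_match: int = 10              # non-matches sampled per object/package pair
    non_match_margin: float = 0.5        # placeholder margin for the non-match loss
    non_match_distance_clip: int = 5
    model: str = '3D_U_Net'              # architecture name, matching the eval script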
def train_musdb(self, data):
    smooth = 1.
    dropout_rate = 0.5
    act = "relu"
    for i in range(1):
        # build spectrogram inputs X and per-stem target masks M from a 60-track slice
        X, M = dataset(data[60 * i:60 * (i + 1)])
        self.model.fit(X[:20, :, :, :], M['vocals'][:20, :, :, :], batch_size=2, epochs=20)
        # self.model_unet.fit(X[:20, :, :, :], M['vocals'][:20, :, :, :], batch_size=2, epochs=20)
        # self.model_unetpp.fit(X[:20, :, :, :], M['vocals'][:20, :, :, :], batch_size=2, epochs=20)
        # print('the {}th round'.format(i))
        print(psutil.virtual_memory()[1])  # available memory in bytes, to watch for leaks
    self.saveWeights('./model')
    print("saved")
    return (self.model, self.model_unet, self.model_unetpp)
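# Hedged usage sketch for train_musdb(), assuming `data` is a list of musdb
# tracks and that the surrounding class (called `Separator` here purely for
# illustration) is constructed elsewhere:
#
#     import musdb
#     mus = musdb.DB(root='./musdb18', subsets='train')
#     separator = Separator()                          # hypothetical constructor
#     model, unet, unetpp = separator.train_musdb(mus.tracks)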
def predict_musdb(self, track):
    X, M = dataset(track)
    # reference STFT of the original mixture; assuming scipy.signal.stft, whose
    # last return value is the complex spectrogram
    X_origin = stft(track[0].audio.T, nperseg=4096, noverlap=3072)[-1]
    M_predict = self.model.predict(X)
    # M2_predict = self.model_unet.predict(X)
    # M3_predict = self.model_unetpp.predict(X)
    # print(M_predict.shape)
    # only the vocals model is trained above, so its mask is reused for every stem
    MM_predict = {
        'vocals': M_predict,
        'drums': M_predict,
        'bass': M_predict,
        'other': M_predict
    }
    # MM_predict = {'vocals': M_predict}
    newM = ichop(X_origin, MM_predict)
    # newM = ichop(MM_predict)
    estimates = estimateSpectro(X_origin, newM)
    return estimates
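# Hedged sketch of the reconstruction step behind estimateSpectro(): the usual
# approach is to multiply each stem's mask into the complex mixture STFT and
# invert. The project's actual estimateSpectro()/ichop() may differ (e.g. by
# applying a multichannel Wiener filter); this is illustrative only.
def apply_masks_and_invert(X_origin, masks, nperseg=4096, noverlap=3072):
    from scipy.signal import istft
    estimates = {}
    for stem, mask in masks.items():
        S = mask * X_origin                              # masked complex spectrogram
        _, audio = istft(S, nperseg=nperseg, noverlap=noverlap)
        estimates[stem] = audio.T                        # back to (samples, channels)
    return estimates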
        'y_shape': config['model']['y_dim'],
        'embed_dim': config['model']['embed_dim'],
        'checkpoint_dir': config['resources']['checkpoint_dir']
    }

    # pack arguments for training HyperEncoder
    train_kwargs = {
        'learning_rate': config['training']['learning_rate'],
        'beta1': config['training']['beta1'],
        'epochs': config['training']['epochs'],
        'sample_dir': config['resources']['sample_dir'],
        'log_dir': config['resources']['log_dir']
    }

    # build data adapter
    da = dataset(**data_kwargs)
    init_kwargs['data_controller'] = da

    # build model
    encoder = HyperEncoder.build(**init_kwargs)

    # train model
    encoder.train(**train_kwargs)

elif op == 'freeze':
    # pack the arguments for creating the HyperEncoder
    init_kwargs = {
        'sess': sess,
        'data_controller': None,
        'name': config['model']['name'],
        'batch_size': config['training']['batch_size'],
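# Shape of the config implied by the lookups above; the key paths come from the
# code, while every value is a placeholder assumption (the real project likely
# loads this mapping from a config file).
example_config = {
    'model': {
        'name': 'hyper_encoder',        # placeholder
        'y_dim': 10,
        'embed_dim': 128,
    },
    'training': {
        'batch_size': 64,
        'learning_rate': 2e-4,
        'beta1': 0.5,
        'epochs': 25,
    },
    'resources': {
        'checkpoint_dir': './checkpoints',
        'sample_dir': './samples',
        'log_dir': './logs',
    },
}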
import pathlib as PH  # assumed alias, matching the PH.Path(...) usage below

import numpy as np
import tensorflow as tf
# dataset, TDDD_Net, get_top_match, and visualize_ground_truth are project-local imports.


def main():
    # if FLAGS.debug:
    #     print('non-flag arguments:', argv)
    # if FLAGS.age is not None:
    #     pass
    data = dataset()
    data.x_y_split(random_seed=0)
    from_scratch = False
    optimizer = tf.keras.optimizers.Adam()

    BASE_DIR = PH.Path(__file__).parent.parent
    MODEL_WEIGHTS_PATH = BASE_DIR.joinpath('Model', 'Model_Weights')
    weights_path = str(MODEL_WEIGHTS_PATH.joinpath('ckpt'))
    Results_path = BASE_DIR.joinpath('Model')

    # define Matching Net
    Model = TDDD_Net('3D_U_Net', from_scratch, weights_path, optimizer)
    # Model.optimizer = optimizer
    # Model.create_ckpt_manager(weights_path)
    # Model.restore()

    batch = 0
    # x_point_idx = 1800
    # y_point_idx = 1800
    match_count = {}

    # load correspondences and TSDF volumes for each test object
    # for i in range(data.test_size):
    for i in range(1):
        key = 'Test_Object_{}'.format(i)
        match_count[key] = {
            'exact_match': 0,
            'one_dist_off_match': 0,
            'two_dist_off_match': 0,
        }

        # tsdf_volume_test_object_batch, tsdf_volume_test_package_batch, match, non_match, ply_train = \
        #     data.generate_train_data_batch(10, 10, batch_size=2, Non_Match_Distance_Clip=5)
        tsdf_volume_test_object_batch, tsdf_volume_test_package_batch, match, ply_test = \
            data.generate_test_data_batch(1)

        # get the descriptor volumes for object and package
        descriptor_object = Model(tsdf_volume_test_object_batch).numpy()
        descriptor_package = Model(tsdf_volume_test_package_batch).numpy()
        print()

        # for j in tnrange(match[batch].shape[0]):
        for j in range(1):
            # x_point_idx = j   # the loop index is overridden by a hard-coded voxel index
            # y_point_idx = j
            x_point_idx = 4000
            y_point_idx = 4000

            # get the src and destination ground truth for the point_idx-th point of the first batch
            print(match.shape)
            src = match[batch, x_point_idx, :][:3]
            dest = match[batch, y_point_idx, :][3:]

            top_match, top_matching_distance, top_idx = get_top_match(
                batch, src, descriptor_object, descriptor_package, dest)

            src_des = descriptor_object[batch, src[0], src[1], src[2]]
            dest_des = descriptor_package[batch, dest[0], dest[1], dest[2]]

            print('top_best', top_match)
            print('top_matching_distance', top_matching_distance)
            # print('matching descriptor', dest)
            print('Ground Truth', [dest[0], dest[1], dest[2]])
            # print('ground_truth_diff', np.sqrt(np.sum((src_des - dest_des) ** 2)))
            # print(top_match[0, :].numpy())
            # print(dest)

            # count predictions that are exact, or within one or two voxels of the ground truth
            error = np.sqrt(np.sum((top_match[0, :] - dest) ** 2))
            if error == 0:
                match_count[key]['exact_match'] += 1
            if error <= 1:
                match_count[key]['one_dist_off_match'] += 1
            if error <= 2:
                match_count[key]['two_dist_off_match'] += 1
            # print('best_match', top_match[0, :])
            # print('ground_truth', dest)

            if j % 500 == 0:
                print('match_count', match_count)

        # with open(Results_path.joinpath('Results.pickle'), 'wb') as handle:
        #     pickle.dump(match_count, handle, protocol=pickle.HIGHEST_PROTOCOL)

        visualize_ground_truth(batch, src, dest, descriptor_object,
                               descriptor_package, top_idx, ply_test, data.shift)
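# Hedged sketch of a brute-force lookup in the spirit of get_top_match(): find
# the voxel in the package descriptor volume whose descriptor is nearest (in L2
# distance) to the source voxel's descriptor. The real get_top_match() may
# differ; the name and return convention here are illustrative assumptions.
def nearest_descriptor_match(batch, src, descriptor_object, descriptor_package):
    src_des = descriptor_object[batch, src[0], src[1], src[2]]     # (D,)
    volume = descriptor_package[batch]                             # (X, Y, Z, D)
    dist = np.sqrt(((volume - src_des) ** 2).sum(axis=-1))         # (X, Y, Z)
    flat_idx = dist.argmin()
    best_voxel = np.array(np.unravel_index(flat_idx, dist.shape))  # (x, y, z)
    return best_voxel, dist.min(), flat_idx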