input_conv_data, kcc_subset_dump, kpi_subset_dump = get_data.data_convert_voxel_mc(
        vrm_system, dataset, point_index, kcc_dataset)
    y_pred = np.zeros_like(kcc_dataset)

    y_pred, y_std, y_aleatoric_std = deploy_model.model_inference(
        input_conv_data, inference_model, y_pred, kcc_dataset.values,
        plots_path)

    avg_std = np.array(y_std).mean(axis=0)
    avg_aleatoric_std = np.array(y_aleatoric_std).mean(axis=0)
    print("Average Epistemic Uncertainty of each KCC: ", avg_std)
    print("Average Aleatoric Uncertainty of each KCC: ", avg_aleatoric_std)
    evalerror = 1

    if (evalerror == 1):
        metrics_eval = MetricsEval()
        eval_metrics, accuracy_metrics_df = metrics_eval.metrics_eval_base(
            y_pred, kcc_dataset, logs_path)

        print('Evaluation Metrics: ', eval_metrics)
        accuracy_metrics_df.to_csv(logs_path + '/metrics_test.csv')

        np.savetxt((deploy_path + "predicted.csv"), y_pred, delimiter=",")
        print('Predicted Values saved to disk...')

        np.savetxt((deploy_path + "pred_std.csv"), y_std, delimiter=",")
        print('Predicted Standard Deviation Values saved to disk...')

        np.savetxt((deploy_path + "aleatoric_std.csv"),
                   y_aleatoric_std,
                   delimiter=",")
    def run_train_model(self,
                        model,
                        X_in,
                        Y_out,
                        model_path,
                        logs_path,
                        plots_path,
                        activate_tensorboard=0,
                        run_id=0,
                        tl_type='full_fine_tune'):
        """run_train_model function trains the model on the dataset and saves the trained model,logs and plots within the file structure, the function prints the training evaluation metrics
			
			:param model: 3D CNN model compiled within the Deep Learning Class, refer https://keras.io/models/model/ for more information 
			:type model: keras.models (required)

			:param X_in: Train dataset input (predictor variables), 3D Voxel representation of the cloud of point and node deviation data obtained from the VRM software based on the sampling input
			:type X_in: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required)
			
			:param Y_out: Train dataset output (variables to predict), Process Parameters/KCCs obtained from sampling
			:type Y_out: numpy.array [samples*assembly_kccs] (required)

			:param model_path: model path at which the trained model is saved
			:type model_path: str (required)
			
			:param logs_path: logs path where the training metrics file is saved
			:type logs_path: str (required)

			:param plots_path: plots path where model training loss convergence plot is saved
			:type plots_path: str (required)

			:param activate_tensorboard: flag to indicate if tensorboard should be added in model callbacks for better visualization, 0 by default, set to 1 to activate tensorboard
			:type activate_tensorboard: int

			:param run_id: Run id index used in data study to conduct multiple training runs with different dataset sizes, defaults to 0
			:type run_id: int			
		"""
        import tensorflow as tf
        from sklearn.model_selection import train_test_split
        from tensorflow.keras.models import load_model
        from tensorflow.keras.callbacks import ModelCheckpoint
        from tensorflow.keras.callbacks import TensorBoard

        model_file_path = model_path + '/trained_model_resnet_hybrid_' + str(
            run_id) + '.h5'

        X_train, X_test, y_train_reg, y_test_reg, y_train_cla, y_test_cla = train_test_split(
            X_in, Y_out[0], Y_out[1], test_size=self.split_ratio)

        y_train = [y_train_reg, y_train_cla]
        y_test = [y_test_reg, y_test_cla]

        print("Data Split Completed")

        #Checkpointer to save the best model
        checkpointer = ModelCheckpoint(model_file_path,
                                       monitor='val_loss',
                                       verbose=1,
                                       save_best_only=True,
                                       save_weights_only=True)

        callbacks = [checkpointer]

        if (activate_tensorboard == 1):
            #Activating Tensorboard for Visualization
            tensorboard = TensorBoard(log_dir=logs_path,
                                      histogram_freq=1,
                                      write_graph=True,
                                      write_images=True)
            callbacks = [checkpointer, tensorboard]

        history = model.fit(x=X_train,
                            y=y_train,
                            validation_data=(X_test, y_test),
                            epochs=self.epochs,
                            batch_size=self.batch_size,
                            callbacks=callbacks)

        #trainviz=TrainViz()
        #trainviz.training_plot(history,plots_path,run_id)

        model.load_weights(model_file_path)

        y_pred = model.predict(X_test)

        metrics_eval = MetricsEval()
        eval_metrics_reg, accuracy_metrics_df_reg = metrics_eval.metrics_eval_base(
            y_pred[0], y_test[0], logs_path)
        eval_metrics_cla, accuracy_metrics_df_cla = metrics_eval.metrics_eval_classification(
            y_pred[1], y_test[1], logs_path)

        return model, accuracy_metrics_df_reg, accuracy_metrics_df_cla
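        #Illustrative usage sketch (assumption, the surrounding setup names are hypothetical):
        #train_model = TrainModel(batch_size, epochs, split_ratio)
        #model, df_reg, df_cla = train_model.run_train_model(
        #    model, X_in, [y_reg, y_cla], model_path, logs_path, plots_path)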
Example #3
    def unet_run_train_model(self,
                             model,
                             X_in,
                             Y_out,
                             Y_cop,
                             X_in_test,
                             Y_out_test,
                             Y_cop_test,
                             model_path,
                             logs_path,
                             plots_path,
                             activate_tensorboard=0,
                             run_id=0,
                             tl_type='full_fine_tune'):
        """run_train_model function trains the model on the dataset and saves the trained model,logs and plots within the file structure, the function prints the training evaluation metrics
			
			:param model: 3D CNN model compiled within the Deep Learning Class, refer https://keras.io/models/model/ for more information 
			:type model: keras.models (required)

			:param X_in: Train dataset input (predictor variables), 3D Voxel representation of the cloud of point and node deviation data obtained from the VRM software based on the sampling input
			:type X_in: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required)
			
			:param Y_out: Train dataset output (variables to predict), Process Parameters/KCCs obtained from sampling
			:type Y_out: numpy.array [samples*assembly_kccs] (required)

			:param model_path: model path at which the trained model is saved
			:type model_path: str (required)
			
			:param logs_path: logs path where the training metrics file is saved
			:type logs_path: str (required)

			:param plots_path: plots path where model training loss convergence plot is saved
			:type plots_path: str (required)

			:param activate_tensorboard: flag to indicate if tensorboard should be added in model callbacks for better visualization, 0 by default, set to 1 to activate tensorboard
			:type activate_tensorboard: int

			:param run_id: Run id index used in data study to conduct multiple training runs with different dataset sizes, defaults to 0
			:type run_id: int			
		"""
        import tensorflow as tf
        from tensorflow.keras.models import load_model
        import tensorflow.keras.backend as K
        #model_file_path=model_path+'/unet_trained_model_'+str(run_id)+'.h5'
        model_file_path = model_path + '/unet_trained_model_' + str(run_id)

        #tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='C:\\Users\\sinha_s\\Desktop\\dlmfg_package\\dlmfg\\trained_models\\inner_rf_assembly\\logs',histogram_freq=1)
        #Checkpointer to save the best model
        checkpointer = tf.keras.callbacks.ModelCheckpoint(
            model_file_path,
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            save_weights_only=True)
        history = model.fit(x=X_in,
                            y=[Y_out, Y_cop],
                            validation_data=(X_in_test,
                                             [Y_out_test, Y_cop_test]),
                            epochs=self.epochs,
                            batch_size=self.batch_size,
                            callbacks=[checkpointer])

        def mse_scaled(y_true, y_pred):
            return K.mean(K.square((y_pred - y_true) / 10))
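        # mse_scaled divides the error by 10 before squaring, scaling the loss by 1/100;
        # presumably this matches a scaling applied to the cloud-of-point targets. A full
        # model saved with this custom loss must be reloaded with custom_objects, as the
        # commented load_model call below illustrates.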

        #inference_model=load_model(model_file_path,custom_objects={'mse_scaled': mse_scaled} )
        model.load_weights(model_file_path)
        y_pred, y_cop_pred = model.predict(X_in_test)

        metrics_eval = MetricsEval()
        eval_metrics, accuracy_metrics_df = metrics_eval.metrics_eval_base(
            y_pred, Y_out_test, logs_path)

        return model, eval_metrics, accuracy_metrics_df
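        #Illustrative usage sketch (assumption, the surrounding setup names are hypothetical):
        #trained_model, eval_metrics, metrics_df = train_model.unet_run_train_model(
        #    model, X_in, Y_out, Y_cop, X_in_test, Y_out_test, Y_cop_test,
        #    model_path, logs_path, plots_path)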
Example #4
    input_conv_data, kcc_subset_dump, kpi_subset_dump = get_data.data_convert_voxel_mc(
        vrm_system, dataset, point_index, kcc_dataset)

    kcc_regression, kcc_classification = hy_util.split_kcc(kcc_subset_dump)
    y_out_test = [kcc_regression, kcc_classification]
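    # Illustrative sketch (assumption, not the actual hy_util.split_kcc implementation):
    # split_kcc presumably partitions the KCC array into continuous process parameters
    # (regression targets) and binary ones (classification targets), e.g. by column index.
    def split_kcc_sketch(kcc_array, n_regression_kccs):  # hypothetical helper
        kcc_reg = kcc_array[:, :n_regression_kccs]  # continuous KCCs
        kcc_cla = kcc_array[:, n_regression_kccs:]  # binary/categorical KCCs
        return kcc_reg, kcc_cla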

    mean_eval = 0

    #Predict by setting all model parameter distributions to their means
    #Question asked on TensorFlow, waiting for a solution...
    #Evaluate the mean vector
    if (mean_eval == 1):

        y_preds_reg, y_preds_cla = deploy_model.model_mean_eval(
            input_conv_data, inference_model)
        metrics_eval = MetricsEval()

        eval_metrics_reg, accuracy_metrics_df_reg = metrics_eval.metrics_eval_base(
            y_preds_reg, y_out_test[0], logs_path)
        eval_metrics_cla, accuracy_metrics_df_cla = metrics_eval.metrics_eval_classification(
            y_preds_cla, y_out_test[1], logs_path)

        accuracy_metrics_df_reg.to_csv(logs_path +
                                       '/metrics_test_regression.csv')
        accuracy_metrics_df_cla.to_csv(logs_path +
                                       '/metrics_test_classification.csv')

        print("The Model Validation Metrics for Regression based KCCs")
        print(accuracy_metrics_df_reg)
        accuracy_metrics_df_reg.mean().to_csv(
            logs_path + '/metrics_test_regression_summary.csv')
    def unet_run_model(self,
                       model,
                       X_in_test,
                       model_path,
                       logs_path,
                       plots_path,
                       test_result=0,
                       Y_out_test=0,
                       y_cop_test=0,
                       activate_tensorboard=0,
                       run_id=0,
                       tl_type='full_fine_tune'):
        """run_train_model function trains the model on the dataset and saves the trained model,logs and plots within the file structure, the function prints the training evaluation metrics
			
			:param model: 3D CNN model compiled within the Deep Learning Class, refer https://keras.io/models/model/ for more information 
			:type model: keras.models (required)

			:param X_in: Train dataset input (predictor variables), 3D Voxel representation of the cloud of point and node deviation data obtained from the VRM software based on the sampling input
			:type X_in: numpy.array [samples*voxel_dim*voxel_dim*voxel_dim*deviation_channels] (required)
			
			:param Y_out: Train dataset output (variables to predict), Process Parameters/KCCs obtained from sampling
			:type Y_out: numpy.array [samples*assembly_kccs] (required)

			:param model_path: model path at which the trained model is saved
			:type model_path: str (required)
			
			:param logs_path: logs path where the training metrics file is saved
			:type logs_path: str (required)

			:param plots_path: plots path where model training loss convergence plot is saved
			:type plots_path: str (required)

			:param activate_tensorboard: flag to indicate if tensorboard should be added in model callbacks for better visualization, 0 by default, set to 1 to activate tensorboard
			:type activate_tensorboard: int

			:param run_id: Run id index used in data study to conduct multiple training runs with different dataset sizes, defaults to 0
			:type run_id: int			
		"""
        import tensorflow as tf
        from tensorflow.keras.models import load_model
        import tensorflow.keras.backend as K
        #model_file_path=model_path+'/unet_trained_model_'+str(run_id)+'.h5'
        model_file_path = model_path + '/unet_trained_model_' + str(run_id)

        #tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='C:\\Users\\sinha_s\\Desktop\\dlmfg_package\\dlmfg\\trained_models\\inner_rf_assembly\\logs',histogram_freq=1)

        #inference_model=load_model(model_file_path,custom_objects={'mse_scaled': mse_scaled} )
        model.load_weights(model_file_path)
        print("Trained Model Weights loaded successfully")
        print("Conducting Inference...")
        y_pred, y_cop_pred = model.predict(X_in_test)
        print("Inference Completed !")

        if (test_result == 1):
            metrics_eval = MetricsEval()
            eval_metrics, accuracy_metrics_df = metrics_eval.metrics_eval_base(
                y_pred, Y_out_test, logs_path)

            #y_cop_pred_flat=y_cop_pred.flatten()
            #y_cop_test_flat=y_cop_test.flatten()

            #combined_array=np.stack([y_cop_test_flat,y_cop_pred_flat],axis=1)
            #filtered_array=combined_array[np.where(combined_array[:,0] >= 0.05)]
            #y_cop_test_vector=filtered_array[:,0:1]
            #y_cop_pred_vector=filtered_array[:,1:2]
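            # The 5D voxel fields are flattened to one row per sample and then transposed,
            # so each row of the resulting [voxel_elements, samples] matrix is a single
            # voxel element across all test samples, which is presumably the layout
            # metrics_eval_cop expects below.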

            y_cop_pred_vector = np.reshape(y_cop_pred,
                                           (y_cop_pred.shape[0], -1))
            y_cop_test_vector = np.reshape(y_cop_test,
                                           (y_cop_test.shape[0], -1))
            y_cop_pred_vector = y_cop_pred_vector.T
            y_cop_test_vector = y_cop_test_vector.T
            print(y_cop_pred_vector.shape)
            #y_cop_test_flat=y_cop_test.flatten()

            eval_metrics_cop, accuracy_metrics_df_cop = metrics_eval.metrics_eval_cop(
                y_cop_pred_vector, y_cop_test_vector, logs_path)

            return y_pred, y_cop_pred, model, eval_metrics, accuracy_metrics_df, eval_metrics_cop, accuracy_metrics_df_cop

        return y_pred, y_cop_pred, model
	logs_path=train_path+'/logs'
	pathlib.Path(logs_path).mkdir(parents=True, exist_ok=True)

	plots_path=train_path+'/plots'
	pathlib.Path(plots_path).mkdir(parents=True, exist_ok=True)

	deployment_path=train_path+'/deploy'
	pathlib.Path(deployment_path).mkdir(parents=True, exist_ok=True)

	#Objects of Measurement System, Assembly System, Get Inference Data
	print('Initializing the Assembly System and Measurement System....')
	measurement_system=HexagonWlsScanner(data_type,application,system_noise,part_type,data_format)
	vrm_system=VRMSimulationModel(assembly_type,assembly_kccs,assembly_kpis,part_name,part_type,voxel_dim,voxel_channels,point_dim,aritifical_noise)
	get_data=GetTrainData()
	deploy_model=DeployModel()
	metrics_eval=MetricsEval()

	print('Importing and Preprocessing Cloud-of-Point Data')
	dataset=[]
	dataset.append(get_data.data_import(file_names_x,data_folder))
	dataset.append(get_data.data_import(file_names_y,data_folder))
	dataset.append(get_data.data_import(file_names_z,data_folder))
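	#The x, y and z deviation components are kept as separate dataset entries;
	#presumably data_convert_voxel_mc later fuses them into the deviation channels
	#of the voxel grid (an assumption based on voxel_channels above)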

	dataset_test=[]
	dataset_test.append(get_data.data_import(test_file_names_x,data_folder))
	dataset_test.append(get_data.data_import(test_file_names_y,data_folder))
	dataset_test.append(get_data.data_import(test_file_names_z,data_folder))
	point_index=get_data.load_mapping_index(mapping_index)

	kcc_dataset=get_data.data_import(kcc_files,kcc_folder)
	kcc_dataset_test=get_data.data_import(test_kcc_files,kcc_folder)
Example #7
	dl_model_unet=Encode_Decode_Model(output_dimension)
	model=dl_model_unet.resnet_3d_cnn_hybrid(voxel_dim,voxel_channels,kcc_classification.shape[1])
	
	#sys.exit()
	y_test=[kcc_regression,kcc_classification]
	#Inference from simulated data
	
	inference_model=deploy_model.get_model(model,model_path)
	inference_model.summary()

	y_pred=deploy_model.model_inference(input_conv_data,inference_model,deploy_path,print_result=0,plot_result=0)

	evalerror=1

	if(evalerror==1):
		metrics_eval=MetricsEval()
		
		eval_metrics_reg,accuracy_metrics_df_reg=metrics_eval.metrics_eval_base(y_pred[0],y_test[0],logs_path)
		eval_metrics_cla,accuracy_metrics_df_cla=metrics_eval.metrics_eval_classification(y_pred[1],y_test[1],logs_path)
		
		accuracy_metrics_df_reg.to_csv(logs_path+'/metrics_test_regression.csv')
		accuracy_metrics_df_cla.to_csv(logs_path+'/metrics_test_classification.csv')
		
		print("The Model Validation Metrics for Regression based KCCs")	
		print(accuracy_metrics_df_reg)
		accuracy_metrics_df_reg.mean().to_csv(logs_path+'/metrics_test_regression_summary.csv')
		print("The Model Validation Metrics Regression Summary")
		print(accuracy_metrics_df_reg.mean())

		print("The Model Validation Metrics for Classification based KCCs")	
		print(accuracy_metrics_df_cla)
        y_shape_error_test_list.append(test_output_conv_data)

    shape_error_test = np.concatenate(y_shape_error_test_list, axis=4)

    Y_out_test_list.append(shape_error_test)

    unet_deploy_model = Unet_DeployModel()

    pred_vector, epistemic_vector, epistemic_vector_iqr, aleatoric_vector = unet_deploy_model.bayes_unet_run_model(
        test_input_conv_data, model, Y_out_test_list, plots_path)

    model_outputs = pred_vector

    print("Computing Metrics..")

    metrics_eval = MetricsEval()

    eval_metrics_reg, accuracy_metrics_df_reg = metrics_eval.metrics_eval_base(
        pred_vector[0], Y_out_test_list[0], logs_path)
    eval_metrics_cla, accuracy_metrics_df_cla = metrics_eval.metrics_eval_classification(
        pred_vector[1], Y_out_test_list[1], logs_path)

    eval_metrics_cop_list = []
    accuracy_metrics_df_cop_list = []
    get_point_cloud = GetPointCloud()

    cop_file_name = vc.voxel_parameters['nominal_cop_filename']
    cop_file_path = '../resources/nominal_cop_files/' + cop_file_name
    #Read cop from csv file
    print('Importing Nominal COP')
    nominal_cop = vrm_system.get_nominal_cop(cop_file_path)
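    # Illustrative sketch (assumption, not the actual VRMSimulationModel method):
    # get_nominal_cop presumably parses the csv file into a [point_dim, 3] array of
    # nominal x, y, z node coordinates, e.g.:
    def get_nominal_cop_sketch(cop_file_path):  # hypothetical stand-in
        import numpy as np
        return np.genfromtxt(cop_file_path, delimiter=',')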