    # NOTE: the opening of this call is truncated in the source; the constructor
    # name and its binding to vrm_system are assumed from the usage below.
    vrm_system = VRMSimulationModel(part_name, part_type, voxel_dim,
                                    voxel_channels, point_dim)

    # Import model architecture
    output_dimension = assembly_kccs
    dl_model = Bayes_DLModel(model_type, output_dimension, optimizer,
                             loss_func, regularizer_coeff, output_type)
    model = dl_model.bayes_cnn_model_3d(voxel_dim, voxel_channels)
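    # The Bayesian CNN places distributions over its weights; repeated
    # stochastic forward passes therefore yield per-KCC uncertainty estimates
    # alongside the point predictions used below.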

    #Inference from simulated data
    inference_model = deploy_model.get_model(model, model_path, voxel_dim,
                                             voxel_channels)
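    # get_model is assumed to load the trained weights from model_path so the
    # network can be used for inference without retraining.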

    kcc_dataset = get_data.data_import(kcc_files, kcc_folder)

    # Voxelize: map point-cloud deviation data onto the regular 3D grid the
    # CNN expects (voxel_dim x voxel_dim x voxel_dim x voxel_channels)
    input_conv_data, kcc_subset_dump, kpi_subset_dump = get_data.data_convert_voxel_mc(
        vrm_system, dataset, point_index, kcc_dataset)
    y_pred = np.zeros_like(kcc_dataset)

    y_pred, y_std, y_aleatoric_std = deploy_model.model_inference(
        input_conv_data, inference_model, y_pred, kcc_dataset.values,
        plots_path)

    # Epistemic uncertainty (model uncertainty) shrinks with more training
    # data; aleatoric uncertainty (data noise) is irreducible.
    avg_std = np.array(y_std).mean(axis=0)
    avg_aleatoric_std = np.array(y_aleatoric_std).mean(axis=0)
    print("Average Epistemic Uncertainty of each KCC: ", avg_std)
    print("Average Aleatoric Uncertainty of each KCC: ", avg_aleatoric_std)
    evalerror = 1

    if (evalerror == 1):
        metrics_eval = MetricsEval()
        # Call truncated in the source; the ground-truth and output-path
        # arguments below are assumed from the surrounding inference code.
        eval_metrics, accuracy_metrics_df = metrics_eval.metrics_eval_base(
            y_pred, kcc_dataset.values, plots_path)
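
A minimal sketch of how the epistemic/aleatoric split reported above is
typically obtained from Monte Carlo forward passes; mc_model, x and T are
hypothetical stand-ins, not names from this codebase:

    import numpy as np

    def mc_uncertainty(mc_model, x, T=100):
        """Run T stochastic passes; mc_model is assumed to return a
        (mean, log_variance) pair per pass, as in heteroscedastic
        Bayesian regression networks."""
        means, log_vars = [], []
        for _ in range(T):
            mu, log_var = mc_model(x)  # stochastic pass (e.g. dropout left on)
            means.append(mu)
            log_vars.append(log_var)
        means = np.stack(means)
        epistemic_std = means.std(axis=0)  # spread of predictive means
        # aleatoric: average of the predicted per-sample data noise
        aleatoric_std = np.sqrt(np.exp(np.stack(log_vars)).mean(axis=0))
        return means.mean(axis=0), epistemic_std, aleatoric_std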
Example #2
        file_names_y = [file_names_y]
        file_names_z = [file_names_z]

        np.savetxt(file_path, initial_samples, delimiter=",")
        print('Sampling Completed...')

        test_samples = initial_samples
        cae_status = cae_simulations.run_simulations(run_id=0, type_flag='test')

        print("Pre-processing simulated test data")
        dataset_test = []
        dataset_test.append(get_data.data_import(file_names_x, data_folder))
        dataset_test.append(get_data.data_import(file_names_y, data_folder))
        dataset_test.append(get_data.data_import(file_names_z, data_folder))

        input_conv_data_test, kcc_subset_dump_test, kpi_subset_dump_test = get_data.data_convert_voxel_mc(
            vrm_system, dataset_test, point_index, test_samples)
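
        # The voxelized test set above is generated once and held fixed, so
        # every run of the training loop below is evaluated against the same
        # simulated test data.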


    for i in tqdm(range(max_run_length)):

        run_id = i
        print('Training Run ID: ', i)

        file_name = sampling_config['output_file_name_train'] + '_' + str(i) + '.csv'

        file_names_x = sampling_config['datagen_filename_x'] + 'train' + '_' + str(i) + '.csv'
        file_names_y = sampling_config['datagen_filename_y'] + 'train' + '_' + str(i) + '.csv'
        file_names_z = sampling_config['datagen_filename_z'] + 'train' + '_' + str(i) + '.csv'
        file_names_x = [file_names_x]
        file_names_y = [file_names_y]
        file_names_z = [file_names_z]
Example #3
    # Opening truncated in the source; assumed to be an append of the
    # y-deviation import, mirroring the z-channel append below.
    test_output_dataset.append(
        get_data.data_import(test_output_file_names_y, data_folder))
    test_output_dataset.append(
        get_data.data_import(test_output_file_names_z, data_folder))

    kcc_dataset = get_data.data_import(kcc_files, kcc_folder)
    test_kcc_dataset = get_data.data_import(test_kcc_files, kcc_folder)

    if (kcc_sublist != 0):
        print("Sub-setting Process Parameters: ", kcc_sublist)
        kcc_dataset = kcc_dataset.iloc[:, kcc_sublist]
        test_kcc_dataset = test_kcc_dataset.iloc[:, kcc_sublist]
    else:
        print("Using all Process Parameters")

    # Pre-processing: voxelize the point-cloud data for the 3D networks
    input_conv_data, kcc_subset_dump, kpi_subset_dump = get_data.data_convert_voxel_mc(
        vrm_system, input_dataset, point_index, kcc_dataset)
    test_input_conv_data, test_kcc_subset_dump, test_kpi_subset_dump = get_data.data_convert_voxel_mc(
        vrm_system, test_input_dataset, point_index, test_kcc_dataset)

    output_conv_data, kcc_subset_dump, kpi_subset_dump = get_data.data_convert_voxel_mc(
        vrm_system, output_dataset, point_index, kcc_dataset)
    test_output_conv_data, test_kcc_subset_dump, test_kpi_subset_dump = get_data.data_convert_voxel_mc(
        vrm_system, test_output_dataset, point_index, test_kcc_dataset)
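
    # The 3D U-Net below is trained as a voxel-to-voxel regressor: input
    # deviation fields are mapped to output deviation fields, with the test
    # tensors used for held-out evaluation.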

    unet_train_model = Unet_TrainModel(batch_size, epocs, split_ratio)

    trained_model, eval_metrics, accuracy_metrics_df = unet_train_model.unet_run_train_model(
        model, input_conv_data, kcc_subset_dump, output_conv_data,
        test_input_conv_data, test_kcc_subset_dump, test_output_conv_data,
        model_path, logs_path, plots_path, activate_tensorboard)
        # (Fragment from a separate test-data sampling branch in the source;
        # the matching file_names_x / file_names_y assignments are truncated.)
        file_names_z = sampling_config['datagen_filename_z'] + 'test' + '_' + str(0) + '.csv'

        np.savetxt(file_path, test_samples, delimiter=",")
        print('Sampling Completed...')

        cae_status = cae_simulations.run_simulations(run_id=0,
                                                     type_flag='test')

        print("Pre-processing simulated test data")
        dataset_test = []
        dataset_test.append(get_data.data_import([file_names_x], data_folder))
        dataset_test.append(get_data.data_import([file_names_y], data_folder))
        dataset_test.append(get_data.data_import([file_names_z], data_folder))

        input_conv_data_test, kcc_subset_dump_test, kpi_subset_dump_test = get_data.data_convert_voxel_mc(
            vrm_system, dataset_test, point_index, test_samples)
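
    # The validation branch below draws fresh samples, independent of the
    # training data, so the trained models can be benchmarked on unseen
    # process-parameter combinations.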

    if (sampling_validation_flag == 1):
        print('Generating Adaptive Sampling Data...')
        print('LHS Sampling for validation samples')

        # Use the current model's prediction errors and uncertainty estimates
        # to decide where new samples should be drawn
        from cae_simulations import CAESimulations
        cae_simulations = CAESimulations(simulation_platform,
                                         simulation_engine, max_run_length,
                                         case_study)
        validate_samples = adaptive_sampling.inital_sampling_uniform_random(
            kcc_struct, sampling_config['sample_validation_dim'])

        file_name = sampling_config['output_file_name_validate'] + ".csv"
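
For reference, a minimal Latin Hypercube Sampling sketch (the 'LHS Sampling'
the log line above refers to) using SciPy's qmc module; the bounds, dimension
and sample count are hypothetical, standing in for the kcc_struct limits used
by the codebase:

    from scipy.stats import qmc

    def lhs_samples(n_samples, l_bounds, u_bounds, seed=42):
        """Draw n_samples Latin Hypercube points scaled to per-KCC bounds."""
        sampler = qmc.LatinHypercube(d=len(l_bounds), seed=seed)
        unit = sampler.random(n=n_samples)  # stratified points in [0, 1)^d
        return qmc.scale(unit, l_bounds, u_bounds)

    # e.g. three process parameters, each bounded in [-1, 1] mm
    validate_samples = lhs_samples(100, [-1.0, -1.0, -1.0], [1.0, 1.0, 1.0])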