Example #1
    def generate_noisy_signal_dist(self):
        logger.debug("Generating noisy signal on distance")
        isFirstChunk = True
        number_of_chunks = len(self.m_noise_std)

        chunkIndex = np.arange(number_of_chunks)
        np.random.shuffle(self.m_noise_std)
        try:
            chunk_lens = self.generate_rand_chunk(number_of_chunks)
        except ValueError as valerr:
            logger.debug('All chunk lengths are zero')
            errdict = {
                "file": __file__,
                "message": valerr.args[0],
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict) from valerr

        for chunk in chunkIndex:
            chunk_len = int(chunk_lens[chunk])
            if isFirstChunk:
                debut = 0
                fin = chunk_len
                isFirstChunk = False
            else:
                debut = debut + int(chunk_lens[chunk - 1])
                fin = chunk_len + debut

            noise_dist_chunk = np.random.normal(loc=self.m_noise_level,
                                                scale=self.m_noise_std[chunk],
                                                size=chunk_len)
            # Concatenate back the chunks
            self.noise_dist[debut:fin] = np.abs(noise_dist_chunk)
        try:
            self.noise = self.noise * self.noise_dist
        except ValueError as valerr:
            logger.debug('Invalid noise length <%s>', valerr.args[0])
            errdict = {
                "file": __file__,
                "message": valerr.args[0],
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict) from valerr
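
The helper generate_rand_chunk called above is not part of this example. Below is a minimal sketch of one possible implementation, assuming self.noise_dist is pre-allocated to the full signal length and that a ValueError is raised when every chunk length comes out as zero; the splitting strategy itself is an assumption, not the project's actual code.

    def generate_rand_chunk(self, number_of_chunks):
        # Hypothetical sketch: split the total signal length into
        # number_of_chunks random, non-negative integer chunk lengths.
        total_len = len(self.noise_dist)
        cuts = np.sort(np.random.randint(0, total_len + 1, number_of_chunks - 1))
        chunk_lens = np.diff(np.concatenate(([0], cuts, [total_len])))
        if not chunk_lens.any():
            raise ValueError("All chunk lengths are zero")
        return chunk_lens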
Example #2
def process_data(params):
    data_obj = cProcessFile(params)
    data_obj.plotAvgSR()
    data_obj.plotAccuracy()

    if not params["bTrainNetwork"]:
        if params["CSV_DATA"]["bPlot_real_path"]:
            data_obj.plot_real_path_recon()
            data_obj.plot_path_org_2d()
            data_obj.plot_path_noisy_2d()

        MSE_noise_WM, MSE_noise_latlon, MSE_r_wm, max_error, MSE_r_latlon, reconstructed_db_latlon = \
            data_obj.calculate_MSE()
        try:
            data_obj.plot_path_org_2d()
            data_obj.plot_path_noisy_2d()
            data_obj.plot_MSE(MSE_noise_WM, MSE_noise_latlon, MSE_r_wm,
                              max_error, MSE_r_latlon)
            data_obj.plot_SNR(reconstructed_db_latlon)
            data_obj.analyze_DCT()
            data_obj.power_spectral_density()
        except KeyError as key_error:
            message = "Could not find key %s" % (key_error.args[0])
            errdict = {
                "file": __file__,
                "message": message,
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict) from key_error

        params["RESULTS"]["reconstructed_db_latlon"] = reconstructed_db_latlon
        params["RESULTS"]["MSE_latlon"] = MSE_r_latlon
        params["RESULTS"]["MSE_r_wm"] = MSE_r_wm
        params["RESULTS"]["MSE_noise_WM"] = MSE_noise_WM
        params["RESULTS"]["MSE_noise_latlon"] = MSE_noise_latlon

    for key in list(params):
        if "Obj" in key:
            del params[key]

    # TODO: logic will only work after CSV is integrated
    if params["bSimplified_Results"]:
        del params['RESULTS']['paths_wm_org']
        del params['RESULTS']['paths_latlon_org']
        del params['RESULTS']['paths_wm_noisy']
        del params['RESULTS']['paths_latlon_noisy']
        del params['RESULTS']['reconstructed_latlon_paths']
        del params['RESULTS']['reconstructed_WM_paths']
        # del params['RESULTS']['final_sampling_ratio']
        del params['RESULTS']['noise_vals']

        if not params['TRANSFORM']['bDctTransform']:
            del params['RESULTS']['transformed_paths']

    return data_obj.set_pickle_file(params)
Example #3
    def set_pickle_file(self, struct):
        with open(self.m_filename, 'wb') as txt_file:
            try:
                pickle.dump(struct, txt_file)
            except IOError as io_error:
                errdict = {
                    "file": __file__,
                    "message": "Could not dump in pickle file",
                    "errorType": CErrorTypes.ioerror
                }
                raise CFrameworkError(errdict) from io_error
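
Example #8 below reads such a file back via get_pickle_file. That helper is not shown on this page; a minimal counterpart sketch, assuming it simply unpickles and returns the stored parameter dictionary:

import pickle

def get_pickle_file(filename):
    # Hypothetical counterpart to set_pickle_file: load the pickled struct.
    with open(filename, 'rb') as pickle_file:
        return pickle.load(pickle_file)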
Example #4
    def run(self, path, samples, acquisition_length):
        # Perform inference on the given path
        if acquisition_length == self.m_acquisition_length:
            path_dnw = path[samples].transpose()
            path_in = np.array([path_dnw])
            # A mismatch between vector length and input layer is implicitly caught as a ValueError
            return self.m_model_lat.predict(path_in)
        else:
            logger.debug("Input length is invalid")
            errdict = {
                "file": __file__,
                "message": "Input length is invalid",
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict)
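
A brief usage sketch, assuming nn_obj is an instance of this class, path_lat is a 1-D latitude vector of length acquisition_length, and the sample indices are drawn uniformly at random; all of these names are illustrative, not taken from the project.

# Hypothetical call: pick number_of_samples indices from one block and infer the full path.
samples = np.sort(np.random.choice(acquisition_length, number_of_samples, replace=False))
predicted_lat = nn_obj.run(path_lat, samples, acquisition_length)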
Example #5
    def custom_print(self, message):
        # Will be called by model.summary() for every line in the summary
        logger.info(message)
        if not self.identifier:
            logger.debug("No NN model was chosen")
            errdict = {
                "file": __file__,
                "message": "No NN model was chosen",
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict)
        else:
            if self.identifier in self.messageSummary_dict:
                self.messageSummary_dict[self.identifier] += message
            else:
                self.messageSummary_dict[self.identifier] = message
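
As the comment indicates, custom_print is intended to be passed to Keras as the summary printer. A minimal usage sketch, assuming nn_obj is an instance of this class with its Keras model already built; nn_obj and the identifier value are illustrative.

# Route the Keras summary through custom_print so every line is logged
# and collected under the current identifier.
nn_obj.identifier = "model_lat"
nn_obj.m_model_lat.summary(print_fn=nn_obj.custom_print)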
Example #6
def reconstructor(params, path, algorithm, noise_dist):
    logger.debug(
        'Returns dictionary of different path reconstructions using different algorithms'
    )

    # Default values so the return statement is defined even when
    # reconstruction is disabled for this algorithm
    reconstructed_paths, final_sampling_ratio = None, None

    if params[algorithm]["bReconstruct"]:
        try:
            reconstructed_paths, final_sampling_ratio = DevOps(
                algorithm, params, path, noise_dist)
        except (ValueError, KeyError) as valerr:
            logger.debug('<%s> raised an error with message <%s>', algorithm,
                         valerr.args[0])
            errdict = {
                "file": __file__,
                "message": valerr.args[0],
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict) from valerr

    return reconstructed_paths, final_sampling_ratio
Example #7
    def __init__(self, struct, algorithm):

        self.messageSummary_dict = {}
        self.identifier = ""  # To describe which model the message belongs to

        if struct[algorithm]["sampling_ratio"] > 1:
            logger.debug("Sampling_ratio larger than 1")
            errdict = {
                "file": __file__,
                "message": "Sampling_ratio larger than 1",
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict)

        self.m_acquisition_length = struct[algorithm]['block_length']
        self.alpha = struct[algorithm]["alpha"]
        if struct['bTrainNetwork']:
            self.m_model_lat = keras.Sequential()
            self.m_model_lon = keras.Sequential()
            self.bTrainlat = (struct["Train_NN"]["modelname_lat"] != "")
            self.bTrainlon = (struct["Train_NN"]["modelname_lon"] != "")
            self.delta = struct[algorithm]["delta"]
        else:
            modelname_lat = resultsPath + struct[algorithm]["modelname"]
            modelname_lon = resultsPath + struct[algorithm]["modelname"]
            self.save_nnModel = struct[algorithm]["save_nnModel"]
            try:
                self.load_models(modelname_lat, modelname_lon)
            except FileNotFoundError as file_err:
                message = 'Model <%s> in directory <%s> not found!' % (
                    struct[algorithm]["modelname"], resultsPath)
                logger.debug(message)
                errdict = {
                    "file": __file__,
                    "message": message,
                    "errorType": CErrorTypes.value
                }
                raise CFrameworkError(errdict) from file_err

        self.number_of_samples = int(struct[algorithm]["sampling_ratio"] *
                                     struct[algorithm]["block_length"])
        self.realizations = struct['realization']

        if self.number_of_samples <= 0:
            logger.debug("Number of samples cannot be 0 or negative")
            errdict = {
                "file": __file__,
                "message": "Invalid number of samples",
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict)

        self.noiseLevel_len = len(struct['noise_level_meter'])
        if self.noiseLevel_len <= 0:
            logger.debug("Noise array cannot be empty")
            errdict = {
                "file": __file__,
                "message": "Noise array is empty",
                "errorType": CErrorTypes.value
            }
            raise CFrameworkError(errdict)
Example #8
    def mainComputation(self, local_struct):

        # Variables initialization
        use_random_seed = local_struct['bUse_random_seed']
        random_seed = local_struct['random_seed']
        numberOfRealizations = local_struct['realization']
        noise_level_len = len(local_struct['noise_level_meter'])

        use_csv_data = local_struct['CSV_DATA']['bUse_csv_data']
        csv_path = local_struct['CSV_DATA']['csv_path']
        path_length = local_struct['CSV_DATA']['path_length']

        # Set seed
        if use_random_seed:
            np.random.seed(random_seed)

        # Iterate over the total number of realizations
        if use_csv_data:
            local_struct['noise_level_meter'] = [0]
            noise_level_len = 1

            acquisition_length = path_length
            paths_latlon_org, latlon_accuracy, latlon_interval = munge_csv(
                csv_path, path_length)
            local_struct['realization'] = latlon_accuracy.shape[-1]
            numberOfRealizations = local_struct['realization']
            paths_latlon_org = paths_latlon_org.reshape(
                (2, path_length, numberOfRealizations, noise_level_len))
        else:
            acquisition_length = (local_struct['gps_freq_Hz'] *
                                  local_struct['acquisition_time_sec'])
            paths_latlon_org = np.zeros(
                (2, acquisition_length, numberOfRealizations, noise_level_len))

        local_struct['acquisition_length'] = acquisition_length

        paths_wm_org = np.zeros(
            (2, acquisition_length, numberOfRealizations, noise_level_len))
        paths_wm_noisy = np.zeros(
            (2, acquisition_length, numberOfRealizations, noise_level_len))
        paths_latlon_noisy = np.zeros(
            (2, acquisition_length, numberOfRealizations, noise_level_len))
        noise_vals = np.zeros(
            (acquisition_length, numberOfRealizations, noise_level_len))
        transformed_paths = np.zeros(
            (2, acquisition_length, numberOfRealizations, noise_level_len))

        reconstructed_latlon_paths = {}
        reconstructed_WM_paths = {}
        final_sampling_ratio = {}
        bNN_initialized = {}

        reconstruction_algorithms = identify_algorithms(local_struct)
        for algorithm in reconstruction_algorithms:
            reconstructed_latlon_paths[algorithm] = np.zeros(
                (2, acquisition_length, numberOfRealizations, noise_level_len))
            reconstructed_WM_paths[algorithm] = np.zeros(
                (2, acquisition_length, numberOfRealizations, noise_level_len))
            final_sampling_ratio[algorithm] = np.zeros(
                (numberOfRealizations, noise_level_len))
            bNN_initialized[algorithm] = False

        if (local_struct['bTrainNetwork']
                and local_struct['Train_NN']['bUseGeneratedData']):
            self.logger.info('Using generated data')
            try:
                dataFileName = 'TrainingSet'
                filename = self.paramPath + 'NeuralNetworks' + direc_ident + dataFileName + '.txt'
                try:
                    loadedStruct = get_pickle_file(filename)
                    # Assign the paths to local variables
                    # TODO some other checks on some parameters can be done
                    paths_latlon_org = loadedStruct['RESULTS']['paths_latlon_org']
                    paths_latlon_noisy = loadedStruct['RESULTS']['paths_latlon_noisy']
                except FileNotFoundError as filerr:
                    self.logger.debug("Training set not found")
                    errdict = {
                        "file": __file__,
                        "message": filerr.args[0],
                        "errorType": CErrorTypes.range
                    }
                    raise CFrameworkError(errdict) from filerr
            except CFrameworkError as frameErr:
                self.errorAnalyzer(frameErr, "load training set")

        else:
            self.logger.info(
                'Starting simulation with <%d> realizations and <%d> path length',
                numberOfRealizations, acquisition_length)
            for lvl in range(noise_level_len):
                for realization in range(numberOfRealizations):
                    # Generate random data
                    self.logger.log(
                        15,
                        'Generating random data for lvl <%d> for realization <%d>',
                        local_struct['noise_level_meter'][lvl], realization)
                    if not use_csv_data:
                        (paths_wm_org[:, :, realization, lvl], paths_latlon_org[:, :, realization, lvl]) = \
                            random_2d_path_generator(local_struct)
                        # Generate noise for each realization
                        (paths_wm_noisy[:, :, realization, lvl], paths_latlon_noisy[:, :, realization, lvl],
                         noise_vals[:, realization, lvl]) = \
                            noise_generator(local_struct, paths_wm_org[:, :, realization, lvl],
                                            local_struct['noise_level_meter'][lvl])
                    else:
                        paths_wm_org[:, :, realization, lvl] = \
                            cord.generate_WM_array(paths_latlon_org[:, :, realization, lvl])
                        paths_latlon_noisy[:, :, realization, lvl] = \
                            paths_latlon_org[:, :, realization, lvl]
                        paths_wm_noisy[:, :, realization, lvl] = \
                            paths_wm_org[:, :, realization, lvl]

                    # Apply transforms
                    if not local_struct['bTrainNetwork']:
                        transformed_paths[:, :, realization, lvl] = \
                            transforms(local_struct, paths_latlon_noisy[:, :, realization, lvl])

                        # Apply reconstruction algorithms
                        if local_struct['bReconstruct']:
                            for algorithm in reconstruction_algorithms:
                                if "NN" in algorithm and not bNN_initialized[
                                        algorithm]:
                                    from NeuralNetworks.NN import CNeuralNetwork
                                    nn_name = algorithm + "Obj"
                                    try:
                                        local_struct[nn_name] = CNeuralNetwork(
                                            local_struct, algorithm)
                                        bNN_initialized[algorithm] = True
                                    except CFrameworkError as frameErr:
                                        self.errorAnalyzer(
                                            frameErr, str((algorithm, lvl)))
                                try:
                                    try:
                                        local_struct[algorithm]['baseline'] = \
                                            local_struct[algorithm]['error_baseline'][lvl]
                                    except KeyError:
                                        pass
                                    temp, final_sampling_ratio[algorithm][realization, lvl] = \
                                        reconstructor(local_struct,
                                                      paths_latlon_noisy[:, :, realization, lvl],
                                                      algorithm,
                                                      noise_vals[:, realization, lvl])
                                    reconstructed_latlon_paths[algorithm][:, :, realization, lvl] = temp
                                    try:
                                        reconstructed_WM_paths[algorithm][:, :, realization, lvl] = \
                                            cord.generate_WM_array(temp)
                                    except ValueError as valerr:
                                        self.logger.debug(
                                            "Lat/Lon out of range in degrees")
                                        errdict = {
                                            "file": __file__,
                                            "message": valerr.args[0],
                                            "errorType": CErrorTypes.range
                                        }
                                        raise CFrameworkError(errdict) from valerr
                                except CFrameworkError as frameErr:
                                    self.errorAnalyzer(frameErr,
                                                       str((algorithm, lvl)))

        if local_struct['bTrainNetwork']:
            from NeuralNetworks.NN import CNeuralNetwork
            # Iterate over the total number of realizations to generate training set
            modelname_lat = self.paramPath + 'NeuralNetworks' + direc_ident + 'Models' + direc_ident \
                            + local_struct["Train_NN"]["modelname_lat"]
            modelname_lon = self.paramPath + 'NeuralNetworks' + direc_ident + 'Models' + direc_ident \
                            + local_struct["Train_NN"]["modelname_lon"]

            nnObj = CNeuralNetwork(local_struct, "Train_NN")
            nnObj.design_nn()
            results_lat, results_lon = nnObj.train_nn(paths_latlon_org,
                                                      paths_latlon_noisy)
            nnObj.save_models(modelname_lat, modelname_lon)

            # if nnObj.dump_nn_summary():
            #    self.logAnalyzer(nnObj.messageSummary_dict, modelname_lat)
            #    self.logAnalyzer(nnObj.messageSummary_dict, modelname_lon)

            if local_struct["Train_NN"]["bPlotTrainResults"]:
                nnObj.train_result_visu(
                    results_lat, results_lon,
                    local_struct["Train_NN"]["modelname_lat"],
                    local_struct["Train_NN"]["modelname_lon"])

        # Store data in local struct
        local_struct['RESULTS']['paths_wm_org'] = paths_wm_org
        local_struct['RESULTS']['paths_latlon_org'] = paths_latlon_org
        local_struct['RESULTS']['paths_wm_noisy'] = paths_wm_noisy
        local_struct['RESULTS']['paths_latlon_noisy'] = paths_latlon_noisy
        local_struct['RESULTS']['transformed_paths'] = transformed_paths
        local_struct['RESULTS']['reconstructed_latlon_paths'] = reconstructed_latlon_paths
        local_struct['RESULTS']['reconstructed_WM_paths'] = reconstructed_WM_paths
        local_struct['RESULTS']['final_sampling_ratio'] = final_sampling_ratio
        local_struct['RESULTS']['noise_vals'] = noise_vals

        self.logger.debug('Generating results and plotting')
        try:
            process_data(local_struct)
        except CFrameworkError as frameErr:
            self.errorAnalyzer(frameErr, "process_data")

        self.exit_framework()
        return self.frameworkError_list