Example #1
    def _get_slices(self, crs):
        """
        Returns the slices for the given collection of files
        """
        files = self.session.get_files()
        crs_axes = CRSUtil(crs).get_axes(self.session.coverage_id)

        slices = []
        count = 1
        for file in files:
            # NOTE: skip any file already recorded in *.resume.json; re-importing it would just waste time
            if not self.resumer.is_file_imported(file.filepath):
                timer = Timer()

                # print which file is being analyzed
                FileUtil.print_feedback(count, len(files), file.filepath)
                if not FileUtil.validate_file_path(file.filepath):
                    continue

                valid_coverage_slice = True
                try:
                    subsets = GdalAxisFiller(crs_axes, GDALGmlUtil(file.get_filepath())).fill()
                except Exception as ex:
                    # If "skip": true, just exclude this file from the import; otherwise raise the exception
                    FileUtil.ignore_coverage_slice_from_file_if_possible(file.get_filepath(), ex)
                    valid_coverage_slice = False

                if valid_coverage_slice:
                    slices.append(Slice(subsets, FileDataProvider(file)))

                timer.print_elapsed_time()
                count += 1

        return slices
Example #2
    def __init__(self, config, db_name, log_root_path):
        """
        :type config: dict
        :param config: the DB configuration file

        :type db_name: str
        :param db_name: name of an existing DB

        :type log_root_path: str
        :param log_root_path: the log path
        """
        self._date_util = DateUtil()
        self._db_util = DbUtil()

        self._logging_util = LoggingUtil()
        self._log_path = log_root_path + "export-file-json-" + db_name
        self._logger = self._logging_util.get_logger(self._log_path)
        self._fileHandler = self._logging_util.get_file_handler(
            self._logger, self._log_path, "info")

        self._db_name = db_name
        config.update({'database': db_name})
        self._config = config

        self._cnx = self._db_util.get_connection(self._config)
        self._db_util.set_database(self._cnx, self._db_name)
        self._db_util.set_settings(self._cnx)
        self._file_util = FileUtil(self._config, self._logger)
Example #3
File: mediator.py  Project: javyxu/rasdaman
 def get_gml_file(self):
     """
     Returns a File object pointing to a temporary file containing the coverage held by the mediator
     :rtype: File
     """
     gml = self.get_gml_coverage().to_gml()
     fu = FileUtil()
     return File(fu.write_to_tmp_file(gml))
Example #4
    def _get_coverage_slices(self, crs, gdal_coverage_converter):
        """
        Returns the slices for the given collection of files
        """
        crs_axes = CRSUtil(crs).get_axes(self.session.coverage_id)

        slices_dict = self.create_dict_of_slices(self.session.import_overviews)

        timeseries = self._generate_timeseries_tuples()
        count = 1
        for tpair in timeseries:
            file = tpair.file
            file_path = tpair.file.get_filepath()

            timer = Timer()

            # print which file is being analyzed
            FileUtil.print_feedback(count, len(timeseries), file_path)
            if not FileUtil.validate_file_path(file_path):
                continue

            valid_coverage_slice = True

            gdal_file = GDALGmlUtil(file.get_filepath())
            try:
                subsets = GdalAxisFiller(crs_axes, gdal_file).fill(True)
                subsets = self._fill_time_axis(tpair, subsets)
            except Exception as ex:
                # If "skip": true, just exclude this file from the import; otherwise raise the exception
                FileUtil.ignore_coverage_slice_from_file_if_possible(
                    file_path, ex)
                valid_coverage_slice = False

            if valid_coverage_slice:
                # Generate local metadata string for current coverage slice
                self.evaluator_slice = EvaluatorSliceFactory.get_evaluator_slice(
                    self.recipe_type, tpair.file)
                local_metadata = gdal_coverage_converter._generate_local_metadata(
                    subsets, self.evaluator_slice)
                if self.session.import_overviews_only is False:
                    slices_dict["base"].append(
                        Slice(subsets, FileDataProvider(tpair.file),
                              local_metadata))

                # Then create slices for the overviews selected by the user
                for overview_index in self.session.import_overviews:
                    subsets_overview = self.create_subsets_for_overview(
                        subsets, overview_index, gdal_file)

                    slices_dict[str(overview_index)].append(
                        Slice(subsets_overview, FileDataProvider(file),
                              local_metadata))

            timer.print_elapsed_time()
            count += 1

        return slices_dict
Example #5
    def _generate_timeseries_tuples(self, limit=None):
        """
        Generate the timeseries tuples from the original files based on the recipe,
        and sort the files by time.
        :rtype: list[TimeFileTuple]
        """
        ret = []
        if limit is None:
            limit = len(self.session.get_files())

        time_format = None
        if 'datetime_format' in self.options['time_parameter']:
            time_format = self.options['time_parameter']['datetime_format']

        if 'metadata_tag' in self.options['time_parameter']:
            mtag = self.options['time_parameter']['metadata_tag']['tag_name']
            for tfile in self.session.get_files():
                if len(ret) == limit:
                    break

                valid_file = True

                try:
                    gdal_file = GDALGmlUtil(tfile.get_filepath())
                except Exception as ex:
                    FileUtil.ignore_coverage_slice_from_file_if_possible(
                        tfile.get_filepath(), ex)
                    valid_file = False

                if valid_file:
                    dtutil = DateTimeUtil(gdal_file.get_datetime(mtag),
                                          time_format,
                                          self.options['time_crs'])
                    ret.append(TimeFileTuple(dtutil, tfile))
        elif 'filename' in self.options['time_parameter'] and len(ret) < limit:
            regex = self.options['time_parameter']['filename']['regex']
            group = int(self.options['time_parameter']['filename']['group'])
            for tfile in self.session.get_files():
                if len(ret) == limit:
                    break
                dtutil = DateTimeUtil(
                    re.search(regex, tfile.filepath).group(group), time_format,
                    self.options['time_crs'])
                ret.append(TimeFileTuple(dtutil, tfile))
        else:
            raise RecipeValidationException(
                "No method to get the time parameter, you should either choose "
                "metadata_tag or filename.")

        # Currently coverage slices are only sorted by datetime before importing (ascending by default, descending as an option)
        if self.options[
                "import_order"] == AbstractToCoverageConverter.IMPORT_ORDER_DESCENDING:
            return sorted(ret, reverse=True)

        return sorted(ret)
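The 'filename' branch above extracts the timestamp from each file name with a regular expression group before building the TimeFileTuple list. A minimal standalone sketch of that extraction, using only the standard library; the file name, regex and format below are made up for illustration:

import re
from datetime import datetime

# Hypothetical file name and time_parameter settings; the real values come
# from the 'time_parameter' -> 'filename' section of the ingredients file.
file_path = "/data/MODIS_2021-06-15_subset.tif"
regex = r".*_(\d{4}-\d{2}-\d{2})_.*"
group = 1
time_format = "%Y-%m-%d"

match = re.search(regex, file_path)
if match is None:
    raise ValueError("file name does not match the configured regex")

# Parse the captured group with the configured datetime format
timestamp = datetime.strptime(match.group(group), time_format)
print(timestamp.isoformat())  # 2021-06-15T00:00:00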
Example #6
    def is_file_imported(self, input_file_path):
        """
        Check if a file was imported and exists in *.resume.json
        :param input_file_path: path to input file
        """
        if self.coverage_id in Resumer.__IMPORTED_DATA_DICT:
            for imported_file in Resumer.__IMPORTED_DATA_DICT[self.coverage_id]:
                if FileUtil.strip_root_url(input_file_path) == FileUtil.strip_root_url(imported_file):
                    return True

        return False
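is_file_imported above only reports a match after both paths have gone through FileUtil.strip_root_url, so the same file referenced with and without a URL scheme compares equal. A rough standalone sketch of that comparison; strip_root_url here is a hypothetical stand-in that merely drops a leading 'file://' prefix:

def strip_root_url(path):
    # Hypothetical stand-in for FileUtil.strip_root_url: drop a leading "file://"
    prefix = "file://"
    return path[len(prefix):] if path.startswith(prefix) else path

imported_files = ["file:///data/t1.tif", "file:///data/t2.tif"]

def is_file_imported(input_file_path):
    return any(strip_root_url(input_file_path) == strip_root_url(f)
               for f in imported_files)

print(is_file_imported("/data/t1.tif"))  # True
print(is_file_imported("/data/t3.tif"))  # False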
Example #7
    def validate_base(self, ignore_no_files=False):
        """
        Validates the configuration and the input files
        :param bool ignore_no_files: if the extending recipe does not work with files, set this to true to skip
        the check that input files exist (used in the wcs_extract recipe).
        """
        if self.session.get_wcs_service() is None or self.session.get_wcs_service() == "":
            raise RecipeValidationException("No valid wcs endpoint provided")
        if self.session.get_crs_resolver() is None or self.session.get_crs_resolver() == "":
            raise RecipeValidationException("No valid crs resolver provided")
        if self.session.get_coverage_id() is None or self.session.get_coverage_id() == "":
            raise RecipeValidationException("No valid coverage id provided")
        if not FileUtil.check_dir_writable(ConfigManager.tmp_directory):
            raise RecipeValidationException(
                "Cannot write to tmp directory '{}'".format(
                    ConfigManager.tmp_directory))

        checked_files = []

        for file in self.session.get_files():
            if FileUtil.validate_file_path(file.get_filepath()):
                checked_files.append(file)

        if not ignore_no_files:
            # If no input file is available, exit wcst_import.
            FileUtil.validate_input_file_paths(checked_files)

        self.session.files = checked_files

        if 'wms_import' not in self.options:
            self.options['wms_import'] = False
        else:
            self.options['wms_import'] = bool(self.options['wms_import'])

        if 'tiling' not in self.options:
            self.options['tiling'] = None

        if 'scale_levels' not in self.options:
            self.options['scale_levels'] = None

        if "import_order" in self.options:
            if self.options['import_order'] != AbstractToCoverageConverter.IMPORT_ORDER_ASCENDING \
                    and self.options['import_order'] != AbstractToCoverageConverter.IMPORT_ORDER_DESCENDING:
                error_message = "'import_order' option must be '{}' or '{}', given '{}'.".\
                                  format(AbstractToCoverageConverter.IMPORT_ORDER_ASCENDING,
                                         AbstractToCoverageConverter.IMPORT_ORDER_DESCENDING,
                                         self.options['import_order'])
                raise RecipeValidationException(error_message)
        else:
            self.options['import_order'] = None
Example #8
def main():
    """
    Main function to put the pieces together and run the recipe
    """
    # NOTE: don't let GDAL create auxiliary files, which causes problems when there is no write permission on the input data folder
    command = "export GDAL_PAM_ENABLED=NO"
    os.system(command)

    reg = RecipeRegistry()
    validate()

    # Parse input arguments from command line
    arguments = parse_arguments()
    ingredients_file_path = arguments.ingredients_file

    ConfigManager.user = arguments.user
    ConfigManager.passwd = arguments.passwd

    if arguments.identity_file is not None:
        key_value = FileUtil.read_file_to_string(arguments.identity_file)
        if ":" not in key_value:
            raise RuntimeException(
                "credentials in the identity file '" +
                arguments.identity_file + "' "
                "must be specified in the file as username:password")
        tmps = key_value.split(":")
        ConfigManager.user = tmps[0].strip()
        ConfigManager.passwd = tmps[1].strip()

    try:
        ingredients = decode_ingredients(
            read_ingredients(ingredients_file_path))
        hooks = ingredients["hooks"] if "hooks" in ingredients else None
        session = Session(ingredients['config'], ingredients['input'],
                          ingredients['recipe'], hooks,
                          os.path.basename(ingredients_file_path),
                          FileUtil.get_directory_path(ingredients_file_path))
        reg.run_recipe(session)
    except RecipeValidationException as re:
        log.error(str(re))
        exit_error()
    except RuntimeException as re:
        log.error(str(re))
        exit_error()
    except WCSTException as re:
        log.error(str(re))
        exit_error()
    except Exception as ex:
        log.error(
            "An error has occured in the execution of the program. Error Message: "
            + str(ex) + "\nStack Trace: " + traceback.format_exc())
        exit_error()
Example #9
 def _get_data_type(self, slice):
     """
     Returns the data type of the slice by downloading the slice and trying to guess it with GDAL
     :param Slice slice: slice
     :rtype: str
     """
     if isinstance(slice.data_provider, UrlDataProvider):
         # Do this only for coverages that have more than one axis
         if len(slice.axis_subsets) > 1:
             fu = FileUtil()
             contents = validate_and_read_url(slice.data_provider.get_url())
             file_path = fu.write_to_tmp_file(contents, "tif")
             return GDALGmlUtil(file_path).get_band_gdal_type()
     return None
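The method above downloads the slice, writes it to a temporary file and lets GDAL guess the band data type. A rough standalone equivalent using urllib, tempfile and the GDAL Python bindings; the URL is a placeholder, and get_band_gdal_type is assumed to boil down to reading the first band's data type:

import tempfile
from urllib.request import urlopen

from osgeo import gdal

url = "https://example.org/slice.tif"  # placeholder URL
contents = urlopen(url).read()

# Write the downloaded bytes to a temporary .tif file so GDAL can open it
with tempfile.NamedTemporaryFile(suffix=".tif", delete=False) as tmp:
    tmp.write(contents)
    tmp_path = tmp.name

dataset = gdal.Open(tmp_path)
band = dataset.GetRasterBand(1)
print(gdal.GetDataTypeName(band.DataType))  # e.g. "Byte" or "Float32"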
Example #10
    def _create_coverage_slices(self, crs_axes, calculated_evaluator_slice=None, axis_resolutions=None):
        """
        Returns all the coverage slices for this coverage
        :param crs_axes:
        :rtype: list[Slice]
        """
        slices = []
        count = 1
        for file in self.files:
            # NOTE: don't process any previously imported file (recorded in *.resume.json)
            if not self.resumer.is_file_imported(file.filepath):
                timer = Timer()

                # print which file is being analyzed
                FileUtil.print_feedback(count, len(self.files), file.filepath)

                if not FileUtil.validate_file_path(file.filepath):
                    continue

                valid_coverage_slice = True

                evaluator_slice = None

                try:
                    if calculated_evaluator_slice is None:
                        # get the evaluator for the current recipe_type (each recipe has a different evaluator)
                        evaluator_slice = EvaluatorSliceFactory.get_evaluator_slice(self.recipe_type, file)
                    else:
                        evaluator_slice = calculated_evaluator_slice

                    if self.data_type is None:
                        self.data_type = evaluator_slice.get_data_type(self)

                    coverage_slice = self._create_coverage_slice(file, crs_axes, evaluator_slice, axis_resolutions)
                except Exception as ex:
                    # If "skip": true, just exclude this file from the import; otherwise raise the exception
                    FileUtil.ignore_coverage_slice_from_file_if_possible(file.get_filepath(), ex)
                    valid_coverage_slice = False

                if valid_coverage_slice:
                    slices.append(coverage_slice)

                timer.print_elapsed_time()
                count += 1

        # Currently coverage slices are only sorted by datetime before importing (ascending by default)
        reverse = (self.import_order == self.IMPORT_ORDER_DESCENDING)
        return sort_slices_by_datetime(slices, reverse)
Example #11
def main():
    """
    Main function to put the pieces together and run the recipe
    """
    # NOTE: don't let GDAL create auxiliary files, which causes problems when there is no write permission on the input data folder
    command = "export GDAL_PAM_ENABLED=NO"
    os.system(command)

    reg = RecipeRegistry()
    validate()
    try:
        ingredients = decode_ingredients(read_ingredients())
        hooks = ingredients["hooks"] if "hooks" in ingredients else None
        session = Session(ingredients['config'], ingredients['input'],
                          ingredients['recipe'], hooks,
                          os.path.basename(sys.argv[1]),
                          FileUtil.get_directory_path(sys.argv[1]))
        reg.run_recipe(session)
    except RecipeValidationException as re:
        log.error(str(re))
        exit_error()
    except RuntimeException as re:
        log.error(str(re))
        exit_error()
    except WCSTException as re:
        log.error(str(re))
        exit_error()
    except Exception as ex:
        log.error(
            "An error has occured in the execution of the program. Error Message: "
            + str(ex) + "\nStack Trace: " + traceback.format_exc())
        exit_error()
Example #12
def test_main():
    f1 = FunctionFactory.create_function(
        FileUtil.read_file("./lldb_sub_103CE4E48.txt"))
    # F.dumps(f1)
    registers = Parser.load_register_from_lldb_text("./tmp/lldb_registers.txt")
    # dump memory: dumpM [($sp - 0x330), 4096], may need to be even larger..
    # Update: recommend dumpM ($sp - 0x380)
    # mems = Parser.load_mem_from_lldb_text("./tmp/lldb_mem_0x16dcb8680.txt")
    mems = Parser.load_mem_from_lldb_text("./tmp/lldb_mem_0x1701f4680.txt")
    arm = ARMEleciron().init_environment(registers, mems, [f1])
    R.r("pc").s_value(f1.func_address)  # pc修复
    # <+20036>: mem read ($x5 + 0xc0) => mem read ($sp + 0x208 + 0xc0)
    # mem_0x10bb45100_line20036 = Parser.load_mem_from_lldb_text("./tmp/lldb_mem_0x010bb45100.txt") # 20036 [x9]
    mem_0x10bb45100_line20036 = Parser.load_mem_from_lldb_text(
        "./tmp/lldb_mem_0x1094f1100.txt")  # 20036 [x9]
    arm.init_mems(mem_0x10bb45100_line20036)
    # <+1540>:mem read $sp - 24
    mem_0x1701f4668_line1540 = Parser.load_mem_from_lldb_text(
        "./tmp/lldb_mem_0x1701f4668.txt")
    arm.init_mems(mem_0x1701f4668_line1540)
    # <+464> -> <+476> -> <+492> -> <+1252 -> <+2508> -> <+42060>: these are return locations
    # 2020-08-12 test 0x103CE4E48: dumpM 0x1701f39b0 ($sp-0x1000) 0x3096
    # 2020-08-15 test: there are 5 places that store 0x28... inspect the first 4: 0x170030e38 ($sp+0x488) > 0x170030d20 > 0x170030560 > 0x170030bc8,
    #   1. 0x170030e38 + 0x860 is where a single value is read.  lldb_func_symbol482833$$Aweme_348.txt <+324>
    # BP.register(OpCodePosition('Op.and_("w9", "w9", 0x80000000)')) # breakpoint at: +25492
    # BP.register(OpCodePosition('Op.cmp("w10", R.r("w9") >> 26)'))
    # BP.register(OpCodePosition('Op.stur("x3", (R.r("x8") - 0xd0).ptr)'))
    # BP.register(OpLineNoPosition("1540"))
    # BP.register(OpLineNoPosition("27964"))
    arm.start()
Example #13
def main():
    """
    Main function to put the pieces together and run the recipe
    """
    reg = RecipeRegistry()
    validate()
    try:
        ingredients = decode_ingredients(read_ingredients())
        session = Session(ingredients['config'], ingredients['input'], ingredients['recipe'],
                          os.path.basename(sys.argv[1]),
                          FileUtil.get_directory_path(sys.argv[1]))
        reg.run_recipe(session)
    except RecipeValidationException as re:
        log.error(str(re))
        exit_error()
    except RuntimeException as re:
        log.error(str(re))
        exit_error()
    except WCSTException as re:
        log.error(str(re))
        exit_error()
    except Exception as ex:
        log.error("An error has occured in the execution of the program. Error Message: " + str(
            ex) + "\nStack Trace: " + traceback.format_exc())
        exit_error()
Example #14
    def create_sample(self, *, caller_file: str) -> None:
        """
        Create sample from history record.

        Pass __file__ variable of the caller script as caller_file
        parameter. It will be used as both input and output file prefix.
        """

        if len(self.columns) != 2:
            raise RuntimeError("Scatter plot must specify two columns.")

        # Prefix for all data files
        caller_name = FileUtil.get_caller_name(caller_file=caller_file)

        x_feature = self.columns[0]
        y_feature = self.columns[1]

        df = pd.read_csv(f"{caller_name}.{self.input_file}.csv")
        if self.countries is not None:
            df = df.loc[df['LOCATION'].isin(self.countries)]

        # Regression using buckets
        tolerance = 1e-10
        x_bucket_boundaries = list(
            np.arange(self.min, self.max + tolerance, self.step, dtype=float))
        x_bucket_mean_list = []
        y_bucket_mean_list = []
        y_bucket_std_list = []
        for x_bucket_index in range(len(x_bucket_boundaries) - 1):

            # Range of values for the bucket
            x_bucket_min = x_bucket_boundaries[x_bucket_index]
            x_bucket_max = x_bucket_boundaries[x_bucket_index + 1]

            # DF filter (lower value inclusive, higher value exclusive)
            bucket_filter = (df[x_feature] >= x_bucket_min) & (df[x_feature] <
                                                               x_bucket_max)
            bucket_df = df[bucket_filter]

            # Skip if no points
            if len(bucket_df[x_feature]) == 0:
                continue

            # Create (x,y) lists for mean and std line charts
            x_bucket_mean = bucket_df[x_feature].values.mean()
            y_bucket_mean = bucket_df[y_feature].values.mean()
            y_bucket_std = bucket_df[y_feature].values.std()
            x_bucket_mean_list.append(x_bucket_mean)
            y_bucket_mean_list.append(y_bucket_mean)
            y_bucket_std_list.append(y_bucket_std)

        # Save sample to file
        sample_df = pd.DataFrame({
            x_feature: x_bucket_mean_list,
            f"mean({y_feature})": y_bucket_mean_list,
            f"std_dev({y_feature})": y_bucket_std_list,
        })
        sample_df.to_csv(f"{caller_name}.bucket.csv",
                         index=False,
                         float_format="%.6f")
Example #15
 def search_tweet(self, search_term, filename):
     self.__search_term = search_term
     self.__filename = filename
     if not FileUtil.check_json_file(self.__filename):
         self.__request_tweet()
     self.__load_tweets(self.__filename)
     return self.__get_best_tweet()
Example #16
    def _get_coverage_slices(self, crs, gdal_coverage_converter):
        """
        Returns the slices for the given collection of files
        """
        crs_axes = CRSUtil(crs).get_axes(self.session.coverage_id)

        slices = []
        timeseries = self._generate_timeseries_tuples()
        count = 1
        for tpair in timeseries:
            file_path = tpair.file.get_filepath()

            # NOTE: skip any file already recorded in *.resume.json; re-importing it would just waste time
            if not self.resumer.is_file_imported(file_path):
                timer = Timer()

                # print which file is being analyzed
                FileUtil.print_feedback(count, len(timeseries), file_path)

                if not FileUtil.validate_file_path(file_path):
                    continue

                valid_coverage_slice = True
                try:
                    subsets = GdalAxisFiller(crs_axes,
                                             GDALGmlUtil(file_path)).fill(True)
                    subsets = self._fill_time_axis(tpair, subsets)
                except Exception as ex:
                    # If "skip": true, just exclude this file from the import; otherwise raise the exception
                    FileUtil.ignore_coverage_slice_from_file_if_possible(
                        file_path, ex)
                    valid_coverage_slice = False

                if valid_coverage_slice:
                    # Generate local metadata string for current coverage slice
                    self.evaluator_slice = EvaluatorSliceFactory.get_evaluator_slice(
                        self.recipe_type, tpair.file)
                    local_metadata = gdal_coverage_converter._generate_local_metadata(
                        subsets, self.evaluator_slice)
                    slices.append(
                        Slice(subsets, FileDataProvider(tpair.file),
                              local_metadata))

                timer.print_elapsed_time()
                count += 1

        return slices
Example #17
 def get_features(self, selected_tweet, filename):
     """Busca os amigos do usuário que escreveu o tweet."""
     if not FileUtil.check_json_file(filename):
         self.__create_file(selected_tweet, filename)
         return self.__load_file(selected_tweet.user_id, filename)
     else:
         return self.get_features_by_user_id(selected_tweet.user_id,
                                             filename)
Example #18
File: recipe.py  Project: javyxu/rasdaman
 def _get_slices(self, gdal_dataset):
     """
     Returns the slices for the given collection of files
     """
     files = self.session.get_files()
     crs = gdal_dataset.get_crs()
     crs_axes = CRSUtil(crs).get_axes()
     slices = []
     count = 1
     for file in files:
         # print which file is being analyzed
         FileUtil.print_feedback(count, len(files), file.filepath)
         subsets = GdalAxisFiller(crs_axes,
                                  GDALGmlUtil(file.get_filepath())).fill()
         slices.append(Slice(subsets, FileDataProvider(file)))
         count += 1
     return slices
Example #19
    def _slices(self, crs_axes):
        """
        Returns all the slices for this coverage
        :param crs_axes:
        :rtype: list[Slice]
        """
        slices = []
        count = 1
        for file in self.files:
            # print which file is being analyzed
            FileUtil.print_feedback(count, len(self.files), file.filepath)
            slices.append(self._slice(file, crs_axes))
            count += 1
        # NOTE: sort all slices by the datetime axis to avoid a later
        # time slice being added before an earlier one
        sorted_slices = sort_slices_by_datetime(slices)

        return sorted_slices
Example #20
 def start(self, database="inventory", directory="../resources/migration"):
     connection = sqlite3.connect(database)
     file_paths = FileUtil.get_file_paths_from_directory(directory)
     for file_path in file_paths:
         with open(file_path) as sql_file:
             for line in sql_file:
                 connection.execute(line)
             connection.commit()
     # close the connection only after all migration files have been applied
     connection.close()
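Feeding the file to sqlite3 one line at a time only works when every SQL statement fits on a single line; sqlite3's executescript runs a whole file in one call. A minimal sketch of the same migration runner under that assumption, with glob standing in for FileUtil.get_file_paths_from_directory and illustrative paths:

import glob
import os
import sqlite3

def run_migrations(database="inventory", directory="../resources/migration"):
    # Run every .sql file in the directory as one script each
    connection = sqlite3.connect(database)
    try:
        for file_path in sorted(glob.glob(os.path.join(directory, "*.sql"))):
            with open(file_path) as sql_file:
                # executescript handles multi-line and multiple statements
                connection.executescript(sql_file.read())
        connection.commit()
    finally:
        connection.close()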
Example #21
    def delete_plot(*, caller_file: str) -> None:
        """
        Delete plot file.

        Pass __file__ variable of the caller script as caller_file
        parameter. It will be used as both input and output file prefix.
        """

        caller_name = FileUtil.get_caller_name(caller_file=caller_file)
        os.remove(f"{caller_name}.sample.scatter.png")
Example #22
File: recipe.py  Project: javyxu/rasdaman
 def _get_slices(self, crs):
     """
     Returns the slices for the given collection of files
     """
     crs_axes = CRSUtil(crs).get_axes()
     slices = []
     timeseries = self._generate_timeseries_tuples()
     count = 1
     for tpair in timeseries:
         # print which file is being analyzed
         FileUtil.print_feedback(count, len(timeseries),
                                 tpair.file.filepath)
         subsets = GdalAxisFiller(crs_axes,
                                  GDALGmlUtil(
                                      tpair.file.get_filepath())).fill()
         subsets = self._fill_time_axis(tpair, subsets)
         slices.append(Slice(subsets, FileDataProvider(tpair.file)))
         count += 1
     return slices
Example #23
 def __execute_metrics(self, kmeans_array):
     array_data = [
         MetricsFunctions.execute_silhouette(kmeans_array=kmeans_array,
                                             data=self.__data),
         MetricsFunctions.execute_v_measure_score(kmeans_array=kmeans_array,
                                                  labels=self.__labels),
         MetricsFunctions.execute_adjusted_rand_score(
             kmeans_array=kmeans_array, labels=self.__labels),
         MetricsFunctions.execute_calinski_harabasz_score(
             kmeans_array=kmeans_array, data=self.__data)
     ]
     array_metrics_names = [
         MetricsFunctions.SILHOUETTE, MetricsFunctions.V_MEASURE_SCORE,
         MetricsFunctions.ADJUSTED_RAND_SCORE,
         MetricsFunctions.CALINSKI_HARABASZ_SCORE
     ]
     FileUtil.create_file(array_data=array_data,
                          array_metrics_names=array_metrics_names,
                          filename=self.__option)
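The MetricsFunctions wrappers above presumably delegate to scikit-learn's clustering metrics. A small self-contained sketch of the underlying sklearn calls on synthetic data; the wrapper class and file output are left out, only the metric functions themselves are shown:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import (adjusted_rand_score, calinski_harabasz_score,
                             silhouette_score, v_measure_score)

# Synthetic two-blob data set with known labels
rng = np.random.default_rng(0)
data = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])
labels = np.array([0] * 50 + [1] * 50)

predicted = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(data)

print(silhouette_score(data, predicted))
print(v_measure_score(labels, predicted))
print(adjusted_rand_score(labels, predicted))
print(calinski_harabasz_score(data, predicted))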
Example #24
    def delete_sample(self, *, caller_file: str) -> None:
        """
        Delete sample file.

        Pass __file__ variable of the caller script as caller_file
        parameter. It will be used as both input and output file prefix.
        """

        caller_name = FileUtil.get_caller_name(caller_file=caller_file)
        os.remove(f"{caller_name}.bucket.csv")
Example #25
    def cleanup(self, *, caller_file: str) -> None:
        """
        Delete all files generated by this script.

        Pass __file__ variable of the caller script as caller_file
        parameter. It will be used as both input and output file prefix.
        """

        caller_name = FileUtil.get_caller_name(caller_file=caller_file)
        os.remove(f"{caller_name}.history.short_rate.csv")
        os.remove(f"{caller_name}.history.term_rate.csv")
Example #26
    def __get_file_paths(self, paths):
        """"
        Get the list of file paths to be imported
        """
        file_paths = []
        for path in paths:
            path = path.strip()
            file_paths = file_paths + FileUtil.get_file_paths_by_regex(
                self.ingredients_dir_path, path)

        return file_paths
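FileUtil.get_file_paths_by_regex expands each configured path pattern relative to the ingredients directory. Assuming glob-style patterns, a rough stand-in could look like this; the directory and patterns are illustrative:

import glob
import os

def get_file_paths(ingredients_dir_path, paths):
    # Expand each (possibly relative) glob-style pattern and collect the matches
    file_paths = []
    for path in paths:
        pattern = path.strip()
        if not os.path.isabs(pattern):
            pattern = os.path.join(ingredients_dir_path, pattern)
        file_paths += sorted(glob.glob(pattern))
    return file_paths

# e.g. get_file_paths("/data/ingest", ["*.tif", "2021/*.tif"])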
Example #27
    def delete_plot(*, caller_file: str) -> None:
        """
        Delete plot file.

        Pass __file__ variable of the caller script as caller_file
        parameter. It will be used as both input and output file prefix.
        """

        caller_name = FileUtil.get_caller_name(caller_file=caller_file)
        file_name = f"{caller_name}.{self.output_file.lower()}.png"
        os.remove(file_name)
Example #28
    def open_gdal_dataset_from_any_file(files):
        """
        Open one dataset in order to get the common metadata shared by all input files.
        :param list files: input files
        """
        gdal_dataset = None

        for file in files:
            try:
                gdal_dataset = GDALGmlUtil(file.get_filepath())
                return gdal_dataset
            except Exception as ex:
                # Cannot open this file with GDAL; try the next one
                if ConfigManager.skip:
                    continue
                else:
                    raise

        if gdal_dataset is None:
            # Could not open any dataset from the input files; exit the wcst_import process
            FileUtil.validate_input_file_paths([])
Example #29
File: main.py  Project: dnsoumik/ether
class IndexHandler(tornado.web.RequestHandler, metaclass=ABCMeta):
    fu = FileUtil()

    async def prepare(self):
        self.set_status(404)
        with open('./lib/http_error/404.html', 'r') as error_page_raw:
            error_page = error_page_raw.read()
        self.write(
            error_page.format(self.fu.serverUrl)
        )
        await self.finish()
        return
Example #30
    def get_valid_files(self):
        """
            Valid file path could be opened by GDAL
            files is list of files need to valid
        """
        # Validate input files by GDAL. If GDAL could not decode file then will have an warning.
        # GDAL needs file name encode in 'utf8' or file name with spaces could not open.
        file_paths = []

        for file in self.files:
            fileName = str(file).encode('utf8')
            try:
                check = gdal.Open(fileName)
                file_paths = file_paths + [file]
            except Exception as e:
                log.warn(
                    "WARNING: File " + fileName +
                    " is not is not a valid GDAL decodable file. Reason: " +
                    str(e) + ". The import process will ignore this file.\n")

        FileUtil.validate_input_file_paths(file_paths)

        return file_paths
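Note that by default gdal.Open returns None rather than raising, so the try/except above only catches failures once GDAL exceptions are enabled. A minimal sketch of validating a file list with exceptions switched on; the file names are placeholders:

from osgeo import gdal

gdal.UseExceptions()  # make gdal.Open raise instead of returning None

def valid_gdal_files(file_paths):
    valid = []
    for file_path in file_paths:
        try:
            gdal.Open(file_path)
            valid.append(file_path)
        except Exception as e:
            print("WARNING: skipping '{}': {}".format(file_path, e))
    return valid

# e.g. valid_gdal_files(["/data/a.tif", "/data/broken.tif"])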
Example #31
def main():
    """
    Main function to put the pieces together and run the recipe
    """
    reg = RecipeRegistry()
    validate()
    try:
        ingredients = decode_ingredients(read_ingredients())
        session = Session(ingredients['config'], ingredients['input'], ingredients['recipe'],
                          FileUtil.get_directory_path(sys.argv[1]))
        reg.run_recipe(session)
    except RecipeValidationException as re:
        log.error(str(re))
        exit_error()
    except RuntimeException as re:
        log.error(str(re))
        exit_error()
    except WCSTException as re:
        log.error(str(re))
        exit_error()
    except Exception as ex:
        log.error("An error has occured in the execution of the program. Error Message: " + str(
            ex) + "\nStack Trace: " + traceback.format_exc())
        exit_error()
Example #32
 def release(self):
     if ConfigManager.mock is False:
         fu = FileUtil()
         fu.delete_file(self.filepath)