def run_analysis_case(train_data: InputData, test_data: InputData,
                      case_name: str, task, metric,
                      is_composed=False, result_path=None):
    """Fit a task-specific pipeline and run node-level sensitivity analysis on it.

    :param train_data: data the pipeline is fitted on
    :param test_data: data used by the sensitivity approaches
    :param case_name: case label; used to build the default results folder
    :param task: task the pipeline is built for
    :param metric: metric used when building the pipeline
    :param is_composed: whether the pipeline is obtained via composition
    :param result_path: folder for analysis artifacts; defaults under the FEDOT data dir
    """
    pipeline = pipeline_by_task(task=task, metric=metric,
                                data=train_data, is_composed=is_composed)
    pipeline.fit(train_data)

    if not result_path:
        result_path = join(default_fedot_data_dir(), 'sensitivity', f'{case_name}')
    if not exists(result_path):
        makedirs(result_path)

    pipeline.show(path=result_path)

    analysis_result = NodesAnalysis(
        pipeline=pipeline,
        train_data=train_data,
        test_data=test_data,
        path_to_save=result_path,
        approaches=[NodeDeletionAnalyze, NodeReplaceOperationAnalyze]).analyze()

    print(f'pipeline analysis result {analysis_result}')
def run_analysis_case(train_data: InputData, test_data: InputData,
                      case_name: str, task, metric,
                      is_composed=False, result_path=None):
    """Fit a task-specific chain, visualise it, and run structure sensitivity analysis.

    :param train_data: data the chain is fitted on
    :param test_data: data used by the sensitivity approaches
    :param case_name: case label; used to build the default results folder
    :param task: task the chain is built for
    :param metric: metric used when building the chain
    :param is_composed: whether the chain is obtained via composition
    :param result_path: folder for analysis artifacts; defaults under the FEDOT data dir
    """
    chain = chain_by_task(task=task, metric=metric,
                          data=train_data, is_composed=is_composed)
    chain.fit(train_data)

    if not result_path:
        result_path = join(default_fedot_data_dir(), 'sensitivity', f'{case_name}')
    if not exists(result_path):
        makedirs(result_path)

    ChainVisualiser().visualise(chain, save_path=result_path)

    analysis_result = ChainStructureAnalyze(
        chain=chain,
        train_data=train_data,
        test_data=test_data,
        all_nodes=True,
        path_to_save=result_path,
        approaches=[NodeDeletionAnalyze, NodeReplaceOperationAnalyze]).analyze()

    print(f'chain analysis result {analysis_result}')
def run_case_analysis(train_data: InputData, test_data: InputData,
                      case_name: str, task, metric, sa_class,
                      is_composed=False, result_path=None):
    """Fit a pipeline and run the sensitivity-analysis class registered for ``sa_class``.

    :param train_data: data the pipeline is fitted on
    :param test_data: data used by the sensitivity approaches
    :param case_name: case label; used for the default results folder and plot name
    :param task: task the pipeline is built for
    :param metric: metric used when building the pipeline
    :param sa_class: key into the SA_CLASS_WITH_APPROACHES registry
    :param is_composed: whether the pipeline is obtained via composition
    :param result_path: folder for analysis artifacts; defaults under the FEDOT data dir
    """
    pipeline = pipeline_by_task(task=task, metric=metric,
                                data=train_data, is_composed=is_composed)
    pipeline.fit(train_data)

    if not result_path:
        result_path = join(default_fedot_data_dir(), 'sensitivity', f'{case_name}')
    if not exists(result_path):
        makedirs(result_path)

    pipeline.show(join(result_path, f'{case_name}'))

    # Registry entry bundles the analyzer class with its default approaches
    registry_entry = SA_CLASS_WITH_APPROACHES.get(sa_class)
    analyzer_cls = registry_entry['class']
    analysis_result = analyzer_cls(pipeline=pipeline,
                                   train_data=train_data,
                                   test_data=test_data,
                                   approaches=registry_entry['approaches'],
                                   path_to_save=result_path).analyze()

    print(f'pipeline analysis result {analysis_result}')
def __init__(self, log: Log = None):
    """Prepare the composing-history directory and the image buffers.

    :param log: logger to use; created lazily via ``default_log`` when omitted.
        The previous signature called ``default_log(__name__)`` in the default
        argument, which runs at class-definition (import) time and shares one
        eagerly-built Log across all instances.
    """
    default_data_dir = default_fedot_data_dir()
    self.temp_path = os.path.join(default_data_dir, 'composing_history')
    # makedirs(..., exist_ok=True) is race-free, unlike the previous
    # listdir()-then-mkdir() check, and also creates missing parents.
    os.makedirs(self.temp_path, exist_ok=True)
    self.log = log if log is not None else default_log(__name__)
    self.chains_imgs = []
    self.convergence_imgs = []
    self.best_chains_imgs = []
    self.merged_imgs = []
def __init__(self, log: Log = None):
    """Prepare the composing-history directory, image buffers and graph visualiser.

    :param log: logger to use; created lazily via ``default_log`` when omitted.
        The previous signature called ``default_log(__name__)`` in the default
        argument, which runs at class-definition (import) time and shares one
        eagerly-built Log across all instances.
    """
    default_data_dir = default_fedot_data_dir()
    self.temp_path = os.path.join(default_data_dir, 'composing_history')
    # makedirs(..., exist_ok=True) is race-free, unlike the previous
    # listdir()-then-mkdir() check, and also creates missing parents.
    os.makedirs(self.temp_path, exist_ok=True)
    self.log = log if log is not None else default_log(__name__)
    self.pipelines_imgs = []
    self.convergence_imgs = []
    self.best_pipelines_imgs = []
    self.merged_imgs = []
    self.graph_visualizer = GraphVisualiser(log=self.log)
def default_log(logger_name: str, log_file=None) -> 'Log':
    """Build a Log object with the default configuration.

    :param logger_name: string name for logger
    :param log_file: path to the file where log messages will be recorded to
    :return Log: Log object
    """
    # An empty/None log_file falls back to the shared file in the FEDOT data dir
    log_file = log_file or os.path.join(default_fedot_data_dir(), 'log.log')
    return Log(logger_name=logger_name,
               config_json_file='default',
               log_file=log_file)
def __init__(self, logger_name: str, config_json_file: str, log_file: str = None):
    """Store logger settings and obtain the underlying logger from LogManager.

    :param logger_name: string name for logger
    :param config_json_file: path to a JSON logging config, or 'default'
    :param log_file: destination file; defaults to log.log in the FEDOT data dir
    """
    # Falsy log_file (None or '') means "use the shared default log file"
    self.log_file = log_file or os.path.join(default_fedot_data_dir(), 'log.log')
    self.name = logger_name
    self.config_file = config_json_file
    self.logger = LogManager().get_logger(logger_name,
                                          config_file=self.config_file,
                                          log_file=self.log_file)
def write_composer_history_to_csv(self, file='history.csv'):
    """Write one CSV row per chain in the composing history.

    :param file: CSV file name, created inside the composing_history folder
    """
    history_dir = os.path.join(default_fedot_data_dir(), 'composing_history')
    csv_path = os.path.join(history_dir, file)
    if not os.path.isdir(history_dir):
        os.mkdir(history_dir)
    self._write_header_to_csv(csv_path)

    # idx is a global running counter across all generations
    idx = 0
    for gen_num, generation in enumerate(self.chains):
        for chain in generation:
            self._add_history_to_csv(csv_path, chain.fitness,
                                     len(chain.model_templates), chain.depth,
                                     idx, gen_num)
            idx += 1
def __init__(self,
             approaches: Optional[List[Type['NodeAnalyzeApproach']]] = None,
             approaches_requirements: SensitivityAnalysisRequirements = None,
             path_to_save=None,
             log: Log = None):
    """Configure node sensitivity analysis, filling every omitted argument with a default.

    :param approaches: node-level approaches; deletion + replacement when omitted
    :param approaches_requirements: SA requirements; fresh defaults when omitted
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param log: logger; ``default_log`` when omitted
    """
    if approaches is None:
        approaches = [NodeDeletionAnalyze, NodeReplaceOperationAnalyze]
    self.approaches = approaches

    if path_to_save is None:
        path_to_save = join(default_fedot_data_dir(), 'sensitivity', 'nodes_sensitivity')
    self.path_to_save = path_to_save

    if log is None:
        log = default_log(__name__)
    self.log = log

    if approaches_requirements is None:
        approaches_requirements = SensitivityAnalysisRequirements()
    self.approaches_requirements = approaches_requirements
def default_log(logger_name: str,
                log_file: Optional[str] = None,
                verbose_level: int = 2) -> 'Log':
    """Build a Log object with the default configuration.

    :param logger_name: string name for logger
    :param log_file: path to the file where log messages will be recorded to
    :param verbose_level: level of detail in the output
    :return Log: Log object
    """
    # Falsy log_file falls back to the shared file in the FEDOT data dir
    log_file = log_file or os.path.join(default_fedot_data_dir(), 'log.log')
    return Log(logger_name=logger_name,
               config_json_file='default',
               log_file=log_file,
               output_verbosity_level=verbose_level)
def run_analysis(pipeline, train_data, test_data):
    """Run full pipeline sensitivity analysis with plots and JSON results enabled.

    :param pipeline: fitted pipeline to analyze
    :param train_data: data used to (re)fit during analysis
    :param test_data: data used to score during analysis
    """
    requirements = SensitivityAnalysisRequirements(is_visualize=True,
                                                  is_save_results_to_json=True)
    result_path = join(default_fedot_data_dir(), 'sensitivity',
                       f'{PipelineSensitivityAnalysis.__name__}')
    analyzer = PipelineSensitivityAnalysis(
        pipeline=pipeline,
        train_data=train_data,
        test_data=test_data,
        approaches=[NodeDeletionAnalyze,
                    NodeReplaceOperationAnalyze,
                    MultiOperationsHPAnalyze],
        requirements=requirements,
        path_to_save=result_path)
    analyzer.analyze()
def __init__(self, pipeline: Pipeline,
             train_data: InputData, test_data: InputData, valid_data: InputData,
             case_name: str, path_to_save: str = None,
             approaches: Optional[List[Type[NodeAnalyzeApproach]]] = None,
             log: Log = None):
    """Store the analysis inputs and resolve defaults for path, approaches and logger.

    :param pipeline: pipeline under analysis
    :param train_data: training data
    :param test_data: test data
    :param valid_data: validation data
    :param case_name: case label; part of the default save path
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param approaches: node approaches; deletion only when omitted
    :param log: logger; ``default_log`` when omitted
    """
    self.pipeline = pipeline
    self.original_pipeline_len = self.pipeline.length
    self.train_data = train_data
    self.test_data = test_data
    self.valid_data = valid_data
    self.case_name = case_name

    if path_to_save is None:
        path_to_save = join(default_fedot_data_dir(), 'sensitivity',
                            'mta_analysis', f'{case_name}')
    self.path_to_save = path_to_save

    self.approaches = approaches if approaches is not None else [NodeDeletionAnalyze]
    self.log = log if log is not None else default_log(__name__)
def write_composer_history_to_csv(self, file='history.csv'):
    """Write one CSV row per chain in the composing history.

    :param file: CSV file name, created inside the composing_history folder
    """
    history_dir = os.path.join(default_fedot_data_dir(), 'composing_history')
    csv_path = os.path.join(history_dir, file)
    if not os.path.isdir(history_dir):
        os.mkdir(history_dir)
    self._write_header_to_csv(csv_path)

    # idx is a global running counter across all generations
    idx = 0
    for gen_num, generation in enumerate(self.chains):
        for chain_num, chain in enumerate(generation):
            # Multi-objective runs store a tuple of fitness values
            fitness = chain.fitness.values if self.is_multi_objective else chain.fitness
            row = [idx, gen_num, fitness,
                   len(chain.operation_templates), chain.depth,
                   self.chains_comp_time_history[gen_num][chain_num]]
            self._add_history_to_csv(csv_path, row)
            idx += 1
def __init__(self, pipeline: Pipeline,
             train_data: InputData, test_data: InputData,
             requirements: SensitivityAnalysisRequirements = None,
             path_to_save=None, log: Log = None):
    """Configure hyperparameter sensitivity analysis for a whole pipeline.

    :param pipeline: pipeline under analysis
    :param train_data: training data
    :param test_data: test data
    :param requirements: SA requirements; fresh defaults when omitted
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param log: logger; ``default_log`` when omitted
    """
    self._pipeline = pipeline
    self._train_data = train_data
    self._test_data = test_data
    self.problem: Optional[Problem] = None

    if requirements is None:
        requirements = SensitivityAnalysisRequirements()
    # Only the hyperparameter meta-parameters are kept from the requirements
    self.requirements: HyperparamsAnalysisMetaParams = requirements.hp_analysis_meta
    self.analyze_method = analyze_method_by_name.get(self.requirements.analyze_method)
    self.sample_method = sample_method_by_name.get(self.requirements.sample_method)
    self.operation_types = None

    if path_to_save is None:
        path_to_save = join(default_fedot_data_dir(), 'sensitivity', 'pipeline_sensitivity')
    self.path_to_save = path_to_save
    self.log = log if log is not None else default_log(__name__)
def __init__(self, pipeline: Pipeline, train_data, test_data: InputData,
             requirements: SensitivityAnalysisRequirements = None,
             path_to_save=None, log: Log = None):
    """Store analysis inputs, resolve defaults, and ensure the results folder exists.

    :param pipeline: pipeline under analysis
    :param train_data: training data
    :param test_data: test data
    :param requirements: SA requirements; fresh defaults when omitted
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param log: logger; ``default_log`` when omitted
    """
    self._pipeline = pipeline
    self._train_data = train_data
    self._test_data = test_data
    # Baseline metric is computed lazily later
    self._origin_metric = None

    if requirements is None:
        requirements = SensitivityAnalysisRequirements()
    self._requirements = requirements

    if path_to_save is None:
        path_to_save = join(default_fedot_data_dir(), 'sensitivity', 'nodes_sensitivity')
    self._path_to_save = path_to_save
    self.log = log if log is not None else default_log(__name__)

    if not exists(self._path_to_save):
        makedirs(self._path_to_save)
def __init__(self, pipeline: Pipeline,
             train_data: InputData, test_data: InputData,
             approaches: Optional[List[Type[MultiOperationsHPAnalyze]]] = None,
             requirements: SensitivityAnalysisRequirements = None,
             path_to_save=None, log: Log = None):
    """Configure pipeline-level sensitivity analysis with defaulted arguments.

    :param pipeline: pipeline under analysis
    :param train_data: training data
    :param test_data: test data
    :param approaches: pipeline approaches; multi-operation HP analysis when omitted
    :param requirements: SA requirements; fresh defaults when omitted
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param log: logger; ``default_log`` when omitted
    """
    self.pipeline = pipeline
    self.train_data = train_data
    self.test_data = test_data

    if requirements is None:
        requirements = SensitivityAnalysisRequirements()
    self.requirements = requirements

    if approaches is None:
        approaches = [MultiOperationsHPAnalyze]
    self.approaches = approaches

    if path_to_save is None:
        path_to_save = join(default_fedot_data_dir(), 'sensitivity', 'pipeline_sa')
    self.path_to_save = path_to_save
    self.log = log if log is not None else default_log(__name__)
def __init__(self, chain: Chain, train_data: InputData, test_data: InputData,
             approaches: Optional[List[Type[NodeAnalyzeApproach]]] = None,
             metric: str = None,
             nodes_ids_to_analyze: List[int] = None,
             all_nodes: bool = False,
             path_to_save=None, log: Log = None):
    """Validate node selection and store the chain analysis configuration.

    :param chain: chain under analysis
    :param train_data: training data
    :param test_data: test data
    :param approaches: node-level sensitivity approaches
    :param metric: metric name used for evaluation
    :param nodes_ids_to_analyze: explicit node indices to analyze
    :param all_nodes: analyze every node; mutually exclusive with nodes_ids_to_analyze
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param log: logger; ``default_log`` when omitted
    :raises ValueError: if both or neither of all_nodes / nodes_ids_to_analyze are given
    """
    self.chain = chain
    self.train_data = train_data
    self.test_data = test_data
    self.approaches = approaches
    self.metric = metric

    # Exactly one of the two node-selection modes must be supplied
    if all_nodes and nodes_ids_to_analyze:
        raise ValueError(
            "Choose only one parameter between all_nodes and nodes_ids_to_analyze"
        )
    if not all_nodes and not nodes_ids_to_analyze:
        raise ValueError(
            "Define nodes to analyze: all_nodes or nodes_ids_to_analyze")

    if all_nodes:
        self.nodes_ids_to_analyze = list(range(len(self.chain.nodes)))
    else:
        self.nodes_ids_to_analyze = nodes_ids_to_analyze

    self.path_to_save = path_to_save or join(default_fedot_data_dir(), 'sensitivity')
    self.log = log or default_log(__name__)
def write_composer_history_to_csv(self, file='history.csv'):
    """Write one CSV row per individual in the composing history.

    :param file: CSV file name, created inside the composing_history folder
    """
    history_dir = os.path.join(default_fedot_data_dir(), 'composing_history')
    csv_path = os.path.join(history_dir, file)
    if not os.path.isdir(history_dir):
        os.mkdir(history_dir)
    self._write_header_to_csv(csv_path)

    # idx is a global running counter across all generations
    idx = 0
    for gen_num, generation in enumerate(self.individuals):
        for ind_num, ind in enumerate(generation):
            # Multi-objective runs store a tuple of fitness values
            fitness = ind.fitness.values if self.is_multi_objective else ind.fitness
            row = [idx, gen_num, fitness,
                   len(ind.graph.operation_templates), ind.graph.depth,
                   self.pipelines_comp_time_history[gen_num][ind_num]]
            self._add_history_to_csv(csv_path, row)
            idx += 1
def __init__(self, pipeline: Pipeline, train_data: InputData, test_data: InputData,
             approaches: Optional[List[Type[NodeAnalyzeApproach]]] = None,
             requirements: SensitivityAnalysisRequirements = None,
             path_to_save=None, log: Log = None,
             nodes_to_analyze: List[Node] = None):
    """Configure node sensitivity analysis; analyze all pipeline nodes when no subset is given.

    :param pipeline: pipeline under analysis
    :param train_data: training data
    :param test_data: test data
    :param approaches: node-level sensitivity approaches
    :param requirements: SA requirements; fresh defaults when omitted
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param log: logger; ``default_log`` when omitted
    :param nodes_to_analyze: explicit node subset; all nodes when omitted
    """
    self.pipeline = pipeline
    self.train_data = train_data
    self.test_data = test_data
    self.approaches = approaches

    if requirements is None:
        requirements = SensitivityAnalysisRequirements()
    self.requirements = requirements
    self.metric = self.requirements.metric

    self.log = log if log is not None else default_log(__name__)

    if path_to_save is None:
        path_to_save = join(default_fedot_data_dir(), 'sensitivity', 'nodes_sensitivity')
    self.path_to_save = path_to_save

    if not nodes_to_analyze:
        # Fall back to every node when no explicit subset is supplied
        self.log.message('Nodes to analyze are not defined. All nodes will be analyzed.')
        self.nodes_to_analyze = self.pipeline.nodes
    else:
        self.nodes_to_analyze = nodes_to_analyze
def __init__(self, pipeline: Pipeline, train_data, test_data: InputData,
             requirements: SensitivityAnalysisRequirements = None,
             path_to_save=None, log: Log = None):
    """Initialise the base analyzer, then configure hyperparameter sampling state.

    :param pipeline: pipeline under analysis
    :param train_data: training data
    :param test_data: test data
    :param requirements: SA requirements; fresh defaults when omitted
    :param path_to_save: results folder; defaults under the FEDOT data dir
    :param log: logger; ``default_log`` when omitted
    """
    super().__init__(pipeline, train_data, test_data, path_to_save)

    if requirements is None:
        requirements = SensitivityAnalysisRequirements()
    # Only the hyperparameter meta-parameters are kept from the requirements
    self.requirements: HyperparamsAnalysisMetaParams = requirements.hp_analysis_meta
    self.analyze_method = analyze_method_by_name.get(self.requirements.analyze_method)
    self.sample_method = sample_method_by_name.get(self.requirements.sample_method)

    self.problem = None
    self.operation_type = None
    self.data_under_lock: dict = {}

    if path_to_save is None:
        path_to_save = join(default_fedot_data_dir(), 'sensitivity', 'nodes_sensitivity')
    self.path_to_save = path_to_save
    self.log = log if log is not None else default_log(__name__)
def test_default_fedot_data_dir():
    """Calling the helper must leave a 'Fedot' folder in the user's home directory."""
    default_fedot_data_dir()
    home_entries = os.listdir(str(Path.home()))
    assert 'Fedot' in home_entries
def __init__(self, log: Log = None):
    """Remember the composing-history path and the logger.

    :param log: logger to use; created lazily via ``default_log`` when omitted.
        The previous signature called ``default_log(__name__)`` in the default
        argument, which runs at class-definition (import) time and shares one
        eagerly-built Log across all instances.
    """
    default_data_dir = default_fedot_data_dir()
    # NOTE(review): unlike sibling visualisers, this variant does not create
    # temp_path on disk — presumably a later call does; confirm before relying on it.
    self.temp_path = os.path.join(default_data_dir, 'composing_history')
    self.log = log if log is not None else default_log(__name__)