def _api_process(user_id: str, model_selected: str, petition_process_in_background=True):
    """
    Execute the API process, optionally in the background. Before doing so, it
    updates the current global "PROBLEM_ID".

    Args:
        user_id: user id sent from the PHP server/client
        model_selected: id of the model the petition targets
        petition_process_in_background: if True, the process is executed in the background
    """
    _update_project_configuration(new_project_id=model_selected)
    if petition_process_in_background:
        import subprocess
        import src.services.api.API as api
        try:
            # Get the full path of the api module
            filepath = str(api.__file__)
            pt("filepath", filepath)
            python_path2 = 'python "' + filepath + '" -i ' + user_id + ' -m ' + model_selected
            if "\\..\\src" in python_path2:
                python_path2 = python_path2.replace("\\..\\src", "")
            pt("python_path2", python_path2)
            GS.LOGGER.write_to_logger("Opening from path: " + python_path2)
            #p = subprocess.Popen(bat_path, creationflags=subprocess.CREATE_NEW_CONSOLE)
            GS.LOGGER.write_to_logger("New petition: \n" + "USER_ID: " + user_id +
                                      " MODEL: " + model_selected)
            GS.LOGGER.write_to_logger(python_path2)
            # CREATE_NEW_CONSOLE is Windows-only
            process = subprocess.Popen(python_path2,
                                       creationflags=subprocess.CREATE_NEW_CONSOLE)
        except Exception as error:
            GS.LOGGER.write_log_error(error)
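# A minimal sketch of the command line the function above builds before handing it
# to Popen. The module path, user id, and model id below are hypothetical; the real
# values come from api.__file__ and the PHP client.
def _example_api_process_command():
    filepath = "C:\\project\\src\\services\\api\\API.py"  # hypothetical api.__file__
    user_id, model_selected = "42", "Retinopathy.1"
    command = 'python "' + filepath + '" -i ' + user_id + ' -m ' + model_selected
    # -> python "C:\project\src\services\api\API.py" -i 42 -m Retinopathy.1
    return command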
def check_file_exists_and_change_name(path, char="", index=None):
    """
    Check if a file exists and, if it exists, try to change the name to another
    with a higher 'index'.

    Example:
        --> filename = 'name(id).png'. If it exists, then try to create a new
        filename with a new index.
        --> new filename = 'name(id)_1.png'. This has '_' as 'char'. If no char
        is given, only the index is appended.

    Args:
        path: filepath
        char: char to add between the name and the index
        index: actual index

    Returns:
        new path
    """
    if folders.file_exists_in_path_or_create_path(path):
        name = os.path.splitext(path)[0]
        extension = os.path.splitext(path)[1]
        if index == 0 or is_none(index):
            index = 1
            chars_to_delete = 0  # first collision: nothing to strip from the name
        else:
            chars_to_delete = number_of_digits(index)
            index = int(name[-chars_to_delete:]) + 1
            if char:
                chars_to_delete += len(char)
        if chars_to_delete:
            new_path = name[:-chars_to_delete] + char + str(index) + extension
        else:
            new_path = name + char + str(index) + extension
        pt("new_path", new_path)
        path = check_file_exists_and_change_name(path=new_path, char=char, index=index)
    return path
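# A minimal usage sketch for check_file_exists_and_change_name, assuming the
# project's `folders` helpers are importable; the path below is hypothetical.
def _example_check_file_exists_and_change_name():
    path = "C:\\out\\chart.png"  # hypothetical
    # If the file does not exist, the same path comes back. If it exists, the
    # result is "C:\\out\\chart_1.png" (then "chart_2.png", ... on further
    # collisions), with '_' as the separating char.
    return check_file_exists_and_change_name(path=path, char="_", index=None)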
def write_log_error(self, err, info=None, force_path=None):
    # sys.exc_info() gives the line number and description of the active exception
    exc_type, exc_obj, exc_tb = sys.exc_info()
    file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]  # file name only
    error_string = "ERROR : Error Msg: {}, File Name: {}, Line no: {}\n".format(
        err, file_name, exc_tb.tb_lineno)
    pt(error_string)
    log_path = force_path if force_path else ERROR_LOG
    with open(log_path, "a") as file_log:
        file_log.write(self.short_header_error + str(datetime.datetime.now()) + "\n\n")
        if info:
            file_log.write(str(info) + "\n\n")
        file_log.write(str(err) + "\n\n")
        if not isinstance(err, str):
            ex_traceback = err.__traceback__
            tb_lines = [line.rstrip('\n')
                        for line in traceback.format_exception(err.__class__, err, ex_traceback)]
            for line in tb_lines:
                file_log.write(str(line))
            file_log.write("\n\n")
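# A minimal usage sketch for write_log_error: it should be called from an except
# block so sys.exc_info() still holds the active traceback. `logger` stands in
# for the project's logger instance (hypothetical here).
def _example_write_log_error(logger):
    try:
        1 / 0
    except Exception as error:
        # Appends the header, the message and the full formatted traceback to ERROR_LOG
        logger.write_log_error(error, info="example division")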
def load_accuracies_and_losses(path_to_load, flag_restore_model=False):
    """
    :param path_to_load: path to load the numpy accuracies and losses
    :param flag_restore_model: if False, four empty lists are returned
    :return: accuracies_train, accuracies_validation, loss_train, loss_validation
    """
    accuracies_train, accuracies_validation, loss_train, loss_validation = [], [], [], []
    if flag_restore_model:
        try:
            npy_extension = Dictionary.string_npy_extension
            filename_train_accuracies = Dictionary.filename_train_accuracies + npy_extension
            filename_validation_accuracies = Dictionary.filename_validation_accuracies + npy_extension
            filename_train_losses = Dictionary.filename_train_losses + npy_extension
            filename_validation_losses = Dictionary.filename_validation_losses + npy_extension
            if folders.file_exists_in_path_or_create_path(path_to_load + filename_train_accuracies):
                accuracies_train = list(np.load(path_to_load + filename_train_accuracies))
                accuracies_validation = list(np.load(path_to_load + filename_validation_accuracies))
                loss_train = list(np.load(path_to_load + filename_train_losses))
                loss_validation = list(np.load(path_to_load + filename_validation_losses))
        except Exception:
            pt("Could not load accuracies and losses")
            accuracies_train, accuracies_validation, loss_train, loss_validation = [], [], [], []
    return accuracies_train, accuracies_validation, loss_train, loss_validation
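# A minimal usage sketch: with flag_restore_model=False the function returns four
# empty lists, so a fresh run and a restored run unpack the same way. The path
# below is hypothetical.
def _example_load_accuracies_and_losses(restoring):
    path = "C:\\models\\Retinopathy.1\\metrics\\"  # hypothetical
    acc_train, acc_val, loss_train, loss_val = load_accuracies_and_losses(
        path_to_load=path, flag_restore_model=restoring)
    return acc_train, acc_val, loss_train, loss_val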
def execute_clasification(PETITIONS):
    """
    Get petitions and classify their elements.

    Args:
        PETITIONS: list with new petitions

    Returns:
        petitions_end_ok
    """
    petitions_end_ok = []
    for petition_id in PETITIONS:
        GS.LOGGER.write_to_logger("Petition was found: " + petition_id)
        # Read petition json
        # TODO (@gabvaztor) Create a different object class to manage paths logic
        path_config = AnswerConfiguration(petition_id=petition_id)
        petition = Petition(path=path_config.json_petition_src, petition_id=petition_id)
        prediction_results = CPrediction(current_petition=petition)
        new_answer_configuration = AnswerConfiguration(petition_id=petition_id,
                                                       prediction_results=prediction_results)
        json_answer_str = object_to_json(object=new_answer_configuration)
        pt(json_answer_str)
        write_string_to_pathfile(string=json_answer_str,
                                 filepath=new_answer_configuration.json_answer_src)
        petitions_end_ok.append(petition_id)
        GS.LOGGER.write_to_logger("Petition finished")
    return petitions_end_ok
def execute_model(model: tf.keras.Sequential, **kwargs):
    # Print actual configuration
    CMODEL.print_current_configuration(config=CONFIG)
    # Batching values and labels from input and labels (with batch size)
    # TODO (@gabvaztor) When restoring a model without changing the train size, the train set
    # must keep the same order.
    # To restore the model
    if CONFIG.restore_model_flag:
        #self.load_and_restore_model_v2()
        # TODO (@gabvaztor) The model path must satisfy the path logic (/framework/Models/...).
        # Do not use SETTINGS for model paths.
        #fullpath_save = CMODEL.settings_object.model_path + CONFIG.model_name_saved
        fullpath_save = GS.MODELS_PATH + "Retinopathy.1\\models\\" + CONFIG.model_name_saved
        pt("Trying to load model...")
        model = tf.keras.models.load_model(fullpath_save)
        pt("Model loaded successfully!")
    # Besides this, when the test/validation set needs its accuracy checked but is too large to
    # keep in memory, all files have to be updated during training to get the exact precision.
    #train_current_model(model=model)
    CMODEL.train_current_model(model=model, config=CONFIG)
def class_properties(object, attributes_to_delete=None):
    """
    Return a copy of the object's attribute dictionary without the unnecessary attributes.

    :param attributes_to_delete: set with the attribute names that must be deleted
    :return: a copy of the object's __dict__ without the deleted attributes
    """
    pt("object", object)
    # Needs to be a copy so the original class attributes are not modified
    dict_copy = object.__dict__.copy()
    if attributes_to_delete:
        for attribute in attributes_to_delete:
            dict_copy.pop(attribute, None)
    return dict_copy
def inner(*args, **kwargs):
    start = time.time()
    try:
        return method(*args, **kwargs)
    finally:
        # str(method) looks like "<function name at 0x...>"; the slice extracts the name
        method_str = str(method)
        pt("Running time method:" + str(method_str[9:-23]),
           str(time.strftime("%Hh%Mm%Ss", time.gmtime(time.time() - start))))
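# A sketch of the outer decorator that `inner` above belongs to: `method` is the
# wrapped function captured in the closure. The outer function is an assumption;
# only `inner` appears in this fragment.
def timed(method):
    def inner(*args, **kwargs):
        start = time.time()
        try:
            return method(*args, **kwargs)
        finally:
            elapsed = time.strftime("%Hh%Mm%Ss", time.gmtime(time.time() - start))
            pt("Running time method:" + method.__name__, elapsed)
    return inner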
def load_model(self, model_fullpath) -> tf.keras.Sequential:
    """
    Load and return a tf.keras model.

    Args:
        model_fullpath: model fullpath

    Returns:
        the loaded model
    """
    start_time_load_model = datetime.datetime.now()
    model = tf.keras.models.load_model(model_fullpath)
    delta = datetime.datetime.now() - start_time_load_model
    pt("Time to load model ", delta.total_seconds())
    return model
def read_generic_problem(self):
    # TODO When the csv contains only one data type it is much better to use numpy
    # (known_data_type): self.data = np.fromfile(dataFile, dtype=np.float64)
    # Time to execute Breast_Cancer_Wisconsin Data.csv with np.fromfile: 0.0s
    # TODO Make delimiter and encoding parameters
    self.data = pd.read_csv(self.reader_features.set_data_files[0],
                            delimiter=self.delimiter,
                            encoding="ISO-8859-1")
    # Time to execute Breast_Cancer_Wisconsin Data.csv with pd.read_csv: 0.007000446319580078s
    pt("DataTest Shape", self.data.shape)
    # TODO Create labelData variable from a list of strings
    # TODO For each pop we have a class
    # TODO Fix this with an advanced for <--
    label_data = np.asarray([self.data.pop(self.reader_features.labels_sets[0])],
                            dtype=np.float32)  # Data's labels
    # label_data = label_data.transpose()
    input_data = self.data  # Input data
    # self.number_classes = len(self.data.columns)
    train_size = self.reader_features.train_validation_test_percentages[0]  # first value is the train size
    test_size = self.reader_features.train_validation_test_percentages[-1]  # last value is the test size
    validation_size = None
    # Divide the set into train and test sets (if there is a validation set, into train and
    # validation sets for the first part and a test set for the second part)
    self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(
        input_data, label_data, test_size=test_size)
    if self.reader_features.there_is_validation:  # If it has a validation percentage
        validation_size = self.reader_features.train_validation_test_percentages[1]
        total_len = self.data.shape[0]  # All data rows
        # TODO If the data is in columns, we have to take the shape[1] value.
        train_validation_len = self.x_train.shape[0]  # All train+validation rows
        value_validation_percentage = validation_size * total_len  # Validation rows inside x_train
        validation_size = value_validation_percentage / train_validation_len  # Updated fraction
        pt("ValidationSize: ", validation_size)
        # TODO Convert sets into Tensors
        # Divide train and validation sets into two separate sets.
        self.x_train, self.x_validation, self.y_train, self.y_validation = train_test_split(
            self.x_train, self.y_train, test_size=validation_size)
    # TODO If there is no train and test set (with optional validation) then Reader will do nothing
    self.load_sets()
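# A worked example of the validation-split arithmetic above (numbers hypothetical):
# with 1000 total rows and a 0.2 test split, x_train keeps 800 rows. To reserve
# 10% of ALL data (100 rows) for validation, the fraction passed to the second
# train_test_split must be 100 / 800 = 0.125, not 0.1.
def _example_validation_fraction():
    total_len = 1000               # all data rows
    validation_size = 0.1          # fraction of the whole dataset
    train_validation_len = 800     # rows left in x_train after the test split
    value_validation_percentage = validation_size * total_len  # 100 rows
    return value_validation_percentage / train_validation_len  # 0.125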
def __init__(self, current_petition=None, input=None, id=None):
    self.id = id if id else None
    # Load updated config
    self.config = Projects.get_problem_config()
    # Load updated settings
    self.settings = Projects.get_settings()
    if current_petition:
        self.petition_id = current_petition.petition_id if current_petition.petition_id else None
        current_petition = self.__get_petition(current_petition)
        input_path = current_petition.absolute_folder_path + current_petition.image_name
        pt("input_path", input_path)
        self.results = self.execute_petition_prediction(input_path=input_path)
    else:
        self.results = self.execute(input=input)
    self.readable_results = self.make_readable_results(config=self.config)
def save_submission_to_csv(path_to_save, dictionary):
    pt('Saving submission...')
    pt("Getting keys...")
    keys = list(dictionary.keys())
    pt("Getting values...")
    values = list(dictionary.values())
    submission = pd.DataFrame({'Pages_Date': keys, 'Visits': values})
    pt("Saving to csv in path...")
    submission.to_csv(path_to_save, index=False, encoding='utf-8')
    pt("Saved successfully")
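# A minimal usage sketch: dictionary keys become the "Pages_Date" column and
# values the "Visits" column. The path and entries below are hypothetical.
def _example_save_submission():
    predictions = {"SomePage_2017-01-01": 12, "SomePage_2017-01-02": 9}
    save_submission_to_csv("C:\\submissions\\submission.csv", predictions)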
def object_to_json(object, attributes_to_delete=None):
    """
    Convert a class to json with the class_properties method.

    :param object: the object to serialize
    :param attributes_to_delete: string set with all attributes' names to delete from class_properties
    :return: sorted json from the class properties
    """
    try:
        object_dictionary = class_properties(object=object,
                                             attributes_to_delete=attributes_to_delete)
        json_string = json.dumps(object,
                                 default=lambda m: object_dictionary,
                                 sort_keys=True,
                                 indent=4)
    except Exception as e:
        pt(e)
        pt(traceback.format_exc())
        raise ValueError("STOP")
    return json_string
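# A minimal usage sketch for object_to_json: any object with a plain __dict__
# serializes through class_properties. The class below is hypothetical.
def _example_object_to_json():
    class Answer:
        def __init__(self):
            self.petition_id = "abc"
            self.image_name = "eye.jpeg"
    return object_to_json(object=Answer())
    # -> a sorted, 4-space-indented json string with "image_name" and "petition_id"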
def execute_petition_prediction(self, input_path):
    try:
        # Load model
        #model = self.load_model(model_fullpath=self.settings.model_path + self.config.model_name_saved)
        model = self.load_model(model_fullpath=GS.MODELS_PATH + "Retinopathy.1\\models\\" +
                                self.config.model_name_saved)
        # Transform the current image path into an image ready to be predicted
        to_be_predicted, _ = models.data_treatment_generic_problem(input=input_path,
                                                                   inputs_labels=None,
                                                                   options=self.config.options,
                                                                   to_predict=True)
        pt("to_be_predicted", to_be_predicted.shape)
        # Predict image
        results = model.predict(x=to_be_predicted)
        return results.tolist()
    except Exception as e:
        LOGGER.write_log_error(e)
def inner_dec(*args, **kwargs):
    """
    Wrapper that optionally times the wrapped function and injects the DEBUG
    keyword argument, retrying without it when the function does not accept it.

    Args:
        *args: positional arguments forwarded to the wrapped function
        **kwargs: keyword arguments forwarded to the wrapped function

    Returns:
        the wrapped function's return value
    """
    exception_function_flag = False
    # str(function) looks like "<function name at 0x...>"; the slice extracts the name
    method_str = str(function)[10:-23] if function else ""
    if exceptions_functions:
        exception_function_flag = method_str in exceptions_functions
    if timed_flag and not exception_function_flag:
        start = time.time()
    method_separator_end = ""
    method_separator_start = ""
    try:
        method_separator_start = DecoratorClass().__method_separator(method_name=method_str, step=1)
        method_separator_end = DecoratorClass().__method_separator(method_name=method_str, step=3)
        pt(method_separator_start)
    except Exception:
        pt(method_separator_start)
    try:
        return function(*args, DEBUG=DEBUG_MODE, **kwargs)
    except Exception as e:
        # The function does not accept the DEBUG kwarg
        if Errors.got_unexpected_parameter_debug in str(e):
            return function(*args, **kwargs)
    finally:
        if timed_flag and not exception_function_flag:
            end_str = "\"" + str(method_str) + "\"" + \
                      str(time.strftime("%Hh%Mm%Ss", time.gmtime(time.time() - start)))
            pt(method_separator_end + " " + end_str)
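# A reduced sketch of the retry pattern above: call the wrapped function with the
# injected DEBUG kwarg first and fall back to a plain call when it is not
# accepted. Matching on TypeError text here is an assumption standing in for
# Errors.got_unexpected_parameter_debug.
def _example_debug_kwarg_fallback(function, *args, **kwargs):
    try:
        return function(*args, DEBUG=True, **kwargs)
    except TypeError as e:
        if "unexpected keyword argument" in str(e):
            return function(*args, **kwargs)
        raise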
def object_to_json(object, attributes_to_delete=None, **kwargs):
    """
    Convert a class to json with the class_properties method.

    Args:
        object: class object
        attributes_to_delete: string set with all attributes' names to delete from class_properties
        **kwargs:

    Returns:
        sorted json from the class properties
    """
    try:
        object_dictionary = utils.class_properties(object=object,
                                                   attributes_to_delete=attributes_to_delete)
        json_string = json.dumps(object,
                                 default=lambda m: object_dictionary,
                                 sort_keys=True,
                                 indent=4)
    except Exception as e:
        pt(e)
        pt(traceback.format_exc())
        raise ValueError("STOP")
    return json_string
def __get_new_online_petitions():
    global PETITIONS
    # First time
    start = timer()
    #past_petitions = __get_new_folders(petitions=PETITIONS)
    petitions_counts = 0
    sleeps_counts = 0
    while True:
        #pt("p1", past_petitions)
        PETITIONS = __get_new_folders(petitions=PETITIONS)
        #pt("p2", PETITIONS)
        #PETITIONS = list(set(PETITIONS) - set(past_petitions))
        if PETITIONS:
            pt("\n")
            pt("Petitions:", PETITIONS,
               "|@@| Date:[" + str(date_from_format(date=datetime.datetime.now()) + "]"))
            pt("\n")
        elif sleeps_counts % 10 == 0:
            pt("Total Counts: " + str(petitions_counts) + " ### Petitions:", PETITIONS,
               "|@@| Date:[" + str(date_from_format(date=datetime.datetime.now()) + "]"))
        #if sleeps_counts % 600:
        gc.collect()
        if PETITIONS:
            execute_clasification(PETITIONS)
            # TODO Delete folders
            # TODO if classification OK or timeout, then move/delete the petition folder
            #past_petitions = past_petitions + petitions_end_ok
            #PETITIONS = list(set(PETITIONS) - set(petitions_end_ok))
            petitions_counts += 1
            sys.exit()
        end = timer()
        if end - start >= 600:
            sys.exit()
        time.sleep(0.2)
        sleeps_counts += 1
def save_and_restart(path_to_backup):
    """
    Save and restart all progress: create a zip file from the "Models" folder
    and, after that, remove the folder.

    Args:
        path_to_backup: path to back up; the backup is saved in a different folder
    """
    actual_time = str(time.strftime("%Y-%m-%d_%Hh%Mm%Ss", time.gmtime(time.time())))
    to_copy = folders.get_directory_from_filepath(path_to_backup) + "\\"
    to_paste = folders.get_directory_from_filepath(
        folders.get_directory_from_filepath(to_copy)) + "\\" + "Models_Backup(" + actual_time + ")"
    pt("Doing Models backup ...")
    # Do backup
    shutil.make_archive(to_paste, 'zip', to_copy)
    pt("Backup done successfully")
    # Remove the original folder
    pt("Removing Models folder...")
    shutil.rmtree(to_copy)
    pt("Models removed successfully")
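# A sketch of the two shutil calls above: make_archive(base_name, "zip", root_dir)
# writes base_name + ".zip" with the contents of root_dir, and rmtree then deletes
# the original tree. Paths below are hypothetical.
def _example_backup_and_remove():
    shutil.make_archive("C:\\backups\\Models_Backup(2020-01-01_00h00m00s)",
                        "zip", "C:\\project\\Models\\")
    shutil.rmtree("C:\\project\\Models\\")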
def main():
    """ Build the model and train it with batch generators. """
    from tensorflow.python.client import device_lib
    #m = device_lib.list_local_devices()
    pt("CUDA status:", tf.test.is_built_with_cuda())
    #tf.debugging.set_log_device_placement(True)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        pt("Num GPUs Available: ", len(gpus))
    else:
        pt("No GPUs available")
    #tf.keras.backend.clear_session()
    """ LAYERS """
    model = network_structure_v1()
    model.summary()
    #train_model(model=model)
    #evaluate_model(model=model)
    # Training
    """
    model.fit(x=CMODEL.input_batch, y=CMODEL.label_batch, batch_size=64,
              epochs=CMODEL.epoch_numbers, use_multiprocessing=True)
    """
    training_generator = CMODEL.batch_generator_v2(shape=_.shape)
    validation_generator = CMODEL.batch_generator_v2(shape=_.shape, is_test=True)
    filepath_to_save = CMODEL.settings_object.model_path + "model" + "{epoch:04d}" + ".ckpt"
    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(filepath=filepath_to_save, verbose=1, period=1),
    ]
    history = model.fit_generator(generator=training_generator,
                                  validation_data=validation_generator,
                                  epochs=CMODEL.epoch_numbers,
                                  use_multiprocessing=True,
                                  verbose=2,
                                  callbacks=callbacks)
    model.evaluate(CMODEL.x_test, CMODEL.y_test, verbose=2)
def main(**kwargs):
    """ Build the model and delegate training to execute_model. """
    from tensorflow.python.client import device_lib
    #m = device_lib.list_local_devices()
    pt("CUDA status", tf.test.is_built_with_cuda())
    #tf.debugging.set_log_device_placement(True)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        pt("Num GPUs Available: ", len(gpus))
    else:
        pt("No GPUs available")
    #tf.keras.backend.clear_session()
    """ LAYERS """
    model = network_structure_v1()
    model.summary()
    execute_model(model=model)
def read_web_traffic_data_and_create_files(self, is_necessary_create_files=False):
    """
    Create 9 csv files, each one with "Page_Date,Visits" as header.

    Note: the train_1.csv file must have 145063 rows including the header.
    This is useful only once: if you have already created the files, then
    is_necessary_create_files should be False.

    Attributes:
        is_necessary_create_files: if True, use this method to create the files.
            Otherwise the files were created before.
    """
    if is_necessary_create_files:
        pt('Reading data from ...')
        key_1 = pd.read_csv(self.paths_to_read[1], encoding="utf-8")
        train_1 = pd.read_csv(self.paths_to_read[0], encoding="utf-8")
        #ss_1 = pd.read_csv(self.paths_to_read[2])
        pt('Preprocessing...', "Changing NaN by 3")
        train_1.fillna(3, inplace=True)
        pt('Processing...')
        ids = key_1.Id.values
        pages2 = key_1.Page.values
        pt('train_1...')
        pages = list(train_1.Page.values)
        columns_list = list(train_1.columns.values)
        columns_list.pop(0)
        pt("Train_1", "Getting values...")
        train_values = train_1.values  # get_values() was removed in newer pandas
        del train_1
        pages_with_date_and_label = {}
        to_save = "D:\\Machine_Learning\\Competitions\\Kaggle_Data\\Web_Traffic_Time\\Trains\\"
        part = 1
        csv = Dictionary.string_csv_extension
        pt("Train_1", "Start for...")
        for index_page in range(len(pages)):
            for index_date in range(len(columns_list)):
                # Flush a new file every 16118 pages (145062 data rows / 9 files)
                if index_page % 16118 == 0 and index_date == 0 and index_page != 0:
                    path_to_save = to_save + str(part) + csv
                    utils.save_submission_to_csv(path_to_save, pages_with_date_and_label)
                    part += 1
                    pages_with_date_and_label = {}
                page_with_date = pages[index_page] + Dictionary.string_char_low_stripe + \
                                 str(columns_list[index_date])
                value = train_values[index_page][index_date + 1]
                pages_with_date_and_label[page_with_date] = value
                if index_page % 1000 == 0 and index_date == 0:
                    pt("index_page", index_page)
        path_to_save = to_save + str(part) + csv
        utils.save_submission_to_csv(path_to_save, pages_with_date_and_label)
        pt("END Creating files")
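# A sketch of the key construction above, assuming Dictionary.string_char_low_stripe
# is "_": each output row key joins a page name to a date column header. The page
# and date below are hypothetical.
def _example_page_with_date():
    page = "Main_Page_en.wikipedia.org_all-access_all-agents"
    date = "2015-07-01"
    return page + "_" + date  # the "Page_Date" key written to the csv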
arguments = ()
if isinstance(function_def, str):
    name = function_def
else:
    name = function_def.__name__
process = multiprocessing.Process(name=name, target=function_def, args=arguments)
process.start()
process.join()


def class_properties(object, attributes_to_delete=None):
    """
    Return a copy of the object's attribute dictionary without the unnecessary attributes.

    :param attributes_to_delete: set with the attribute names that must be deleted
    :return: a copy of the object's __dict__ without the deleted attributes
    """
    pt("object", object)
    # Needs to be a copy so the original class attributes are not modified
    dict_copy = object.__dict__.copy()
    if attributes_to_delete:
        for attribute in attributes_to_delete:
            dict_copy.pop(attribute, None)
    return dict_copy


if __name__ == "__main__":
    pt("__name__", __name__)
    from src.services.ccboost.TFModels import global_function, global_metadata
    arguments = global_metadata
    Multiprocess(functions=object_to_json, arguments=(arguments, ))
    #Multiprocess(functions=global_function, arguments=arguments)
def get_fullpath_and_execute_problem_operation(self, problem):
    """
    Generic method to find a full path and apply a specific operation (function)
    to a given problem.
    """
    pt("Creating train and test/validation data...")
    setting_object = self.reader.settings
    dataframe_labels = None
    if setting_object.labels_path:
        labels_path = setting_object.labels_path
    # Import dynamically
    import importlib
    reader = importlib.import_module(".Reader", package="src.projects." + problem)
    reader.read_problem()
    if problem == Projects.retinopathy_k_problem_id:
        # Read CSV labels
        # TODO (@gabvaztor) Do a generic import if more than one problem uses it
        import pandas as pd
        pt("labels_path: ", labels_path)
        try:
            dataframe_labels = pd.read_csv(filepath_or_buffer=labels_path)
        except Exception as e:
            pt(e)
            labels_path = labels_path.replace("\\\\", "\\")
            dataframe_labels = pd.read_csv(filepath_or_buffer=labels_path)
    start_time = time.time()
    for path in self.path_to_read:
        pt("Reading", path)
        for root, dirs, files in os.walk(path):
            for count_number, file_name in enumerate(files):
                pt("Files Size", len(files), same_line=True)
                pt("Count number", count_number, same_line=True)
                progress = float((count_number * 100) / len(files))
                progress = "{0:.3f}".format(progress)
                pt("Progress percent", progress + "%", same_line=True)
                if problem == Projects.retinopathy_k_problem_id:
                    if file_name.endswith(Dictionary.string_extension_jpeg):
                        full_path = os.path.join(root, file_name)
                        labels = np.zeros(self.features.number_of_classes, dtype=np.float32)
                        name = os.path.splitext(file_name)[0]
                        # .size avoids the ambiguous/false truthiness of a raw
                        # np.where result (a match at row index 0 must not be skipped)
                        matches = np.where(dataframe_labels["image"] == name)[0]
                        if matches.size:
                            index = int(matches[0])
                            label = int(dataframe_labels.loc[[index]]["level"].iloc[0])
                            labels[label] = 1
                            # To save
                            if Dictionary.string_train in path:
                                self.y_train.append(list(labels))
                                self.x_train.append(full_path)
                            if Dictionary.string_test in path:
                                self.y_test.append(list(labels))
                                self.x_test.append(full_path)
                elif problem == Projects.signals_images_problem_id:
                    self.find_train_and_test_sets_from_path_signals()
    pt('Time to create data_sets',
       str(time.strftime("%Hh%Mm%Ss", time.gmtime(time.time() - start_time))))
    pt("Finished creating train and test/validation data...")
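# A worked sketch of the one-hot label construction above: for 5 classes and a
# CSV row whose "level" is 3, the label vector becomes [0, 0, 0, 1, 0]. Values
# are hypothetical.
def _example_one_hot_label():
    number_of_classes = 5
    level = 3
    labels = np.zeros(number_of_classes, dtype=np.float32)
    labels[level] = 1
    return labels  # array([0., 0., 0., 1., 0.], dtype=float32)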
def __relative_imports_step_1(self):
    sys.path.append(os.path.dirname(__file__))
    sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
    sys.path.append('..\\..\\')
    pt("sys.path", sys.path)
def _core_process():
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # ----                       GLOBAL VARIABLES                           ----
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    """
    """
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # ----                        USER INTERFACE                            ----
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    """ Creating user interface
    #properties = eg.EasyGui()
    #uf.pt("Typos GUI", properties.types)
    """
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # ----                         READING DATA                             ----
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    """ Creating Reader Features """
    OPTION_PROBLEM = Projects.retinopathy_k_problem_id
    options = [OPTION_PROBLEM, 1, 720, 1280]
    path_train_and_test_images = [SETTING_OBJECT.train_path, SETTING_OBJECT.test_path]
    number_of_classes = 5  # Classes start at 0
    percentages_sets = None  # Example
    labels_set = [Dictionary.string_labels_type_option_hierarchy]
    # If this variable is True, then only one CSV file will be passed and it will be treated
    # as trainSet, validationSet (if necessary) and testSet
    is_an_unique_csv = False
    # Contains the type of data if the data file contains a unique type of data,
    # e.g. Number or Chars.
    known_data_type = ''
    # TODO (@gabvaztor) Check if the file exists and load it automatically
    load_dataset = True
    if load_dataset:
        path_to_load = SETTING_OBJECT.saved_dataset_path
        x_train_string = "x_train.npy"
        y_train_string = "y_train.npy"
        x_test_string = "x_test.npy"
        y_test_string = "y_test.npy"
        x_train = np.load(file=path_to_load + x_train_string)
        y_train = np.load(file=path_to_load + y_train_string)
        x_test = np.load(file=path_to_load + x_test_string)
        y_test = np.load(file=path_to_load + y_test_string)
    else:
        """ Creating Reader Features """
        reader_features = tfr.ReaderFeatures(set_data_files=path_train_and_test_images,
                                             number_of_classes=number_of_classes,
                                             labels_set=labels_set,
                                             is_unique_csv=is_an_unique_csv,
                                             known_data_type=known_data_type,
                                             percentages_sets=percentages_sets)
        """ Creating Reader from ReaderFeatures """
        tf_reader = tfr.Reader(type_problem=OPTION_PROBLEM,
                               reader_features=reader_features,
                               settings=SETTING_OBJECT)  # Reader object with all the information
        x_train = tf_reader.x_train
        y_train = tf_reader.y_train
        x_test = tf_reader.x_test
        y_test = tf_reader.y_test
    pt("x_train", x_train.shape)
    pt("y_train", y_train.shape)
    pt("x_test", x_test.shape)
    pt("y_test", y_test.shape)
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # ----                       END READING DATA                           ----
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    CMODEL = importlib.import_module(name=MODULE_NAME, package=MODELING_PACKAGE)
    CONFIG = importlib.import_module(name=MODULE_CONFIG, package=PROJECT_ID_PACKAGE)
    cmodels = CModels(setting_object=SETTING_OBJECT,
                      option_problem=options,
                      input_data=x_train,
                      test=x_test,
                      input_labels=y_train,
                      test_labels=y_test,
                      number_of_classes=number_of_classes,
                      type=None,
                      validation=None,
                      validation_labels=None,
                      execute_background_process=False,
                      predict_flag=GS.IS_PREDICTION)
    CMODEL.core(cmodels, CONFIG.call())
def __wrap_decorators_step_2(self, modules=None):
    """
    Args:
        modules: list with the modules to be executed this time.

    Returns:
        "_success_execution": True if the functionality ran without errors.
    """
    _success_execution = True
    modules = list(sys.modules.values()) if not modules else modules
    pt("PROJECT_SRC_PATH: ", GS.PROJECT_ROOT_PATH)
    for module in modules.copy():
        try:
            module_path = os.path.abspath(module.__file__)
            #if self.__module_no_decorable(module_path) or not PROJECT_ROOT_PATH in module_path:
            if GS.PROJECT_ROOT_PATH not in module_path:
                modules.remove(module)
        except Exception:
            modules.remove(module)
    if GS.GLOBAL_DECORATOR > 0:
        pt("GLOBAL_DECORATOR is: " + str(GS.GLOBAL_DECORATOR))
        try:
            if GS.DEBUG_MODE == 3:  # Full verbose
                pt("Modules to be wrapped with new functionality")
                for i, module in enumerate(modules):
                    pt([i, module])
            decorator_class = DecoratorClass()
            #modules.append(Configurator.__module__)
            #modules.append(decorator_class.__module__)
            decorator_class.start_wrapper_decoration(modules=modules,
                                                     timed_flag=GS.TIMED_FLAG_DECORATOR,
                                                     exceptions_functions=GS.FUNCTIONS_NO_DECORABLES)
            pt("Global decoration finished successfully")
        except Exception as error:
            if GS.DEBUG_MODE:
                import traceback
                pt("Global decoration finished with errors")
                traceback.print_exc()
                pt(str(error))
            _success_execution = False
    else:
        pt("GLOBAL_DECORATOR is not activated")
    return _success_execution
def show_actual_path():
    pt("Actual path (module)", os.path.dirname(os.path.abspath(__file__)))
    pt("Actual path (cwd)", os.getcwd())