def train_adaboosted_RF_model(X_train, y_train, X_test):
    print_title("AdaBoosted RF")
    t1 = time.time()
    # Hyperparameters for the base estimator are from the RF model
    base_estimator = RandomForestRegressor(n_estimators=300,
                                           max_features=847,
                                           min_samples_leaf=1,
                                           n_jobs=-1)
    reg = AdaBoostRegressor(base_estimator,
                            n_estimators=50,
                            learning_rate=1.0,
                            loss='exponential',
                            random_state=12)
    reg.fit(X_train, y_train)
    joblib.dump(reg, 'EEW_ADA_RF_50estimators_model.pkl')
    print("Weights for each estimator: ", reg.estimator_weights_)
    print("Errors for each estimator: ", reg.estimator_errors_)

    # Prediction
    y_pred = reg.predict(X_test)
    y_train_pred = reg.predict(X_train)
    t2 = time.time()
    print("Time elapsed: %d s" % (t2 - t1))
    return {
        "y": y_pred,
        "y_train": y_train_pred,
        "coef": reg.feature_importances_
    }
def start_forward_check_algorithm(size_min, size_max):
    for size in range(size_min, size_max + 1):
        print_title('CSP WARM-UP: start')
        print_title('CSP WARM-UP: init square with size ' + str(size))
        square = init_square(size)
        print_title('CSP WARM-UP: set possible values')
        possible_values_table = np.array([])
        for i in range(1, size + 1):
            possible_values_table = np.append(possible_values_table, i)
        print_title(
            'CSP WARM-UP: fill square non repeatedly (forward check algorithm)'
        )
        square_possible_values = np.full((size, size, size),
                                         possible_values_table,
                                         dtype=np.int16)
        start_time = time.time()
        is_filled, square_filled = fill_square_non_repeatedly_forward_checking(
            square, square_possible_values, False, 0)
        end_time = time.time()
        print('Time elapsed: ', end_time - start_time)
        print('Result')
        for y in range(0, square_filled.shape[0]):
            square_filled_str = np.array2string(square_filled[y],
                                                precision=2,
                                                separator=', ',
                                                max_line_width=60)
            print(' ' + square_filled_str[1:-1])
        print_title('CSP WARM-UP: end')
def train_lasso_model(_train_x, train_y, _predict_x):
    print_title("Lasso Regressor")
    train_x, predict_x = \
        standarize_feature(_train_x, _predict_x)

    reg = linear_model.LassoCV(precompute=True, cv=5, verbose=1, n_jobs=4)
    reg.fit(train_x, train_y)
    print("alphas: %s" % reg.alphas_)
    print("mse path: %s" % np.mean(reg.mse_path_, axis=1))
    itemindex = np.where(reg.alphas_ == reg.alpha_)
    print("itemindex: %s" % itemindex)
    _mse = np.mean(reg.mse_path_[itemindex[0], :])
    print("Best alpha using built-in LassoCV: %f (mse: %f)" %
          (reg.alpha_, _mse))

    alpha = reg.alpha_
    reg = linear_model.Lasso(alpha=alpha)
    reg.fit(train_x, train_y)
    n_nonzeros = (reg.coef_ != 0).sum()
    print("Non-zeros coef: %d" % n_nonzeros)

    predict_y = reg.predict(predict_x)
    train_y_pred = reg.predict(train_x)

    return {"y": predict_y, "train_y": train_y_pred, "coef": reg.coef_}
def clean_package_files(script_settings):
    try:
        # instantiate the msfsProject and create the necessary resources if it does not exist
        msfs_project = MsfsProject(script_settings.projects_path, script_settings.project_name,
                                   script_settings.definition_file, script_settings.author_name,
                                   script_settings.sources_path)

        check_configuration(script_settings, msfs_project)

        if script_settings.backup_enabled:
            msfs_project.backup(Path(os.path.abspath(__file__)).stem.replace(SCRIPT_PREFIX, str()))

        isolated_print(EOL)
        print_title("CLEAN PACKAGE FILES")
        msfs_project.clean()

        if script_settings.build_package_enabled:
            build_package(msfs_project, script_settings)

        pr_bg_green("Script correctly applied" + constants.CEND)
    except ScriptError as ex:
        error_report = "".join(ex.value)
        isolated_print(constants.EOL + error_report)
        pr_bg_red("Script aborted" + constants.CEND)
    except RuntimeError as ex:
        isolated_print(constants.EOL + str(ex))
        pr_bg_red("Script aborted" + constants.CEND)
def train_lassolars_model(train_x, train_y, predict_x):
    print_title("LassoLars Regressor")

    reg = linear_model.LassoLarsCV(cv=10, n_jobs=3, max_iter=2000,
                                   normalize=False)
    reg.fit(train_x, train_y)
    print("alphas and cv_alphas: {0} and {1}".format(reg.alphas_.shape,
                                                     reg.cv_alphas_.shape))
    print("alphas[%d]: %s" % (len(reg.cv_alphas_), reg.cv_alphas_))
    print("mse shape: {0}".format(reg.cv_mse_path_.shape))
    # print("mse: %s" % np.mean(_mse, axis=0))
    # print("mse: %s" % np.mean(_mse, axis=1))
    # index = np.where(reg.alphas_ == reg.alpha_)
    # print("itemindex: %s" % index)
    index = np.where(reg.cv_alphas_ == reg.alpha_)
    _mse_v = np.mean(reg.cv_mse_path_[index, :])
    print("mse value: %f" % _mse_v)
    print("best alpha: %f" % reg.alpha_)

    best_alpha = reg.alpha_
    reg = linear_model.LassoLars(alpha=best_alpha)
    reg.fit(train_x, train_y)
    n_nonzeros = (reg.coef_ != 0).sum()
    print("Non-zeros coef: %d" % n_nonzeros)

    predict_y = reg.predict(predict_x)

    return {'y': predict_y, "coef": reg.coef_}
def train_ridge_linear_model(_train_x, train_y, _predict_x,
                             sample_weight=None):
    print_title("Ridge Regressor")

    train_x, predict_x = \
        standarize_feature(_train_x, _predict_x)

    # using the default CV
    alphas = [0.1, 1, 10, 100, 1e3, 1e4, 2e4, 5e4, 8e4, 1e5, 1e6, 1e7, 1e8]
    reg = linear_model.RidgeCV(alphas=alphas, store_cv_values=True)
    # reg.fit(train_x, train_y, sample_weight=sample_weight)
    reg.fit(train_x, train_y)
    cv_mse = np.mean(reg.cv_values_, axis=0)
    print("alphas: %s" % alphas)
    print("CV MSE: %s" % cv_mse)
    print("Best alpha using built-in RidgeCV: %f" % reg.alpha_)

    # generate the prediction using the best model
    alpha = reg.alpha_
    reg = linear_model.Ridge(alpha=alpha)
    # reg.fit(train_x, train_y, sample_weight=sample_weight)
    reg.fit(train_x, train_y)
    predict_y = reg.predict(predict_x)
    train_y_pred = reg.predict(train_x)

    return {"y": predict_y, "train_y": train_y_pred, "coef": reg.coef_}
def compress_built_package(script_settings):
    try:
        # instantiate the msfsProject and create the necessary resources if it does not exist
        msfs_project = MsfsProject(script_settings.projects_path, script_settings.project_name,
                                   script_settings.definition_file, script_settings.author_name,
                                   script_settings.sources_path, fast_init=True)

        check_configuration(script_settings, msfs_project,
                            check_built_package=True, check_compressonator=True)

        isolated_print(EOL)
        print_title("COMPRESS BUILT PACKAGE")
        msfs_project.compress_built_package(script_settings)

        if script_settings.build_package_enabled:
            build_package(msfs_project, script_settings)

        pr_bg_green("Script correctly applied" + constants.CEND)
    except ScriptError as ex:
        error_report = "".join(ex.value)
        isolated_print(constants.EOL + error_report)
        pr_bg_red("Script aborted" + constants.CEND)
    except RuntimeError as ex:
        isolated_print(constants.EOL + str(ex))
        pr_bg_red("Script aborted" + constants.CEND)
def display_title(self, title=str()):
    if self.range <= 0:
        return
    # compare by value, not identity: `is not str()` only works because
    # CPython happens to intern the empty string
    if title != str():
        self.title = title
    if self.title != str():
        print_title(self.title)
def __init__(
    self,
    size_x,
    size_y,
):
    self.size_x = size_x
    self.size_y = size_y
    print_title('CSP CROSSWORD: new board with size x=' + str(size_x) +
                ', y=' + str(size_y))
    self.board, self.board_result = self.init_board(size_x, size_y)
def train_random_forest_model(X_train, y_train, X_test):
    print_title("Random Forest")
    t1 = time.time()
    reg = RandomForestRegressor(n_estimators=300,
                                oob_score=True,
                                n_jobs=-1,
                                random_state=12)

    # Use OOB score to select hyperparameters. Trained in one sequence.
    max_features_ratio = [30, 10, 5, 3, 2, 1]
    num_max_features = [int(X_train.shape[1] / i) for i in max_features_ratio]
    num_min_samples_leaf = [1]
    max_oob = 0
    y_para = []
    for max_features in num_max_features:
        for min_samples_leaf in num_min_samples_leaf:
            reg.set_params(max_features=max_features,
                           min_samples_leaf=min_samples_leaf)
            reg.fit(X_train, y_train)
            y_oob = reg.oob_score_
            y_para.append([y_oob, max_features, min_samples_leaf])
            print("RF model with max_features = %d, min_samples_leaf = %d "
                  "(oob score = %f) trained." %
                  (max_features, min_samples_leaf, y_oob))
            if max_oob < y_oob:
                max_oob = y_oob
                max_features_best = max_features
                min_samples_leaf_best = min_samples_leaf

    print("The best hyperparameters: max_features = %d, min_samples_leaf = %d "
          "(oob score = %f)." %
          (max_features_best, min_samples_leaf_best, max_oob))

    # Refit only if more than one hyperparameter combination was tried
    if (len(max_features_ratio) > 1) or (len(num_min_samples_leaf) > 1):
        reg.set_params(max_features=max_features_best,
                       min_samples_leaf=min_samples_leaf_best)
        reg.fit(X_train, y_train)
    joblib.dump(reg, 'EEW_RF_tree300_model.pkl')

    y_pred = reg.predict(X_test)
    y_train_pred = reg.predict(X_train)
    t2 = time.time()
    print("Time elapsed: %d s" % (t2 - t1))
    return {
        "y": y_pred,
        "y_train": y_train_pred,
        "coef": reg.feature_importances_
    }
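# Hedged usage sketch for the RF trainer above. Note the ratios in
# max_features_ratio require X_train.shape[1] >= 30 so that
# int(n_features / 30) >= 1; the random data below is illustrative only.
if __name__ == '__main__':
    rng = np.random.RandomState(12)
    X_tr, y_tr, X_te = rng.rand(120, 60), rng.rand(120), rng.rand(30, 60)
    rf_out = train_random_forest_model(X_tr, y_tr, X_te)
    print(rf_out["y"].shape)  # (30,)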
def dimension_reduction_with_PCA(_X_train, _X_test, n_components):
    print_title("PCA")
    t1 = time.time()
    pca = PCA(n_components=n_components, random_state=12)
    pca.fit(_X_train)
    X_var_ratio = pca.explained_variance_ratio_
    print("%d principal components explain %.4f percent of the variance." %
          (n_components, np.sum(X_var_ratio) * 100))
    X_train = pca.transform(_X_train)
    X_test = pca.transform(_X_test)
    t2 = time.time()
    print("Time elapsed: %d s" % (t2 - t1))
    return X_train, X_test
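# A minimal sketch of calling the PCA helper above on random arrays
# (illustrative only; assumes numpy, sklearn.decomposition.PCA, and
# print_title are imported as elsewhere in this file).
if __name__ == '__main__':
    rng = np.random.RandomState(12)
    X_tr_demo, X_te_demo = rng.rand(100, 50), rng.rand(20, 50)
    X_tr_red, X_te_red = dimension_reduction_with_PCA(X_tr_demo, X_te_demo,
                                                      n_components=10)
    print(X_tr_red.shape, X_te_red.shape)  # (100, 10) (20, 10)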
def train_lasso_model(X_train, y_train, X_test):
    print_title("Lasso Regressor")
    reg = linear_model.LassoCV(precompute=True, cv=5, verbose=1, n_jobs=4)
    reg.fit(X_train, y_train)
    print("alphas: %s" % reg.alphas_)
    print("mse path: %s" % np.mean(reg.mse_path_, axis=1))
    print("Best alpha using built-in LassoCV: %f" % reg.alpha_)
    n_nonzeros = (reg.coef_ != 0).sum()
    print("Non-zeros coef: %d" % n_nonzeros)

    # Prediction
    y_pred = reg.predict(X_test)
    y_train_pred = reg.predict(X_train)
    return {"y": y_pred, "y_train": y_train_pred, "coef": reg.coef_}
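# Hedged usage sketch for train_lasso_model above, using scikit-learn's
# make_regression to build hypothetical data (illustrative only; assumes
# print_title and the sklearn imports used throughout this file).
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=200, n_features=20,
                                     noise=0.1, random_state=12)
    lasso_out = train_lasso_model(X_demo[:150], y_demo[:150], X_demo[150:])
    print("predicted %d test targets" % len(lasso_out["y"]))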
def train_ridge_linear_model(X_train, y_train, X_test):
    print_title("Ridge Regressor")
    # using the default CV
    alphas = [0.1, 0.3, 1, 3, 10, 30, 100, 300, 1e3, 3e3, 1e4]
    reg = linear_model.RidgeCV(alphas=alphas, store_cv_values=True)
    reg.fit(X_train, y_train)
    cv_mse = np.mean(reg.cv_values_, axis=0)
    print("alphas: %s" % alphas)
    print("CV MSE: %s" % cv_mse)
    # %f, not %d: alpha is a float and %d would truncate it
    print("Best alpha using built-in RidgeCV: %f" % reg.alpha_)

    # Prediction
    y_pred = reg.predict(X_test)
    y_train_pred = reg.predict(X_train)
    return {"y": y_pred, "y_train": y_train_pred, "coef": reg.coef_}
def dimension_reduction_with_MI(_X_train, _X_test, y_train, X_name,
                                n_components):
    print_title("Mutual Information")
    try:
        MI_train = load_json('MI_train.json')
        print('Mutual information file loaded.')
    except IOError:
        t1 = time.time()
        mi_train = mutual_info_regression(_X_train, y_train)
        t2 = time.time()
        print('Calculate mutual information: completed. Time: %d s' %
              (t2 - t1))
        # keep the freshly computed scores so MI_train is defined below
        MI_train = {'mi': list(mi_train), "features": X_name}
        dump_json(MI_train, 'MI_train.json')
    feature_index = np.argsort(MI_train['mi'])[-n_components:]
    X_train = _X_train[:, feature_index]
    X_test = _X_test[:, feature_index]
    return X_train, X_test
def train_SVM_model(X_train, y_train, X_test):
    print_title("Support Vector Machine")
    t1 = time.time()
    MLM = SVR(cache_size=500)

    # Cross validation to choose between the RBF and linear kernels
    tuned_parameters = [{
        'kernel': ['rbf'],
        'gamma': [100, 10, 1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5],
        'C': [0.1, 1, 10, 1e2, 1e3, 1e4, 1e5]
    }, {
        'kernel': ['linear'],
        'C': [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100]
    }]
    reg = GridSearchCV(MLM,
                       tuned_parameters,
                       scoring='neg_mean_squared_error',
                       cv=5,
                       verbose=1,
                       n_jobs=-1)
    reg.fit(X_train, y_train)
    joblib.dump(reg, 'EEW_svm.pkl')
    print("Best parameters set found on training set:")
    print(reg.best_params_)
    print("Grid scores on training set:")
    means = reg.cv_results_['mean_test_score']
    stds = reg.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, reg.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
    t2 = time.time()
    print("Time elapsed: %d s" % (t2 - t1))

    # Prediction
    y_pred = reg.predict(X_test)
    y_train_pred = reg.predict(X_train)
    return {"y": y_pred, "y_train": y_train_pred, "coef": []}
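# The grid-searched SVR above is persisted to 'EEW_svm.pkl'; a later session
# can reload it with joblib instead of re-running the search. A minimal
# sketch, assuming the pickle file exists on disk:
def predict_with_saved_svm(X_new):
    reloaded = joblib.load('EEW_svm.pkl')  # restores the fitted GridSearchCV
    return reloaded.predict(X_new)  # delegates to the best estimator found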
def init_msfs_scenery_project(script_settings):
    try:
        print_title("INIT SCENERY PROJECT")

        # instantiate the msfsProject and create the necessary resources if it does not exist
        MsfsProject(script_settings.projects_path, script_settings.project_name,
                    script_settings.definition_file, script_settings.author_name,
                    script_settings.sources_path, init_structure=True)

        pr_bg_green("Script correctly applied" + constants.CEND)
    except ScriptError as ex:
        error_report = "".join(ex.value)
        isolated_print(constants.EOL + error_report)
        pr_bg_red("Script aborted" + constants.CEND)
    except RuntimeError as ex:
        isolated_print(constants.EOL + str(ex))
        pr_bg_red("Script aborted" + constants.CEND)
def __init__(self, session_user_id, api_id, api_hash, proxy=None):
    print_title("Initialization")
    super().__init__(
        session_user_id,
        api_id,
        api_hash,
        connection=ConnectionTcpAbridged,
        proxy=proxy,
    )

    print("Connecting to Telegram servers...")
    try:
        loop.run_until_complete(self.connect())
    except IOError:
        # We handle IOError and not ConnectionError because
        # PySocks' errors do not subclass ConnectionError
        # (so this will work with and without proxies).
        print("Initial connection failed. Retrying...")
        loop.run_until_complete(self.connect())

    if not loop.run_until_complete(self.is_user_authorized()):
        print("First run. Sending code request...")
        user_phone = input("Enter your phone: ")
        loop.run_until_complete(self.sign_in(user_phone))

        self_user = None
        while self_user is None:
            code = input("Enter the code you just received: ")
            try:
                self_user = loop.run_until_complete(self.sign_in(code=code))
            except SessionPasswordNeededError:
                pw = getpass(
                    "Two step verification is enabled. "
                    "Please enter your password: "
                )
                self_user = loop.run_until_complete(self.sign_in(password=pw))
def train_EN_model(X_train, y_train, X_test):
    print_title("Elastic Net")
    # Cross validation to choose the ratio of Lasso
    l1_ratio = [0.1, 0.5, 0.7, 0.9, 0.91, 0.93, 0.95, 0.97, 0.99, 1]
    reg = linear_model.ElasticNetCV(l1_ratio=l1_ratio,
                                    cv=5,
                                    n_jobs=4,
                                    verbose=1,
                                    precompute=True)
    reg.fit(X_train, y_train)
    n_nonzeros = (reg.coef_ != 0).sum()
    print("best_l1_ratio(%e), n_nonzeros: %d, alpha: %f" %
          (reg.l1_ratio_, n_nonzeros, reg.alpha_))

    # Prediction
    y_pred = reg.predict(X_test)
    y_train_pred = reg.predict(X_train)
    return {"y": y_pred, "y_train": y_train_pred, "coef": reg.coef_}
def initialize_kafka_producer_stream(producer):
    """Initializes the Kafka producer stream.

    Reads the files from a drop location and outputs them to a Kafka topic.

    Args:
        producer (Producer): The producer.
    """
    # Spark Configuration
    spark_session = spark_initializer.initialize_session(producer.app_name)
    file_stream_df = spark_initializer.read_stream(spark_session,
                                                   producer.drop_path,
                                                   producer.get_schema())

    # Converts the dataframe to a JSON format
    result_df = producer.to_json_df(file_stream_df)

    # Stream Data to Kafka
    print("Streaming to Kafka topic \"" + producer.topic + "\" ...")
    print_title("Kafka - Streaming to topic \"" + producer.topic + "\"")
    try:
        result_df.selectExpr("CAST(value AS STRING)")\
            .writeStream\
            .outputMode("append")\
            .format("kafka")\
            .option("kafka.bootstrap.servers", cfg.bootstrap_servers)\
            .option("topic", producer.topic)\
            .option("checkpointLocation", producer.checkpoint_path)\
            .start()\
            .awaitTermination()
    except Exception as e:
        print("")
        print(e)
        print("An error occurred.")
        print("Are you missing the required dependencies?")
        print("Example: spark-submit " +
              console_bold("--packages " + cfg.required_dependencies) +
              " main.py " + cfg.activeProducers[0].name + "\n")
def train_EN_model(_train_x, train_y, _predict_x):
    print_title("ElasticNet")

    train_x, predict_x = \
        standarize_feature(_train_x, _predict_x)

    # l1_ratios = [1e-4, 1e-3, 1e-2, 1e-1]
    # l1_ratios = [1e-5, 1e-4, 1e-3]
    l1_ratios = [0.9, 0.92, 0.95, 0.97, 0.99]
    # l1_ratios = [0.5]
    min_mse = float("inf")  # track the lowest CV mse; starting at 1 could miss all candidates
    for r in l1_ratios:
        t1 = time.time()
        reg_en = linear_model.ElasticNetCV(l1_ratio=r,
                                           cv=5,
                                           n_jobs=4,
                                           verbose=1,
                                           precompute=True)
        reg_en.fit(train_x, train_y)
        n_nonzeros = (reg_en.coef_ != 0).sum()
        _mse = np.mean(reg_en.mse_path_,
                       axis=1)[np.where(reg_en.alphas_ == reg_en.alpha_)[0][0]]
        if _mse < min_mse:
            min_mse = _mse
            best_l1_ratio = r
            best_alpha = reg_en.alpha_
        t2 = time.time()
        print("ratio(%e) -- n: %d -- alpha: %f -- mse: %f -- "
              "time: %.2f sec" % (r, n_nonzeros, reg_en.alpha_, _mse, t2 - t1))

    print("Best l1_ratio and alpha: %f, %f" % (best_l1_ratio, best_alpha))

    # predict_model
    reg = linear_model.ElasticNet(l1_ratio=best_l1_ratio, alpha=best_alpha)
    reg.fit(train_x, train_y)
    predict_y = reg.predict(predict_x)
    train_y_pred = reg.predict(train_x)

    return {"y": predict_y, "train_y": train_y_pred, "coef": reg.coef_}
def __convert_tiles_textures(self, src_format, dest_format):
    textures = self.__retrieve_tiles_textures(src_format)
    if textures:
        isolated_print(src_format + " texture files detected in the tiles of the project! "
                       "Trying to install Pillow, then converting them")
        print_title("INSTALL PILLOW")
        install_python_lib("Pillow")

        pbar = ProgressBar(textures,
                           title="CONVERT " + src_format.upper() +
                           " TEXTURE FILES TO " + dest_format.upper())
        for texture in textures:
            file = texture.file
            if not texture.convert_format(src_format, dest_format):
                raise ScriptError(
                    "An error was detected while converting texture files in " +
                    self.texture_folder + " ! Please convert them to " + dest_format +
                    " format prior to launching the script, or remove them")
            else:
                pbar.update("%s converted to %s" % (file, dest_format))
def start_backtracking_algorithm(size_min, size_max, lemmas):
    for size_y in range(size_min, size_max + 1):
        for size_x in range(size_min, size_max + 1):
            print_title('CSP CROSSWORD: start')
            print_title('CSP CROSSWORD: init square with size ' +
                        str(size_y) + 'x' + str(size_x))
            csp_crossword = CspCrossword(size_y, size_x)
            print_title(
                'CSP CROSSWORD: fill square with words (backward algorithm)')
            start_time = time.time()
            csp_crossword.backward_assign_words(lemmas)
            end_time = time.time()
            print('Time elapsed: ', end_time - start_time)
            print('Result')
            csp_crossword.plot()
            csp_crossword.print_result()
            print_title('CSP CROSSWORD: end')
def start_backtracking_algorithm(size_min, size_max):
    for size in range(size_min, size_max + 1):
        print_title('CSP WARM-UP: start')
        print_title('CSP WARM-UP: init square with size ' + str(size))
        square = init_square(size)
        print_title('CSP WARM-UP: set possible values')
        possible_values = set()
        for i in range(1, size + 1):
            possible_values.add(i)
        print_title(
            'CSP WARM-UP: fill square non repeatedly (backward algorithm)')
        start_time = time.time()
        is_filled, square_filled = fill_square_non_repeatedly_backtracked(
            square, possible_values, False)
        end_time = time.time()
        print('Time elapsed: ', end_time - start_time)
        print('Result')
        for y in range(0, square_filled.shape[0]):
            square_filled_str = np.array2string(square_filled[y],
                                                precision=2,
                                                separator=', ',
                                                max_line_width=60)
            print(' ' + square_filled_str[1:-1])
        print_title('CSP WARM-UP: end')
def start(negative_points, positive_points, y_polynomial, initial_pop,
          param_crossover_probability, param_mutation_probability,
          param_generation_number):
    pop_size = initial_pop.shape[1]

    print_title('GA: start - polynomial degree 2')
    print_title('GA: initialise population of functions arguments')
    print(initial_pop)

    print_title('GA: evaluate negative and positive points groups')
    args_number = initial_pop.shape[0]
    args_pop_selected = np.zeros((args_number, 0), dtype=np.float16)
    args_pop_all, args_pop_fitness_values = evaluate(initial_pop,
                                                     negative_points,
                                                     positive_points,
                                                     y_polynomial,
                                                     args_pop_selected)
    print(args_pop_fitness_values)

    print_title('GA: selection, sort population by fitness values')
    args_pop_selected, best_value, worst_value, mean_value = selection_sort(
        args_pop_all, args_pop_fitness_values, pop_size)
    resulting_fitness_values = np.zeros((1, param_generation_number),
                                        dtype=np.float16)
    print(args_pop_selected)
    resulting_fitness_values[0] = best_value
    print('%.2f\t%.2f\t%.2f' % (best_value.astype(float),
                                mean_value.astype(float),
                                worst_value.astype(float)),
          end='\n')

    for i in range(param_generation_number - 1):
        # print(i)
        args_pop_crossovered = crossover(args_pop_selected,
                                         param_crossover_probability)
        args_pop_mutated = mutation(args_pop_crossovered,
                                    param_mutation_probability)
        args_pop_all, args_pop_fitness_values = evaluate(args_pop_mutated,
                                                         negative_points,
                                                         positive_points,
                                                         y_polynomial,
                                                         args_pop_selected)
        args_pop_selected, best_value, worst_value, mean_value = selection_sort(
            args_pop_all, args_pop_fitness_values, pop_size)
        resulting_fitness_values[0, i + 1] = best_value
        print('%.2f\t%.2f\t%.2f' % (best_value.astype(float),
                                    mean_value.astype(float),
                                    worst_value.astype(float)),
              end='\n')

    args = args_pop_selected[:, 0][::-1]
    x = np.linspace(-10, 20, 1000)
    y = y_polynomial(x, *args)
    np.set_printoptions(suppress=True)
    plt.plot(x, y, label=str(np.around(args, decimals=3)[::-1]).strip('[]'))
    # plt.legend()
    plt.title('Polynomial of ' + str(len(args) - 1) + ' degree', loc='left')
    plt.title(str(best_value) + '% fit', loc='right')
    print('\n')
    print(args_pop_selected)
    return resulting_fitness_values
def start_forward_check_algorithm(size_min, size_max, lemmas):
    for size_y in range(size_min, size_max + 1):
        for size_x in range(size_min, size_max + 1):
            print_title('CSP CROSSWORD: start')
            print_title('CSP CROSSWORD: init square with size ' +
                        str(size_y) + 'x' + str(size_x))
            csp_crossword = CspCrossword(size_y, size_x)
            print_title(
                'CSP CROSSWORD: fill square with words (forward check algorithm)'
            )
            square_possible_opts, square_possible_vals = \
                csp_crossword.init_foreward_check_board(lemmas, size_y, size_x)
            start_time = time.time()
            csp_crossword.forward_assign_words(square_possible_opts,
                                               square_possible_vals)
            end_time = time.time()
            print('Time elapsed: ', end_time - start_time)
            print('Result')
            csp_crossword.plot()
            csp_crossword.print_result()
            print_title('CSP CROSSWORD: end')
def main():
    print_title('Genetic Algorithm starts')

    # generate_and_save_points()
    negative_points = get_negative_points()
    positive_points = get_positive_points()
    print_title('Negative and positive points')
    print(negative_points)
    print(positive_points)
    plt.plot(negative_points[0], negative_points[1], 'r.')
    plt.plot(positive_points[0], positive_points[1], 'b.')
    plt.axis([-10, 20, -10, 20])

    find_polynomial(2, negative_points, positive_points)
    # find_polynomial(3, negative_points, positive_points)
    # find_polynomial(4, negative_points, positive_points)
    # find_polynomial(5, negative_points, positive_points)

    plt.show()
    print_title('Genetic Algorithms end')
if WEIGHTS_PATH:
    model.load_weights(WEIGHTS_PATH)

model.compile(loss="sparse_categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

if hvd.rank() == 0:
    model.summary()

# ==============
# Start training
# ==============
rank_prefix = "[" + str(hvd.rank()) + "]"
utils.print_title(rank_prefix + " Started Training")

sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())

# Broadcast initial variable states from rank 0 to all other processes:
bcast = hvd.broadcast_global_variables(0)
bcast.run(session=sess)

if hvd.rank() == 0:
    if args.progress:
        verbose = 1
    else:
        verbose = 2
# import sys
#
# for var, obj in locals().items():
#     print(var, sys.getsizeof(obj))

import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import matplotlib.cm as cm

import psf
import utils
import calibration

utils.print_title(message='\nN C P A', font=None, random_font=False)

print("\n -|| MULTIWAVE ||- ")
print("\nCan we use multiple wavelength channels to improve the calibration?\n")

# PSF bits
N_PIX = 256  # pixels for the Fourier arrays
pix = 25  # pixels to crop the PSF images
WAVE = 1.5  # microns | reference wavelength
SPAX = 4.0  # mas | spaxel scale
RHO_APER = utils.rho_spaxel_scale(spaxel_scale=SPAX, wavelength=WAVE)
RHO_OBSC = 0.30 * RHO_APER  # ELT central obscuration

print("Nominal Parameters | Spaxel Scale and Wavelength")
utils.check_spaxel_scale(rho_aper=RHO_APER, wavelength=WAVE)

N_actuators = 20  # Number of actuators in [-1, 1] line
    pix, pix, 2,
)

SNR = 500  # SNR for the Readout Noise
N_loops, epochs_loop = 5, 5  # How many times to loop over the training
readout_copies = 2  # How many copies with Readout Noise to use
N_iter = 3  # How many iterations to run the calibration (testing)

# In case you need to reload a library after changing it
import importlib
importlib.reload(calibration)

if __name__ == "__main__":
    utils.print_title(message='N C P A', font='mayhem_d')

    plt.rc('font', family='serif')
    plt.rc('text', usetex=False)

    # ================================================================ #
    #        Base Scenario - Get a model running, for reference
    # ================================================================ #

    # Calculate the Actuator Centres
    centers = psf.actuator_centres(N_actuators, rho_aper=RHO_APER,
                                   rho_obsc=RHO_OBSC, radial=True)
    N_act = len(centers[0])
    psf.plot_actuators(centers, rho_aper=RHO_APER, rho_obsc=RHO_OBSC)
""" Print parameters. """ print("Category :", category_name) print('-'*80) print("Model Name :", model_name) print("Image Shape :", img_shape) print('-'*80) print("Num of Classes :", num_classes) print("Train Size :", train_size) print("Validation Size :", val_size) print('-'*80) print("Epochs :", epochs) print("Batch Size :", batch_size) print("Initial LR :", lr) print("Early Stopping Patience :", early_stopping_patience) print("LR Reduce Patience :", lr_reduce_patience) print('-'*80) print('Train Data Path‘ :') print(train_data_dir) print('Val Data Path‘ :') print(val_data_dir) print("Model Save Path :") print(model_save_dir) # Tests if (__name__ == '__main__'): print_title('Test %s' % __file__, symbol='*') print_params()