def _modeler_factory(self):
    """Create a ``Modeler`` whose traits track the ``pychron.mdd`` preferences.

    Returns:
        Modeler: a new instance with its line width, plot type, and
        directory traits bound to the application preference entries.
    """
    # The constructor kwargs previously passed here (line_width,
    # arrhenius_plot_type) are superseded by the preference bindings below,
    # so the dead commented-out code has been removed.
    m = Modeler()
    # Keep the modeler's traits synchronized with the preference system.
    bind_preference(m, 'logr_ro_line_width', 'pychron.mdd.logr_ro_line_width')
    bind_preference(m, 'arrhenius_plot_type', 'pychron.mdd.plot_type')
    bind_preference(m, 'clovera_directory', 'pychron.mdd.clovera_dir')
    bind_preference(m, 'data_directory', 'pychron.mdd.data_dir')
    return m
def __init__(self, config):
    """Assemble, compile, and store the model described by the factories.

    Args:
        config: configuration object kept on the instance for later use.
    """
    self.config = config

    # Gather the training ingredients from their respective factories.
    metrics = Metrics().get()
    model = Modeler().get()
    loss = Loss().get()
    optimizer = Optimizer().get()

    console.log("Model has", model.count_params(), "params")
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    model.summary(line_length=150)
    self.model = model

    # How much the input is downscaled in the middle of the network;
    # needed to avoid rounding errors with the spectrogram.
    self.peakDownscaleFactor = 4
def main():
    """Run an end-to-end MERRA/Max process driven by command-line arguments."""
    # Process command-line args.
    desc = 'This application run an end-to-end MERRA/Max process.'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-f', required=True,
                        help='path to file of presence points')
    parser.add_argument('-o', default='.',
                        help='path to output directory')
    # type=int fixes a latent bug: without it the default is an int but any
    # user-supplied value arrives as a str, handing mixed types downstream.
    parser.add_argument('-p', default=10, type=int,
                        help='number of concurrent processes to run')
    parser.add_argument('-s', required=True,
                        help='species name')
    parser.add_argument('-t', default=10, type=int,
                        help='number of trials for selecting top-ten predictors')
    parser.add_argument('--startDate', help='MM-DD-YYYY')
    parser.add_argument('--endDate', help='MM-DD-YYYY')
    args = parser.parse_args()

    # Run the process: configure the run, then execute each pipeline stage
    # in order against the generated config file.
    c = ConfigureMmxRun(args.f, args.startDate, args.endDate, args.s,
                        args.o, args.p, args.t)
    GetMerra(c.config.configFile).run()
    PrepareImages(c.config.configFile).run()
    PrepareTrials(c.config.configFile).run()
    RunTrials(c.config.configFile).run()
    Selector(c.config.configFile).run()
    Modeler(c.config.configFile).run()
def __init__(
    self,
    cgra: MRRG,
    design: Design,
    solver_str: str,
    seed: int = 0,
    incremental: bool = False,
    duplicate_const: bool = False,
    duplicate_all: bool = False,
):
    """Set up the SMT solver, its options, and the variable store.

    Args:
        cgra: routing-resource graph to map onto.
        design: design whose operations are placed and routed.
        solver_str: solver backend name passed to ``smt()`` (e.g. 'CVC4').
        seed: random seed forwarded to the solver.
        incremental: enable incremental solving.
        duplicate_const: allow duplication of 'const' operations only.
        duplicate_all: allow duplication of every operation.
    """
    # Mark operations the mapper may replicate; duplicate_all subsumes
    # duplicate_const.
    if duplicate_all or duplicate_const:
        for operation in design.operations:
            if duplicate_all or operation.opcode == 'const':
                operation.allow_duplicate()

    self._cgra = cgra
    self._design = design
    self._incremental = incremental
    self._solver = solver = smt(solver_str)

    # Build the option list up front, then publish it on the instance.
    options = [('random-seed', seed), ('produce-models', 'true')]
    if incremental:
        options.append(('incremental', 'true'))
    if solver_str == 'CVC4':
        # Pick the bit-vector SAT backend according to incremental mode.
        backend = 'cryptominisat' if incremental else 'cadical'
        options.append(('bv-sat-solver', backend))
        # ('bitblast', 'eager') was previously considered here but left off.
    self._solver_opts = options

    self._init_solver()
    self._vars = Modeler(solver)
    self._model = None
def main(name, maxeval, metric):
    """Triggers experiment looping through ML algorithms.

    Args:
        name: name of experiment
        maxeval: maximum number of evaluation
        metric: name of metric to minimize cost function
    """
    mlflow.set_experiment(name)

    # Search spaces, index-aligned with the algorithm list zipped below.
    search_spaces = [
        {
            'max_depth': hp.choice('max_depth', range(1, 20)),
            'max_features': hp.choice('max_features', range(1, 26)),
            'n_estimators': hp.choice('n_estimators', range(100, 500)),
            'criterion': hp.choice('criterion', ["gini", "entropy"]),
        },
        {
            'var_smoothing': hp.uniform('var_smoothing',
                                        0.000000001, 0.000001),
        },
    ]

    X_train, X_test, y_train, y_test = Modeler().prepro()

    # One MLflow run per algorithm, each with its own hyperopt search.
    for space, algo in zip(search_spaces,
                           [RandomForestClassifier, GaussianNB]):
        with mlflow.start_run(run_name=str(algo)) as run:
            trials = Trials()
            objective = build_train_objective(algo, X_train, y_train,
                                              X_test, y_test, metric)
            hyperopt.fmin(fn=objective,
                          space=space,
                          algo=hyperopt.tpe.suggest,
                          max_evals=maxeval,
                          trials=trials)
            log_best(run, metric)
            mlflow.end_run()
def load_dataset(task: str, DELIMITER='#'):
    """Load the dataset splits for *task* and initialize the global modeler.

    Args:
        task: one of "Amazon", "Youtube", "Film", "News", or "Debug".
        DELIMITER: field delimiter forwarded to the Amazon/Youtube loaders.

    Returns:
        tuple: (df_train, df_dev, df_valid, df_test) — the heldout test
        split is kept only inside the global ``modeler``.

    Raises:
        ValueError: if *task* is not one of the known dataset names.
    """
    set_seeds()
    if task == "Amazon":
        splits = load_amazon_dataset(delimiter=DELIMITER)
    elif task == "Youtube":
        splits = load_youtube_dataset(delimiter=DELIMITER)
    elif task == "Film":
        splits = load_film_dataset()
    elif task in ("News", "Debug"):
        splits = load_news_dataset()
    else:
        # Previously an unknown task fell through and crashed later with
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(f"Unknown task: {task!r}")
    df_train, df_dev, df_valid, df_test, df_test_heldout = splits

    global modeler
    modeler = Modeler(df_train, df_dev, df_valid, df_test, df_test_heldout)
    update_stats({}, "load_data")
    return (df_train, df_dev, df_valid, df_test)
    # NOTE(review): this is the tail of a model-factory function (presumably
    # generate_models(), called from the __main__ block below) whose `def`
    # line is outside this chunk; model1 is built earlier in that function.

    # Second candidate: single 128-unit ReLU hidden layer over a
    # 784-dimensional input, 10-way softmax output.
    model2 = Sequential()
    model2.add(Dense(128, activation='relu', input_dim=784))
    model2.add(Dense(10, activation='softmax'))
    model2.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    # Third candidate: same topology but a wider (256-unit) hidden layer.
    model3 = Sequential()
    model3.add(Dense(256, activation='relu', input_dim=784))
    model3.add(Dense(10, activation='softmax'))
    model3.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    return (model1, model2, model3)


if __name__ == "__main__":
    # Train all three candidate models, then persist the single best one.
    data = load_data()
    (model1, model2, model3) = generate_models()
    modeler = Modeler()
    modeler.add(model1)
    modeler.add(model2)
    modeler.add(model3)
    modeler.start(data, epochs=12)
    modeler.save(n_save=1)
from plotter import plot_data_1, plot_data_2
from modeler import Modeler

if __name__ == '__main__':
    plot = True  # flip to False to skip all plotting

    # Load the OWID dataset and narrow it to the Indonesia rows/columns.
    pd.options.display.max_columns = None
    raw = pd.read_csv('data/owid-covid-data.csv')
    keep_columns = ['iso_code', 'location', 'date', 'total_cases',
                    'new_cases', 'total_deaths', 'new_deaths']
    df_indo = raw[raw['location'] == 'Indonesia'][keep_columns]
    df_indo['date'] = pd.to_datetime(df_indo['date'], format='%Y-%m-%d')

    if plot:
        save = True  # flip to False to display without saving

        # Observed data (skip the first 30 rows).
        observed = df_indo.iloc[30:]
        plot_data_1(observed, save)
        plot_data_2(observed, 'new_cases', 'blue', save)
        plot_data_2(observed, 'new_deaths', 'red', save)

        # Model predictions over a 90-step horizon (fit from row 65 on).
        m = Modeler(df_indo.iloc[65:])
        m.plot_observed_and_expected_total_case(90, save)
        print()
        m.plot_observed_and_expected_new_case(90, save)
async def on_message(message):
    """Discord handler driving a French-language coaching conversation in DMs.

    Flow (all state is keyed by the author's display name in module-level
    dicts): a session starts when the user sends "Bonjour {bot_name}" and
    ends on "Merci {bot_name}". In between, text is buffered until it
    contains sentence-ending punctuation (. ! ?), then fed sentence by
    sentence to the user's Modeler/Answerer pair.
    """
    bot_name = str(client.user).split("#")[0]
    target_name = str(message.author.name)
    str_author = str(message.author)
    # Root directory for per-user profiles and conversation logs.
    prefix = "user_data"
    # Ignore the bot's own messages to avoid feedback loops.
    if message.author == client.user:
        return
    if message.channel.type == discord.ChannelType.private:
        print(target_name)
        print(active_sessions.keys())
        # First contact with this user: initialize all per-user state.
        if target_name not in active_sessions.keys():
            active_sessions[target_name] = False
            session_count[target_name] = 0
            msg_count[target_name] = 0
        if not active_sessions[target_name]:
            # No session running: only the greeting can start one.
            if f"Bonjour {bot_name}" in message.content:
                active_sessions[target_name] = True
                sentence_buffer[target_name] = ""
                if target_name not in target_modelers.keys():
                    # New user: create a modeler and persist a fresh profile.
                    target_modelers[target_name] = Modeler(target_name)
                    if not os.path.exists(f"{prefix}/{str_author}"):
                        os.makedirs(f"{prefix}/{str_author}")
                    target_modelers[target_name].save_profile(
                        f"{prefix}/{str_author}/{str_author}_profile.json")
                else:
                    # Returning user: reload the saved profile.
                    target_modelers[target_name].load_profile(
                        f"{prefix}/{str_author}/{str_author}_profile.json")
                # sleeps pace the replies so they read like typing, not spam.
                time.sleep(1)
                await message.channel.send(f"Bonjour {target_name} !")
                time.sleep(.7)
                await message.channel.send(
                    f"Je suis {bot_name}, le robot qui écoute les problèmes ! Mon rôle est de déchiffrer tes 'méta-programmes' afin d'identifier les meilleurs vecteurs d'amélioration selon ta personnalité."
                )
                time.sleep(1)
                await message.channel.send(
                    "Ainsi, j'aimerais que tu me parles d'un élément de ta vie que tu souhaiterais améliorer afin que l'on puisse ensemble l'analyser en profondeur. Cela peut être lié aux hobbies, au travail, aux relations ..."
                )
                time.sleep(1.5)
                await message.channel.send(
                    "Note: je ne réponds que lorsque que ton message sera terminé par un point."
                )
                session_count[target_name] += 1
                # NOTE(review): the whole session_count dict is passed here,
                # not this user's count — confirm Answerer expects the dict.
                session_answerer = Answerer(session_count)
                session_answerer.load_answer_list("templates/meta_answers.csv")
                target_answerers[target_name] = session_answerer
                # TODO: possibly ask whether past conversations should be
                # taken into account when the session count is > 1.
                time.sleep(1.5)
                await message.channel.send(
                    f"De quoi allons-nous parler aujourd'hui ?")
                time.sleep(.7)
                await message.channel.send(
                    f"(Écrire 'Merci {bot_name}' pour mettre fin à la discussion)"
                )
            else:
                await message.channel.send(
                    f"Vous pouvez écrire 'Bonjour {bot_name}' pour lancer la discussion !"
                )
        else:
            if message.content == f"Merci {bot_name}":
                # Session termination: say goodbye and persist everything.
                print("end_message")
                time.sleep(1)
                await message.channel.send(f"Bonne journée {target_name} !")
                active_sessions[target_name] = False
                if not os.path.exists(f"{prefix}/{str_author}"):
                    os.makedirs(f"{prefix}/{str_author}")
                print(target_modelers[target_name].profile)
                target_answerers[target_name].save_conversation_data(
                    f"{prefix}/{str_author}/{str_author}_{datetime.now()}_{session_count[target_name]}.csv"
                )
                target_modelers[target_name].save_profile(
                    f"{prefix}/{str_author}/{str_author}_profile.json")
            else:
                # Regular in-session message.
                print("normal_message")
                session_answerer = target_answerers[target_name]
                session_modeler = target_modelers[target_name]
                if ("." in message.content) or ("!" in message.content) or (
                        "?" in message.content):
                    # Split on sentence-ending punctuation; the final element
                    # is "" when the message ends on punctuation, otherwise
                    # it is an unfinished sentence to carry over.
                    sentence_list = [
                        msg.strip()
                        for msg in re.split('[.!?]+', message.content)
                    ]
                    print(sentence_list)
                    # Prepend any buffered partial sentence to the first one.
                    if sentence_buffer[target_name] != "":
                        current_sentence = sentence_buffer[
                            target_name] + ' ' + sentence_list[0]
                    else:
                        current_sentence = sentence_list[0]
                    session_answerer.update_conversation(current_sentence)
                    session_modeler = session_modeler.update_profile(
                        current_sentence)
                    session_answerer.update_target_profile(
                        session_modeler.profile)
                    sentence_buffer[target_name] = ""
                    msg_count[target_name] += len(sentence_list[:-1])
                    # Feed the remaining complete sentences one at a time.
                    for current_sentence in sentence_list[1:-1]:
                        session_answerer.update_conversation(current_sentence)
                        session_modeler = session_modeler.update_profile(
                            current_sentence)
                        session_answerer.update_target_profile(
                            session_modeler.profile)
                    if sentence_list[-1] == "":
                        # Message ended on punctuation: answer now, with a
                        # delay proportional to the message length (>= 1s).
                        sentence_buffer[target_name] = ""
                        session_answerer.nb_answers = msg_count[target_name]
                        response = session_answerer.get_answer()
                        response_time = max(
                            1.0, 0.2 * len(message.content.split(" ")))
                        time.sleep(response_time)
                        await message.channel.send(response)
                    else:
                        # Trailing partial sentence: buffer it for next time.
                        sentence_buffer[target_name] = sentence_list[-1]
                else:
                    # No terminal punctuation at all: accumulate the text.
                    if sentence_buffer[target_name] != "":
                        sentence_buffer[target_name] = sentence_buffer[
                            target_name] + ' ' + message.content
                    else:
                        sentence_buffer[target_name] = message.content
    else:
        # Public channels are redirected to private messages.
        await message.channel.send(f"Venez discutez par message privé !")
def test_modeler_output(df):
    """The model's prediction for the fixture frame is a binary label."""
    prediction = Modeler().predict(df)
    assert prediction in (0, 1)
def test_modeler_model():
    """Fitting the model persists a model file at ``modelpath``."""
    Modeler().fit()
    # Direct truthiness assert instead of the `== True` anti-pattern
    # (PEP 8 / flake8 E712).
    assert os.path.isfile(modelpath)