Example #1
def main(config) -> None:
    logger.info(f"\n{config.pretty()}")

    pl.seed_everything(config.hparams.seed)

    datamodule = MNISTDataModule(
        data_dir=config.dataset.path,
        batch_size=config.hparams.batch_size,
        num_workers=config.hparams.num_workers,
    )

    trainer = pl.Trainer(
        **config.trainer,
        checkpoint_callback=ModelCheckpoint(**config.model_checkpoint),
        callbacks=[EarlyStopping(**config.early_stopping)]
        + [hydra.utils.instantiate(i) for i in config.callbacks],
        logger=[hydra.utils.instantiate(i) for i in config.loggers],
        auto_lr_find=config.hparams.lr == 0,
    )

    model = Model(**config.hparams)

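    # tune() runs the learning-rate finder (auto_lr_find is enabled above when
    # config.hparams.lr == 0) and stores the found rate in model.hparams.lr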
    trainer.tune(model, datamodule=datamodule)
    assert model.hparams.lr > 0, f'auto_lr_find failed: model.hparams.lr={model.hparams.lr}'
    config.hparams.lr = model.hparams.lr

    # Re-create the model so the updated hparams are written to the log
    model = Model(**config.hparams)

    trainer.fit(model, datamodule=datamodule)
    trainer.test()
Example #2
def main(config) -> None:
    all_done = False
    try:
        logger.info('\n' + OmegaConf.to_yaml(config))

        shutil.copytree(
            Path(hydra.utils.get_original_cwd()) / 'src',
            Path.cwd() / 'copied' / 'src')

        pl.seed_everything(config.hparams.seed)

        datamodule = hydra.utils.instantiate(config.data.datamodule)

        trainer = pl.Trainer(
            **config.trainer,
            checkpoint_callback=ModelCheckpoint(**config.model_checkpoint),
            callbacks=[EarlyStopping(**config.early_stopping)] +
            [hydra.utils.instantiate(i) for i in config.callbacks],
            logger=[hydra.utils.instantiate(i) for i in config.loggers],
            auto_lr_find=config.hparams.lr == 0,
        )

        model = Model(**config.hparams)
        torchsummary.summary(model)

        trainer.tune(model, datamodule=datamodule)
        if config.debug:
            # in fast_dev_run mode, auto_lr_find fails and leaves
            # model.hparams.lr as None
            assert model.hparams.lr is None
            model.hparams.lr = 1
        assert model.hparams.lr > 0, f'auto_lr_find failed: model.hparams.lr={model.hparams.lr}'
        config.hparams.lr = model.hparams.lr

        # Re-create the model so the updated hparams are reported to the loggers
        model = Model(**config.hparams)

        trainer.fit(model, datamodule=datamodule)
        trainer.test()

        logger.info('All done.')
        all_done = True
    finally:
        if not all_done:
            path = Path.cwd()
            if 'outputs' in path.parts or 'multirun' in path.parts:
                logger.info(
                    f'Renaming directory: "{path}" -> "{path}__interrupted__"')
                path.rename(path.parent / (path.name + '__interrupted__'))
Example #3
def get_rickers_trancendental_root(alpha, mu):
    model = Model(alpha, mu)
    roots = get_roots(model)
    return [root for root in roots
            if not (almost_equals(0, root[0]) or almost_equals(1, root[0]))][0]
Example #4
def main():
    with open("iris.dataset") as f:
        dataset = [[float(x) for x in line.split(" ")] for line in f]
    dataset = dataset[0:150]

    model = Model(0.2, [4, 10, 3])

    # k-fold cross validation
    k = 5
    fold = []
    fold_len = int(len(dataset) / k)
    # print(dataset)
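    # build k folds by rotating the dataset with shift() so that each fold
    # uses a different slice for validation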
    for i in range(0, k):
        r = {
            'validation_data': dataset[0:fold_len],
            'training_data': dataset[fold_len:len(dataset)]
        }
        fold.append(r)
        dataset = shift(dataset, -fold_len)

    print("init weights:", model.layer_weights)

    for j in range(0, k):
        print(fold[j]['training_data'])
        model.train(fold[j]['training_data'], fold[j]['validation_data'], 100)

    print("final weights:", model.layer_weights)

    plt.plot(model.error)
    plt.plot(model.accuracy)
    plt.xlabel('epoch')
    plt.yscale('log')
    plt.legend(['Error', 'Accuracy'], loc='upper left')
    plt.show()
Example #5
def run_cv(data, target, base_model, params, tags, k=5):
    """
    Performing a CV training

    Args:
        data (pd.DataFrame):        DataFrame with the filtered data
        target (str):               Name of the prediction target.              
        base_model (sklearn model): Scikit-learn object that will be used as base model for training.
      
    Returns:
        model:                      Trained model
        metrics (dict):             Dictionary with all the metrics for the model
    """
    experiment_name = "StockForecasting_PROD"

    print('{:=^80}'.format('  RUN  '))
    print("Starting RUN on project {}.".format(experiment_name))

    # Init model
    norm = Normalize()
    pipeline = Pipeline([("norm", norm), ("model", base_model)])
    model = Model(pipeline)

    # Get CV metrics
    cv_metrics = cross_validate(model, data, target, k)
    print(cv_metrics)

    # Log in mlflow with no artifacts
    log_mlflow(experiment_name, None, params, cv_metrics, tags)

    return cv_metrics
Example #6
    def __init__(self, loop):
        self.loop = loop

        controller = Controller()
        model = Model(controller=controller)
        self.view = View(controller=controller)
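        # segundo_init ("second init") wires the model and view back into the
        # controller once both exist, completing the two-phase MVC setup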
        controller.segundo_init(model=model, view=self.view)
Example #7
    def test_reward(self):
        model_keys = ["000000100", "000210100", "020112001"]

        model_probs = ["112111011", "211001011", "101000120"]
        model = Model(load_model=False)

        for i, k in enumerate(model_keys):
            model.states[k] = self.to_int_list(model_probs[i])

        ai_keys = ["001000000", "001012000", "001112020"]
        ai_boards = [
            BoardState.from_board(self.to_int_list(s)) for s in ai_keys
        ]

        ai_moves = [5, 7, 1]
        progress = list(zip(ai_boards, ai_moves))

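        # replay the recorded (board, move) pairs so reward() can update the
        # stored probabilities of each visited state given the game result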
        model.reward(progress, Result.X_WINS)

        target_probs = ["110110211", "110100102", "110000101"]
        target_probs = [
            list(map(float, self.to_int_list(p))) for p in target_probs
        ]

        self.assertEqual(len(model.states), len(model_keys),
                         "Too many state entries in model.")

        for i, k in enumerate(ai_keys):
            model_probs = model.states[k]
            self.assertEqual(model_probs, target_probs[i])
Example #8
def run(app):
    dir_name = os.path.dirname(__file__)
    os.environ['QML_IMPORT_PATH'] = os.path.join(dir_name, 'resources')
    os.environ['QML2_IMPORT_PATH'] = os.path.join(dir_name, 'resources')

    #QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)

    # Create the application instance.
    #app = QGuiApplication(sys.argv)

    # Create QML engine
    engine = QQmlApplicationEngine()
    context = engine.rootContext()

    # Testor
    manage = ManageThreads()
    context.setContextProperty("manage", manage)

    # Model
    TestorModel = Model()
    manage.runtimeSig.connect(TestorModel.addData)
    context.setContextProperty("TestorModel", TestorModel)

    engine.load(QUrl('src/resources/main.qml'))

    engine.quit.connect(app.quit)
    sys.exit(app.exec_())
Example #9
    def generate(self, solution: Solution[Model]) -> Solution[Model]:
        model = solution.getvalue()
        connection: torch.Tensor = model.connection
        weight: torch.Tensor = model.weight
        self.device = model.device

        connection, weight = self._regulate_neural(
            connection, weight,
            removable=model.input_dim + model.output_dim)
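        # flip each connection bit independently with probability
        # possibility_of_invert, via a Bernoulli mask XORed into the matrix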
        flip_mask = torch.ones_like(connection) * self.possibility_of_invert
        flip_mask = torch.bernoulli(flip_mask).to(torch.int8)
        new_connection = connection.to(torch.int8) ^ flip_mask

        uniform_from_distance = torch.empty_like(weight).uniform_(
            -1 * self.distance, self.distance)
        new_weight = weight + uniform_from_distance

        new_model = Model(
            model.activation, model.input_dim, model.output_dim,
            connection.size()[0] - model.input_dim - model.output_dim,
            new_connection, new_weight, model.delay, model.device)

        return Solution(new_model, solution.get_generator(),
                        solution.get_evaluator())
Example #10
def calc_operating_income(new_trucks: int, num_customers: float) -> float:
    # initialize the model
    model = Model()
    model.inputs.num_customers = num_customers

    # get total trucks, demand, and the trucks needed to serve it
    model.inputs.trucks_total = (model.operations.productivity.avg_num_trucks +
                                 new_trucks)
    trucks_utilized = min(model.trucks_required(), model.inputs.trucks_total)

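    # note: available_capacity_per_truck is computed below but never used afterwards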
    available_capacity_per_truck = (
        model.inputs.lifts_per_truck_day *
        model.operations.avg_vol_per_lift() *
        model.operations.productivity.working_days_per_year)

    # what demand is met
    served_demand = model.demand_served()
    revenue = served_demand * model.inputs.revenue_per_m3
    disposal_cost = model.new_disposal_cost()

    # depot related costs
    depot_overhead = model.depot_overhead_cost(
        model.inputs.trucks_total)  # assume each depot incurs OH
    depot_labor = model.depot_labor_cost(
        trucks_utilized)  # but only "active" ones incur labor cost

    # driver costs
    driver_labor = model.driver_labor_cost(trucks_utilized)

    # fuel and maintenance
    fuel = model.fuel_cost(trucks_utilized)
    maintenance = model.maintenance_cost(trucks_utilized)

    return (revenue - disposal_cost - depot_overhead - depot_labor -
            driver_labor - fuel - maintenance)
Example #11
def main():
    # Load train dataset
    data = dataLoader(directory='./dataset/captcha', dataset_dir='train',
                      dataset_name='train.txt', max_steps=6, image_width=200,
                      image_height=64, grd_attn=True, mode='Train')
    # Load Model
    model = Model(dim_feature=[672, 128],
                  dim_hidden=128,
                  n_time_step=8,
                  alpha_c=1.0,
                  image_height=64,
                  image_width=200,
                  mode='train')
    # Load Trainer
    trainer = Train(model,
                    data,
                    val_data=None,
                    n_epochs=1000,
                    batch_size=64,
                    update_rule='adam',
                    learning_rate=0.0001,
                    print_every=100,
                    save_every=5,
                    pretrained_model=None,
                    model_path='model/lstm1/',
                    log_path='log1/')
    # Begin Training
    trainer.train()
Example #12
def update_alphas(generic_model, model_name, model_configs, alpha, l1_ratio,
                  prefix):

    refined_alphas = model_configs['update_alphas'] * alpha
    refined_l1_ratios = model_configs['update_alphas'] * l1_ratio

    if model_name == 'Ridge':
        model = generic_model(alphas=refined_alphas,
                              cv=model_configs['cross_val'])

    elif model_name == 'Lasso':
        model = generic_model(alphas=refined_alphas,
                              cv=model_configs['cross_val'],
                              max_iter=model_configs['max_iter'])

    elif model_name == 'ElasticNet':
        model = generic_model(l1_ratio=refined_l1_ratios,
                              alphas=refined_alphas,
                              cv=model_configs['cross_val'],
                              max_iter=model_configs['max_iter'])

    else:
        raise ValueError(f'Unsupported model_name: {model_name}')

    model = Model(model,
                  model_name,
                  prefix,
                  cross_val=model_configs['cross_val'])
    return model
Example #13
    def on_run_regex(self):
        """ Passes clinician note file to Model """

        self.phrases = self.regex_text.get(1.0, 'end-1c').strip()

        # Get the file name and pass the global path to ReadRPDR
        file_loc = self.data_model.input_fname
        self.dirname = os.path.dirname(file_loc)

        opts = {
            'r_encoding': 'utf-8',
            'preserve_header': True,
            'patient_id': self.patient_id_entry.get(),
            'note_key': self.note_key_entry.get(),
            'rpdr': self.rpdr_checkbox.var.get()
        }

        phrases = [p.strip() for p in self.phrases.split(",")]

        self.model = Model(options_=opts,
                           file_location_=file_loc,
                           keywords_=phrases)

        if self.checkvar:
            self.num_notes = self.model.get_num_notes_positive()
        else:
            self.num_notes = self.model.get_num_notes()

        first_note, index = self.model.first(self.checkvar)

        self.display_output_note(first_note, index)
Example #14
def get_jacoby_matrix(alpha, mu):
    model = Model(alpha, mu)
    f, g = model.f_symbolic, model.g_symbolic
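    # x and y are assumed to be sympy symbols defined at module level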
    df_dx = diff(f, x)
    df_dy = diff(f, y)
    dg_dx = diff(g, x)
    dg_dy = diff(g, y)
    return Matrix([[df_dx, df_dy], [dg_dx, dg_dy]])
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--mel-path')
    args = parser.parse_args()
    print('Loading model')
    model = Model(FITTED_MODEL_PATH)
    print('Successfully loaded model')
    preds = model.predict(args.mel_path)
    print(preds)
Example #16
def train_model():
    df = pd.read_csv(os.path.join(DATA_DIR, TRAIN_NAME))

    my_model = Model()
    X_train, y_train = my_model.preprocess_training_data(df)
    my_model.fit(X_train, y_train)

    # Save the fitted model with joblib
    joblib.dump(my_model, JOBLIB_NAME)
Example #17
def test_model_predict():
    """Test if model can extract entities and accompanying sentence"""
    with open("tests/processed_html_text.txt", "r", encoding="utf-8") as f:
        data = f.read()

    model = Model()
    output = model.predict(data)

    assert len(output) > 0, "model did not return any prediction"
    assert len(output[0]) == 2, "model did not return entity and sentence"
Example #18
    def __init__(self, parent=None):
        super().__init__(parent)

        self.view = Ui_View()
        self.mainwindow = QMainWindow()
        self.model = Model()

        self.view.setupUi(self.mainwindow)
        self.setFixedSize(620, 250)
        self.setup_signals()
Example #19
def test_model(test_tree, test_soil):
    # change all viscosities to be 1
    axial_permeability = [10, 20]
    for i in range(test_tree.num_elements):
        for j in range(2):
            test_tree.viscosity[i, j] = 1
            test_tree.axial_permeability[i, j] = axial_permeability[j]
            test_tree.element_height[i] = 1
            test_tree.pressure[i, j] = i
            test_tree.transpiration_rate[i] = 1
    return Model(test_tree, test_soil)
Example #20
def test_checkpoint():
    ''' Load checkpoint for pretrained model'''
    from src.model import Model
    model = Model(input_dim=2,
                  hidden_dim=16,
                  kernel_size=3,
                  pool_size=10,
                  n_max=1,
                  pad=True,
                  device='cpu',
                  version='0.0.1')
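    # the actual checkpoint load would follow here, e.g.
    # model.load_state_dict(torch.load(ckpt_path)) -- ckpt_path is hypothetical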
Example #21
    def __init__(self):
        self.iteration_number = 0
        self.solution_type = ""
        self.solution_name = ""
        self.model = Model()
        self.solver_data = SolverData()
        self.data = DataStore()
        self.iteration_results = IterationResults()
        self.sequence_list = SequenceList()
        self.current_sequence = Sequence()
        self.iteration_saved = False
Example #22
    def __init__(self, train_filepath, test_filepath, submission_filepath):
        if Titanic.class_instance is not None:
            raise Exception(
                "The class is a singleton; cannot create more than one object")
        else:
            Titanic.class_instance = self
            self.train_dataset_path = train_filepath
            self.test_dataset_path = test_filepath
            self.submission_filepath = submission_filepath
            self.training_dataframe = pd.DataFrame.from_dict({})
            self.testing_dataframe = pd.DataFrame.from_dict({})
            self.submission_dataframe = pd.DataFrame(
                columns=['PassengerId', 'Survived'], index=None)
            self.model = Model()
            # to be fixed: self.model is reassigned below
            self.training_dataPrep_object = Datapreparation(
                self.train_dataset_path)
            self.test_dataPrep_object = Datapreparation(self.test_dataset_path)
            self.model = Model(model_type='LogisticRegression')
Example #23
def main(config):

    config_proto = tf.ConfigProto()
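    # allocate GPU memory on demand instead of reserving it all up front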
    config_proto.gpu_options.allow_growth = True
    sess = tf.InteractiveSession(config=config_proto)
    dataloader = Dataloader(sess, config, prefetch_buffer_size=10)
    model = Model(sess, dataloader, config)
    model.build_model()
    logger = Logger(sess, config)
    trainer = Trainer(sess, model, dataloader, logger, config)
    trainer.train()
Example #24
def main():
    # Load train dataset
    data = dataLoader(directory='./dataset', dataset_dir='test_curated',
                      dataset_name='test.txt', max_steps=6, mode='Test')
    # Load Model
    model = Model(dim_feature=[196, 128], dim_hidden=128, n_time_step=6,
                  alpha_c=1.0, image_height=64, image_width=64, mode='test')
    # Load Inference model
    testing = Test(model, data, max_steps=6, batch_size=1, print_every=2000,
                   pretrained_model='model/lstm2/model-650')
    # Begin Evaluation
    testing.test()
Example #25
    def get(self):
        '''
        Import the model, pull data from poloniex, run the model's
        prediction, and return its output (datetime, price, and trade
        amount as JSON).
        '''
        m = Model(symbols=['BTC'])

        return m.predict_for_api(5900, self.funds)
Example #26
def main(model_dir, data_dir):

    best_train_result_path = os.path.join(model_dir, "best03.pth")

    checkpoint = torch.load(best_train_result_path)

    model = Model().cuda()
    model.load_state_dict(checkpoint["model"])

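    # a dummy input fixes the input shape for ONNX tracing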
    dummy_input = torch.randn(1, 3, 96, 96, device='cuda')

    torch.onnx.export(model, dummy_input, './model.onnx')
Example #27
def run_ensemble_model(generic_model, model_name, model_configs,
                       ensemble_param_grid, prefix):

    X_train, X_test, y_train, y_test = model_configs['splitted_data']
    y_train = y_train.to_numpy().ravel()
    y_test = y_test.to_numpy().ravel()

    scorer = make_scorer(mean_squared_error, greater_is_better=False)
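    # greater_is_better=False makes the scorer negate MSE so that
    # RandomizedSearchCV, which maximizes the score, still minimizes the error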
    model = RandomizedSearchCV(generic_model(loss='huber'),
                               ensemble_param_grid,
                               random_state=1,
                               n_iter=100,
                               cv=model_configs['cross_val'],
                               verbose=0,
                               scoring=scorer)

    model = Model(model,
                  model_name,
                  prefix,
                  cross_val=model_configs['cross_val'])

    model.fit(X_train, y_train)
    best_model = model.model.best_estimator_
    best_params = model.model.best_params_
    logging.info(f'Best {model_name} params:\n{best_params}')

    model = Model(best_model,
                  model_name,
                  prefix,
                  cross_val=model_configs['cross_val'])

    rmse_train = model.fit_cross_val(X_train, y_train)
    rmse_test = model.predict_cross_val(X_test, y_test)
    logging.info(f'RMSE on training data: {rmse_train}')
    logging.info(f'RMSE on validation data: {rmse_test}')

    model.plot_feature_importances(X_test)

    filepath = f'models/{prefix}_{model_name}'
    save_serialized(filepath, model)
Example #28
    def solve_data_set(self):
        self.model = Model()
        self.model.set_data(self.solver_data.data_set_number, self.data)
        self.annealing.set_data_set_number(self.solver_data.data_set_number,
                                           self.data)
        self.annealing.start_sequence1()
        self.annealing.sequence.set_sequences()

        self.model.current_sequence = self.annealing.next_sequence()
        self.model.set_sequence()
        # calculate sequence
        #self.model.current_sequence = self.current_sequence
        self.model.solve()
Example #29
    def test_tick_input_with_wrong_dim(self) -> None:
        instance: Model = Model(square,
                                1,
                                5,
                                10,
                                torch.zeros([16, 16]),
                                torch.zeros([16, 16]),
                                device=torch.device('cpu'))

        wrong_input: torch.Tensor = torch.empty([2], dtype=torch.float64)

        with self.assertRaises(WrongDimensionException):
            instance.tick(wrong_input)
Example #30
def main():
    # Load train dataset
    data = dataLoader(directory='./dataset', dataset_dir='train_cropped',
                      dataset_name='extra.txt', max_steps=6, mode='train')
    # Load Model
    model = Model(dim_feature=[196, 128], dim_hidden=128, n_time_step=6,
                  alpha_c=0.0, image_height=64, image_width=64, mode='train')
    # Load Trainer
    trainer = Train(model, data, val_data=None, n_epochs=1000, batch_size=96,
                    update_rule='adam', learning_rate=0.0001, print_every=100, save_every=5,
                    pretrained_model=None, model_path='model/lstm7/', log_path='log7/')
    # Begin Training
    trainer.train()