Example #1
def save_log(cfg, exp, trial_log, ax=False):
    trial_num = 0
    name = cfg.checkpoint_file.format(trial_num)
    path = os.path.join(os.getcwd(), name)
    log.info(f"T{trial_num} : Saving log {path}")
    torch.save(trial_log, path)
    save(exp, os.path.join(os.getcwd(), "exp.json"))
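Note: the checkpoint written above can be read back with torch.load. A minimal sketch, assuming the same cfg naming scheme (load_log is a hypothetical helper, not part of the original source):

def load_log(cfg, trial_num=0):
    # Rebuild the path save_log used and deserialize the trial log.
    name = cfg.checkpoint_file.format(trial_num)
    path = os.path.join(os.getcwd(), name)
    return torch.load(path)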
Example #2
def main(args):
    begbeg = int(args.begbeg)
    begend = int(args.begend)
    endbeg = int(args.endbeg)
    endend = int(args.endend)
    howmany = int(args.howmany)
    poolsize = int(args.poolsize)
    fname = args.outfile
    shortname = args.shortout

    start = datetime(2000, 2, 9)
    api_key = os.getenv('ALPHAVANTAGE_API_KEY')
    spy = web.DataReader("SPY", "av-daily-adjusted", start=start, api_key=api_key).close
    dia = web.DataReader("DIA", "av-daily-adjusted", start=start, api_key=api_key).close
    qqq = web.DataReader("QQQ", "av-daily-adjusted", start=start, api_key=api_key).close

    mydf = pd.concat([spy, dia, qqq], axis=1)
    mydf.index=pd.to_datetime(mydf.index)
    mydf.columns = ['spy', 'dia', 'qqq']
    mydf['ordnum'] = mydf.reset_index().index
    mydf.dropna(inplace=True)

    ssaeval = trialscored(mydf, ['spy', 'dia', 'qqq'], (begbeg, begend), (endbeg, endend), howmany, poolsize, doscorelog)
    best_parameters, values, experiment, model = optimize(
        parameters = [
            {
                "name": "howmany",
                "type": "range",
                "bounds" : [1, 40],
                "value_type" : "int",
            },
            {
                "name": "components",
                "type": "range",
                "bounds": [1, 40],
                "value_type" : "int",
            },
            {
                "name": "winsize",
                "type": "fixed",
                "value": True,
            },
        ],
        experiment_name="ssa_test",
        objective_name="ssa",
        total_trials=100,
        evaluation_function=ssaeval,
    )
    with open(shortname, "a") as shortdesc:
        print(best_parameters, file=shortdesc)
        print(values, file=shortdesc)
    save(experiment, fname)
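Note: assuming save here is Ax's JSON-store save (from ax import save), the experiment written to fname can be restored later with the matching load. A minimal sketch:

from ax import load

experiment = load(fname)  # rebuild the Experiment object from the JSON file
print(experiment.trials)  # e.g. inspect the recorded trials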
Example #3
            },
            {
                "name": "attention_location_n_filters",
                "type": "range",
                "bounds": [16, 128],
                "value_type": "int",
                "log_scale": True
            },
            {
                "name": "speaker_embedding_dim",
                "type": "range",
                "bounds": [64, 2048],
                "value_type": "int",
                "log_scale": True
            },
        ],
        total_trials=15,
        evaluation_function=train_experiment,
        minimize=False,
        objective_name='validation_average_max_attention_weight',
    )
    print("__best_values__\n", best_values, "\n\n")
    print("__best_parameters__\n", best_parameters, "\n\n")
    print("__experiment__\n", experiment, "\n\n")
    print("__model__\n", model, "\n\n")

    filepath = "ax_experiment"
    print("saving experiment to;\n", filepath)
    ax.save(experiment, filepath)
Example #4
    def _run_actual_experiment(self):
        self.log.info('Loading data')
        try:
            supervised_images, supervised_masks = rs.data.cil.load_images(
                rs.data.cil.training_sample_paths(self.data_directory)
            )
            self.log.debug('Loaded %d supervised samples', supervised_images.shape[0])

            unsupervised_sample_paths = np.asarray(rs.data.unsupervised.processed_sample_paths(self.data_directory))
            self.log.debug('Loaded %d unsupervised sample paths', len(unsupervised_sample_paths))
        except (OSError, ValueError):
            self.log.exception('Unable to load data')
            return

        self.log.info('Building experiment')
        search_space = self.build_search_space()
        self.log.debug('Built search space %s', search_space)

        objective = ax.Objective(metric=ax.Metric(self._TARGET_METRIC, lower_is_better=False), minimize=False)
        optimization_config = ax.OptimizationConfig(objective, outcome_constraints=None)
        self.log.debug('Built optimization config %s', optimization_config)

        def _evaluation_function_wrapper(
                parameterization: typing.Dict[str, typing.Union[float, str, bool, int]],
                weight: typing.Optional[float] = None
        ) -> typing.Dict[str, typing.Tuple[float, float]]:
            return self._run_trial(parameterization, supervised_images, supervised_masks, unsupervised_sample_paths)

        experiment = ax.SimpleExperiment(
            search_space=search_space,
            name=self.tag,
            evaluation_function=_evaluation_function_wrapper
        )
        experiment.optimization_config = optimization_config
        self.log.debug('Built experiment %s', experiment)

        generation_strategy = self._build_generation_strategy()
        self.log.info('Using generation strategy %s', generation_strategy)

        loop = ax.OptimizationLoop(
            experiment,
            total_trials=self.parameters['base_search_initial_trials'] + self.parameters['base_search_optimised_trials'],
            arms_per_trial=1,
            random_seed=self.SEED,
            wait_time=0,
            run_async=False,
            generation_strategy=generation_strategy
        )
        self.log.info('Running trials')
        loop.full_run()
        self.log.info('Finished all trials')

        best_parameterization, (means, covariances) = loop.get_best_point()
        self.log.info('Best encountered parameters: %s', best_parameterization)
        self.log.info(
            'Best encountered score: mean=%.4f, var=%.4f',
            means[self._TARGET_METRIC],
            covariances[self._TARGET_METRIC][self._TARGET_METRIC]
        )

        experiment_save_path = os.path.join(self.experiment_directory, 'trials.json')
        ax.save(experiment, experiment_save_path)
        self.log.info('Saved experiment to %s', experiment_save_path)
Example #5
    SRC_DIR = Path(args.srcdir)
    TMP_DIR = Path(args.datadir)
    DATA_DIR = TMP_DIR / "data"
    RESULTS_DIR = SRC_DIR / "output"
    RESULTS_DIR.mkdir(exist_ok=True)
    RUNSCRIPTS_DIR = RESULTS_DIR / "run_scripts"
    RUNSCRIPTS_DIR.mkdir(exist_ok=True)
    EXP_RESULTS = RESULTS_DIR / "results"
    EXP_RESULTS.mkdir(exist_ok=True)

    run_script_name = f"runscript-{START_TIME}.py"

    print("Ax HPT Experiment runscript written to:",
          RUNSCRIPTS_DIR / run_script_name)
    copyfile(os.path.realpath(__file__), RUNSCRIPTS_DIR / run_script_name)

    best_parameters, values, experiment, model = optimize(
        parameters=parametrization,
        evaluation_function=train_evaluation,
        objective_name="Accuracy",
        total_trials=args.trials,
    )

    print(f"Means: {values[0]}\
        Covariances: {values[1]}")

    print(best_parameters)

    save_name = str(EXP_RESULTS / f"exp-hpt-sgd-{START_TIME}.json")
    save(experiment, save_name)
Example #6
def hyperparameter_optimization(a: Namespace, c: connection, t: str):
    dtype = torch.float
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    global cur
    cur = c.cursor()
    global conn
    conn = c
    global args
    args = a
    global task
    task = t

    global ss
    global data_composition_key
    global model_key
    _, ss, data_composition_key, model_key, ntrials, epochs = task.split(":")
    args.epochs = int(epochs)

    make_sure_table_exist(args, conn, cur, args.train_results_ax_table_name)
    make_sure_table_exist(args, conn, cur,
                          args.validation_results_ax_table_name)
    make_sure_table_exist(args, conn, cur, args.test_results_ax_table_name)

    objective_wrapper({})  # initial run with the default config

    best_parameters, values, experiment, model = optimize(
        parameters=[
            {
                "name": "lr",
                "type": "range",
                "bounds": [1e-7, 0.5],
                "log_scale": True,
            },
            {
                "name": "weight_decay",
                "type": "range",
                "bounds": [1e-8, 0.5],
                "log_scale": True,
            },
            {
                "name": "optimizer",
                "type": "choice",
                "values": [
                    "Adadelta", "Adagrad", "Adam", "AdamW", "Adamax", "ASGD",
                    "RMSprop", "SGD",
                ],
            },
            {
                "name": "criterion",
                "type": "choice",
                "values": ["BCELoss", "MSELoss"],
            },
            {
                "name": "feature_extraction",
                "type": "choice",
                "values": [True, False],
            },
        ],
        evaluation_function=objective_wrapper,
        objective_name='accuracy',
        minimize=False,
        arms_per_trial=1,
        total_trials=int(ntrials),  # adjust per task
    )

    save(experiment, os.path.join(res_path, "experiment.json"))

    return True
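Note: optimize calls its evaluation_function with a parameterization dict (and an optional weight) and accepts either a bare float or a {metric_name: (mean, SEM)} mapping. A minimal sketch of an objective_wrapper-compatible stub; train_and_evaluate is a hypothetical helper, not part of the original source:

def objective_wrapper(parameterization):
    # Train with the suggested hyperparameters and report accuracy.
    lr = parameterization.get("lr", 1e-3)
    weight_decay = parameterization.get("weight_decay", 0.0)
    accuracy = train_and_evaluate(lr=lr, weight_decay=weight_decay)  # hypothetical
    # Return {metric: (mean, SEM)}; SEM 0.0 marks a noiseless evaluation.
    return {"accuracy": (accuracy, 0.0)}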