Example #1
    def __init__(self,
                 space,
                 name="Default Tune Experiment",
                 max_concurrent=1,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 **kwargs):
        assert sgo is not None, "SigOpt must be installed!"
        assert type(max_concurrent) is int and max_concurrent > 0
        assert "SIGOPT_KEY" in os.environ, \
            "SigOpt API key must be stored as environ variable at SIGOPT_KEY"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        self._max_concurrent = max_concurrent
        self._metric = metric
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._live_trial_mapping = {}

        # Create a connection with SigOpt API, requires API key
        self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])

        self.experiment = self.conn.experiments().create(
            name=name,
            parameters=space,
            parallel_bandwidth=self._max_concurrent,
        )

        super(SigOptSearch, self).__init__(metric=metric, mode=mode, **kwargs)
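A minimal usage sketch for the searcher above (not taken from the snippet itself): the space argument is a list of SigOpt parameter definitions of the same name/type/bounds form used throughout these examples, and the finished searcher is handed to Ray Tune. The objective function, the bounds, and the import path (which varies across Ray releases) are assumptions.

import os
from ray import tune
from ray.tune.suggest.sigopt import SigOptSearch  # path differs in newer Ray releases

# SigOpt parameter definitions: name/type/bounds dicts, as in the other examples here
space = [
    dict(name="lr", type="double", bounds=dict(min=1e-5, max=1e-1)),
    dict(name="hidden", type="int", bounds=dict(min=16, max=256)),
]

def train_fn(config):
    # dummy objective that reports the metric the searcher optimizes
    tune.report(episode_reward_mean=config["lr"] * config["hidden"])

# the constructor above requires the API key in the SIGOPT_KEY environment variable
assert "SIGOPT_KEY" in os.environ
algo = SigOptSearch(space, name="sigopt-search-demo",
                    metric="episode_reward_mean", mode="max")
tune.run(train_fn, search_alg=algo, num_samples=4)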
Example #2
    def __init__(self,
                 space: List[Dict] = None,
                 name: str = "Default Tune Experiment",
                 max_concurrent: int = 1,
                 reward_attr: Optional[str] = None,
                 connection: Optional[Connection] = None,
                 experiment_id: Optional[str] = None,
                 observation_budget: Optional[int] = None,
                 project: Optional[str] = None,
                 metric: Union[None, str, List[str]] = "episode_reward_mean",
                 mode: Union[None, str, List[str]] = "max",
                 points_to_evaluate: Optional[List[Dict]] = None,
                 **kwargs):
        assert (experiment_id is None) ^ (space is None), \
            "space xor experiment_id must be set"
        assert type(max_concurrent) is int and max_concurrent > 0

        if connection is not None:
            self.conn = connection
        else:
            assert sgo is not None, """SigOpt must be installed!
                You can install SigOpt with the command:
                `pip install -U sigopt`."""
            assert "SIGOPT_KEY" in os.environ, \
                "SigOpt API key must be stored as " \
                "environ variable at SIGOPT_KEY"
            # Create a connection with SigOpt API, requires API key
            self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])

        self._max_concurrent = max_concurrent
        if isinstance(metric, str):
            metric = [metric]
            mode = [mode]
        self._metric = metric
        self._live_trial_mapping = {}

        if experiment_id is None:
            sigopt_params = dict(name=name,
                                 parameters=space,
                                 parallel_bandwidth=self._max_concurrent)

            if observation_budget is not None:
                sigopt_params["observation_budget"] = observation_budget

            if project is not None:
                sigopt_params["project"] = project

            if len(metric) > 1 and observation_budget is None:
                raise ValueError(
                    "observation_budget is required for an "
                    "experiment with more than one optimized metric")
            sigopt_params["metrics"] = self.serialize_metric(metric, mode)

            self.experiment = self.conn.experiments().create(**sigopt_params)
        else:
            self.experiment = self.conn.experiments(experiment_id).fetch()

        self._points_to_evaluate = points_to_evaluate

        super(SigOptSearch, self).__init__(metric=metric, mode=mode, **kwargs)
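For the multi-metric path above, a hedged sketch of how the constructor might be called; the parameter names, budget, and metric names are illustrative only. Note that observation_budget becomes mandatory as soon as more than one metric is optimized.

from ray.tune.suggest.sigopt import SigOptSearch  # import path may differ by Ray version

algo = SigOptSearch(
    space=[
        dict(name="width", type="int", bounds=dict(min=0, max=20)),
        dict(name="height", type="int", bounds=dict(min=-100, max=100)),
    ],
    name="multi-metric-demo",
    observation_budget=20,      # required above when len(metric) > 1
    metric=["average", "std"],
    mode=["max", "min"],
)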
Example #3
    def test_search(self):
        conn = sigopt.Connection()

        n_iter = 5
        folds = 3
        cv = SigOptSearchCV(
            estimator=GradientBoostingClassifier(),
            param_domains=GradientBoostingClassifier_PARAM_DOMAIN,
            client_token='client_token',
            n_iter=n_iter,
            cv=folds)
        assert len(conn.experiments().create.mock_calls) == 0
        assert len(conn.experiments().fetch.mock_calls) == 0
        assert len(conn.experiments().suggestions.create.mock_calls) == 0
        assert len(conn.experiments().observations.create.mock_calls) == 0

        data = sklearn.datasets.load_iris()
        cv.fit(data['data'], data['target'])
        assert len(conn.experiments().create.mock_calls) == 1
        create_definition = conn.experiments().create.call_args[1]
        assert create_definition[
            'name'] == GradientBoostingClassifier_EXPERIMENT_DEF['name']

        assert len(create_definition['parameters']) == len(
            GradientBoostingClassifier_EXPERIMENT_DEF['parameters'])
        for p in GradientBoostingClassifier_EXPERIMENT_DEF['parameters']:
            assert p in create_definition['parameters']
        assert len(conn.experiments().best_assignments().fetch.mock_calls) == 1
        assert len(conn.experiments().suggestions().create.mock_calls
                   ) == n_iter * folds
        assert len(conn.experiments().observations().create.mock_calls
                   ) == n_iter * folds

        assert cv.best_params_ == zero_corner(
            GradientBoostingClassifier_EXPERIMENT_DEF)
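The assertions on mock_calls above only work because sigopt.Connection has been replaced by a mock before the test runs; a minimal sketch of such a setup, with the patch target assumed rather than taken from the real test module:

from unittest import mock

# Every sigopt.Connection(...) call now returns the same MagicMock, so calls made
# inside SigOptSearchCV can be inspected via conn.experiments().create.mock_calls etc.
patcher = mock.patch("sigopt.Connection")
mock_connection_cls = patcher.start()
conn = mock_connection_cls.return_value
# ... run the test body shown above ...
patcher.stop()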
Example #4
    def __init__(self):
        self.project = os.environ["SIGOPT_PROJECT"]
        self.client = os.environ["SIGOPT_CLIENT"]
        self.api_token = os.environ["SIGOPT_API_TOKEN"]
        self.api_url = os.environ["SIGOPT_API_URL"]
        self.log_collection_enabled = bool(os.environ.get("SIGOPT_LOG_COLLECTION_ENABLED"))
        self.conn = sigopt.Connection(self.api_token, _show_deprecation_warning=False)
        self.conn.set_api_url(self.api_url)
Example #5
    def _setup(self, config):
        super()._setup(config)

        self.time_start = time.time()

        self.sigopt_exp_id = config["sigopt_experiment_id"]
        self.conn = sigopt.Connection(client_token=CLIENT_TOKEN)
        self.suggestion = self.conn.experiments(
            self.sigopt_exp_id).suggestions().create()

        assignments = self.suggestion.assignments
        print(f"Testing assignments {assignments}")

        batch_size = 0x1 << int(assignments["log2_batch_size"])
        warmup_start_iteration = round(assignments["reg_warmup_start_time"] *
                                       (assignments["num_epochs"] - 1))
        warmup_end_iteration = round(assignments["reg_warmup_end_time"] *
                                     (assignments["num_epochs"] - 1))
        reg_factor_start = math.exp(assignments["log_reg_factor_start"])
        reg_factor_end = math.exp(assignments["log_reg_factor_end"])

        self.warmup_start_iteration = warmup_start_iteration
        self.warmup_end_iteration = warmup_end_iteration

        params = dict(
            logdir=self.logdir,
            network_class=networks.gsc_lenet_vdrop,
            network_args=dict(),
            dataset_class=datasets.PreprocessedGSC,
            dataset_args=dict(),
            optim_class=torch.optim.Adam,
            optim_args=dict(lr=math.exp(assignments["log_lr"]), ),
            lr_scheduler_class=torch.optim.lr_scheduler.StepLR,
            lr_scheduler_args=dict(
                step_size=1,
                gamma=assignments["gamma_prewarmup"],
            ),
            training_iterations=assignments["num_epochs"],
            use_tqdm=False,
            batch_size_test=128,
            batch_size_train=(batch_size, batch_size),
            log_verbose_structure=False,
            # Regularization schedule keyed by iteration: reg_factor_start at iteration 0,
            # a linear ramp across the warmup window, then reg_factor_end at its end.
            reg_schedule=dict(
                ([(0, reg_factor_start)] if warmup_start_iteration > 0 else [])
                + list(zip(range(warmup_start_iteration, warmup_end_iteration),
                           np.linspace(reg_factor_start, reg_factor_end,
                                       warmup_end_iteration - warmup_start_iteration,
                                       endpoint=False)))
                + [(warmup_end_iteration, reg_factor_end)]),
            downscale_reg_with_training_set=True,
        )

        self.exp = VDropExperiment(**params)

        self.mean_accuracy = None
Example #6
    def __init__(self,
                 estimator,
                 param_domains,
                 n_iter=10,
                 scoring=None,
                 fit_params=None,
                 n_jobs=1,
                 iid=True,
                 refit=True,
                 cv=None,
                 verbose=0,
                 n_sug=1,
                 pre_dispatch='2*n_jobs',
                 error_score='raise',
                 cv_timeout=None,
                 opt_timeout=None,
                 client_token=None,
                 sigopt_connection=None):
        self.param_domains = param_domains
        self.n_iter = n_iter
        self.n_sug = n_sug
        self.cv_timeout = cv_timeout
        self.opt_timeout = opt_timeout
        self.verbose = verbose

        # Stores the mappings from categorical strings to Python values. The keys are
        # parameter names and the values are the string-to-value mappings themselves.
        self.categorical_mappings_ = {}

        self.scorer_ = None
        self.our_best_params_ = None
        self.our_best_score_ = None
        self.our_best_estimator_ = None
        self.experiment = None

        # Set up sigopt_connection
        found_token = client_token or os.environ.get('SIGOPT_API_TOKEN')
        if (not found_token) and (not sigopt_connection):
            raise ValueError(
                'Please set the `SIGOPT_API_TOKEN` environment variable, pass the ``client_token`` parameter, or pass '
                'the ``sigopt_connection`` parameter. You can find your client token here: '
                'https://sigopt.com/tokens.')
        else:
            self.sigopt_connection = (sigopt_connection if sigopt_connection
                                      else sigopt.Connection(
                                          client_token=found_token))

        super(SigOptSearchCV, self).__init__(estimator=estimator,
                                             scoring=scoring,
                                             fit_params=fit_params,
                                             n_jobs=n_jobs,
                                             iid=iid,
                                             refit=refit,
                                             cv=cv,
                                             verbose=verbose,
                                             pre_dispatch=pre_dispatch,
                                             error_score=error_score)
Example #7
def search(name, root_dir):
    conn = sigopt.Connection(
        client_token='OQDDPYDWYVUXNFQKIPVCAJRDBWCCHYRARCIBVEHJRJUNOQJQ')

    DQN_experiment = conn.experiments().create(
        name=name,
        observation_budget=300,
        parameters=[
            dict(name='h_size', type='int', bounds=dict(min=20, max=100)),
            dict(name='middle_size', type='int', bounds=dict(min=30, max=250)),
            dict(name='lstm_layers', type='int', bounds=dict(min=1, max=2)),
            dict(name='epsilon_decay_steps',
                 type='double',
                 bounds=dict(min=0.2, max=2.0)),
            dict(name='learning_starts',
                 type='int',
                 bounds=dict(min=0, max=training['episodes'] / 2)),
            dict(name='learning_freq', type='int', bounds=dict(min=1, max=15)),
            dict(name='target_update_freq',
                 type='int',
                 bounds=dict(min=500, max=5000)),
            dict(name='log_lr',
                 type='double',
                 bounds=dict(min=math.log(0.00001), max=math.log(1.0))),
            dict(name='gamma', type='double', bounds=dict(min=0.5,
                                                          max=0.9999)),
            dict(name='batch_size', type='int', bounds=dict(min=10, max=500))
        ])

    experiment = DQN_experiment
    for num in range(experiment.observation_budget):
        suggestion = conn.experiments(experiment.id).suggestions().create()

        print("Running trial number {}".format(num))
        objective_metric = run_environment(
            h_size=suggestion.assignments['h_size'],
            middle_size=suggestion.assignments['middle_size'],
            lstm_layers=suggestion.assignments['lstm_layers'],
            learning_starts=suggestion.assignments['learning_starts'],
            learning_freq=suggestion.assignments['learning_freq'],
            target_update_freq=suggestion.assignments['target_update_freq'],
            lr=math.exp(suggestion.assignments['log_lr']),
            gamma=suggestion.assignments['gamma'],
            batch_size=suggestion.assignments['batch_size'],
            replay_buffer_size=100000,
            epsilon_decay_steps=suggestion.assignments['epsilon_decay_steps'],
            final_epsilon=0.1,
            root_dir=root_dir,
            num=num)

        conn.experiments(experiment.id).observations().create(
            suggestion=suggestion.id, value=objective_metric)
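Once the loop above exhausts the observation budget, the best configuration can be read back through the same connection; a short sketch, continuing inside search() after the loop, using the best_assignments endpoint that other examples on this page also call:

    best = conn.experiments(experiment.id).best_assignments().fetch().data[0]
    print("Best assignments found: {}".format(best.assignments))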
Example #8
    def _fit(self, dataset):
        estimator = self.getOrDefault(self.estimator)
        evaluator = self.getOrDefault(self.evaluator)

        seed = self.getOrDefault(self.seed)
        nFolds = self.getOrDefault(self.numFolds)
        h = 1.0 / nFolds
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))

        eid = self.getOrDefault(self.experimentId)
        apiToken = self.getOrDefault(self.apiToken)
        conn = sigopt.Connection(apiToken)
        experiment = conn.experiments(eid).fetch()
        if not experiment.observation_budget:
            raise ValueError(
                'Error: Experiment must have `observation_budget`')

        while experiment.progress.observation_count < experiment.observation_budget:
            suggestion = conn.experiments(experiment.id).suggestions().create()
            metric_values = []
            for i in range(nFolds):
                validateLB = i * h
                validateUB = (i + 1) * h
                condition = (df[randCol] >= validateLB) & (df[randCol] <
                                                           validateUB)
                train = df.filter(~condition).cache()
                validation = df.filter(condition).cache()
                params = self._get_params_from_assignments(
                    suggestion.assignments)
                model = estimator.fit(train, params)
                metric_values.append(
                    evaluator.evaluate(model.transform(validation)))
                train.unpersist()
                validation.unpersist()

            mean = sum(metric_values) / nFolds
            # population standard deviation across folds (the API expects a stddev)
            stddev = (sum((value - mean)**2
                          for value in metric_values) / nFolds)**0.5
            conn.experiments(experiment.id).observations().create(
                suggestion=suggestion.id,
                value=mean,
                value_stddev=stddev,
            )
            experiment = conn.experiments(eid).fetch()

        bestObservation = conn.experiments(
            eid).best_assignments().fetch().data[0]
        params = self._get_params_from_assignments(bestObservation.assignments)
        bestModel = estimator.fit(dataset, params)
        return self._copyValues(SigOptOptimizedModel(bestModel))
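The _fit above assumes an experiment that already exists and carries an observation_budget; a hedged sketch of creating one up front, with parameter names, bounds, and budget as illustrative assumptions:

import sigopt

conn = sigopt.Connection("YOUR_SIGOPT_API_TOKEN")
experiment = conn.experiments().create(
    name="Spark cross-validation tuning",
    parameters=[
        dict(name="regParam", type="double", bounds=dict(min=0.0, max=1.0)),
    ],
    metrics=[dict(name="metric", objective="maximize")],
    observation_budget=30,  # required by the _fit shown above
)
# the estimator then needs this id, e.g. via the setExperimentId(...) setter used in Example #13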
Example #9
    def _setup_optimizer(self):
        if self._metric is None and self._mode:
            # If only a mode was passed, use anonymous metric
            self._metric = DEFAULT_METRIC

        if self._mode is None:
            raise ValueError("`mode` argument passed to SigOptSearch must be set.")

        if isinstance(self._metric, str):
            self._metric = [self._metric]
        if isinstance(self._mode, str):
            self._mode = [self._mode]

        if self._connection is not None:
            self.conn = self._connection
        else:
            assert (
                sgo is not None
            ), """SigOpt must be installed!
                You can install SigOpt with the command:
                `pip install -U sigopt`."""
            assert (
                "SIGOPT_KEY" in os.environ
            ), "SigOpt API key must be stored as environ variable at SIGOPT_KEY"
            # Create a connection with SigOpt API, requires API key
            self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])

        if self._experiment_id is None:
            sigopt_params = dict(
                name=self._name,
                parameters=self._space,
                parallel_bandwidth=self._max_concurrent,
            )

            if self._observation_budget is not None:
                sigopt_params["observation_budget"] = self._observation_budget

            if self._project is not None:
                sigopt_params["project"] = self._project

            if len(self._metric) > 1 and self._observation_budget is None:
                raise ValueError(
                    "observation_budget is required for an "
                    "experiment with more than one optimized metric"
                )
            sigopt_params["metrics"] = self.serialize_metric(self._metric, self._mode)

            self.experiment = self.conn.experiments().create(**sigopt_params)
        else:
            self.experiment = self.conn.experiments(self._experiment_id).fetch()
Example #10
    def __init__(self,
                 space,
                 name="Default Tune Experiment",
                 max_concurrent=1,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 **kwargs):
        assert sgo is not None, "SigOpt must be installed!"
        assert type(max_concurrent) is int and max_concurrent > 0
        assert "SIGOPT_KEY" in os.environ, \
            "SigOpt API key must be stored as environ variable at SIGOPT_KEY"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"

        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        if "use_early_stopped_trials" in kwargs:
            logger.warning(
                "`use_early_stopped_trials` is not used in SigOptSearch.")

        self._max_concurrent = max_concurrent
        self._metric = metric
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._live_trial_mapping = {}

        # Create a connection with SigOpt API, requires API key
        self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])

        self.experiment = self.conn.experiments().create(
            name=name,
            parameters=space,
            parallel_bandwidth=self._max_concurrent,
        )

        super(SigOptSearch, self).__init__(**kwargs)
Example #11
    def _setup(self, config):
        super()._setup(config)

        self.sigopt_exp_id = config["sigopt_experiment_id"]
        self.conn = sigopt.Connection(client_token=CLIENT_TOKEN)
        self.suggestion = self.conn.experiments(
            self.sigopt_exp_id).suggestions().create()

        assignments = self.suggestion.assignments
        print(f"Testing assignments {assignments}")

        batch_size = 1 << assignments["log_2_batch_size"]

        params = dict(
            network_class=networks.resnet18_mnist,
            network_args=dict(),

            dataset_class=datasets.MNIST,
            dataset_args={},

            optim_class=getattr(torch.optim, assignments["optimizer"]),
            optim_args=dict(
                lr=math.exp(assignments["log_lr"]),
                weight_decay=math.exp(assignments["log_weight_decay"])
            ),

            lr_scheduler_class=torch.optim.lr_scheduler.StepLR,
            lr_scheduler_args=dict(
                step_size=assignments["step_size"],
                gamma=assignments["gamma"],
            ),

            training_iterations=30,

            use_tqdm=False,
            batch_size_train=(batch_size, batch_size),
            batch_size_test=1000,
        )

        self.exp = experiments.Supervised(**params)

        self.mean_accuracy = None
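The assignments consumed above (log_2_batch_size, optimizer, log_lr, log_weight_decay, step_size, gamma) imply an experiment definition along these lines; a hedged sketch modeled on the experiment-creation blocks elsewhere on this page, with bounds, category names, and budget as assumptions:

import math
import sigopt

conn = sigopt.Connection(client_token=CLIENT_TOKEN)  # same placeholder token as above
experiment = conn.experiments().create(
    name="ResNet-18 MNIST",
    parameters=[
        dict(name="log_2_batch_size", type="int", bounds=dict(min=5, max=9)),
        dict(name="optimizer", type="categorical",
             categorical_values=[dict(name="Adam"), dict(name="SGD")]),
        dict(name="log_lr", type="double",
             bounds=dict(min=math.log(1e-5), max=math.log(1e-1))),
        dict(name="log_weight_decay", type="double",
             bounds=dict(min=math.log(1e-7), max=math.log(1e-2))),
        dict(name="step_size", type="int", bounds=dict(min=1, max=10)),
        dict(name="gamma", type="double", bounds=dict(min=0.1, max=1.0)),
    ],
    observation_budget=100,
)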
Example #12
def main():
    conn = sigopt.Connection(client_token=SIGOPT_API_KEY)

    experiment = conn.experiments().create(
        name=ENVIRONMENT_NAME + ' (DQN)',
        project='sigopt-examples',
        observation_budget=128,
        parameters=[
            dict(name='hm', type='int', bounds=dict(min=5, max=100)),
            dict(name='mb_size', type='int', bounds=dict(min=10, max=500)),
            dict(name='e_decay',
                 type='int',
                 bounds=dict(min=MAX_EPISODES / 10, max=MAX_EPISODES)),
            dict(name='log_lr',
                 type='double',
                 bounds=dict(min=math.log(0.00001), max=math.log(1.0))),
            dict(name='df', type='double', bounds=dict(min=0.5, max=0.9999)),
            dict(name='weight_sd',
                 type='double',
                 bounds=dict(min=0.01, max=0.5)),
            dict(name='bias_sd', type='double', bounds=dict(min=0.0, max=0.5))
        ])

    # Run the SigOpt experiment loop
    # You can monitor the experiment's progress at https://sigopt.com/experiments
    for _ in range(experiment.observation_budget):
        suggestion = conn.experiments(experiment.id).suggestions().create()

        objective_metric = run_environment(
            hidden_multiplier=suggestion.assignments['hm'],
            minibatch_size=suggestion.assignments['mb_size'],
            epsilon_decay_steps=suggestion.assignments['e_decay'],
            learning_rate=math.exp(suggestion.assignments['log_lr']),
            discount_factor=suggestion.assignments['df'],
            initial_weight_stddev=suggestion.assignments['weight_sd'],
            initial_bias_stddev=suggestion.assignments['bias_sd'])

        conn.experiments(experiment.id).observations().create(
            suggestion=suggestion.id,
            value=objective_metric,
        )
Example #13
    def createExperiment(self, paramDefinitionMap, **experiment_kwargs):
        eva = self.getOrDefault(self.evaluator)
        if 'parameters' not in experiment_kwargs:
            experiment_kwargs['parameters'] = []
            for (param, definition) in paramDefinitionMap.items():
                definition['name'] = definition.get('name', param.name)
                experiment_kwargs['parameters'].append(definition)
        if 'metrics' not in experiment_kwargs:
            experiment_kwargs['metrics'] = [{
                'name': 'metric',
                'objective': 'maximize' if eva.isLargerBetter() else 'minimize',
            }]
        apiToken = self.getOrDefault(self.apiToken)
        conn = sigopt.Connection(apiToken)
        experiment = conn.experiments().create(**experiment_kwargs)
        parameterMap = {}
        for (param, definition) in paramDefinitionMap.items():
            parameterMap[definition['name']] = param
        self.setExperimentId(experiment.id)
        self.setParameterMap(parameterMap)
        return experiment
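A hedged usage sketch for createExperiment: the Spark estimator, its Params, and the cv object are illustrative assumptions, and the SigOpt parameter name is filled in from param.name as the code above does.

from pyspark.ml.classification import LogisticRegression

lr = LogisticRegression()
param_definitions = {
    lr.regParam: dict(type="double", bounds=dict(min=0.0, max=1.0)),
    lr.elasticNetParam: dict(type="double", bounds=dict(min=0.0, max=1.0)),
}
# cv is a hypothetical instance of the estimator class this method belongs to;
# name and observation_budget flow through **experiment_kwargs to conn.experiments().create
experiment = cv.createExperiment(param_definitions,
                                 name="LogisticRegression tuning",
                                 observation_budget=20)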
Example #14
    def __init__(self,
                 space,
                 name="Default Tune Experiment",
                 max_concurrent=1,
                 reward_attr="episode_reward_mean",
                 **kwargs):
        assert sgo is not None, "SigOpt must be installed!"
        assert type(max_concurrent) is int and max_concurrent > 0
        assert "SIGOPT_KEY" in os.environ, \
            "SigOpt API key must be stored as environ variable at SIGOPT_KEY"
        self._max_concurrent = max_concurrent
        self._reward_attr = reward_attr
        self._live_trial_mapping = {}

        # Create a connection with SigOpt API, requires API key
        self.conn = sgo.Connection(client_token=os.environ['SIGOPT_KEY'])

        self.experiment = self.conn.experiments().create(
            name=name,
            parameters=space,
            parallel_bandwidth=self._max_concurrent,
        )

        super(SigOptSearch, self).__init__(**kwargs)
Example #15
def run_sigopt(box_scores,
               historical_games,
               historical_games_training_set,
               bet_info,
               sigopt_width=1,
               sigopt_depth=100):
    historical_games_by_tuple = evaluator.get_historical_games_by_tuple(
        historical_games)

    conn = sigopt.Connection()
    experiment = create_sigopt_experiment(conn, sigopt_depth)

    for _ in range(experiment.observation_budget):
        tunable_param_lists = []
        suggestion_ids = []
        for worker_id in range(sigopt_width):
            conn = sigopt.Connection()
            suggestion = conn.experiments(experiment.id).suggestions().create()
            suggestion_ids.append(suggestion.id)

            moving_averages = (
                suggestion.assignments['slow_ma'],
                suggestion.assignments['fast_ma'],
            )

            transform_params = {
                'type': EXP_TRANSFORM,
                'exp_param': suggestion.assignments['exp_param'],
            }

            tunable_param_lists.append([
                moving_averages,
                transform_params,
                suggestion.assignments['n_estimators'],
                suggestion.assignments['min_samples_split'],
                suggestion.assignments['min_samples_leaf'],
                suggestion.assignments['bet_threshold'],
            ])

        all_stats = read_data.generate_all_stats(box_scores)
        winnings_list = runner(
            historical_games_training_set,
            historical_games_by_tuple,
            bet_info,
            all_stats,
            tunable_param_lists,
        )

        for i, suggestion_id in enumerate(suggestion_ids):

            conn.experiments(experiment.id).observations().create(
                suggestion=suggestion_id,
                value=winnings_list[i][0],
                value_stddev=winnings_list[i][1],
            )

    print "Optimization done. View results at https://sigopt.com/experiment/{0}".format(
        experiment.id)

    experiment_detail = conn.experiments(experiment.id).fetch()
    best_observation = experiment_detail.progress.best_observation
    if best_observation:
        print "Best value found: {0} at {1}".format(
            best_observation.value, best_observation.assignments)

    return experiment.id
Example #16
    def __init__(self, api_token=None):
        self.conn = sigopt.Connection(api_token)
Example #17
    def __init__(self, client_token, experiment_id):
        threading.Thread.__init__(self)
        self.experiment_id = experiment_id
        self.conn = sigopt.Connection(client_token=client_token)
Example #18
test_yZ = one_hot_enc.fit_transform(test_y)
train_yZ = one_hot_enc.fit_transform(train_y)
extra_yZ = one_hot_enc.fit_transform(extra_y)

# stack train and extra on top of each other
extra_XZ = numpy.concatenate((extra_XZ, train_XZ), axis=0)
extra_yZ = numpy.concatenate((extra_yZ, train_yZ), axis=0)

# only consider 75% of this dataset for now
_, extra_XZ, _, extra_yZ = train_test_split(extra_XZ,
                                            extra_yZ,
                                            test_size=0.75,
                                            random_state=42)

# create SigOpt experiment
conn = sigopt.Connection()
experiment = conn.experiments().create(
    name='SVHN ConvNet',
    parameters=[
        {
            'name': 'filter1_w',
            'type': 'int',
            'bounds': {
                'min': 3,
                'max': 10
            }
        },
        {
            'name': 'filter1_depth',
            'type': 'int',
            'bounds': {
Example #19
def convert_rgb2gray(X):
    X_gray = numpy.zeros((32, 32, X.shape[3]))
    for i in range(X.shape[3]):
        img_gray = rgb2gray(X[:, :, :, i])
        X_gray[:, :, i] = img_gray
    return X_gray


# convert all image data to grayscale
unlab_X = convert_rgb2gray(unlab_X)
test_X = convert_rgb2gray(test_X)
train_X = convert_rgb2gray(train_X)

# setup SigOpt experiment
conn = sigopt.Connection()
experiment = conn.experiments().create(
    name='SVHN Classifier',
    project='sigopt-examples',
    metrics=[dict(name='accuracy', objective='maximize')],
    parameters=[
        {
            'name': 'filter_w',
            'type': 'int',
            'bounds': {
                'min': 7,
                'max': 10
            }
        },
        {
            'name': 'slide_w',
Example #20
            suggestion=self.suggestion.id,
            values=[{
                "name": "log_error",
                "value": math.log(1 - self.mean_accuracy)
            }, {
                "name": "log_num_nonzero_weights",
                "value": math.log(self.inference_nz)
            }, {
                "name": "duration_seconds",
                "value": duration
            }],
        )


if __name__ == "__main__":
    conn = sigopt.Connection(client_token=CLIENT_TOKEN)
    experiment = conn.experiments().create(
        name="Variational Dropout GSC",
        parameters=[
            dict(name="num_epochs", type="int", bounds=dict(min=30, max=200)),
            dict(name="log2_batch_size", type="int", bounds=dict(min=5,
                                                                 max=8)),
            dict(name="log_lr",
                 type="double",
                 bounds=dict(min=math.log(1e-5), max=math.log(3e-1))),
            dict(name="gamma_prewarmup",
                 type="double",
                 bounds=dict(min=0.5, max=1.0)),
            dict(name="gamma_warmup",
                 type="double",
                 bounds=dict(min=0.5, max=1.0)),