Exemplo n.º 1
0
  def test_create_uses_session_if_provided(self):
    """A Connection built with an explicit session must route its HTTP traffic through it."""
    fake_session = mock.Mock()
    conn = Connection(
      client_token='client_token',
      session=fake_session,
      _show_deprecation_warning=False,
    )
    # The requestor must hold the exact session object we handed in.
    assert conn.impl.requestor.session is fake_session

    fake_response = mock.Mock()
    fake_response.status_code = 200
    fake_response.text = '{}'
    fake_session.request.return_value = fake_response

    # No traffic before the first API call; exactly one request afterwards.
    fake_session.request.assert_not_called()
    conn.experiments().fetch()
    fake_session.request.assert_called_once()
Exemplo n.º 2
0
    def output_score(self, experiment, assignments, score, fout, sigopt_post=False):
        """Print the score line, append it to *fout*, and optionally report it back to SigOpt."""
        chosen = tuple(assignments[p.name] for p in experiment.parameters)
        line = "score: {suggestion} = {score}\n".format(suggestion=chosen, score=score)
        print(line, end="")
        fout.write(line)

        if sigopt_post is True:
            # Post the observation, then discard the consumed suggestion.
            conn = Connection(client_token=self.client_token)
            conn.experiments(experiment.id).observations().create(assignments=assignments, value=score)
            conn.experiments(experiment.id).suggestions().delete()
 def create_experiment(self):
     """Create the SigOpt experiment used to tune this classifier's hyperparameters."""
     conn = Connection(client_token=self.client_token)
     hyperparams = CLASSIFIER_TYPE_TO_PARAMS[self.classifier_type]
     try:
         return conn.experiments().create(
             name="Example Classifier",
             parameters=hyperparams,
         )
     except ApiException as api_err:
         # A 403 mentioning the account email means the plan's experiment quota is exhausted;
         # surface the existing experiments so the user can clean up or upgrade.
         if api_err.status_code == 403 and '*****@*****.**' in str(api_err):
             existing = conn.experiments().fetch().data
             if existing:
                 links = ['https://sigopt.com/experiment/{0}'.format(exp.id) for exp in existing]
                 raise Exception(
                     "You have existing experiments on sigopt.com: {0}."
                     " You have exceeded the number of experiments that can be created under your plan."
                     " Please visit https://sigopt.com/pricing to learn about plans."
                     .format(links)
                 )
         raise
Exemplo n.º 4
0
 def create_experiment(self):
     """Create a SigOpt experiment for optimizing the classifier hyperparameters."""
     conn = Connection(client_token=self.client_token)
     param_spec = CLASSIFIER_TYPE_TO_PARAMS[self.classifier_type]
     try:
         return conn.experiments().create(name="Example Classifier", parameters=param_spec)
     except ApiException as err:
         # 403 + the account email in the message signals the experiment quota was hit.
         quota_exceeded = err.status_code == 403 and '*****@*****.**' in str(err)
         if quota_exceeded:
             previous = conn.experiments().fetch().data
             if previous:
                 urls = [
                     'https://sigopt.com/experiment/{0}'.format(item.id)
                     for item in previous
                 ]
                 raise Exception(
                     "You have existing experiments on sigopt.com: {0}."
                     " You have exceeded the number of experiments that can be created under your plan."
                     " Please visit https://sigopt.com/pricing to learn about plans."
                     .format(urls))
         raise
Exemplo n.º 5
0
    def output_score(self, experiment, assignments, score, fout, sigopt_post=False):
        """Report the score: print it, write it to *fout*, and optionally post it to SigOpt."""
        values = tuple(assignments[p.name] for p in experiment.parameters)
        message = "score: {suggestion} = {score}\n".format(suggestion=values, score=score)
        print(message, end='')
        fout.write(message)

        if sigopt_post is True:
            # Record the observation on SigOpt and retire the suggestion it came from.
            conn = Connection(client_token=self.client_token)
            conn.experiments(experiment.id).observations().create(
                assignments=assignments,
                value=score,
            )
            conn.experiments(experiment.id).suggestions().delete()
 def sigopt_generator(self, experiment):
     """Yield up to NUM_SIGOPT_SUGGESTIONS optimal parameter configurations from SigOpt."""
     # NOTE(review): `xrange` implies this snippet targets Python 2; kept as-is
     # to preserve behavior. The generator is lazy, so a connection is opened
     # only when the caller actually advances it.
     for _ in xrange(NUM_SIGOPT_SUGGESTIONS):
         conn = Connection(client_token=self.client_token)
         yield conn.experiments(experiment.id).suggestions().create().assignments.to_json()
Exemplo n.º 7
0
 def sigopt_generator(self, experiment):
     """Generate optimal parameter configurations using SigOpt, one suggestion at a time."""
     remaining = NUM_SIGOPT_SUGGESTIONS
     while remaining > 0:
         # A fresh connection per suggestion keeps each request independent.
         connection = Connection(client_token=self.client_token)
         new_suggestion = connection.experiments(experiment.id).suggestions().create()
         yield new_suggestion.assignments.to_json()
         remaining -= 1
    (stdoutdata,stderrdata) = process.communicate()
    sys.stderr.write(stderrdata)
    return float(stdoutdata.strip())


if __name__ == '__main__':
  # Command-line entry point: repeatedly ask SigOpt for a suggestion, evaluate
  # it with the user-supplied command, and report the observation back.
  parser = argparse.ArgumentParser()
  parser.add_argument(
    '--command',
    required=True,
    help="The command to run the function whose parameters you would "
    "like to optimize. Should accept parameters as command line argument and output only the evaluated metric at the "
    "suggested point.")
  parser.add_argument(
    '--experiment_id',
    required=True,
    help="The parameters of this experiment should be the "
    "same type and name of the command line arguments to your executable file.")
  parser.add_argument(
    '--client_token',
    required=True,
    help="Find your CLIENT_TOKEN at https://sigopt.com/user/profile")
  cli_args = parser.parse_args()

  connection = Connection(client_token=cli_args.client_token)
  experiment = connection.experiments(cli_args.experiment_id).fetch()
  # Discard any stale open suggestions before starting a fresh loop.
  connection.experiments(cli_args.experiment_id).suggestions().delete(state="open")
  evaluator = SubProcessEvaluator(cli_args.command)

  # Suggest -> evaluate -> observe, forever (terminate with Ctrl-C).
  while True:
    suggestion = connection.experiments(experiment.id).suggestions().create()
    print('Evaluating at suggested assignments: {0}'.format(suggestion.assignments))
    value = evaluator.evaluate_metric(suggestion.assignments)
    print('Reporting observation of value: {0}'.format(value))
    connection.experiments(experiment.id).observations().create(
      suggestion=suggestion.id,
      value=value,
    )
class SigOptMazeFrontierSolver(object):
    """
    This is an object which allows for the study of random parameters on the maze construction/solution problem.  It
    allows for the left, up and right construction probabilities to be manipulated in some domain, while the down
    probability is held constant.
    """
    def __init__(self,
                 api_token=None,
                 parameter_domain=None,
                 maze_size=(30, 30),
                 num_tests=100,
                 down_prob=1.0):
        """Create a new SigOpt testing tool.

        The api_token is a long string which can be found on your account at www.sigopt.com/tokens.
            You can either pass that string here or you can modify this file to save it on your local machine.

        parameter_domain describes the allowable values for the relative probabilities of constructing the maze with
            "left", "up" or "right" moves.  These probabilities are relative to the probability of moving "down" which
            is always fixed at 1.0 (by default).  The domain should be a numpy.array of shape (3, 2), where:
                * the first row denotes the lower and upper bound for the "left" domain,
                * the second row denotes the lower and upper bound for the "up" domain,
                * the third row denotes the lower and upper bound for the "right" domain,

        :param api_token: The API token used to access SigOpt (or SIGOPT_API_TOKEN can be modified above)
        :type api_token: str
        :param parameter_domain: Domain on which the solver should consider the three parameters
        :type parameter_domain: numpy.array of shape (3, 2)
        :param maze_size: size of the maze to randomly generate and test (must be at least 2 rows and 2 columns)
        :type maze_size: tuple of length 2
        :param num_tests: How many random mazes should be constructed to estimate the mean and standard deviation
        :type num_tests: int > 1, (will default to the value stored at construction)
        :param down_prob: The relative probability of moving down (against which other probabilities are compared)
        :type down_prob: float > 0
        """
        self.conn = Connection(client_token=api_token or SIGOPT_API_TOKEN)
        self.experiment = None
        # BUG FIX: the original `parameter_domain or numpy.array(...)` truth-tests
        # the argument; for a multi-element numpy array that raises
        # "ValueError: The truth value of an array ... is ambiguous", so every
        # caller-supplied (3, 2) domain crashed here.  Test for None explicitly.
        if parameter_domain is None:
            self.domain = numpy.array([[.01, 100]] * 3)
        else:
            self.domain = parameter_domain
        if not numpy.all(self.domain[:, 1] > self.domain[:, 0]):
            raise AssertionError(
                'The minimum values (column 1) in the domain must be less than the maximum (column 2)'
            )
        # NOTE(review): these asserts vanish under `python -O`; kept as-is to
        # preserve behavior, but explicit raises would be more robust.
        assert type(maze_size) in (list,
                                   tuple) and len(maze_size) == 2 and all(
                                       [m > 1 for m in maze_size])
        self.maze_size = maze_size
        assert type(num_tests) == int and num_tests > 1
        self.num_tests = num_tests
        assert down_prob > 0
        self.down_prob = float(down_prob)

    def _execute_assignments(self, num_tests, l, u, r):
        """Estimate mean and standard deviation of the searched fraction for the given left/up/right probabilities."""
        fractions = []
        solver = RightTurnSolver()
        for _ in range(num_tests):
            solver.generate_random_maze(*self.maze_size,
                                        prob=(l, u, r, self.down_prob))
            solver.solve()
            # Fraction of the maze cells the solver had to visit.
            fractions.append(len(solver.path) / float(numpy.prod(solver.shape)))
        return numpy.mean(fractions), numpy.sqrt(numpy.var(fractions))

    def execute_low_discrepancy_testing(self,
                                        num_points,
                                        num_tests=None,
                                        log_sample=True,
                                        verbose=False,
                                        halton_kwargs=None):
        """Run a low discrepancy sampling over the domain to generate an understanding of the feasible region.

        :param num_points: The number of points at which to sample
        :type num_points: int > 0
        :param num_tests: How many random mazes should be constructed to estimate the mean and standard deviation
        :type num_tests: int > 1, (will default to the value stored at construction)
        :param log_sample: Should this low discrepancy sampling occur on a logarithmically scaled version of the domain
        :type log_sample: bool
        :param verbose: A function to call to receive the current iteration and determine if progress should be output
        :type verbose: callable or bool (if bool and True will be output after each tenth test)
        :param halton_kwargs: kwargs to pass to the generate_halton_points function
        :type halton_kwargs: dictionary (defaults to {'shift': numpy.random.random()})
        :return: Arrays recording the mean, standard deviation results, and the left, up and right values tested
        :rtype: 5 numpy.array objects
        """
        num_tests = num_tests or self.num_tests
        # A bare `verbose=True` gets the documented default reporter: every 10th iteration.
        verbose = (lambda it: it % 10 == 0) if verbose is True else verbose
        assert verbose in (False, None) or callable(verbose)
        # BUG FIX: the original conditional was inverted -- it mapped None to {}
        # (never applying the documented random shift) and silently *replaced*
        # any caller-supplied dict with {'shift': ...}.
        if halton_kwargs is None:
            halton_kwargs = {'shift': numpy.random.random()}
        if log_sample:
            # Sample uniformly in log space, then map back to the original domain.
            test_points = numpy.exp(
                generate_halton_points(num_points, numpy.log(self.domain),
                                       **halton_kwargs))
        else:
            test_points = generate_halton_points(num_points, self.domain,
                                                 **halton_kwargs)

        mean_vec = numpy.empty(num_points)
        sd_vec = numpy.empty(num_points)
        start = default_timer()
        for k, test_point in enumerate(test_points):
            mean_vec[k], sd_vec[k] = self._execute_assignments(
                num_tests, *test_point)
            if verbose and verbose(k):
                end = default_timer()
                # Parenthesized single-argument print behaves identically under
                # Python 2 (statement) and Python 3 (function).
                print('{0:5d} iterations completed, {1:4.1f} seconds since last report'.format(
                    k, end - start))
                start = end

        left_vec, up_vec, right_vec = test_points.T
        return mean_vec, sd_vec, left_vec, up_vec, right_vec

    def create_sigopt_experiment(self, name=None):
        """Create (and store on self.experiment) the SigOpt experiment over the three log-scaled turn probabilities."""
        # All three parameters share the same log-scaled bounds; each gets its own copy.
        shared_bounds = {'max': numpy.log(100), 'min': numpy.log(0.01)}
        parameters = [
            {
                'name': parameter_name,
                'bounds': dict(shared_bounds),
                'type': 'double',
            }
            for parameter_name in ('left_prob_log', 'up_prob_log', 'right_prob_log')
        ]
        self.experiment = self.conn.experiments().create(
            name=name or 'Scalarized Mouse Maze Solver',
            project='sigopt-examples',
            metrics=[
                dict(name='weighted_sum_scalarization', objective='maximize')
            ],
            parameters=parameters,
        )

    def _extract_data_from_experiment(self, experiment_id=None):
        """Pull every observation for an experiment and unpack it into numpy arrays.

        Returns mean, std-dev, left, up and right arrays; the probabilities are
        recovered by exponentiating the log-scaled assignments.
        """
        experiment_id = experiment_id or self.experiment.id
        total = self.conn.experiments(
            experiment_id).fetch().progress.observation_count
        mean_vec = numpy.empty(total)
        sd_vec = numpy.empty(total)
        right_vec = numpy.empty(total)
        up_vec = numpy.empty(total)
        left_vec = numpy.empty(total)

        all_observations = self.conn.experiments(
            experiment_id).observations().fetch().iterate_pages()
        for idx, obs in enumerate(all_observations):
            mean_vec[idx] = obs.metadata['mean']
            sd_vec[idx] = obs.metadata['std_dev']
            left_vec[idx] = numpy.exp(obs.assignments['left_prob_log'])
            up_vec[idx] = numpy.exp(obs.assignments['up_prob_log'])
            right_vec[idx] = numpy.exp(obs.assignments['right_prob_log'])
        return mean_vec, sd_vec, left_vec, up_vec, right_vec