    def runner(self, concurrent_index, job_id, delay, duration, child_count,
               reports, search_type):
        ws_name = "quick-test"
        exper_name = "qtexper"

        fn = "code/miniSweeps.yaml"
        yd = file_utils.load_yaml(fn)
        hd = yd[constants.HPARAM_DIST]

        # simulate a controller for each concurrent runner
        hparam_search = HParamSearch()

        for index in range(child_count):
            # create a new RUN record
            run_name = self.store.start_run(ws_name,
                                            exper_name=exper_name,
                                            is_parent=False,
                                            job_id=job_id,
                                            node_index=0,
                                            search_type=search_type,
                                            search_style="dynamic")

            os.environ["XT_RUN_NAME"] = run_name
            os.environ["XT_WORKSPACE_NAME"] = ws_name
            os.environ["XT_EXPERIMENT_NAME"] = exper_name

            fake_context = cmd_core.build_mock_context(self.config, job_id,
                                                       ws_name, exper_name,
                                                       run_name)
            metric_name = fake_context.primary_metric

            xt_run = Run(self.config, self.store, supress_normal_output=True)
            xt_run.direct_run = True
            xt_run.context = fake_context

            #print("  starting: concurrent_index={}, child_index={}".format(concurrent_index, index))
            # delay start
            sleep_time = delay * random.random()
            time.sleep(sleep_time)

            hp_set = xt_run.get_next_hp_set_in_search(
                hd, search_type, hparam_search=hparam_search)
            self._assert("channels1" in hp_set)

            # log HPARAMS
            xt_run.log_hparams(hp_set)

            for i in range(reports):
                run_time = (duration / reports) * random.random()
                time.sleep(run_time)

                # log METRICS
                fake_metric = random.random()
                md = {"epoch": 1 + i, "acc": fake_metric}
                xt_run.log_metrics(md, step_name="epoch", stage="test")

            # mark the run as completed
            xt_run.close()
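
A minimal sketch (not part of the original test) of how runner() might be driven: the concurrent_index parameter and the per-runner HParamSearch suggest one thread per simulated controller. The instance name, job id, delay/duration/count values, and the "random" search type below are placeholder assumptions.

import threading

def launch_runners(test, job_id, search_type="random", concurrent_count=3):
    # one simulated controller per thread; argument order matches runner()
    threads = [
        threading.Thread(
            target=test.runner,
            args=(ci, job_id, 0.5, 2.0, 4, 3, search_type))
        for ci in range(concurrent_count)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()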
Example #2
import os

import yaml

# NOTE: the Hparam helper class used by parse_hp_section() below is defined
# elsewhere and is not shown in this excerpt.
class HyperparameterHandler():
    def __init__(self):
        """
        Preprocesses the runspec before the call to yaml.load().
        Manages communication with XT regarding hyperparameters.
        """
        self.uploaded_hp_config_filename = 'uploaded_hp_config.yaml'
        self.downloaded_hp_config_filename = 'downloaded_hp_config.yaml'
        self.xt_run_name = os.getenv("XT_RUN_NAME")
        self.xt_run = None
        self.in_hp_search = False
        self.randint_in_spec = False
        if self.xt_run_name:
            from xtlib.run import Run as XTRun
            self.xt_run = XTRun()
            if os.path.isfile(self.downloaded_hp_config_filename):
                self.in_hp_search = True
        self.hparams = []

    def split_spec(self, run_spec_file):
        # Read the spec into 3 sections.
        pre_hp_section = []
        hp_section = []
        post_hp_section = []
        current_section = pre_hp_section
        for line in run_spec_file:
            if current_section == pre_hp_section:
                # Look for the start of the hp section.
                if line.startswith('hyperparameters:'):
                    current_section = hp_section
            elif current_section == hp_section:
                # Look for the end of the hp section.
                if line[0] not in ' -#\n\r':
                    current_section = post_hp_section
            else:
                assert current_section == post_hp_section
            # Append this line to the current section.
            current_section.append(line)
        return pre_hp_section, hp_section, post_hp_section

    def preprocess(self, run_spec_file):
        """ Modifies the hyperparameter section of a runspec before yaml.load() is called on it. """

        # Read the spec into 3 sections.
        pre_hp_section, hp_section, post_hp_section = self.split_spec(run_spec_file)

        # Modify the HP section, if present.
        if len(hp_section) > 0:
            self.hparams = self.parse_hp_section(hp_section)
            if self.in_hp_search:
                self.read_hp_config_file()
            else:
                for hp in self.hparams:
                    hp.choose_value(self.in_hp_search)
            parsed_hp_section = ['hyperparameters:\n']
            for hp in self.hparams:
                parsed_hp_section += hp.format_chosen_value()
            parsed_hp_section.append('\n')
        else:
            parsed_hp_section = []

        # Reassemble the modified runspec.
        spec_str = ''.join(pre_hp_section + parsed_hp_section + post_hp_section)

        # Check for randint.
        self.randint_in_spec = 'randint' in spec_str

        # Return the modified runspec.
        return spec_str

    def parse_hp_section(self, hp_section_in):
        """
        Parses the hyperparameters section of a runspec.
        Returns a list of Hparam objects. For example...
        Input string hp_section_in:
            hyperparameters:
              - name: &rscale
                  ordered_tuning_values: [2, 4, 8, 16, 32]
                  tuned_value: 32
              - name: &units
                  ordered_tuning_values: [128, 192, 256, 384, 512]
                  tuned_value: 384
        Output returned:
            List of Hparam objects:
                hp[0].name = 'rscale'
                     .values = [2, 4, 8, 16, 32]
                     .tuned_value = 32
                hp[1].name = 'units'
                     .values = [128, 192, 256, 384, 512]
                     .tuned_value = 384
        """
        hparams = []
        name_line = ''
        values_line = ''
        i = 0
        for full_line in hp_section_in:
            line = full_line.strip()
            if line.startswith('hyperparameters:') or (len(line) == 0) or (line[0] == '#'):
                continue
            if i == 0:
                if line.startswith('- name:'):
                    name_line = line
                    i = 1
                else:
                    raise SyntaxError('First line of a hyperparameter definition must start with "- name:"\n=====> {}'.format(line))
            elif i == 1:
                if (line.startswith('ordered_tuning_values:')) or (line.startswith('unordered_tuning_values:')):
                    values_line = line
                    i = 2
                else:
                    raise SyntaxError('Second line of a hyperparameter definition must start with "ordered_tuning_values:" or "unordered_tuning_values:"\n=====> {}'.format(line))
            elif i == 2:
                if line.startswith('tuned_value:'):
                    hp = Hparam(name_line, values_line, line)
                    hparams.append(hp)
                    i = 0
                else:
                    raise SyntaxError('Third line of a hyperparameter definition must start with "tuned_value:"\n=====> {}'.format(line))
            else:
                raise SyntaxError('Unexpected line in the hyperparameters section of the runspec:{}'.format(line))
        return hparams

    def log_chosen_values(self, logger):
        """ Logs the chosen HP values to the console for reference, and (optionally) to XT. """
        if len(self.hparams) > 0:
            hparam_dict = {}
            logger.write_line("Chosen hyperparameter values:")
            for hp in self.hparams:
                hp.log_chosen_value(logger)
                hparam_dict[hp.name] = hp.chosen_value
            logger.write_line('')
            if self.xt_run:
                self.xt_run.log_hparams(hparam_dict)

    def write_hp_config_file(self):
        """ Generates the file that XT needs to support HP tuning. """
        assert len(self.hparams) > 0, 'Hyperparameters must be specified.'
        # Warn the user if randint is missing from a hyperparameter search.
        if not self.randint_in_spec:
            response = None
            while (response != 'y') and (response != 'n'):
                print("WARNING: Hyperparameter tuning typically requires randomization,")
                print("which is usually achieved by setting the environment or agent seed to randint,")
                print("but randint is missing from this runspec. Are you sure you want to proceed? [y/n]")
                response = input()
            if response == 'n':
                exit(0)
        # Write the hp config file for the job launcher.
        with open(self.uploaded_hp_config_filename, 'w') as hp_config_file:
            hp_config_file.write('hyperparameter-distributions:\n')
            for hp in self.hparams:
                value_list = []
                for value in hp.values:
                    value_list.append(hp.yaml_value_from_python(value))
                values_str = ', '.join(value_list)
                hp_config_file.write('  {}: [{}]\n'.format(hp.name, values_str))

    def read_hp_config_file(self):
        """ Reads the file containing the HP values chosen by XT. """
        assert len(self.hparams) > 0, 'Hyperparameters must be specified.'
        print('Reading chosen hp values from {}'.format(self.downloaded_hp_config_filename))
        with open(self.downloaded_hp_config_filename, 'r') as f:
            chosen_hp_value_dict = yaml.load(f, Loader=yaml.Loader)
        hp_runset = chosen_hp_value_dict['hyperparameter-runset']
        # for hp_name in hp_runset:
        #     print('{}  {}'.format(hp_name, hp_runset[hp_name]))
        assert len(hp_runset) == len(self.hparams)
        for hp in self.hparams:
            hp.chosen_value = hp_runset[hp.name]
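
A minimal usage sketch (not from the original source). It assumes the Hparam helper class used by parse_hp_section() is importable alongside HyperparameterHandler, that XT_RUN_NAME is unset (so no XT run is created), and that the hyperparameter name and values are placeholders; the two YAML shapes at the end are inferred from write_hp_config_file() and read_hp_config_file() above.

spec_lines = [
    "experiment: demo\n",
    "hyperparameters:\n",
    "  - name: &rscale\n",
    "      ordered_tuning_values: [2, 4, 8, 16, 32]\n",
    "      tuned_value: 32\n",
    "processes: 1\n",
]

handler = HyperparameterHandler()
spec_str = handler.preprocess(spec_lines)
# spec_str now contains a rewritten hyperparameters section with the chosen value.

# Shape of the file produced by write_hp_config_file() (uploaded_hp_config.yaml):
#   hyperparameter-distributions:
#     rscale: [2, 4, 8, 16, 32]
#
# Shape of the file consumed by read_hp_config_file() (downloaded_hp_config.yaml):
#   hyperparameter-runset:
#     rscale: 8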
Example #3
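
The snippet below references args and run without defining them. A minimal, assumed setup is sketched first: the argparse defaults are placeholders, and run = Run() follows the no-argument construction shown in Example #2 (Example #1 instead passes a config and store).

import argparse
import time

import numpy as np

from xtlib.run import Run

parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--epochs", type=int, default=3)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--channels1", type=int, default=32)
parser.add_argument("--channels2", type=int, default=64)
parser.add_argument("--kernel_size", type=int, default=3)
parser.add_argument("--mlp-units", type=int, default=128)
parser.add_argument("--weight-decay", type=float, default=0.0)
parser.add_argument("--optimizer", default="sgd")
parser.add_argument("--mid-conv", type=int, default=0)
args = parser.parse_args()

# placeholder setup; Run() with no arguments mirrors Example #2
run = Run()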
hp_dict = {
    "seed": args.seed,
    "batch-size": args.batch_size,
    "epochs": args.epochs,
    "lr": args.lr,
    "momentum": args.momentum,
    "channels1": args.channels1,
    "channels2": args.channels2,
    "kernel_size": args.kernel_size,
    "mlp-units": args.mlp_units,
    "weight-decay": args.weight_decay,
    "optimizer": args.optimizer,
    "mid-conv": args.mid_conv
}

run.log_hparams(hp_dict)

with open("userapp.txt", "at") as tfile:
    tfile.write("starting...\n")

for epoch in range(1, 1 + args.epochs):
    accuracy = np.random.random()
    with open("userapp.txt", "at") as tfile:
        tfile.write("epoch=" + str(epoch) + "\n")

    print("epoch={}, test-acc={}".format(epoch, accuracy))
    run.log_metrics({"epoch": epoch, "test-acc": accuracy})

    time.sleep(2)

with open("userapp.txt", "at") as tfile: