Example #1
    def check_solutions(self, ntimes=1, nsamples=1000):
        print('-------- REPORT --------')
        acc, std, diversity = self.report_accuracy(ntimes, nsamples)
        ent = self.report_entropy(ntimes, nsamples)

        saved_data = dict(acc=acc, std=std, diversity=diversity, ent=ent)
        write_yaml(self.work_dir / 'performance.yaml', saved_data)
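All of the examples in this section call a write_yaml helper (later also with a mkdir=True keyword). Its implementation is not shown here, so the following is only a minimal sketch of the assumed behavior, built on PyYAML and pathlib:

from pathlib import Path
from typing import Any, Union

import yaml


def write_yaml(fname: Union[str, Path], obj: Any, mkdir: bool = False) -> None:
    # Sketch of the assumed helper: optionally create the parent directory,
    # then dump the object to the given YAML file.
    fname = Path(fname)
    if mkdir:
        fname.parent.mkdir(parents=True, exist_ok=True)
    with open(fname, 'w') as f:
        yaml.dump(obj, f)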
Example #2
    def __init__(self, params: Mapping[str, Any]):
        self.params = params
        self.env: EvaluationEngineBase = import_bb_env(params['env'])
        self.db: Database[Design] = Database(keep_sorted_list_of=['cost'])
        self.rng = np.random.RandomState(seed=self.params.get('seed', 10))

        self.output_path = Path(self.params['output_path'])
        self.output_path.mkdir(parents=True)

        write_yaml(self.output_path / 'spec.yaml', self.params)

        space_dict = {}
        for param_key, param_item in self.env.params.items():
            lo, hi, step = param_item
            space_dict[param_key] = Discrete(lo, hi, step)
        self.space = Space(space_dict)
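The loop above assumes self.env.params maps each parameter name to a (lo, hi, step) triple. A hypothetical mapping (parameter names and values invented for illustration) and the search space the constructor would build from it:

# Hypothetical environment parameters: name -> (lo, hi, step)
env_params = {
    'width': (1, 100, 1),
    'length': (2, 40, 2),
}
# The constructor above would turn this into
# Space({'width': Discrete(1, 100, 1), 'length': Discrete(2, 40, 2)}).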
Example #3
    def _write_cache(self):
        if self.updated_cache:
            # re-read the yaml if the cache file already exists and has been
            # modified since we last saw it
            if self.cache_path.exists():
                stat = os.stat(str(self.cache_path))
                if self.last_cache_mtime < stat.st_mtime:
                    current_cache = read_yaml(self.cache_path)
                else:
                    current_cache = {}
            else:
                current_cache = {}
            current_cache.update(self.cache)
            # print(f'Saving cache for {self.base_design_name} ....')
            write_yaml(self.cache_path, current_cache)
            # record the new mtime after updating the cache file
            self.last_cache_mtime = os.stat(str(self.cache_path)).st_mtime
            self.updated_cache = False
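_write_cache merges the in-memory cache into the on-disk file via read_yaml, which is also not shown; a minimal sketch of the assumed counterpart to write_yaml:

from pathlib import Path
from typing import Any, Union

import yaml


def read_yaml(fname: Union[str, Path]) -> Any:
    # Sketch of the assumed helper: load and return the YAML file contents.
    with open(fname, 'r') as f:
        return yaml.safe_load(f)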
Example #4
    def save_records(self):
        # read
        df = self._read_excel()
        cache = self._read_cache()

        # update the records
        for entry, ident in self._new_entries:
            if self._contains_none(ident):
                raise ValueError('Identifier cannot contain None values because of hashing issues.')
            cache.update({ident: entry['idx']})
            df = df.append(entry, ignore_index=True)

        # write back
        df.to_excel(self.fname, na_rep='NaN', float_format='%.6f')
        write_yaml(self._cache_fname, cache)

        # clear memory
        self._new_entries.clear()
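Note that DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0, so on current pandas the update loop above needs pd.concat instead. A small self-contained sketch with invented column names:

import pandas as pd

# Hypothetical records, only to illustrate the replacement call.
df = pd.DataFrame(columns=['idx', 'cost'])
entry = {'idx': 0, 'cost': 1.25}

# Equivalent of df.append(entry, ignore_index=True) on pandas >= 2.0,
# assuming entry is a dict mapping column names to values.
df = pd.concat([df, pd.DataFrame([entry])], ignore_index=True)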
Example #5
    def __init__(self, spec_file: str = '', spec_dict: Optional[Mapping[str, Any]] = None,
                 **kwargs) -> None:
        LoggingBase.__init__(self)

        if spec_file:
            specs = read_yaml(spec_file)
        else:
            specs = spec_dict

        self.specs = specs
        params = specs['params']

        try:
            self.work_dir = params['work_dir']
        except KeyError:
            unique_name = time.strftime('%Y%m%d%H%M%S')
            self.work_dir = Path(specs['root_dir']) / f'random_{unique_name}'
            write_yaml(self.work_dir / 'params.yaml', specs, mkdir=True)

        self.ndim = params['ndim']
        self.goal = params['goal_value']
        self.mode = params['mode']
        self.input_scale = params['input_scale']

        eval_fn = params['eval_fn']
        try:
            self.fn = registered_functions[eval_fn]
        except KeyError:
            raise ValueError(f'{eval_fn} is not a valid benchmark function')

        # hacky version of passing input vectors around
        self.input_vectors_norm = [np.linspace(start=-1.0, stop=1.0, dtype='float32',
                                               num=100) for _ in range(self.ndim)]
        self.input_vectors = [self.input_scale * vec for vec in self.input_vectors_norm]
        # TODO: remove this hacky way of keeping track of delta
        self.delta = self.input_vectors_norm[0][-1] - self.input_vectors_norm[0][-2]
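With 100 points spanning [-1, 1], the spacing stored in self.delta is 2 / 99 ≈ 0.0202 in normalized units (and input_scale times that after scaling); a quick check:

import numpy as np

vec = np.linspace(start=-1.0, stop=1.0, dtype='float32', num=100)
print(vec[-1] - vec[-2])  # 2 / 99 ≈ 0.0202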
Example #6
    def __init__(self,
                 spec_file: str = '',
                 spec_dict: Optional[Mapping[str, Any]] = None,
                 load: bool = False,
                 use_time_stamp: bool = True,
                 **kwargs) -> None:
        """
        Parameters
        ----------
        spec_file: str
        spec_dict: Dict[str, Any]
        some non-obvious fields:
            elite_criteria: str
                'optim': from sorted x1, ..., xn choose the p-quantile
                'csp': constraint satisfaction is enough; from x1, ..., xn
                choose the p-quantile if it is worse than the constraint,
                otherwise choose all samples that are better than the
                constraint
            allow_repeated: bool
                True to allow repeated samples to be added to the buffer;
                otherwise all samples in the buffer have equal likelihood
                when drawn from it.
            on_policy: bool
                True to use only on-policy samples, i.e. samples from
                previous policies are not used to train the current policy
                (samples are not drawn from the CacheBuffer)
        load: bool
        kwargs: Dict[str, Any]
        """
        LoggingBase.__init__(self)

        if spec_file:
            specs = read_yaml(spec_file)
        else:
            specs = spec_dict

        self.specs = specs
        params = specs['params']

        if load:
            self.work_dir = Path(spec_file).parent
        else:
            suffix = params.get('suffix', '')
            prefix = params.get('prefix', '')
            if use_time_stamp:
                unique_name = time.strftime('%Y%m%d%H%M%S')
                unique_name = get_full_name(unique_name, prefix, suffix)
            else:
                unique_name = f'{prefix}' if prefix else ''
                if suffix:
                    unique_name = f'{unique_name}_{suffix}' if unique_name else f'{suffix}'

            self.work_dir = Path(specs['root_dir']) / f'{unique_name}'
            write_yaml(self.work_dir / 'params.yaml', specs, mkdir=True)

        self.load = load
        self.seed = params['seed']
        self.ndim = params['ndim']
        self.nsamples = params['nsamples']
        self.n_init_samples = params['n_init_samples']
        self.niter = params['niter']
        self.cut_off = params['cut_off']
        self.input_scale = params['input_scale']
        # the goal has to be positive; if it is not, we flip the mode and negate the objective below
        self.goal = params['goal_value']
        self.mode = params['mode']

        self.allow_repeated = params.get('allow_repeated', False)
        self.elite_criteria = params.get('elite_criteria', 'optim')
        self.on_policy = params.get('on_policy', False)

        if self.elite_criteria not in ['csp', 'optim']:
            raise ValueError(f"invalid elite_criteria {self.elite_criteria!r}: expected 'optim' or 'csp'")

        # disallowing repeats does not make sense when sampling is on-policy (on_policy=True forces allow_repeated=True)
        self.allow_repeated = self.on_policy or self.allow_repeated

        eval_fn = params['fn']
        try:
            fn = registered_functions[eval_fn]
            self.fn = fn
        except KeyError:
            raise ValueError(f'{eval_fn} is not a valid benchmark function')

        if self.goal < 0:
            self.mode = 'le' if self.mode == 'ge' else 'ge'
            self.fn = lambda x: -fn(x)

        # hacky version of passing input vectors around
        self.input_vectors_norm = [
            np.linspace(start=-1.0, stop=1.0, dtype='float32', num=100)
            for _ in range(self.ndim)
        ]
        self.input_vectors = [
            self.input_scale * vec for vec in self.input_vectors_norm
        ]

        self.cem = CEM(self.input_vectors,
                       dist_type=params['base_fn'],
                       average_coeff=params.get('average_coeff', 1),
                       gauss_sigma=params.get('gauss_sigma', None))
        self.buffer = CacheBuffer(self.mode,
                                  self.goal,
                                  self.cut_off,
                                  with_frequencies=self.allow_repeated)

        self.buffer_temp = {}
        self.fvals = SortedList()
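The params[...] lookups above imply a spec layout roughly like the following. This is a hypothetical example assembled only from the keys read in this constructor; every value is invented, and 'ackley' stands for whatever keys registered_functions actually contains:

specs = {
    'root_dir': 'runs/cem_example',   # hypothetical path
    'params': {
        'seed': 10,
        'ndim': 2,
        'nsamples': 50,
        'n_init_samples': 100,
        'niter': 20,
        'cut_off': 20,
        'input_scale': 5.0,
        'goal_value': 0.5,
        'mode': 'ge',
        'fn': 'ackley',               # must be a key of registered_functions
        'base_fn': 'kde',             # dist_type passed to CEM (value invented)
        'elite_criteria': 'optim',
        'allow_repeated': False,
        'on_policy': False,
    },
}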
Example #7
    processes = []
    for seed_iter in range(_args.nseeds):
        spec_seed = deepcopy(specs)
        seed = (seed_iter + 1) * 10
        spec_seed['params']['seed'] = seed
        spec_seed['params']['prefix'] = f's{seed}'
        spec_seed['params']['suffix'] = ''

        if _args.ur:
            pattern = str(root_dir / f's{seed}')
            ret_paths = glob.glob(pattern)
            if ret_paths:
                if len(ret_paths) == 1:
                    fpath = str(Path(ret_paths[0], 'params.yaml'))
                    write_yaml(fpath, spec_seed)
                    alg = alg_cls(fpath, load=_args.ur, use_time_stamp=False)
                else:
                    raise ValueError(
                        f'More than 1 path with pattern {pattern} was found')
            else:
                raise ValueError(f'No path with pattern {pattern} was found')

        else:
            alg = alg_cls(spec_dict=spec_seed, use_time_stamp=False)

        p = Process(target=alg.main)
        p.start()
        processes.append(p)

    for p in processes:
        p.join()
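The _args namespace used above (nseeds, ur) is parsed elsewhere in the script; a hypothetical argparse setup consistent with this loop might look like:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--nseeds', type=int, default=1,
                    help='number of seeds to launch in parallel')
parser.add_argument('--ur', action='store_true',
                    help='reuse existing per-seed run directories (load mode)')
_args = parser.parse_args()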
Example #8
    def __init__(self,
                 spec_file: str = '',
                 spec_dict: Optional[Mapping[str, Any]] = None,
                 load: bool = False,
                 use_time_stamp: bool = True,
                 init_buffer_path=None,
                 **kwargs) -> None:
        LoggingBase.__init__(self)

        if spec_file:
            specs = read_yaml(spec_file)
        else:
            specs = spec_dict

        self.specs = specs
        params = specs['params']

        if load:
            self.work_dir = Path(spec_file).parent
        else:
            suffix = params.get('suffix', '')
            prefix = params.get('prefix', '')
            if use_time_stamp:
                unique_name = time.strftime('%Y%m%d%H%M%S')
                unique_name = get_full_name(unique_name, prefix, suffix)
            else:
                unique_name = f'{prefix}' if prefix else ''
                if suffix:
                    unique_name = f'{unique_name}_{suffix}' if unique_name else f'{suffix}'

            self.work_dir = Path(specs['root_dir']) / f'{unique_name}'
            write_yaml(self.work_dir / 'params.yaml', specs, mkdir=True)

        self.load = load
        self.seed = params.get('seed', 10)
        self.ndim = params['ndim']
        self.bsize = params['batch_size']
        self.hiddens = params['hidden_list']
        self.niter = params['niter']
        self.goal = params['goal_value']
        self.mode = params['mode']
        self.viz_rate = self.niter // 10
        self.lr = params['lr']
        self.nepochs = params['nepochs']
        self.nsamples = params['nsamples']
        self.n_init_samples = params['n_init_samples']
        self.init_nepochs = params['init_nepochs']
        self.cut_off = params['cut_off']
        self.beta = params['beta']
        self.nr_mix = params['nr_mix']
        self.base_fn = params['base_fn']
        self.only_pos = params['only_positive']
        # whether to run 1000 epochs of training for the later round of iteration
        self.full_training = params['full_training_last']
        self.input_scale = params['input_scale']
        self.fixed_sigma = params.get('fixed_sigma', None)
        self.on_policy = params.get('on_policy', False)
        self.problem_type = params.get('problem_type', 'csp')

        self.allow_repeated = params.get('allow_repeated', False)
        self.allow_repeated = self.on_policy or self.allow_repeated

        self.important_sampling = params.get('important_sampling', False)
        self.visited_dist: Optional[nn.Module] = None
        self.visited_fixed_sigma = params.get('visited_fixed_sigma', None)
        self.visited_nr_mix = params.get('visited_nr_mix', None)

        self.explore_coeff = params.get('explore_coeff', None)
        self.nepoch_visited = params.get('nepoch_visited', -1)

        self.normalize_weight = params.get('normalize_weight', True)
        self.add_ent_before_norm = params.get(
            'add_entropy_before_normalization', False)
        self.weight_type = params.get('weight_type', 'ind')

        self.model_visited = self.explore_coeff is not None or self.important_sampling

        if self.model_visited and self.nepoch_visited == -1:
            raise ValueError(
                'nepoch_visited should be specified when a model is '
                'learning visited states')

        self.init_buffer_paths = init_buffer_path

        eval_fn = params['eval_fn']
        try:
            self.fn = registered_functions[eval_fn]
        except KeyError:
            raise ValueError(f'{eval_fn} is not a valid benchmark function')

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f'device: {self.device}')
        self.cpu = torch.device('cpu')
        self.model: Optional[nn.Module] = None
        self.buffer = None
        self.opt = None

        # hacky version of passing input vectors around
        self.input_vectors_norm = [
            np.linspace(start=-1.0, stop=1.0, dtype='float32', num=100)
            for _ in range(self.ndim)
        ]
        self.input_vectors = [
            self.input_scale * vec for vec in self.input_vectors_norm
        ]
        # TODO: remove this hacky way of keeping track of delta
        self.delta = self.input_vectors_norm[0][-1] - self.input_vectors_norm[0][-2]

        # keep track of lo and hi bounds for the indices
        self.params_min = np.array([0] * self.ndim)
        self.params_max = np.array([len(x) - 1 for x in self.input_vectors])

        self.fvals = SortedList()