Example #1
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
# pylint: disable=super-on-old-class

import numpy as np
# Local imports
import gp_core
import kernel
from utils.option_handler import get_option_specs

# Some basic parameters for simple GPs.
basic_gp_args = [
    get_option_specs('kernel_type', False, 'se',
                     'Specify type of kernel. Should be se or poly'),
    get_option_specs(
        'mean_func_type', False, 'median',
        ('Specify the type of mean function. Should be mean, median, const '
         'or zero. If const, specify value in mean_func_const.')),
    get_option_specs('mean_func_const', False, 0.0,
                     'The constant value to use if mean_func_type is const.'),
    get_option_specs(
        'noise_var_type', False, 'tune',
        ('Specify how to obtain the noise variance. Should be tune, label '
         'or value. Specify appropriate value in noise_var_label or '
         'noise_var_value.')),
    get_option_specs(
        'noise_var_label', False, 0.05,
        'The fraction of label variance to use as noise variance.'),
    get_option_specs('noise_var_value', False, 0.1,
                     'The (absolute) value to use as noise variance.'),
]
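
A minimal usage sketch (not part of the excerpt above), assuming that
load_options from utils.option_handler accepts a list of specs and returns a
namespace whose attributes default to each spec's third argument:

from utils.option_handler import get_option_specs, load_options

demo_args = [
    get_option_specs('kernel_type', False, 'se',
                     'Specify type of kernel. Should be se or poly'),
]
options = load_options(demo_args)
print(options.kernel_type)  # prints 'se'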
Example #2
from chemist_opt.blackbox_optimiser import blackbox_opt_args
from mols.mol_gp import mol_gp_args, MolGPFitter
from mols.mol_kernels import *  # kernel names
from datasets.loaders import get_initial_pool
from chemist_opt.gp_bandit import GPBandit, gp_bandit_args
from utils.general_utils import block_augment_array
from utils.reporters import get_reporter
from utils.option_handler import get_option_specs, load_options

# Options for acquisition optimizers:
# - random_ga: randomly select subsets and synthesize
# - ToBeAdded

chemist_specific_args = [
    get_option_specs(
        'chemist_acq_opt_method', False, 'ga',
        'Which method to use when optimising the acquisition. Will override acq_opt_method'
        + ' in the arguments for gp_bandit.'),
]


all_chemist_args = chemist_specific_args + gp_bandit_args + \
                     blackbox_opt_args + mol_gp_args
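
A hypothetical sketch of the override described in the help string above: once
the options are loaded, chemist_acq_opt_method replaces the acq_opt_method
that gp_bandit_args would otherwise supply.

options = load_options(all_chemist_args)
options.acq_opt_method = options.chemist_acq_opt_method  # e.g. 'ga'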


class Chemist(GPBandit):
    """
    Analog of NASBOT class.
    To not have it inherit from any GPBandit,
    must merge and simplify all functionality.
    """
    def __init__(self,
Example #3
_DFLT_RECTIFIER_SWAP = MLP_RECTIFIERS

_PRIMITIVE_PROB_MASSES = {'inc_single': 0.1,
                          'dec_single': 0.1,
                          'inc_en_masse': 0.1,
                          'dec_en_masse': 0.1,
                          'swap_layer': 0.2,
                          'wedge_layer': 0.1,
                          'remove_layer': 0.1,
                          'branch': 0.2,
                          'skip': 0.2,
                         }
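
A hypothetical sketch (not from the source) of how unnormalised masses such as
_PRIMITIVE_PROB_MASSES can be used to sample a mutation primitive: normalise
the masses into probabilities, then draw with numpy.random.choice.

import numpy as np

names = sorted(_PRIMITIVE_PROB_MASSES.keys())
masses = np.array([_PRIMITIVE_PROB_MASSES[name] for name in names])
probs = masses / masses.sum()                 # normalise to sum to 1
primitive = np.random.choice(names, p=probs)  # e.g. 'swap_layer'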

nn_modifier_args = [
  # Change fractions for increasing the number of units in layers.
  get_option_specs('single_inc_change_frac', False, _DFLT_CHANGE_FRAC,
    'Default change fraction when increasing a single layer.'),
  get_option_specs('single_dec_change_frac', False, _DFLT_CHANGE_FRAC,
    'Default change fraction when decreasing a single layer.'),
  get_option_specs('en_masse_inc_change_frac', False, _DFLT_CHANGE_FRAC,
    'Default change fraction when increasing layers en_masse.'),
  get_option_specs('en_masse_dec_change_frac', False, _DFLT_CHANGE_FRAC,
    'Default change fraction when decreasing layers en_masse.'),
  # Number of networks to spawn by changing number of units in a single layer.
  get_option_specs('spawn_single_inc_num_units', False, _DFLT_CHANGE_NUM_UNITS_SPAWN,
    'Default number of networks to spawn by increasing # units in a single layer.'),
  get_option_specs('spawn_single_dec_num_units', False, _DFLT_CHANGE_NUM_UNITS_SPAWN,
    'Default number of networks to spawn by decreasing # units in a single layer.'),
  # Number of networks to spawn by adding or deleting a single layer.
  get_option_specs('spawn_add_layer', False, _DFLT_CHANGE_LAYERS_SPAWN,
    'Default number of networks to spawn by adding a layer.'),
  get_option_specs('spawn_del_layer', False, _DFLT_CHANGE_LAYERS_SPAWN,
Example #4
  [email protected]
"""

# pylint: disable=invalid-name
# pylint: disable=no-member
# pylint: disable=abstract-method

from numpy.random import choice
# Local imports
from opt.blackbox_optimiser import BlackboxOptimiser, blackbox_opt_args
from utils.general_utils import sample_according_to_exp_probs
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter

ga_specific_opt_args = [
    get_option_specs('num_mutations_per_epoch', False, 50,
                     'Number of mutations per epoch.'),
    get_option_specs('num_candidates_to_mutate_from', False, -1,
                     'The number of candidates to choose the mutations from.'),
    get_option_specs(
        'fitness_sampler_scaling_const', False, 2,
        'The scaling constant for sampling according to exp_probs.'),
]

ga_opt_args = ga_specific_opt_args + blackbox_opt_args


class GAOptimiser(BlackboxOptimiser):
    """ Class for optimisation based on Genetic algorithms. """
    def __init__(self,
                 func_caller,
                 worker_manager,
Example #5
# Local
from opt.gp_bandit import get_all_gp_bandit_args_from_gp_args
from gp.euclidean_gp import euclidean_gp_args
from utils.option_handler import get_option_specs, load_options
from parse.config_parser import config_parser
from utils.reporters import get_reporter
from exd.worker_manager import SyntheticWorkerManager
from exd.experiment_caller import EuclideanFunctionCaller, FunctionCaller
from opt.gp_bandit import EuclideanGPBandit
from exd import domains

dragonfly_args = [
  get_option_specs('config', False, None, 'Path to the json or pb config file.'),
  get_option_specs('options', False, None, 'Path to the options file.'),
  get_option_specs('max_capital', False, 0.0,
                   'Maximum capital to be used in the experiment.'),
  get_option_specs('budget', False, 0.0,
                   'The budget of evaluations. If max_capital is none, will use this as '
                   'max_capital.'),
]
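
Before the declaration below, a hypothetical usage sketch of maximise_function,
assuming (as in the Dragonfly API) that it returns the maximum value found, the
maximiser, and a history object:

import numpy as np

def neg_sphere(x):
    # Toy objective, maximised at the origin.
    return -float(np.sum(np.asarray(x) ** 2))

max_val, max_pt, history = maximise_function(
    neg_sphere, max_capital=20, domain_bounds=[[-5, 5], [-5, 5]])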


def maximise_function(func,
                      max_capital,
                      domain=None,
                      domain_bounds=None,
                      config=None,
                      options=None,
                      hp_tune_criterion='post_sampling',
                      hp_tune_method='slice',
                      init_capital=None,
                      init_capital_frac=None,
Example #6
import sys
import numpy as np
from scipy.linalg import solve_triangular
# Local imports
from utils.general_utils import stable_cholesky, draw_gaussian_samples, \
                                project_symmetric_to_psd_cone
from utils.oper_utils import direct_ft_maximise, random_maximise, random_sample
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter


# These are mandatory requirements. Every GP implementation should probably use them.
mandatory_gp_args = [
  get_option_specs('hp_tune_criterion', False, 'ml',
                   'Which criterion to use when tuning hyper-parameters. Other ' +
                   'options are post_sampling and post_mean.'),
  get_option_specs('ml_hp_tune_opt', False, 'direct',
                   'Which optimiser to use when maximising the tuning criterion.'),
  get_option_specs('hp_tune_max_evals', False, -1,
                   'How many evaluations to use when maximising the tuning criterion.'),
  get_option_specs('handle_non_psd_kernels', False, '',
                   'How to handle kernels that are non-psd.')
  ]

def _solve_lower_triangular(A, b):
  """ Solves Ax=b when A is lower triangular. """
  return solve_triangular(A, b, lower=True)

def _solve_upper_triangular(A, b):
  """ Solves Ax=b when A is upper triangular. """
  return solve_triangular(A, b, lower=False)
Example #7
# pylint: disable=invalid-name
# pylint: disable=relative-import


import sys
import numpy as np

from utils.general_utils import stable_cholesky, draw_gaussian_samples
from utils.optimisers import direct_ft_maximise
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter


# These are mandatory requirements. Every GP implementation should probably use them.
mandatory_gp_args = [
  get_option_specs('hp_tune_criterion', False, 'ml',
                   'Which criterion to use when tuning hyper-parameters.'),
  get_option_specs('hp_tune_opt', False, 'direct',
                   'Which optimiser to use when maximising the tuning criterion.'),
  get_option_specs('hp_tune_max_evals', False, -1,
                   'How many evaluations to use when maximising the tuning criterion.'),
  ]


def _check_feature_label_lengths_and_format(X, Y):
  """ Checks that X and Y have matching lengths and the expected shapes. """
  if X.shape[0] != len(Y):
    raise ValueError('Size of X (' + str(X.shape) + ') and Y (' +
                     str(Y.shape) + ') do not match.')
  if len(X.shape) != 2 or len(Y.shape) != 1:
    raise ValueError('X should be an nxd matrix and Y should be an n-vector. ' +
                     'Given shapes of X, Y are: ' + str(X.shape) + ', ' + str(Y.shape))
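
A quick hypothetical exercise of the validator above:

import numpy as np

X = np.random.rand(10, 3)   # n x d feature matrix
Y = np.random.rand(10)      # n-vector of labels
_check_feature_label_lengths_and_format(X, Y)      # passes silently
_check_feature_label_lengths_and_format(X, Y[:5])  # raises ValueError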
Example #8
# Local imports
from gp import gp_core, mf_gp
import gp.kernel as gp_kernel
from utils.ancillary_utils import get_list_of_floats_as_str
from utils.general_utils import get_sublist_from_indices, map_to_bounds
from utils.option_handler import get_option_specs, load_options
from utils.oper_utils import random_sample_from_discrete_domain
from utils.reporters import get_reporter

_DFLT_KERNEL_TYPE = 'se'

# Some basic parameters for Euclidean GPs.
basic_euc_gp_args = [
  get_option_specs('kernel_type', False, 'default',
                   'Specify type of kernel. This depends on the application.'),
  get_option_specs('use_same_bandwidth', False, False,
                   ('If true, will use same bandwidth on all dimensions. Applies only '
                    'when kernel_type is se or matern. Default=False.')),
]
# Parameters for the SE kernel.
se_gp_args = []
# Parameters for the Matern kernel.
matern_gp_args = [
  get_option_specs('matern_nu', False, -1.0,
                   'Specify the nu value for the matern kernel. If negative, will fit.'),
]
# Parameters for the Polynomial kernel.
poly_gp_args = [
  get_option_specs('use_same_scalings', False, False,
                   'If true uses same scalings on all dimensions. Default is False.'),
Example #9
gp_bandit_args = [
  get_option_specs('acq', False, 'ei-ucb-ttei',
                   'Which acquisition to use: ts, ucb, ei, ttei, bucb. If using multiple, '
                   'give them as a hyphen separated list, e.g. ucb-ts-ei-ttei.'),
  get_option_specs('acq_probs', False, 'adaptive',
                   'With what probability should we choose each strategy given in acq. If '
                   '"uniform" we will use uniform probabilities and if "adaptive" we will '
                   'use adaptive probabilities which weight acquisitions according to how '
                   'well they do.'),
  get_option_specs('acq_opt_method', False, 'direct',
                   'Which optimiser to use when maximising the acquisition function.'),
  get_option_specs('handle_parallel', False, 'halluc',
                   'How to handle parallelisations. Should be halluc or naive.'),
  get_option_specs('acq_opt_max_evals', False, -1,
                   'Number of evaluations when maximising acquisition. If negative, uses '
                   'the default value.'),
  # The following are for managing GP hyper-parameters. They override hp_tune_criterion
  # and ml_hp_tune_opt from the GP args.
  get_option_specs('gpb_hp_tune_criterion', False, 'ml-post_sampling',
                   'Which criterion to use when tuning hyper-parameters. Other options '
                   'are post_sampling and post_mean.'),
  get_option_specs('gpb_hp_tune_probs', False, '0.3-0.7',
                   'With what probability should we choose each strategy given in '
                   'hp_tune_criterion. If "uniform" we will use uniform probabilities and '
                   'if "adaptive" we will use adaptive probabilities which weight '
                   'strategies according to how well they do.'),
  get_option_specs('gpb_ml_hp_tune_opt', False, 'direct',
                   'Which optimiser to use when maximising the tuning criterion.'),
  get_option_specs('gpb_post_hp_tune_method', False, 'slice',
                   'Which sampling to use when maximising the tuning criterion. The other '
                   'option is nuts.'),
  get_option_specs('gpb_post_hp_tune_burn', False, -1,
                   'How many initial samples to ignore during sampling.'),
  get_option_specs('gpb_post_hp_tune_offset', False, 25,
                   'How many samples to ignore between samples.'),
  get_option_specs('rand_exp_sampling_replace', False, False,
                   'Whether to replace already sampled values or not in rand_exp_sampling.'),
  # For multi-fidelity BO.
  get_option_specs('mf_strategy', False, 'boca',
                   'Which multi-fidelity strategy to use. Should be one of {boca}.'),
  # The following are perhaps not so important. Some have not been implemented yet.
  get_option_specs('shrink_kernel_with_time', False, 0,
                   'If True, shrinks the kernel with time so that we don\'t get stuck.'),
  get_option_specs('perturb_thresh', False, 1e-4,
                   ('If the next point chosen is too close to an existing point by this '
                    'times the diameter, then we will perturb the point a little bit '
                    'before querying. This is mainly to avoid numerical stability issues.')),
  get_option_specs('track_every_time_step', False, 0,
                   'If 1, tracks every time step.'),
  get_option_specs('next_pt_std_thresh', False, 0.005,
                   ('If the std of the queried point falls below this times the kernel '
                    'scale frequently, we will reduce the bandwidth range.')),
]
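
Several of these options encode lists as hyphen separated strings; a
hypothetical sketch of how such values can be decoded:

acqs = 'ei-ucb-ttei'.split('-')                   # ['ei', 'ucb', 'ttei']
probs = [float(p) for p in '0.3-0.7'.split('-')]  # [0.3, 0.7]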
Example #10
# pylint: disable=super-on-old-class

import numpy as np

# Local imports
from gp.kernel import CoordinateProductKernel, PolyKernel, SEKernel
from gp.gp_core import GP, GPFitter, mandatory_gp_args
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter
from utils.ancillary_utils import get_list_of_floats_as_str


# Define hyper-parameters for Multi-fidelity GPs.
mf_gp_args = [
  # Fidelity kernel
  get_option_specs('fidel_kernel_type', False, 'se',
    'Type of kernel for the fidelity space. Should be se or poly'),
  get_option_specs('fidel_use_same_bandwidth', False, False,
    ('If true, will use same bandwidth on all fidelity dimensions. Applies only when '
     'fidel_kernel_type is se. Default=False.')),
  get_option_specs('fidel_use_same_scalings', False, False,
    ('If true, will use same scaling on all fidelity dimensions. Applies only when '
     'fidel_kernel_type is poly. Default=False.')),
  get_option_specs('fidel_poly_order', False, 1,
    ('Order of the polynomial for the fidelity kernel. Default = 1 (linear kernel)')),
  # Domain kernel
  get_option_specs('domain_kernel_type', False, 'se',
    'Type of kernel for the domain. Should be se or poly'),
  get_option_specs('domain_use_same_bandwidth', False, False,
    ('If true, will use same bandwidth on all domain dimensions. Applies only when '
     'domain_kernel_type is se. Default=False.')),
  get_option_specs('domain_use_same_scalings', False, False,
Example #11
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
# pylint: disable=abstract-class-little-used

from argparse import Namespace
import time
import numpy as np

# Local imports
import gpb_utils
from utils.general_utils import map_to_bounds
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter

blackbox_opt_args = [
    get_option_specs('max_num_steps', False, 1e7,
                     'If exceeds this many evaluations, stop.'),
    get_option_specs('capital_type', False, 'return_value',
                     'Should be one of return_value, cputime, or realtime'),
    get_option_specs(
        'init_method', False, 'randomkmeans',
        'How to initialise. Should be either random, random_kmeans, or latin.'
    ),
    get_option_specs('num_init_evals', False, 0, (
        'The amount of evaluations for initialisation. If <0, will use default.'
    )),
    get_option_specs('num_workers', False, 1,
                     'The number of workers in parallel.'),
    get_option_specs(
        'mode', False, 'asy',
        'If \'syn\', uses synchronous parallelisation, else asynchronous.'),
    get_option_specs(
Example #12
import numpy as np

# Local imports
from bo import acquisitions
from blackbox_optimiser import blackbox_opt_args, BlackboxOptimiser
from gp.kernel import SEKernel
from gp.gp_core import GP, mandatory_gp_args
from gp.gp_instances import SimpleGPFitter, all_simple_gp_args
from utils.optimisers import random_maximise
from utils.option_handler import get_option_specs, load_options
from utils.function_caller import get_function_caller_from_function
from utils.reporters import get_reporter

gp_bandit_args = [
    # Acquisition
    get_option_specs('acq', False, None,
                     'Which acquisition to use: TS, UCB, BUCB, UCBPE.'),
    get_option_specs(
        'acq_opt_criterion', False, 'rand',
        'Which optimiser to use when maximising the acquisition function.'),
    get_option_specs(
        'acq_opt_max_evals', False, -1,
        'Number of evaluations when maximising acquisition. If negative uses default value.'
    ),
    # The following are perhaps not so important.
    get_option_specs(
        'shrink_kernel_with_time', False, 0,
        'If True, shrinks the kernel with time so that we don\'t get stuck.'),
    get_option_specs('perturb_thresh', False, 1e-4, (
        'If the next point chosen is too close to an existing point by this times the '
        'diameter, then we will perturb the point a little bit before querying. This is '
        'mainly to avoid numerical stability issues.')),
Example #13
from utils.reporters import get_reporter

exd_core_args = [
  get_option_specs('max_num_steps', False, 1e7,
                   'If exceeds this many evaluations, stop.'),
  get_option_specs('capital_type', False, 'return_value',
                   'Should be one of return_value, cputime, or realtime.'),
  get_option_specs('mode', False, 'asy',
                   'If \'syn\', uses synchronous parallelisation, else asynchronous.'),
  get_option_specs('build_new_model_every', False, 17,
                   'Updates the model via a suitable procedure every this many iterations.'),
  get_option_specs('report_results_every', False, 13,
                   'Report results every this many iterations.'),
  # Initialisation
  get_option_specs('init_capital', False, None,
                   'The capital to be used for initialisation.'),
  get_option_specs('init_capital_frac', False, None,
                   'The fraction of the total capital to be used for initialisation.'),
  get_option_specs('num_init_evals', False, 20,
                   'The number of evaluations for initialisation. If <0, will use default.'),
  # The amount of effort we will use for initialisation is prioritised by init_capital,
  # init_capital_frac and num_init_evals.
  get_option_specs('prev_evaluations', False, None,
                   'Data for any previous evaluations.'),
  get_option_specs('get_initial_qinfos', False, None,
                   'A function to obtain initial qinfos.'),
  get_option_specs('init_method', False, 'rand',
                   'Method to obtain initial queries. Is used if get_initial_qinfos is None.'),
]
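
A hypothetical sketch of the prioritisation described in the comment above:

def _get_init_capital(options, total_capital):
    # init_capital takes precedence, then init_capital_frac; otherwise the
    # caller falls back to num_init_evals.
    if options.init_capital is not None:
        return options.init_capital
    if options.init_capital_frac is not None:
        return options.init_capital_frac * total_capital
    return None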

mf_exd_args = [ \
Example #14
mandatory_gp_args = [
  get_option_specs('hp_tune_criterion', False, 'ml',
                   'Which criterion to use when tuning hyper-parameters. Other options '
                   'are post_sampling and post_mean.'),
  get_option_specs('hp_tune_probs', False, 'uniform',
                   'With what probability should we choose each strategy given in '
                   'hp_tune_criterion. If "uniform" we will use uniform probabilities and '
                   'if "adaptive" we will use adaptive probabilities which weight '
                   'strategies according to how well they do.'),
  get_option_specs('ml_hp_tune_opt', False, 'direct',
                   'Which optimiser to use when maximising the tuning criterion.'),
  get_option_specs('hp_tune_max_evals', False, -1,
                   'How many evaluations to use when maximising the tuning criterion.'),
  get_option_specs('handle_non_psd_kernels', False, 'guaranteed_psd',
                   'How to handle kernels that are non-psd.'),
  # The mean and noise variance of the GP.
  get_option_specs('mean_func_type', False, 'tune',
                   ('Specify the type of mean function. Should be mean, median, const, '
                    'zero, or tune. If const, specify value in mean_func_const.')),
  get_option_specs('mean_func_const', False, 0.0,
                   'The constant value to use if mean_func_type is const.'),
  get_option_specs('noise_var_type', False, 'tune',
                   ('Specify how to obtain the noise variance. Should be tune, label or '
                    'value. Specify appropriate value in noise_var_label or '
                    'noise_var_value.')),
  get_option_specs('noise_var_label', False, 0.05,
                   'The fraction of label variance to use as noise variance.'),
  get_option_specs('noise_var_value', False, 0.1,
                   'The (absolute) value to use as noise variance.'),
  get_option_specs('post_hp_tune_method', False, 'slice',
                   'Which sampling to use when maximising the tuning criterion. The other '
                   'option is nuts.'),
  get_option_specs('post_hp_tune_burn', False, -1,
                   'How many initial samples to ignore during sampling.'),
  get_option_specs('post_hp_tune_offset', False, 25,
                   'How many samples to ignore between samples.'),
]
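
A hypothetical sketch of the noise_var_type convention described above: 'label'
uses a fraction of the label variance, 'value' an absolute value, and 'tune'
treats the noise variance as a hyper-parameter.

import numpy as np

def _get_noise_var(options, Y):
    if options.noise_var_type == 'label':
        return options.noise_var_label * np.var(Y)
    if options.noise_var_type == 'value':
        return options.noise_var_value
    return None  # 'tune': left to the hyper-parameter tuning procedure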
Example #15
from argparse import Namespace
import numpy as np

# Local imports
from chemist_opt import gpb_acquisitions
from chemist_opt.blackbox_optimiser import blackbox_opt_args, BlackboxOptimiser
from gp.gp_core import GP
from gp.gp_instances import SimpleGPFitter, all_simple_gp_args
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter

gp_bandit_args = [
    # Acquisition
    get_option_specs(
        'acq', False, 'ei',
        'Which acquisition to use: ts, ucb, ei, ttei, bucb, ucbpe. If using multiple '
        + 'give them as a hyphen separated list e.g. ucb-ts-ei-ttei'),
    get_option_specs(
        'acq_probs', False, 'uniform',
        'With what probability should we choose each strategy given in acq.'),
    get_option_specs(
        'acq_opt_method', False, 'rand',
        'Which optimiser to use when maximising the acquisition function.'),
    get_option_specs(
        'handle_parallel', False, 'halluc',
        'How to handle parallelisations. Should be halluc or naive.'),
    get_option_specs(
        'acq_opt_max_evals', False, -1,
        'Number of evaluations when maximising acquisition. If negative uses default value.'
    ),
    # The following are perhaps not so important.
Example #16
import numpy as np
# Local imports
from opt.blackbox_optimiser import blackbox_opt_args
from opt import gpb_acquisitions
from nn.nn_gp import nn_gp_args, NNGPFitter
from nn.nn_modifiers import get_nn_modifier_from_args
from nn.nn_comparators import get_default_otmann_distance
from opt.nn_opt_utils import get_initial_pool
from opt.gp_bandit import GPBandit, gp_bandit_args
from utils.general_utils import block_augment_array
from utils.reporters import get_reporter
from utils.option_handler import get_option_specs, load_options

nasbot_specific_args = [
    get_option_specs(
        'nasbot_acq_opt_method', False, 'ga',
        'Which method to use when optimising the acquisition. Will override acq_opt_method'
        + ' in the arguments for gp_bandit.'),
    get_option_specs(
        'ga_mutation_op_distro', False, 'd0.5-0.25-0.125-0.075-0.05',
        'The distribution used when sampling GA mutation operations, given as a '
        'hyphen separated string.'),
]

all_nasbot_args = nasbot_specific_args + gp_bandit_args + \
                        blackbox_opt_args + nn_gp_args
all_nn_random_bandit_args = all_nasbot_args


# NN GP Bandit Class --------------------------------------------------------------------
class NASBOT(GPBandit):
    """ NN GP Bandit. """
Example #17
# pylint: disable=arguments-differ

import numpy as np
# Local
from gp import gp_core
from gp.gp_instances import basic_gp_args
from nn import nn_comparators
from utils.ancillary_utils import get_list_of_floats_as_str
from utils.reporters import get_reporter
from utils.option_handler import get_option_specs, load_options

_DFLT_KERNEL_TYPE = 'lpemd_sum'

nn_gp_specific_args = [
    get_option_specs(
        'dist_type', False, 'lp-emd',
        'The type of distance. This should be lp, emd or lp-emd.'),
    # Use given coefficients by default.
    get_option_specs('choose_mislabel_struct_coeffs', False, 'use_given', (
        'How to choose the mislabel and struct coefficients. Should be one of '
        'tune_coeffs or use_given. In the latter case, mislabel_coeffs and struct_coeffs '
        'should be non-empty.')),
    get_option_specs(
        'compute_kernel_from_dists', False, True,
        'Should you compute the kernel from pairwise distances whenever possible.'
    ),
    get_option_specs(
        'mislabel_coeffs', False, '1.0-1.0-1.0-1.0',
        'The mislabel coefficients specified as a string. If -1, it means we will tune.'
    ),
Example #18
"""

import numpy as np
from mols.mol_kernels import MolKernel
from gp import gp_core
from gp.gp_instances import basic_gp_args
from utils.ancillary_utils import get_list_of_floats_as_str
from utils.reporters import get_reporter
from utils.option_handler import get_option_specs, load_options

_DFLT_KERNEL_TYPE = 'wl_kernel'

# Each get_option_specs call takes: name, required, default, help.
mol_gp_specific_args = [
    get_option_specs(
        'cont_par', False, '0.1-0.25-0.61-1.5',
        'Continuous parameter for single-parameter kernels. If -1, it means we will tune.'
    ),
    get_option_specs(
        'int_par', False, 3,
        'Integer parameter for single-parameter kernels. If -1, it means we will tune.'
    ),
    # get_option_specs('non_assignment_penalty', False, 1.0,
    # 'The non-assignment penalty.'),
]  # check what these should be

mol_gp_args = gp_core.mandatory_gp_args + basic_gp_args + mol_gp_specific_args

# GP implementation for molecules ---------------------------------------------


class MolGP(gp_core.GP):
Example #19
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=abstract-class-little-used

from argparse import Namespace
import time
import numpy as np
# Local imports
from opt.function_caller import EVAL_ERROR_CODE
from nn.nn_examples import generate_many_neural_networks
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter

blackbox_opt_args = [
    get_option_specs('max_num_steps', False, 1e7,
                     'If exceeds this many evaluations, stop.'),
    get_option_specs('capital_type', False, 'return_value',
                     'Should be one of return_value, cputime, or realtime'),
    get_option_specs('num_init_evals', False, 20, (
        'The number of evaluations for initialisation. If <0, will use default.'
    )),
    get_option_specs(
        'mode', False, 'asy',
        'If \'syn\', uses synchronous parallelisation, else asynchronous.'),
    get_option_specs(
        'build_new_model_every', False, 7,
        'Updates the GP via a suitable procedure every this many iterations.'),
    get_option_specs('report_results_every', False, 1,
                     'Report results every this many iterations.'),
]
Example #20
from copy import deepcopy
import time
import numpy as np

# Local imports
from mf_func import MFOptFunction
from mf_gp import all_mf_gp_args, MFGPFitter
from mf_gpb_utils import acquisitions, fidelity_choosers
from mf_gpb_utils import is_an_opt_fidel_query, latin_hc_sampling
from utils.optimisers import direct_ft_maximise, random_maximise
from utils.option_handler import get_option_specs, load_options
from utils.reporters import get_reporter

mf_gp_bandit_args = [
    get_option_specs('capital_type', False, 'given', (
        'The type of capital to be used. If \'given\', it will use the cost specified. '
        'Could be one of given, cputime, or realtime')),
    get_option_specs(
        'max_iters', False, 1e5,
        'The maximum number of iterations, regardless of capital.'),
    get_option_specs('gamma_0', False, '1', (
        'The multiplier in front of the default threshold value for switching. Should be '
        'a scalar or the string \'adapt\'.')),
    get_option_specs(
        'acq', False, 'mf_gp_ucb',
        'Which acquisition to use. Should be one of mf_gp_ucb, gp_ucb or gp_ei'
    ),
    get_option_specs(
        'acq_opt_criterion', False, 'rand',
        'Which optimiser to use when maximising the acquisition function.'),
    get_option_specs(