Code example #1
File: multi.py Project: macoldibelli/opytimizer
    def _create_functions(self, pointers):
        """Creates functions class instances using the provided list of pointers.

        Args:
            pointers (list): A list of pointers used to create the respective Function instances.

        Returns:
            A list of Function class instances.

        Raises:
            RuntimeError: If `pointers` is not a list.

        """

        # Creates an empty functions list
        functions = []

        # Checks whether `pointers` is a list
        if isinstance(pointers, list):
            # Iterates through every item in the list
            for pointer in pointers:
                # Creates a Function instance for each item
                functions.append(Function(pointer=pointer))

        # If not, raises a RuntimeError
        else:
            e = "Property 'pointers' needs to be a list."
            logger.error(e)
            raise RuntimeError(e)

        return functions
Code example #2
File: opt.py Project: gugarosa/dropout_rbm
def optimize(opt, target, n_agents, n_iterations, hyperparams):
    """Abstracts all Opytimizer's mechanisms into a single method.

    Args:
        opt (Optimizer): An Optimizer-child class.
        target (callable): The method to be optimized.
        n_agents (int): Number of agents.
        n_iterations (int): Number of iterations.
        hyperparams (dict): Dictionary of hyperparameters.

    Returns:
        A History object containing all the optimization information.

    """

    # Creating the SearchSpace
    space = SearchSpace(n_agents=n_agents,
                        n_variables=1,
                        n_iterations=n_iterations,
                        lower_bound=[0.0001],
                        upper_bound=[1])

    # Creating the Optimizer
    optimizer = opt(hyperparams=hyperparams)

    # Creating the Function
    function = Function(pointer=target)

    # Creating the optimization task
    task = Opytimizer(space=space, optimizer=optimizer, function=function)

    return task.start(store_best_only=True)
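
As a quick illustration, here is a hedged usage sketch for the optimize() helper above; the PSO import path and the square target are assumptions rather than part of the original example:

from opytimizer.optimizers.swarm.pso import PSO


# Hypothetical target: sum of squared variables, to be minimized
def square(x):
    return (x ** 2).sum()


# Hyperparameter names mirror the PSO setup in code example #19
history = optimize(PSO, square, n_agents=10, n_iterations=20,
                   hyperparams={'w': 0.7, 'c1': 1.7, 'c2': 1.7})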
Code example #3
def test_optimizer_run():
    new_optimizer = optimizer.Optimizer()

    with pytest.raises(NotImplementedError):
        target_fn = Function(lambda x: x)
        search_space = SearchSpace()
        new_optimizer.run(search_space, target_fn)
Code example #4
    def _build(self, functions, constraints):
        """This method serves as the object building process.

        One can define several commands here that do not necessarily
        need to be in its initialization.

        Args:
            functions (list): Pointers to functions that will return the fitness value.
            constraints (list): List of constraints to be applied to the fitness function.

        """

        logger.debug('Running private method: build().')

        # Populating pointers with real functions
        self.functions = [Function(f, constraints) for f in functions]

        # Creating a multi-objective method strategy as the real pointer
        self._create_multi_objective()

        # Set built variable to 'True'
        self.built = True

        # Logging attributes
        logger.debug(f'Functions: {[f.name for f in self.functions]} | Weights: {self.weights} | Built: {self.built}')
Code example #5
File: weighted.py Project: mrkaiser/opytimizer
    def _build(self, constraints, penalty):
        """This method serves as the object building process.

        One can define several commands here that do not necessarily
        need to be in its initialization.

        Args:
            constraints (list): List of constraints to be applied to the fitness function.
            penalty (float): Penalization factor when a constraint is not valid.

        """

        logger.debug('Running private method: build().')

        # Populating pointers with real functions
        self.functions = [
            Function(f, constraints, penalty) for f in self.functions
        ]

        # Creating a multi-objective method strategy as the real pointer
        self._create_multi_objective()

        # Set built variable to 'True'
        self.built = True

        # Logging attributes
        logger.debug('Functions: %s | Weights: %s | Built: %s',
                     [f.name for f in self.functions], self.weights,
                     self.built)
Code example #6
def optimize_umda(target, n_agents, n_variables, n_iterations, hyperparams):
    """Abstracts Opytimizer's Univariate Marginal Distribution Algorithm into a single method.

    Args:
        target (callable): The method to be optimized.
        n_agents (int): Number of agents.
        n_variables (int): Number of variables.
        n_iterations (int): Number of iterations.
        hyperparams (dict): Dictionary of hyperparameters.

    Returns:
        A History object containing all the optimization information.

    """

    # Creating the BooleanSpace
    space = BooleanSpace(n_agents=n_agents,
                         n_iterations=n_iterations,
                         n_variables=n_variables)

    # Creating the Function
    function = Function(pointer=target)

    # Creating UMDA's optimizer
    optimizer = UMDA(hyperparams=hyperparams)

    # Creating the optimization task
    task = Opytimizer(space=space, optimizer=optimizer, function=function)

    return task.start(store_best_only=True)
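
A hedged usage sketch for optimize_umda(); the one_max target is hypothetical, and the UMDA hyperparameter names are an assumption about this Opytimizer version:

# Hypothetical boolean target: counts zeros, so minimizing it maximizes ones
def one_max(x):
    return float(x.size - x.sum())


# Hyperparameter names ('p_selection', 'lower_bound', 'upper_bound') are assumed
history = optimize_umda(one_max, n_agents=10, n_variables=8, n_iterations=20,
                        hyperparams={'p_selection': 0.75,
                                     'lower_bound': 0.05,
                                     'upper_bound': 0.95})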
Code example #7
def test_store_best_agent_only():
    pso = PSO()
    n_iters = 10
    target_fn = Function(pointer=square)
    space = SearchSpace(lower_bound=[-10], upper_bound=[10], n_iterations=n_iters)

    history = Opytimizer(space, pso, target_fn).start(store_best_only=True)
    assert not hasattr(history, 'agents')

    assert hasattr(history, 'best_agent')
    assert len(history.best_agent) == n_iters
Code example #8
def test_store_all_agents():
    pso = PSO()
    n_iters = 10
    n_agents = 2
    target_fn = Function(pointer=square)
    space = SearchSpace(lower_bound=[-10], upper_bound=[10], n_iterations=n_iters, n_agents=n_agents)

    history = Opytimizer(space, pso, target_fn).start()
    assert hasattr(history, 'agents')

    # Ensuring that the number of entries matches the number of iterations
    # and that all agents are kept for each iteration
    assert len(history.agents) == n_iters
    assert all(len(iter_agents) == n_agents for iter_agents in history.agents)

    assert hasattr(history, 'best_agent')
    assert len(history.best_agent) == n_iters
Code example #9
def optimize_gp(target, n_trees, n_terminals, n_variables, n_iterations,
                min_depth, max_depth, functions, lb, ub, hyperparams):
    """Abstracts Opytimizer's Genetic Programming into a single method.

    Args:
        target (callable): The method to be optimized.
        n_trees (int): Number of trees.
        n_terminals (int): Number of terminals.
        n_variables (int): Number of variables.
        n_iterations (int): Number of iterations.
        min_depth (int): Minimum depth of trees.
        max_depth (int): Maximum depth of trees.
        functions (list): Functions' nodes.
        lb (list): List of lower bounds.
        ub (list): List of upper bounds.
        hyperparams (dict): Dictionary of hyperparameters.

    Returns:
        A History object containing all the optimization information.

    """

    # Creating the TreeSpace
    space = TreeSpace(n_trees=n_trees,
                      n_terminals=n_terminals,
                      n_variables=n_variables,
                      n_iterations=n_iterations,
                      min_depth=min_depth,
                      max_depth=max_depth,
                      functions=functions,
                      lower_bound=lb,
                      upper_bound=ub)

    # Creating the Function
    function = Function(pointer=target)

    # Creating GP's optimizer
    optimizer = GP(hyperparams=hyperparams)

    # Creating the optimization task
    task = Opytimizer(space=space, optimizer=optimizer, function=function)

    return task.start(store_best_only=True)
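
A hedged usage sketch for optimize_gp(); the sphere target and the string function-node names are assumptions, while the hyperparameter values mirror code example #28:

# Hypothetical target: sum of squared variables, to be minimized
def sphere(x):
    return (x ** 2).sum()


# Function-node names ('SUM', 'SUB', 'MUL', 'DIV') are assumed for this version
history = optimize_gp(sphere, n_trees=10, n_terminals=2, n_variables=2,
                      n_iterations=20, min_depth=2, max_depth=5,
                      functions=['SUM', 'SUB', 'MUL', 'DIV'],
                      lb=[-10, -10], ub=[10, 10],
                      hyperparams={'p_reproduction': 0.25, 'p_mutation': 0.1,
                                   'p_crossover': 0.2, 'prunning_ratio': 0.0})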
Code example #10
def test_hook():
    pso = PSO()
    n_iters = 10
    counter = 0

    target_fn = Function(pointer=square)
    space = SearchSpace(lower_bound=[-10], upper_bound=[10], n_iterations=n_iters, n_agents=15)

    def eval_hook(arg_opt, arg_space, arg_target_fn):
        assert arg_opt is pso
        assert arg_space is space
        assert arg_target_fn is target_fn

        nonlocal counter
        counter += 1

    Opytimizer(space, pso, target_fn).start(pre_evaluation=eval_hook)

    # The hook runs once per iteration, plus once at initialization
    assert counter == n_iters + 1
Code example #11
            # Cost will be the loss accumulated from model's fitting
            cost += fit(model, loss, opt,
                        X_train[start:end], Y_train[start:end])

    # Predicting samples from the validation set
    preds = predict(model, X_val)

    # Calculating accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Creating Function's object
f = Function(pointer=logistic_regression)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 2

# Number of running iterations
n_iterations = 100

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = (0, 0)
upper_bound = (1, 1)

# Creating the SearchSpace class
Code example #12
# NOTE: only the GS and GridSpace imports appear in the original snippet;
# the remaining ones are assumed, with Sphere taken from the companion
# opytimark package
from opytimark.markers.n_dimensional import Sphere

from opytimizer import Opytimizer
from opytimizer.core.function import Function
from opytimizer.optimizers.misc.gs import GS
from opytimizer.spaces.grid import GridSpace

# Number of decision variables
n_variables = 2

# And also the size of the step in the grid
step = 0.1

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [-10, -10]
upper_bound = [10, 10]

# Creating the GridSpace class
s = GridSpace(n_variables=n_variables,
              step=step,
              lower_bound=lower_bound,
              upper_bound=upper_bound)

# Creating GS optimizer
p = GS()

# Creating Function's object
f = Function(pointer=Sphere())

# Finally, we can create an Opytimizer class
o = Opytimizer(space=s, optimizer=p, function=f)

# Running the optimization task
history = o.start()
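
Because start() is called without store_best_only here, the returned history keeps per-iteration data; mirroring the assertions in code example #7, the final iteration's best agent can be inspected afterwards:

# The last entry holds the best agent of the final iteration
print(history.best_agent[-1])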
Code example #13
    opf.fit(X_train, Y_train)

    # If data is labeled, one can propagate predicted labels instead of only the cluster identifiers
    opf.propagate_labels()

    # Predicts new data
    preds, _ = opf.predict(X_test)

    # Calculating accuracy
    acc = g.opf_accuracy(Y_test, preds)

    return 1 - acc


# Creating Function's object
f = Function(pointer=unsupervised_opf_clustering)

# Number of agents
n_agents = 5

# Number of decision variables
n_variables = 1

# Number of running iterations
n_iterations = 3

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [1]
upper_bound = [15]

# Creating the SearchSpace class
Code example #14
            # Cost will be the loss accumulated from model's fitting
            cost += fit(model, loss, opt,
                        X_train[start:end], Y_train[start:end])

    # Predicting samples from the validation set
    preds = predict(model, X_val)

    # Calculating accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Creating Function's object
f = Function(pointer=neural_network)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 2

# Number of running iterations
n_iterations = 100

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = (0, 0)
upper_bound = (1, 1)

# Creating the SearchSpace class
Code example #15
        # For every batch
        for k in range(num_batches):
            # Declaring initial and ending for each batch
            start, end = k * batch_size, (k + 1) * batch_size

            # Cost will be the loss accumulated from model's fitting
            cost += fit(model, loss, opt, X[start:end], Y[start:end])

    # Calculating final cost
    final_cost = cost / num_batches

    return final_cost


# Creating Function's object
f = Function(pointer=linear_regression)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 2

# Number of running iterations
n_iterations = 100

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = (0, 0)
upper_bound = (1, 1)

# Creating the SearchSpace class
Code example #16
            # Cost will be the loss accumulated from model's fitting
            cost += fit(model, loss, opt, X_train[:, start:end, :],
                        Y_train[start:end])

    # Predicting samples from the validation set
    preds = predict(model, X_val)

    # Calculating accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Creating Function's object
f = Function(pointer=long_sort_term_memory)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 2

# Number of running iterations
n_iterations = 100

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [0, 0]
upper_bound = [1, 1]

# Creating the SearchSpace class
Code example #17
# NOTE: the numpy and hypercomplex imports are not in the original snippet
# and are assumed
import numpy as np

import opytimizer.math.hypercomplex as h
from opytimizer.spaces.hyper import HyperSpace


def sphere(x):
    # When using hypercomplex numbers, we always need to span them
    # before feeding them into the function
    x_span = h.span(x, lower_bound, upper_bound)

    # Declaring Sphere's function
    y = x_span**2

    return np.sum(y)


# Creating Function's object
f = Function(pointer=sphere)

# Number of agents
n_agents = 20

# Number of decision variables
n_variables = 2

# Number of space dimensions
n_dimensions = 4

# Number of running iterations
n_iterations = 10000

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [-10, -10]
Code example #18
File: dropout_rbm.py Project: stjordanis/opytimizer
def dropout_rbm(opytimizer):
    # Gathering hyperparams
    dropout = opytimizer[0][0]

    # Creating an RBM
    model = DropoutRBM(n_visible=784, n_hidden=128, steps=1, learning_rate=0.1,
                       momentum=0, decay=0, temperature=1, dropout=dropout, use_gpu=False)

    # Training an RBM
    error, pl = model.fit(train, batch_size=128, epochs=5)

    return error


# Creating Function's object
f = Function(pointer=dropout_rbm)

# Number of agents
n_agents = 5

# Number of decision variables
n_variables = 1

# Number of running iterations
n_iterations = 5

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [0]
upper_bound = [1]

# Creating the SearchSpace class
Code example #19
n_variables = 2

# Number of running iterations
n_iterations = 10

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [-10, -10]
upper_bound = [10, 10]

# Creating the SearchSpace class
s = SearchSpace(n_agents=n_agents,
                n_iterations=n_iterations,
                n_variables=n_variables,
                lower_bound=lower_bound,
                upper_bound=upper_bound)

# Hyperparameters for the optimizer
hyperparams = {'w': 0.7, 'c1': 1.7, 'c2': 1.7}

# Creating PSO's optimizer
p = PSO(hyperparams=hyperparams)

# Creating Function's object
f = Function(pointer=Sphere(), constraints=[c_1])

# Finally, we can create an Opytimizer class
o = Opytimizer(space=s, optimizer=p, function=f)

# Running the optimization task
history = o.start()
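
The c_1 constraint above is defined outside this excerpt. Judging by code example #5, which forwards constraints straight into Function, a constraint is a plain callable over an agent's position; a purely hypothetical sketch:

# Hypothetical constraint: accepts a position only when its components
# sum to a non-positive value
def c_1(x):
    return bool(x.sum() <= 0)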
Code example #20
            # Cost will be the loss accumulated from model's fitting
            cost += fit(model, loss, opt, X_train[start:end],
                        Y_train[start:end])

    # Predicting samples from the validation set
    preds = predict(model, X_val)

    # Calculating accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Creating Function's object
f = Function(pointer=conv_neural_network)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 2

# Number of running iterations
n_iterations = 100

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [0, 0]
upper_bound = [1, 1]

# Creating the SearchSpace class
Code example #21
from opytimizer.core.function import Function


# One should declare a function of x that returns a value
def test_function(x):
    return x + 2


# Declaring x variable for further use
x = 0

# Functions can be used whenever your objective
# function is plain Python code
f = Function(pointer=test_function)

# Testing out your new Function class
print(f'x: {x}')
print(f'f(x): {f.pointer(x)}')
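
Running this prints x: 0 followed by f(x): 2, since f.pointer simply invokes the wrapped test_function.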
Code example #22
    rnn.compile(
        optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
        loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.metrics.SparseCategoricalAccuracy(name='accuracy')])

    # Fitting the RNN
    history = rnn.fit(dataset.batches, epochs=100)

    # Gathering last iteration's accuracy
    acc = history.history['accuracy'][-1]

    return 1 - acc


# Creating Function's object
f = Function(pointer=rnn)

# Number of agents
n_agents = 5

# Number of decision variables
n_variables = 1

# Number of running iterations
n_iterations = 3

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = (0, )
upper_bound = (1, )

# Creating the SearchSpace class
Code example #23
    n_clusters = int(opytimizer[0][0])

    # Instantiating a KMeans class
    kmeans = KMeans(n_clusters=n_clusters, random_state=1).fit(X)

    # Gathering predictions
    preds = kmeans.labels_

    # Calculating adjusted rand index
    ari = metrics.adjusted_rand_score(Y, preds)

    return 1 - ari


# Creating Function's object
f = Function(pointer=k_means_clustering)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 1

# Number of running iterations
n_iterations = 100

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [1]
upper_bound = [100]

# Creating the SearchSpace class
Code example #24
File: rbm.py Project: stjordanis/opytimizer
                n_hidden=128,
                steps=1,
                learning_rate=lr,
                momentum=momentum,
                decay=decay,
                temperature=1,
                use_gpu=False)

    # Training an RBM
    error, pl = model.fit(train, batch_size=128, epochs=5)

    return error


# Creating Function's object
f = Function(pointer=rbm)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 3

# Number of running iterations
n_iterations = 10

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [0, 0, 0]
upper_bound = [1, 1, 1]

# Creating the SearchSpace class
Code example #25
    lstm.compile(
        optimizer=tf.optimizers.Adam(learning_rate=learning_rate),
        loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.metrics.SparseCategoricalAccuracy(name='accuracy')])

    # Fitting the LSTM
    history = lstm.fit(dataset.batches, epochs=100)

    # Gathering last iteration's accuracy
    acc = history.history['accuracy'][-1]

    return 1 - acc


# Creating Function's object
f = Function(pointer=lstm)

# Number of agents
n_agents = 5

# Number of decision variables
n_variables = 1

# Number of running iterations
n_iterations = 3

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = (0, )
upper_bound = (1, )

# Creating the SearchSpace class
Code example #26
    # Gathering validation accuracy
    val_acc = history.history['val_accuracy'][-1]

    # Cleaning up memory
    del history
    del model

    # Calling the garbage collector
    gc.collect()

    return 1 - val_acc


# Creating Function's object
f = Function(pointer=cnn)

# Number of agents
n_agents = 5

# Number of decision variables
n_variables = 2

# Number of running iterations
n_iterations = 3

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [0, 0]
upper_bound = [0.001, 1]

# Creating the SearchSpace class
Code example #27
                        pre_computed_distance=None)

    # Fits training data into the classifier
    opf.fit(X_train_selected, Y_train)

    # Predicts new data
    preds = opf.predict(X_val_selected)

    # Calculating accuracy
    acc = g.opf_accuracy(Y_val, preds)

    return 1 - acc


# Creating Function's object
f = Function(pointer=supervised_opf_feature_selection)

# Number of agents
n_agents = 5

# Number of decision variables
n_variables = 64

# Number of running iterations
n_iterations = 3

# Creating the SearchSpace class
b = BooleanSpace(n_agents=n_agents,
                 n_iterations=n_iterations,
                 n_variables=n_variables)
Code example #28
# Finally, we define the lower and upper bounds
# Note that they have to be the same size as n_variables
lower_bound = [-10, -10]
upper_bound = [10, 10]

# Creating the TreeSpace object
s = TreeSpace(n_trees=n_trees, n_terminals=n_terminals, n_variables=n_variables,
              n_iterations=n_iterations, min_depth=min_depth, max_depth=max_depth,
              functions=functions, lower_bound=lower_bound, upper_bound=upper_bound)

# Hyperparameters for the optimizer
hyperparams = {
    'p_reproduction': 0.25,
    'p_mutation': 0.1,
    'p_crossover': 0.2,
    'prunning_ratio': 0.0
}

# Creating GP's optimizer
p = GP(hyperparams=hyperparams)

# Creating Function's object
f = Function(pointer=benchmark.sphere)

# Finally, we can create an Opytimizer class
o = Opytimizer(space=s, optimizer=p, function=f)

# Running the optimization task
history = o.start()
Code example #29
    svc = svm.SVC(C=C, kernel='linear')

    # Creating a cross-validation holder
    k_fold = KFold(n_splits=5)

    # Fitting model using cross-validation
    scores = cross_val_score(svc, X, Y, cv=k_fold, n_jobs=-1)

    # Calculating the mean score
    mean_score = np.mean(scores)

    return 1 - mean_score


# Creating Function's object
f = Function(pointer=support_vector_machine)

# Number of agents
n_agents = 10

# Number of decision variables
n_variables = 1

# Number of running iterations
n_iterations = 100

# Lower and upper bounds (have to be the same size as n_variables)
lower_bound = [0.00001]
upper_bound = [10]

# Creating the SearchSpace class
Code example #30
# Number of agents
n_agents = 5

# Number of decision variables
n_variables = 5

# Number of running iterations
n_iterations = 10

# Creating the BooleanSpace class
s = BooleanSpace(n_agents=n_agents, n_iterations=n_iterations, n_variables=n_variables)

# Hyperparameters for the optimizer
hyperparams = {
    'c1': r.generate_binary_random_number(size=(n_variables, 1)),
    'c2': r.generate_binary_random_number(size=(n_variables, 1))
}

# Creating BPSO's optimizer
p = BPSO(hyperparams=hyperparams)

# Creating Function's object
f = Function(pointer=Knapsack(values=[55, 10, 47, 5, 4], weights=[95, 4, 60, 32, 23], max_capacity=100))

# Finally, we can create an Opytimizer class
o = Opytimizer(space=s, optimizer=p, function=f)

# Running the optimization task
history = o.start()