Example #1
    def __init__(self, functions, weights):
        """Initialization method.

        Args:
            functions (list): Pointers to functions that will return the fitness value.
            weights (list): Weights for weighted-sum strategy.

        """

        logger.info('Creating class: WeightedFunction.')

        # List of functions
        self.functions = [Function(f) for f in functions] or []

        # List of weights
        self.weights = weights or []

        # Set built variable to 'True'
        self.built = True

        # Logging attributes
        logger.debug('Functions: %s | Weights: %s | Built: %s',
                     [f.name for f in self.functions], self.weights,
                     self.built)
        logger.info('Class created.')
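Since only the constructor is shown above, a minimal construction sketch may help. It assumes the class is the WeightedFunction named in the log message and that it can be imported from opytimizer's functions package (the exact module path varies across releases); the two callables and the weights are purely illustrative.

from opytimizer.functions import WeightedFunction  # import path is an assumption


def f1(x):
    return (x ** 2).sum()


def f2(x):
    return ((x - 1) ** 2).sum()


# Builds a weighted-sum objective: 0.7 * f1(x) + 0.3 * f2(x)
weighted = WeightedFunction(functions=[f1, f2], weights=[0.7, 0.3])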
Example #2
    def __init__(self, functions: List[callable]) -> None:
        """Initialization method.

        Args:
            functions: Pointers to functions that will return the fitness value.

        """

        logger.info("Creating class: MultiObjectiveFunction.")

        # List of functions
        self.functions = [Function(f) for f in functions] or []

        # Set built variable to 'True'
        self.built = True

        logger.debug(
            "Functions: %s | Built: %s", [f.name for f in self.functions], self.built
        )
        logger.info("Class created.")
Example #3
from opytimark.markers.n_dimensional import Sphere

from opytimizer import Opytimizer
from opytimizer.core import Function
from opytimizer.optimizers.misc import GS
from opytimizer.spaces import GridSpace

# Number of decision variables and step size of the grid
n_variables = 2
step = [0.1, 1]

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [-10, -10]
upper_bound = [10, 10]

# Creates the space, optimizer and function
space = GridSpace(n_variables, step, lower_bound, upper_bound)
optimizer = GS()
function = Function(Sphere())

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function, save_agents=False)

# Runs the optimization task
opt.start()
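Once start() finishes, the best solution found by the grid search can be read back. The short follow-up below assumes the space exposes a best_agent with position and fit attributes, as recent opytimizer releases do.

# Inspects the best agent found by the grid search
# (assumes `best_agent`, `position` and `fit` exist, as in recent releases)
best = opt.space.best_agent
print(f"Best position: {best.position} | Best fit: {best.fit}")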
Example #4
    val_acc = history.history['val_accuracy'][-1]

    # Cleaning up memory
    del history
    del model

    # Calling the garbage collector
    gc.collect()

    return 1 - val_acc


# Number of agents and decision variables
n_agents = 5
n_variables = 2

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0, 0]
upper_bound = [0.001, 1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(cnn)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=3)
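The head of the `cnn` objective is cut off above. A plausible, hypothetical reconstruction is sketched below: it assumes the two decision variables (bounded by [0, 0.001] and [0, 1]) encode a learning rate and a dropout rate, that `X_train`, `Y_train`, `X_val`, `Y_val` are already loaded, and that Keras is the underlying framework, since the visible tail reads `history.history['val_accuracy']`. The architecture and epoch count are illustrative only.

import gc

import tensorflow as tf


def cnn(opytimizer):
    # Hypothetical mapping: first variable -> learning rate, second -> dropout
    learning_rate = opytimizer[0][0]
    dropout = opytimizer[1][0]

    # Small illustrative CNN; architecture and data are assumptions
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(16, 3, activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dropout(dropout),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Fits on the (assumed) training data and validates on the held-out split
    history = model.fit(X_train, Y_train, validation_data=(X_val, Y_val),
                        epochs=1, batch_size=128, verbose=0)

    # From here on, the body matches the snippet above
    val_acc = history.history['val_accuracy'][-1]
    del history
    del model
    gc.collect()

    return 1 - val_acc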
Example #5
    # Instantiates a KMeans class
    kmeans = KMeans(n_clusters=n_clusters, random_state=1).fit(X)

    # Gathers predictions
    preds = kmeans.labels_

    # Calculates the adjusted Rand index
    ari = metrics.adjusted_rand_score(Y, preds)

    return 1 - ari


# Number of agents and decision variables
n_agents = 10
n_variables = 1

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [1]
upper_bound = [100]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(k_means_clustering)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=100)
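The top of `k_means_clustering` is also cut off. A hypothetical head is sketched below; it assumes the single decision variable (bounded by [1, 100]) is rounded to the number of clusters and that `X` and `Y` hold the features and ground-truth labels, here illustrated with `sklearn.datasets.load_digits`.

from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits

# Assumed dataset: any labeled feature matrix works
digits = load_digits()
X, Y = digits.data, digits.target


def k_means_clustering(opytimizer):
    # Rounds the continuous decision variable to an integer cluster count
    n_clusters = int(opytimizer[0][0])

    # Instantiates a KMeans class
    kmeans = KMeans(n_clusters=n_clusters, random_state=1).fit(X)

    # Gathers predictions
    preds = kmeans.labels_

    # Calculates the adjusted Rand index
    ari = metrics.adjusted_rand_score(Y, preds)

    return 1 - ari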
Example #6
            cost += fit(model, loss, opt, X_train[:, start:end, :],
                        Y_train[start:end])

    # Predicts samples from the validation set
    preds = predict(model, X_val)

    # Calculates accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Number of agents and decision variables
n_agents = 10
n_variables = 2

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0, 0]
upper_bound = [1, 1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(lstm)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=100)
Example #7
import numpy as np

import opytimizer.math.random as r
from opytimark.markers.boolean import Knapsack
from opytimizer import Opytimizer
from opytimizer.core import Function
from opytimizer.optimizers.boolean import BPSO
from opytimizer.spaces import BooleanSpace

# Random seed for experimental consistency
np.random.seed(0)

# Number of agents and decision variables
n_agents = 5
n_variables = 5

# Parameters for the optimizer
params = {
    'c1': r.generate_binary_random_number(size=(n_variables, 1)),
    'c2': r.generate_binary_random_number(size=(n_variables, 1))
}

# Creates the space, optimizer and function
space = BooleanSpace(n_agents, n_variables)
optimizer = BPSO(params)
function = Function(
    Knapsack(values=(55, 10, 47, 5, 4),
             weights=(95, 4, 60, 32, 23),
             max_capacity=100))

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function, save_agents=False)

# Runs the optimization task
opt.start(n_iterations=1000)
Example #8
    # Creates a cross-validation holder
    k_fold = KFold(n_splits=5)

    # Fitting model using cross-validation
    scores = cross_val_score(svc, X, Y, cv=k_fold, n_jobs=-1)

    # Calculates the mean score
    mean_score = np.mean(scores)

    return 1 - mean_score


# Number of agents and decision variables
n_agents = 10
n_variables = 1

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0.000001]
upper_bound = [10]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(_svm)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=100)
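The missing head of `_svm` most likely maps the single decision variable (bounded by [0.000001, 10]) to the SVM regularization parameter C; the sketch below is a guess along those lines, assuming scikit-learn and a preloaded `X`, `Y` dataset. The kernel choice is illustrative.

import numpy as np
from sklearn.model_selection import KFold, cross_val_score
from sklearn.svm import SVC


def _svm(opytimizer):
    # Hypothetical mapping: the single variable is the regularization strength C
    C = opytimizer[0][0]

    # Instantiates an SVM with the candidate C (assumes `X` and `Y` are loaded)
    svc = SVC(C=C, kernel='linear')

    # Creates a cross-validation holder
    k_fold = KFold(n_splits=5)

    # Fitting model using cross-validation
    scores = cross_val_score(svc, X, Y, cv=k_fold, n_jobs=-1)

    return 1 - np.mean(scores)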
Example #9
import numpy as np

import opytimizer.math.hyper as h
from opytimark.markers.n_dimensional import Sphere
from opytimizer import Opytimizer
from opytimizer.core import Function
from opytimizer.optimizers.swarm import PSO
from opytimizer.spaces import HyperComplexSpace

# Random seed for experimental consistency
np.random.seed(0)

# Number of agents, decision variables and dimensions
n_agents = 20
n_variables = 2
n_dimensions = 4

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [-10, -10]
upper_bound = [10, 10]


# Wraps the objective function with a spanning decorator,
# allowing values to be spanned between lower and upper bounds
@h.span_to_hyper_value(lower_bound, upper_bound)
def wrapper(x):
    z = Sphere()
    return z(x)


# Creates the space, optimizer and function
space = HyperComplexSpace(n_agents, n_variables, n_dimensions)
optimizer = PSO()
function = Function(wrapper)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function, save_agents=False)

# Runs the optimization task
opt.start(n_iterations=1000)
Example #10
            cost += fit(model, loss, opt, X_train[start:end],
                        Y_train[start:end])

    # Predicts samples from the validation set
    preds = predict(model, X_val)

    # Calculates accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Number of agents and decision variables
n_agents = 10
n_variables = 3

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0, 0, 0]
upper_bound = [1, 1, 1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(enhanced_neural_network)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=100)
Example #11
            # Declares the start and end indices for each batch
            start, end = k * batch_size, (k + 1) * batch_size

            # Cost will be the loss accumulated from the model's fitting
            cost += fit(model, loss, opt, X[start:end], Y[start:end])

    # Calculates final cost
    final_cost = cost / num_batches

    return final_cost


# Number of agents and decision variables
n_agents = 10
n_variables = 2

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0, 0]
upper_bound = [1, 1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(linear_regression)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=100)
Example #12
                       momentum=0,
                       decay=0,
                       temperature=1,
                       dropout=dropout,
                       use_gpu=False)

    # Training an RBM
    error, _ = model.fit(train, batch_size=128, epochs=5)

    return error


# Number of agents and decision variables
n_agents = 5
n_variables = 1

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0]
upper_bound = [1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(dropout_rbm)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=5)
Example #13
File: rnn.py Project: gugarosa/opytimizer
        metrics=[tf.metrics.SparseCategoricalAccuracy(name="accuracy")],
    )

    # Fitting the RNN
    history = rnn.fit(dataset.batches, epochs=100)

    # Gathers last iteration's accuracy
    acc = history.history["accuracy"][-1]

    return 1 - acc


# Number of agents and decision variables
n_agents = 5
n_variables = 1

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0]
upper_bound = [1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(rnn)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=3)
Example #14
import numpy as np

import opytimizer.math.random as r
from opytimizer import Opytimizer
from opytimizer.core import Function
from opytimizer.optimizers.misc.nds import NDS
from opytimizer.spaces import ParetoSpace

# Random seed for experimental consistency
np.random.seed(0)

# Defines the number of points `n` and the number of objectives `k`
n_points = 100
n_objectives = 3

# Defines the agents to be initialized within the ParetoSpace
# Note that they form a multi-dimensional array of shape [n, k]
data_points = r.generate_uniform_random_number(size=(n_points, n_objectives))

# Creates the space, optimizer and function
space = ParetoSpace(data_points)
optimizer = NDS()
function = Function(lambda x: 0)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function, save_agents=False)

# Runs the optimization task
opt.start()
Example #15
            cost += fit(model, loss, opt, X_train[start:end],
                        Y_train[start:end])

    # Predicts samples from the validation set
    preds = predict(model, X_val)

    # Calculates accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Number of agents and decision variables
n_agents = 10
n_variables = 2

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0, 0]
upper_bound = [1, 1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(logistic_regression)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=100)
Example #16
        momentum=momentum,
        decay=decay,
        temperature=1,
        use_gpu=False,
    )

    # Training an RBM
    error, _ = model.fit(train, batch_size=128, epochs=5)

    return error


# Number of agents and decision variables
n_agents = 10
n_variables = 3

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0, 0, 0]
upper_bound = [1, 1, 1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(rbm)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=10)
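Only the tail of the RBM constructor call is visible above. The sketch below is a hypothetical reconstruction assuming the three decision variables encode learning rate, momentum and decay, and that the model is learnergy's Bernoulli RBM, whose constructor matches the visible keyword arguments (the import path may differ across learnergy releases); `train` is an assumed MNIST-style torch dataset loaded beforehand, and the layer sizes are illustrative.

from learnergy.models.bernoulli import RBM  # import path is an assumption


def rbm(opytimizer):
    # Hypothetical mapping of the three decision variables
    lr = opytimizer[0][0]
    momentum = opytimizer[1][0]
    decay = opytimizer[2][0]

    # Builds the RBM (n_visible/n_hidden sizes are assumptions for MNIST-like data)
    model = RBM(
        n_visible=784,
        n_hidden=128,
        steps=1,
        learning_rate=lr,
        momentum=momentum,
        decay=decay,
        temperature=1,
        use_gpu=False,
    )

    # Training an RBM (assumes `train` is loaded beforehand)
    error, _ = model.fit(train, batch_size=128, epochs=5)

    return error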
Example #17
    # If data is labeled, one can propagate predicted labels instead of only the cluster identifiers
    opf.propagate_labels()

    # Predicts new data
    preds, _ = opf.predict(X_test)

    # Calculates accuracy
    acc = g.opf_accuracy(Y_test, preds)

    return 1 - acc


# Number of agents and decision variables
n_agents = 5
n_variables = 1

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [1]
upper_bound = [15]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(unsupervised_opf_clustering)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=3)
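The head of `unsupervised_opf_clustering` is missing as well. A hypothetical reconstruction follows: it assumes the single decision variable (bounded by [1, 15]) is rounded to the maximum `k` of an opfython UnsupervisedOPF, and that `X_train`, `Y_train`, `X_test`, `Y_test` are loaded beforehand; the constructor arguments and import path are assumptions and may differ across opfython releases.

import opfython.math.general as g
from opfython.models import UnsupervisedOPF  # import path is an assumption


def unsupervised_opf_clustering(opytimizer):
    # Hypothetical mapping: the single variable is the maximum `k`
    max_k = int(opytimizer[0][0])

    # Creates and fits an unsupervised OPF (constructor arguments are assumptions)
    opf = UnsupervisedOPF(max_k=max_k, distance='log_squared_euclidean',
                          pre_computed_distance=None)
    opf.fit(X_train, Y_train)

    # If data is labeled, one can propagate predicted labels instead of only the cluster identifiers
    opf.propagate_labels()

    # Predicts new data
    preds, _ = opf.predict(X_test)

    # Calculates accuracy
    acc = g.opf_accuracy(Y_test, preds)

    return 1 - acc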
Example #18
            cost += fit(model, loss, opt, X_train[start:end],
                        Y_train[start:end])

    # Predicts samples from the validation set
    preds = predict(model, X_val)

    # Calculates accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc


# Number of agents and decision variables
n_agents = 10
n_variables = 2

# Lower and upper bounds (have to be the same size as `n_variables`)
lower_bound = [0, 0]
upper_bound = [1, 1]

# Creates the space, optimizer and function
space = SearchSpace(n_agents, n_variables, lower_bound, upper_bound)
optimizer = PSO()
function = Function(neural_network)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=100)
Example #19
    # Predicts new data
    preds = opf.predict(X_val_selected)

    # Calculates accuracy
    acc = g.opf_accuracy(Y_val, preds)

    return 1 - acc


# Number of agents and decision variables
n_agents = 5
n_variables = 64

# Parameters for the optimizer
params = {
    'c1': r.generate_binary_random_number(size=(n_variables, 1)),
    'c2': r.generate_binary_random_number(size=(n_variables, 1))
}

# Creates the space, optimizer and function
space = BooleanSpace(n_agents, n_variables)
optimizer = BPSO()
function = Function(supervised_opf_feature_selection)

# Bundles every piece into Opytimizer class
opt = Opytimizer(space, optimizer, function)

# Runs the optimization task
opt.start(n_iterations=3)
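This example also omits the head of `supervised_opf_feature_selection`. A hypothetical reconstruction is shown below: the boolean position vector (64 variables) masks the feature columns, and an opfython SupervisedOPF classifier is trained on the selected features. Dataset loading, the constructor arguments and the import path are assumptions.

import opfython.math.general as g
from opfython.models import SupervisedOPF  # import path is an assumption


def supervised_opf_feature_selection(opytimizer):
    # Transforms the boolean position (shape: n_variables x 1) into a feature mask
    features = opytimizer[:, 0].astype(bool)

    # Selects the masked feature columns
    # (assumes X_train, Y_train, X_val, Y_val are loaded beforehand)
    X_train_selected = X_train[:, features]
    X_val_selected = X_val[:, features]

    # Creates and fits a supervised OPF classifier (constructor arguments are assumptions)
    opf = SupervisedOPF(distance='log_squared_euclidean', pre_computed_distance=None)
    opf.fit(X_train_selected, Y_train)

    # Predicts new data
    preds = opf.predict(X_val_selected)

    # Calculates accuracy
    acc = g.opf_accuracy(Y_val, preds)

    return 1 - acc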
Example #20
from opytimizer.core import Function


# Defines a function with a single input and a float-based return
def test_function(z):
    return z + 2


# Declares `x`
x = 0

# Any Python-coded function can be used as a pointer
f = Function(test_function)

# Prints out some properties
print(f"x: {x}")
print(f"f(x): {f(x)}")