def experiment1():
    # Optimize every network weight with CMA-ES, starting from the current
    # parameters; fitnessFunction, myNetwork and logNet are module-level
    # globals.
    l = CMAES(fitnessFunction, myNetwork.params)
    l.minimize = True
    l.verbose = True
    l.maxLearningSteps = 500
    params, fitness = l.learn()
    myNetwork._setParameters(params)
    logNet()
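# For context, a minimal sketch of the globals the example above assumes.
# fitnessFunction, myNetwork and logNet are defined elsewhere in the
# original, so the stand-ins below are hypothetical.
from pybrain.tools.shortcuts import buildNetwork

myNetwork = buildNetwork(2, 3, 1)  # 2 inputs, 3 hidden units, 1 output

def fitnessFunction(params):
    # Score a candidate parameter vector; lower is better since the
    # optimizer above sets minimize = True. Here: squared error on XOR.
    myNetwork._setParameters(params)
    return sum((myNetwork.activate(inp)[0] - target) ** 2
               for inp, target in [([0, 0], 0), ([0, 1], 1),
                                   ([1, 0], 1), ([1, 1], 0)])

def logNet():
    print myNetwork.params  # placeholder logger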
def markovSourceDenoise(transition_prob, deletion_rate, weighted=False):
    # Compare several denoisers on a binary Markov source passed through a
    # deletion channel. Note: `k` (the context length) is a module-level
    # global set elsewhere.
    global delta, rho, max_del, n, alphabet, orig, noisy
    delta = transition_prob
    rho = deletion_rate
    max_del = 2
    n = 10000
    display = 50
    alphabet = ['+', '-']
    orig = generateSequence(n, delta)
    noisy = deletionChannel(orig, rho)
    est1 = denoiseSequence2(noisy, k, alphabet, rho, max_del)
    #est2 = denoiseSequence3(noisy, k, alphabet, rho)
    est4 = denoiseSequence4(noisy, k, alphabet, rho)
    est = optimalDenoise(noisy, k, alphabet, rho, delta)
    print 'Setting: delta =', delta, ', rho =', rho
    print 'Original:', orig[:display], \
        '(length', len(orig), 'error', error(orig, orig) / float(n), \
        levenshtein(orig, orig) / float(n), ')'
    print 'Noisy:', noisy[:display], \
        '(length', len(noisy), 'error', error(noisy, orig) / float(n), \
        levenshtein(noisy, orig) / float(n), ')'
    print 'Optimal Denoised:', est[:display], \
        '(length', len(est), 'error', error(est, orig) / float(n), \
        levenshtein(est, orig) / float(n), ')'
    print 'Denoiser 1:', est1[:display], \
        '(length', len(est1), 'error', error(est1, orig) / float(n), \
        levenshtein(est1, orig) / float(n), ')'
    print 'Denoiser 4:', est4[:display], \
        '(length', len(est4), 'error', error(est4, orig) / float(n), \
        levenshtein(est4, orig) / float(n), ')'
    #print 'Denoiser 2:', est2[:display], '(length', len(est2), 'error', error(est2, orig) / float(n), ')'
    if weighted:
        # Tune the denoiser weights themselves with CMA-ES, using
        # weightWrapper1 (defined elsewhere) as the objective.
        l1 = CMAES(weightWrapper1, [1])
        l1.minimize = True
        l1.maxEvaluations = 200
        weights1, err1 = l1.learn()
        #l2 = CMAES(weightWrapper2, [1]*k)
        #l2.minimize = True
        #l2.maxEvaluations = 200
        #weights2, err2 = l2.learn()
        print weights1
        weighted_est1 = denoiseSequence2(noisy, k, alphabet, rho, max_del,
                                         weights1)
        #weighted_est2 = denoiseSequence3(noisy, k, alphabet, rho, k, weights2)
        print 'Weighted Denoiser 1:', weighted_est1[:display], \
            '(length', len(weighted_est1), 'error', err1, ')'
        #print 'Weighted Denoiser 2:', weighted_est2[:display], '(length', len(weighted_est2), 'error', err2, ')'
    print '\n' * 5
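# Hedged sketches of the helpers the function above assumes; the real
# generateSequence / deletionChannel / error are defined elsewhere, so
# treat these as illustrative stand-ins only.
import random

def generateSequence(n, delta):
    # Binary Markov source: flip the previous symbol with probability delta.
    seq = [random.choice(alphabet)]
    for _ in xrange(n - 1):
        flip = random.random() < delta
        seq.append(alphabet[1 - alphabet.index(seq[-1])] if flip else seq[-1])
    return seq

def deletionChannel(seq, rho):
    # Drop each symbol independently with probability rho.
    return [c for c in seq if random.random() >= rho]

def error(est, orig):
    # Positionwise mismatches over the overlapping prefix, plus the
    # length difference.
    return sum(a != b for a, b in zip(est, orig)) + abs(len(est) - len(orig))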
def trainOne(index):
    # Fit a linear model for one output column with CMA-ES; the second term
    # heavily penalizes negative weights (sign(w) - 1 is -2 wherever w < 0,
    # so the objective grows by 2000 per negative weight).
    def opt_func(w):
        return (np.sum((np.dot(x_train, w) - y_train2[:, index]) ** 2)
                - 1000 * np.sum(np.sign(w) - 1))

    l = CMAES(opt_func, np.random.randn(np.shape(x_train)[1]))
    l.minimize = True
    opt_w = l.learn()[0]
    return opt_w
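# Hypothetical usage of trainOne, under the same assumption as above that
# x_train and y_train2 are globals: learn one non-negative weight vector
# per output column and stack them into a matrix.
W = np.column_stack([trainOne(i) for i in xrange(y_train2.shape[1])])
predictions = np.dot(x_train, W)  # linear predictions, one column per output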
    def experiment1(self):
        # Like the module-level experiment1 above, but optimize only the
        # parameter subset selected by self.indices.
        l = CMAES(self.fitnessFunction, self.myNetwork.params[self.indices])
        l.minimize = True
        l.verbose = True
        l.maxLearningSteps = 500
        params, fitness = l.learn()
        self.myNetwork.params[self.indices] = params
        self.metaInfo["numsteps"] = l.maxLearningSteps
        self.metaInfo["fitness"] = fitness
        # self.myNetwork._setParameters(self.originalWeights)
        self.logNet()
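    # Note on the write-back above: fancy indexing into myNetwork.params
    # mutates the parameter array in place, so only the selected weights
    # change and every other weight keeps its original value. A hypothetical
    # setup for the attributes involved:
    #
    #     self.indices = np.arange(6)               # e.g. first six weights
    #     x0 = self.myNetwork.params[self.indices]  # CMAES starting point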
Example #6
def trainNetwork(data, n_classes, buildNet, file, seed, max_evaluations, num_samples):
    # Fitness is the summed multiclass hinge loss over a random mini-batch
    # of the training data.
    X_train = data["X_train"]
    y_train = data["y_train"]

    def objF(params):
        nn = buildNet(X_train.shape[1], n_classes)
        nn._setParameters(np.array(params))

        # Seed with the current learning step so every candidate in a
        # generation is scored on the same mini-batch, then restore the RNG
        # state. Note that objF closes over `l`, the optimizer built below.
        random_state = np.random.get_state()
        np.random.seed(l.numLearningSteps)
        sampled_data = np.random.choice(len(X_train), num_samples, replace=False)
        np.random.set_state(random_state)
        cur_data = X_train[sampled_data]
        cur_label = y_train[sampled_data]

        cum_loss = 0

        for example, cor in zip(cur_data, cur_label):
            result = nn.activate(example)
            loss_sum = 0
            for q, out in enumerate(result):
                if q != cor:
                    # Multiclass hinge: penalize wrong-class outputs that
                    # come within margin 1 of the true-class output.
                    loss_sum += max(0, out - result[int(cor)] + 1)
            # Earlier accuracy-based fitness, kept for reference:
            # guess = np.argmax(result)
            # if guess == cor:
            #     cum_correct += 1
            cum_loss += loss_sum
            nn.reset()

        return cum_loss

    # Build net for initial random params
    n = buildNet(X_train.shape[1], n_classes)
    learned = n.params

    testNetwork(data, n_classes, learned, buildNet, 0, file, seed)

    l = CMAES(objF, learned, verbose=False)
    batch_size = l.batchSize
    # l._setBatchSize = batch_size
    l.maxEvaluations = max_evaluations
    l.minimize = True

    # Step one CMA-ES generation at a time so the partially trained
    # parameters can be evaluated after every batch of evaluations.
    for i in xrange(max_evaluations / batch_size):
        result = l.learn(additionalLearningSteps=1)
        learned = result[0]

        testNetwork(data, n_classes, learned, buildNet, num_samples * (i + 1) * batch_size, file, seed)

    return learned
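# A hypothetical driver for trainNetwork. The data dict, testNetwork and
# the log file come from elsewhere in the original, so the call itself
# stays commented.
from pybrain.tools.shortcuts import buildNetwork

def buildNet(n_in, n_out):
    # Simple one-hidden-layer factory matching the expected signature.
    return buildNetwork(n_in, 16, n_out)

# learned = trainNetwork(data, n_classes=10, buildNet=buildNet,
#                        file=open('log.csv', 'w'), seed=0,
#                        max_evaluations=2000, num_samples=64)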
    def __init__(self, x_learn, y_learn):
        _, y = nclass_to_nbinary(y_learn)
        # Without this axis swap `x` is undefined below, so the line cannot
        # stay commented out.
        x = np.swapaxes(x_learn, 1, 2)
        print "x size", x.shape
        print "y size", y.shape

        # Squared error, plus a heavy penalty on negative weights, plus an
        # L2 regularizer.
        func = lambda w: (np.sum((np.dot(x, w) - y) ** 2)
                          + 10000 * np.sum(np.float32(w < 0))
                          + np.sum(w ** 2))

        self.weights = np.random.randn(np.shape(x)[2])
        print "weights size", self.weights.shape
        optimizer = CMAES(func, self.weights)
        optimizer.minimize = True
        self.weights = optimizer.learn()[0]
Example #8
    def optimize_pose(self, marker_id, save_transform=True):
        """
        Find optimized transform for marker relative to master tag
        :param marker_id: id for marker
        :param save_transform: bool, update optimized marker pose dict or not
        :return: optimized pose
        """
        x0 = np.zeros(7)  # 7 pose parameters, e.g. translation + quaternion
        self.optimize_id = marker_id
        l = CMAES(self.objF, x0)
        l.minimize = True
        l.maxEvaluations = 1000
        pose = l.learn()  # (best parameters, best fitness)
        print marker_id, pose  # DEBUG
        if save_transform:
            self.optimized_marker_poses[marker_id] = pose[0]
        return pose[0]
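# Hedged sketch of an objective in the spirit of self.objF above: score a
# candidate 7-vector (tx, ty, tz, qx, qy, qz, qw) by how well it maps
# observed marker points into the master-tag frame. The real objF is
# defined elsewhere; every name below is an assumption.
import numpy as np

def pose_error(x, observed_pts, master_pts):
    t, q = x[:3], x[3:]
    q = q / (np.linalg.norm(q) + 1e-12)  # normalize the quaternion
    qx, qy, qz, qw = q
    # Quaternion -> rotation matrix (x, y, z, w convention).
    R = np.array([
        [1 - 2*(qy*qy + qz*qz), 2*(qx*qy - qz*qw),     2*(qx*qz + qy*qw)],
        [2*(qx*qy + qz*qw),     1 - 2*(qx*qx + qz*qz), 2*(qy*qz - qx*qw)],
        [2*(qx*qz - qy*qw),     2*(qy*qz + qx*qw),     1 - 2*(qx*qx + qy*qy)],
    ])
    mapped = observed_pts.dot(R.T) + t  # rotate, then translate each point
    return np.sum((mapped - master_pts) ** 2)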
__author__ = 'estsauver'

from numpy import array
from pybrain.optimization import CMAES


def objF(x):
    # Sphere function: the global minimum is 0 at the origin.
    return sum(x ** 2)

x0 = array([2.1, -1])

l = CMAES(objF, x0)
l.minimize = True
l.maxEvaluations = 200
print l.learn()
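# learn() returns a (best parameters, best fitness) pair, as the unpacked
# form used in the examples above makes explicit:
#
#     params, fitness = l.learn()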