def experiment1():
    # Assumes fitnessFunction, myNetwork and logNet are defined at module
    # level, and that CMAES is imported from pybrain.optimization.
    l = CMAES(fitnessFunction, myNetwork.params)
    l.minimize = True
    l.verbose = True
    l.maxLearningSteps = 500
    params, fitness = l.learn()
    myNetwork._setParameters(params)  # write the optimized weights back into the net
    logNet()
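# For context, a minimal sketch of the setup the snippet above assumes; the
# network shape, fitnessFunction body and logNet are hypothetical stand-ins,
# not part of the original example.
from pybrain.optimization import CMAES
from pybrain.tools.shortcuts import buildNetwork

myNetwork = buildNetwork(2, 4, 1)  # hypothetical 2-4-1 feed-forward net

def fitnessFunction(params):
    # Score a candidate weight vector: load it into the net, then measure
    # squared error on a toy target (here, x + y).
    myNetwork._setParameters(params)
    return sum((myNetwork.activate([x, y])[0] - (x + y)) ** 2
               for x, y in [(0, 0), (0, 1), (1, 0), (1, 1)])

def logNet():
    print myNetwork.params  # placeholder for whatever logging the original does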
def markovSourceDenoise(transition_prob, deletion_rate, weighted=False):
    # Assumes k, generateSequence, deletionChannel, denoiseSequence2,
    # denoiseSequence4, optimalDenoise, error, levenshtein and weightWrapper1
    # are defined at module level; the globals below are shared with them.
    global delta, rho, max_del, n, alphabet, orig, noisy
    delta = transition_prob
    rho = deletion_rate
    max_del = 2
    n = 10000
    display = 50
    alphabet = ['+', '-']
    orig = generateSequence(n, delta)    # clean binary Markov source
    noisy = deletionChannel(orig, rho)   # corrupted by a deletion channel
    est1 = denoiseSequence2(noisy, k, alphabet, rho, max_del)
    #est2 = denoiseSequence3(noisy, k, alphabet, rho)
    est4 = denoiseSequence4(noisy, k, alphabet, rho)
    est = optimalDenoise(noisy, k, alphabet, rho, delta)

    def report(label, seq):
        # Print a prefix of the sequence, its length, the per-symbol error
        # rate and the normalized edit distance against the original.
        print label, ': ', seq[:display], '(length ', len(seq), ' error ', \
            error(seq, orig) / float(n), levenshtein(seq, orig) / float(n), ')'

    print 'Setting: delta = ', delta, ', rho = ', rho
    report('Original', orig)
    report('Noisy', noisy)
    report('Optimal Denoised', est)
    report('Denoiser 1', est1)
    report('Denoiser 4', est4)
    #report('Denoiser 2', est2)
    if weighted:
        # Tune the weighting of denoiser 1 with CMA-ES.
        l1 = CMAES(weightWrapper1, [1])
        l1.minimize = True
        l1.maxEvaluations = 200
        weights1, err1 = l1.learn()
        #l2 = CMAES(weightWrapper2, [1] * k)
        #l2.minimize = True
        #l2.maxEvaluations = 200
        #weights2, err2 = l2.learn()
        print(weights1)
        weighted_est1 = denoiseSequence2(noisy, k, alphabet, rho, max_del,
                                         weights1)
        #weighted_est2 = denoiseSequence3(noisy, k, alphabet, rho, k, weights2)
        print 'Weighted Denoiser 1: ', weighted_est1[:display], '(length ', \
            len(weighted_est1), ' error ', err1, ')'
        #print 'Weighted Denoiser 2: ', weighted_est2[:display], '(length ', len(weighted_est2), ' error ', err2, ')'
    print '\n' * 5
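# Example invocation (a sketch; all the helpers above must be defined first):
# markovSourceDenoise(0.1, 0.2)                 # plain denoisers only
# markovSourceDenoise(0.1, 0.2, weighted=True)  # also tune weights via CMA-ES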
Example #4
def trainOne(index):
    # Assumes numpy (as np), CMAES, x_train and y_train2 are available in the
    # enclosing scope. Fits one output column by least squares; the second
    # term adds a flat cost for every negative weight, pushing the solution
    # toward non-negative weights.
    def opt_func(w):
        return (np.sum((np.dot(x_train, w) - y_train2[:, index]) ** 2)
                - 1000 * np.sum(np.sign(w) - 1))
    l = CMAES(opt_func, np.random.randn(np.shape(x_train)[1]))
    l.minimize = True
    opt_w = l.learn()[0]
    return opt_w
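# A hypothetical stand-alone setup to exercise trainOne; x_train and y_train2
# here are assumptions, not from the original example.
import numpy as np
from pybrain.optimization import CMAES

x_train = np.random.randn(50, 3)   # 50 samples, 3 features
y_train2 = np.random.randn(50, 2)  # 2 output columns
print trainOne(0)                  # optimized weights for output column 0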
def experiment1(self):
    # Method of a class (truncated here) that provides fitnessFunction,
    # myNetwork, indices, metaInfo and logNet. Only the parameter subset
    # selected by self.indices is optimized.
    l = CMAES(self.fitnessFunction, self.myNetwork.params[self.indices])
    l.minimize = True
    l.verbose = True
    l.maxLearningSteps = 500
    params, fitness = l.learn()
    self.myNetwork.params[self.indices] = params
    self.metaInfo["numsteps"] = l.maxLearningSteps  # records the step budget, not steps actually run
    self.metaInfo["fitness"] = fitness
#     self.myNetwork._setParameters(self.originalWeights)
    self.logNet()
Example #6
def trainNetwork(data, n_classes, buildNet, file, seed, max_evaluations, num_samples):
    # Fitness is a multiclass hinge loss summed over a random mini-batch of
    # the training data. Assumes numpy (as np), CMAES and testNetwork are
    # available in the enclosing module.
    X_train = data["X_train"]
    y_train = data["y_train"]

    def objF(params):
        nn = buildNet(X_train.shape[1], n_classes)
        nn._setParameters(np.array(params))

        # Draw a reproducible mini-batch keyed to the current learning step.
        # (l is the CMAES instance created below; objF only runs after it exists.)
        random_state = np.random.get_state()
        np.random.seed(l.numLearningSteps)
        sampled_data = np.random.choice(len(X_train), num_samples, replace=False)
        np.random.set_state(random_state)
        cur_data = X_train[sampled_data]
        cur_label = y_train[sampled_data]

        cum_loss = 0

        for example, cor in zip(cur_data, cur_label):
            result = nn.activate(example)
            # Multiclass hinge loss: penalize wrong classes that score within
            # a margin of 1 of the correct class.
            loss_sum = 0
            for q, out in enumerate(result):
                if q != cor:
                    loss_sum += max(0, out - result[int(cor)] + 1)
            # guess = np.argmax(result)
            # if guess == cor:
            #     cum_correct += 1
            cum_loss += loss_sum
            nn.reset()

        return cum_loss

    # Build a net only to get an initial random parameter vector.
    n = buildNet(X_train.shape[1], n_classes)
    learned = n.params

    testNetwork(data, n_classes, learned, buildNet, 0, file, seed)

    l = CMAES(objF, learned, verbose=False)
    batch_size = l.batchSize
    l.maxEvaluations = max_evaluations
    l.minimize = True

    # One CMA-ES generation per iteration, logging test performance as we go.
    for i in xrange(max_evaluations / batch_size):
        result = l.learn(additionalLearningSteps=1)
        learned = result[0]

        testNetwork(data, n_classes, learned, buildNet, num_samples * (i + 1) * batch_size, file, seed)

    return learned
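# A minimal, hypothetical wiring for trainNetwork; this buildNet, the data
# dict and the log file are assumptions, not part of the original example.
from pybrain.tools.shortcuts import buildNetwork

def buildNet(n_in, n_out):
    return buildNetwork(n_in, 32, n_out)  # one hidden layer of 32 units

# learned = trainNetwork(data, 10, buildNet, open("log.txt", "w"), seed=0,
#                        max_evaluations=2000, num_samples=64)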
def __init__(self, x_learn, y_learn):
    # Method of a class defined elsewhere; assumes numpy (as np), CMAES and
    # nclass_to_nbinary are available in the enclosing module.
    _, y = nclass_to_nbinary(y_learn)
    x = np.swapaxes(x_learn, 1, 2)  # restored: the code below needs x defined
    print "x size", x.shape
    print "y size", y.shape

    # Least squares with a large penalty on negative weights and an L2 term.
    func = lambda w: np.sum((np.dot(x, w) - y) ** 2) \
            + 10000 * np.sum(np.float32(w < 0)) \
            + np.sum(w ** 2)

    self.weights = np.random.randn(np.shape(x)[2])
    print "weights size", self.weights.shape
    optimizer = CMAES(func, self.weights)
    optimizer.minimize = True
    self.weights = optimizer.learn()[0]
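# The middle term above is a soft non-negativity constraint: every negative
# weight adds a flat cost of 10000. A quick illustration (numpy assumed):
import numpy as np
w = np.array([0.5, -0.2, 1.0])
print 10000 * np.sum(np.float32(w < 0))  # 10000.0 -- one negative entry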
Example #8
def optimize_pose(self, marker_id, save_transform=True):
    """
    Find the optimized transform for a marker relative to the master tag
    :param marker_id: id for marker
    :param save_transform: bool, update optimized marker pose dict or not
    :return: optimized pose
    """
    x0 = np.zeros(7)  # 7-parameter pose, all zeros (presumably translation + quaternion)
    self.optimize_id = marker_id
    l = CMAES(self.objF, x0)
    l.minimize = True
    l.maxEvaluations = 1000
    pose = l.learn()  # (best_pose, best_fitness)
    print marker_id, pose  # DEBUG
    if save_transform:
        self.optimized_marker_poses[marker_id] = pose[0]
    return pose[0]
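# For context: CMAES only requires that self.objF map the 7-vector to a scalar
# cost. A purely illustrative placeholder (not the original objective; the
# target attribute is hypothetical):
#
# def objF(self, x):
#     trans, quat = x[:3], x[3:]
#     # e.g. distance from a target translation plus a unit-quaternion penalty
#     return np.sum((trans - self.target) ** 2) + (np.linalg.norm(quat) - 1.0) ** 2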
from pybrain.optimization import CMAES
from numpy import array

def objF(x):
    return sum(x ** 2)

x0 = array([2.1, -1])
l = CMAES(objF, x0, minimize=True)
l.maxEvaluations = 200
print l.learn()
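# learn() returns a (best_parameters, best_fitness) tuple, so the call above
# could equally be written as:
# best_x, best_f = l.learn()
# print "best x:", best_x, "f(x):", best_f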
Example #10
		# Tail of the objective function objF (the top of this example is
		# truncated; fop, NofFuncCalls, F and x come from the missing part).
		# Log the call count, the chi value, x[0] and x[1] as integers and
		# x[2]..x[10] rounded to two decimals.
		fop.write(str(NofFuncCalls) + " Chi=" + str(F) + " ")
		fop.write(str(int(round(x[0]))) + " " + str(int(round(x[1]))) + " ")
		fop.write(" ".join(str(round(x[i], 2)) for i in range(2, 11)) + "\n")
		fop.close()
		print NofFuncCalls, F.strip("\n"), round(x[0]), round(x[1]), \
			" ".join(str(round(x[i], 2)) for i in range(2, 11))
	return float(F)


x0 = array([10, 5, 2, 4, -10.0, 30.0, -3.0, 30.0, 0.46, 0.55, 0.3])
l = CMAES(objF, x0, minimize=True)
l.maxEvaluations = 50
result = l.learn()

fop = open("curres", 'a')
# Best fitness first, then the 11 best parameters in the same format as above.
vals = [str(round(result[1], 2)),
        str(int(round(result[0][0]))), str(int(round(result[0][1])))]
vals += [str(round(result[0][i], 2)) for i in range(2, 11)]
fop.write(" ".join(vals))
fop.close()

Example #11
def learn(obj_fun, initial_weights):
    # Assumes the standard logging module is imported in this file.
    l = CMAES(obj_fun, initial_weights, minimize=True, verbose=True)
    res = l.learn()
    log = logging.getLogger("CMAES")
    log.debug("result is: {}".format(res))
    return res[0]
def learn(obj_fun, init_values):
    l = CMAES(obj_fun, init_values, minimize=True, verbose=True)
    res = l.learn()
    return res[0]
		# Tail of makeNet(params), the objective handed to CMAES below; the top
		# of this example is truncated. reader, ds, convert, unconvert,
		# learning_rate and damerau_levenshtein_distance come from the missing part.
		for row in reader:
			ds.addSample(convert(row[0]), convert(row[1]))

	# Restored from the commented-out original line: testds is used below.
	testds, trainds = ds.splitWithProportion(0.2)

	net = buildNetwork(20, 20, 20)
	trainer = BackpropTrainer(net, dataset=trainds, learningrate=learning_rate)
	trainer.trainUntilConvergence()

	# Score by edit distance between predicted and target strings.
	score = 0
	for x, y in testds:
		predict = unconvert(net.activate(x))
		score += damerau_levenshtein_distance(predict, unconvert(y))

	global lastNet
	lastNet = net

	global netNum
	netNum += 1

	print "Network " + str(netNum) + " done with score " + str(score)

	return score

# CMA-ES here tunes a single hyperparameter: the initial backprop learning rate.
x0 = [0.01]
optimizer = CMAES(makeNet, x0, minimize=True, maxLearningSteps=10)
print optimizer.learn()
Example #14
		# Tail of graph_function(p) (the top of this example is truncated):
		# collects statistics from module.rec_number over sampled graphs and
		# returns the squared error against the target counts in tri_count.
		tri_1.append(module.rec_number(main, 1))
		tri_2.append(module.rec_number(main, 2))
		count = count + 1
		print count

	return pow((mean(tri_0) - tri_count[0]), 2) + pow((mean(tri_1) - tri_count[1]), 2) \
		+ pow((mean(tri_2) - tri_count[2]), 2)

def objF(p):
    return graph_function(p)

p0 = [0.333, 0.3333, 0.3333, 0, 0, 0, 0]
#p0 = [1.0 / 7] * 7   (note: 1/7 is integer division, i.e. 0, in Python 2)


l = CMAES(objF, p0)
l.verbose = True
l.minimize = True
l._notify()  # internal status hook; prints current state when verbose
l.desiredEvaluation = 3  # stop early once the fitness drops to 3 or below



g = l.learn()
# Flip the sign of any negative entries among the first three parameters,
# then normalize them so they sum to 1 (they are used as probabilities).
if g[0][0] < 0: g[0][0] = -g[0][0]
if g[0][1] < 0: g[0][1] = -g[0][1]
if g[0][2] < 0: g[0][2] = -g[0][2]
summ = g[0][0] + g[0][1] + g[0][2]
print g[0][0] / summ, g[0][1] / summ, g[0][2] / summ
print g[1]
end_time = time.time()  # start_time is set in the truncated top of this example
print "The optimization took ", end_time - start_time, " seconds"