Example #1
    import functools

    import numpy as np
    import sklearn.metrics

    import pso  # the particle swarm optimizer module used throughout these examples
    # Dataset and Math are project-local helpers (CSV loading, sigmoid/relu);
    # their import path is not shown in the original excerpt.

    def dim_weights(shape):
        """Total number of parameters (weights + biases) in a dense net of the given layer sizes."""
        dim = 0
        for i in range(len(shape) - 1):
            dim += (shape[i] + 1) * shape[i + 1]  # +1 row per layer for the bias
        return dim
	
    def weights_to_vector(weights):
        """Flatten a list of per-layer weight matrices into one 1-D vector."""
        w = np.asarray([])
        for layer in weights:
            w = np.append(w, layer.flatten())
        return w

    def vector_to_weights(vector, shape):
        """Inverse of weights_to_vector: rebuild the per-layer weight matrices."""
        weights = []
        idx = 0
        for i in range(len(shape) - 1):
            r = shape[i] + 1  # +1 row for the bias
            c = shape[i + 1]
            W = vector[idx:idx + r * c].reshape(r, c)
            weights.append(W)
            idx += r * c  # advance past this layer's parameters
        return weights
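    # Illustrative round-trip check (hypothetical, not in the original):
    # flattening with weights_to_vector and restoring with vector_to_weights
    # should be lossless for any valid shape, e.g.:
    #   v = np.arange(dim_weights((4, 32, 1)), dtype=float)
    #   ws = vector_to_weights(v, (4, 32, 1))
    #   assert np.allclose(weights_to_vector(ws), v)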
	
    def eval_network(weights, shape, X, y):
        """PSO cost function: MSE over the dataset for each particle's weight vector."""
        mse = np.asarray([])
        for w in weights:
            layer_weights = vector_to_weights(w, shape)
            # Forward pass: sigmoid hidden layer, then ReLU output layer
            l1 = Math.sigmoid(np.dot(X, layer_weights[0]))
            l1 = np.c_[np.ones((l1.shape[0], 1)), l1]  # bias column for the hidden layer
            l2 = Math.relu(np.dot(l1, layer_weights[1]))
            mse = np.append(mse, sklearn.metrics.mean_squared_error(y, l2))
        return mse
    # Load and prepare data
    date, latitude, longitude, magnitude = Dataset.load_from_file("database_original.csv")
    data_size = len(date)
    vectorsX, vectorsY = Dataset.vectorize(date, latitude, longitude), magnitude.reshape((data_size, 1))

    # Split vectors into train / eval sets
    eval_set_size = int(0.1 * data_size)
    index = np.arange(data_size)
    np.random.shuffle(index)
    trainX, trainY = vectorsX[index[eval_set_size:]], vectorsY[index[eval_set_size:]]
    evalX, evalY = vectorsX[index[:eval_set_size]], vectorsY[index[:eval_set_size]]

    # Hyperparameters
    batch_size = 128   # defined but unused in this excerpt
    max_epochs = 100   # defined but unused in this excerpt

    # Shape of the net: 4 inputs (bias added separately), 32 hidden units, 1 output
    shape = (4, 32, 1)

    # Prepend a bias column of ones to the training inputs,
    # then bind the data into the PSO cost function
    X = trainX
    X = np.c_[np.ones((X.shape[0], 1)), X]
    cost_fn = functools.partial(eval_network, shape=shape, X=X, y=trainY)
    swarm = pso.ParticleSwarm(cost_fn, dim=dim_weights(shape), size=50)
            
    # Train...
    i = 0
    best_scores = [swarm.best_score]
    print("Initial best score:", best_scores[-1])
    while swarm.best_score > 1e-6 and i < 500:
        swarm.update()
        i += 1
        if swarm.best_score < best_scores[-1]:
            best_scores.append(swarm.best_score)
            print("Improved best score:", best_scores[-1])
    print(best_scores)
    # Recover the best set of weights found by the swarm
    best_weights = vector_to_weights(swarm.g, shape)
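
The excerpt builds an eval split (evalX, evalY) but never scores best_weights on it. A minimal evaluation sketch, assuming the same Math.sigmoid/Math.relu helpers and the bias-column convention used above:

    # Hypothetical held-out evaluation (not part of the original excerpt)
    Xe = np.c_[np.ones((evalX.shape[0], 1)), evalX]
    h = Math.sigmoid(np.dot(Xe, best_weights[0]))
    h = np.c_[np.ones((h.shape[0], 1)), h]
    pred = Math.relu(np.dot(h, best_weights[1]))
    print("Held-out MSE:", sklearn.metrics.mean_squared_error(evalY, pred))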
Example #2
y_true = np.array(y_train)
y_test_true = np.array(y_test)

# model_s = vanilla_backpropagation(X_train, y_train)  # alternative trainer (unused)
model_s = build_model(LOSS)

# Set up
num_classes = 2
shape = (num_inputs, 25, 50, num_classes)
print(shape)
cost_func = functools.partial(eval_neural_network, shape=shape, X=X_train, y=y_train, model=model_s)

swarm = pso.ParticleSwarm(cost_func, num_dimensions=dim_weights(shape), num_particles=50)

# Train...
i = 0
best_scores = [(i, swarm.best_score)]
print_best_particle(best_scores[-1])
f=open("losskeras.txt","a")
scores = []
while swarm.best_score > 1e-6 and i < 750:
    swarm._update()
    i += 1
    scores.append(swarm.best_score)
    if swarm.best_score < best_scores[-1][1]:
        best_scores.append((i, swarm.best_score))
        print_best_particle(best_scores[-1])
f.write("\n------------\n")
f.close()
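
A minimal sketch of the print_best_particle helper used above (hypothetical; its definition is not shown in these excerpts):

def print_best_particle(best):
    # best is an (iteration, score) tuple, as stored in best_scores
    iteration, score = best
    print("New best score at iteration %d: %g" % (iteration, score))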
Example #3
    y_true = np.zeros((len(y), num_classes))
    for i in range(len(y)):
        y_true[i, y[i]] = 1

    y_test_true = np.zeros((len(y_test), num_classes))
    for i in range(len(y_test)):
        y_test_true[i, y_test[i]] = 1

    # Set up
    shape = (num_inputs, 64, 32, num_classes)

    cost_func = functools.partial(eval_neural_network,
                                  shape=shape,
                                  X=X,
                                  y=y_true.T)

    swarm = pso.ParticleSwarm(cost_func, dim=dim_weights(shape), size=30)

    # Train...
    i = 0
    best_scores = [(i, swarm.best_score)]
    print(best_scores[-1])
    while swarm.best_score > 1e-6 and i < 500:
        swarm.update()
        i += 1
        if swarm.best_score < best_scores[-1][1]:
            best_scores.append((i, swarm.best_score))
            print(best_scores[-1])

    # Test...
    best_weights = vector_to_weights(swarm.g, shape)
Example #4
y_true = np.zeros((len(y), num_classes))
for i in range(len(y)):
    y_true[i, y[i]] = 1

y_test_true = np.zeros((len(y_test), num_classes))
for i in range(len(y_test)):
    y_test_true[i, y_test[i]] = 1

# Set up
accuracies = []
shape = (num_inputs, 64, 32, num_classes)

obj_func = functools.partial(eval_neural_network, shape=shape, X=X, y=y_true.T)

swarm = pso.ParticleSwarm(obj_func,
                          num_dimensions=dim_weights(shape),
                          num_particles=30)

# Train...
i = 0
best_scores = [(i, swarm.best_score)]
print_best_particle(best_scores[-1])
while i < 500:
    swarm._update()
    i += 1
    if swarm.best_score < best_scores[-1][1]:
        corrects, wrongs, predictions = eval_accuracy(
            vector_to_weights(swarm.Gbest, shape), shape, X_test, y_test)
        accuracy = corrects / (corrects + wrongs)
        accuracies.append(accuracy)  # record test accuracy at each improvement
        best_scores.append((i, swarm.best_score))
        print_best_particle(best_scores[-1])
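
A rough sketch of the eval_accuracy helper used above (hypothetical; neither it nor the network's forward pass is shown in these excerpts, so forward_pass below is an assumed name for a function returning per-class scores):

def eval_accuracy(weights, shape, X_test, y_test):
    # forward_pass (assumed) maps inputs to an (n_samples, num_classes) score matrix
    scores = forward_pass(weights, shape, X_test)
    predictions = np.argmax(scores, axis=1)
    corrects = int(np.sum(predictions == np.asarray(y_test)))
    wrongs = len(y_test) - corrects
    return corrects, wrongs, predictions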
Example #5
def test_complex_optimization():
    swarm = pso.ParticleSwarm(cost_func=schaffer6, dim=2, size=20)

    best = swarm.optimize(epsilon=1e-6, max_iter=100000)
    assert schaffer6(best) < 1e-6
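
The schaffer6 cost function is not shown in the excerpt; the textbook Schaffer F6 benchmark, with its global minimum of 0 at the origin, would satisfy this test:

import numpy as np

def schaffer6(p):
    # Schaffer F6 benchmark: f(0, 0) = 0 is the global minimum
    r2 = p[0] ** 2 + p[1] ** 2
    return 0.5 + (np.sin(np.sqrt(r2)) ** 2 - 0.5) / (1.0 + 0.001 * r2) ** 2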
Example #6
def test_simple_optimization():
    swarm = pso.ParticleSwarm(cost_func=evaluate_simple, dim=2, size=20)

    best = swarm.optimize(epsilon=1e-6, max_iter=100000)
    assert evaluate_simple(best) < 1e-6
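
The evaluate_simple cost function is likewise not shown; a plausible stand-in (an assumption, not the original) is the convex sphere function, whose minimum of 0 at the origin satisfies the assertion:

import numpy as np

def evaluate_simple(p):
    # Sphere function: sum of squares, minimized at the origin
    return float(np.sum(np.asarray(p) ** 2))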