Example 1
def substract_mean_pixel_value(train_data):
    # Subtract each image's per-channel mean pixel value.
    # The original switched on a condition that was lost (it read "if True"),
    # so only the first branch could ever run: axis=(2, 3) assumes a
    # channels-first (N, C, H, W) batch; the dead branch's axis=(1, 2) would
    # suit a channels-last (N, H, W, C) batch. keepdims=True is needed so the
    # mean broadcasts back against the 4-D batch.
    train_data -= np.mean(train_data, axis=(2, 3), keepdims=True)
    return train_data
Example 2
def initialize_network(num_inputs, num_hidden_layers, num_nodes_hidden, num_nodes_output):
    
    num_nodes_previous = num_inputs # number of nodes in the previous layer

    network = {}
    
    # loop through each layer and randomly initialize the weights and biases associated with each layer
    for layer in range(num_hidden_layers + 1):
        
        if layer == num_hidden_layers:
            layer_name = 'output' # name last layer in the network output
            num_nodes = num_nodes_output
        else:
            layer_name = 'layer_{}'.format(layer + 1) # otherwise give the layer a number
            num_nodes = num_nodes_hidden[layer] 
        
        # initialize weights and bias for each node
        network[layer_name] = {}
        for node in range(num_nodes):
            node_name = 'node_{}'.format(node+1)
            network[layer_name][node_name] = {
                'weights': np.around(np.random.uniform(size=num_nodes_previous), decimals=2),
                'bias': np.around(np.random.uniform(size=1), decimals=2),
            }
    
        num_nodes_previous = num_nodes

    return network


def compute_weighted_sum(inputs, weights, bias):
    return np.sum(inputs * weights) + bias


def node_activation(weighted_sum):
    # sigmoid activation
    return 1.0 / (1.0 + np.exp(-1 * weighted_sum))


def forward_propagate(network, inputs):

    layer_inputs = list(inputs) # start with the input layer as the input to the first hidden layer

    for layer in network:

        layer_data = network[layer]

        layer_outputs = []
        for layer_node in layer_data:

            node_data = layer_data[layer_node]

            # compute the weighted sum and the output of each node at the same time
            node_output = node_activation(compute_weighted_sum(layer_inputs, node_data['weights'], node_data['bias']))
            layer_outputs.append(np.around(node_output[0], decimals=4))

        if layer != 'output':
            print('The outputs of the nodes in hidden layer number {} is {}'.format(layer.split('_')[1], layer_outputs))

        layer_inputs = layer_outputs # set the output of this layer to be the input to next layer

    network_predictions = layer_outputs
    return network_predictions
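
A quick usage sketch (an assumption, not part of the original example): build a small network with two hidden layers and propagate one random input through it.

# Usage sketch: 5 inputs, two hidden layers of 3 and 2 nodes, 1 output node
small_network = initialize_network(5, 2, [3, 2], 1)
inputs = np.around(np.random.uniform(size=5), decimals=2)
predictions = forward_propagate(small_network, inputs)
print('The predicted value by the network for the given input is {}'.format(predictions))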
Example 3
def OR(x1, x2):
    x = np.array([x1, x2])
    w = np.array([0.5, 0.5])
    tmp = np.sum(w * x)
    if tmp <= 0:
        return 0
    else:
        return 1
Example 4
def NAND(x1, x2):
    x = np.array([x1, x2])
    w = np.array([-0.5, -0.5])
    b = 0.7  # bias; without it, NAND(0, 0) would wrongly return 0
    tmp = np.sum(w * x) + b
    if tmp <= 0:
        return 0
    else:
        return 1
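
A quick sanity check (not in the original): print both gates' truth tables.

for x1, x2 in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print('OR({0}, {1}) = {2}   NAND({0}, {1}) = {3}'.format(
        x1, x2, OR(x1, x2), NAND(x1, x2)))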
Example 5
def GenerateDictionary():
    global collection_words
    # Note: "Numpy" here is the source project's own helper module, not the numpy package
    # stemming
    porter_stem_tokens = Numpy.word_stem(collection_words)
    # lemmatization
    wordnet_lematizer_tokens = Numpy.word_lematizer(porter_stem_tokens)

    collection_words = delete_re_word(wordnet_lematizer_tokens)
Example 6
def GenerateKeywords(filename):
    sentence = re.sub(pattern, '', filename)
    # word segmentation
    tokens = Numpy.divide_word(sentence)
    # total number of tokens after segmentation
    count = len(tokens)
    # stemming
    porter_stem_tokens = Numpy.word_stem(tokens)
    # lemmatization
    wordnet_lematizer_tokens = Numpy.word_lematizer(porter_stem_tokens)
    # remove stopwords
    stop_words_tokens = Numpy.delete_stopwords(wordnet_lematizer_tokens)

    return [stop_words_tokens, filename]
Example 8
    def check(self, img):
        """
        Checks whether the problem corrected by this filter is present in the input image

        img: an RGB NumPy array of shape (576, 720, 3)
        """
        img = np.interp(img, (0, 1), (0, 255))  # rescale from [0, 1] to [0, 255]
        return self.blur.check_blur(img, True) <= self.blur.max_limit
Example 9
def GenerateKeywords(filename):
    f = open('VOA2/' + filename, 'r', encoding='utf-8')
    print(filename)
    sentence = f.read()
    f.close()
    print(sentence)
    sentence = re.sub(pattern, '', sentence)
    # word segmentation
    tokens = Numpy.divide_word(sentence)

    # stemming
    porter_stem_tokens = Numpy.word_stem(tokens)
    # lemmatization
    wordnet_lematizer_tokens = Numpy.word_lematizer(porter_stem_tokens)
    # remove stopwords
    stop_words_tokens = Numpy.delete_stopwords(wordnet_lematizer_tokens)
    # print(stop_words_tokens)
    f = open('VOA3/' + filename, 'w', encoding='utf-8')
    f.write(json.dumps(stop_words_tokens))
    f.close()
Example 10
def grafica2(vX):
    # a, b are arrays of RANGO numbers in the range (-10, 10)
    a = np.linspace(-10, 10, RANGO)
    b = np.linspace(-10, 10, RANGO)

    x, y = np.meshgrid(a, b)
    # vectorize lets the function f2(x, y) take the arrays x, y as arguments
    Z = np.vectorize(f2)
    ax = plt.axes(projection='3d')
    # draw the surface of f2(x, y)
    ax.contour3D(x, y, Z(x, y), 30, cmap='binary')

    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')

    for i in range(Pf):
        # plot the population on the surface
        ax.scatter3D(vX[0, i], vX[1, i], f2(vX[0, i], vX[1, i]))
    plt.show()
Example 11
def EtapaAbejasObservadorasMaestras(num):  # onlooker-bee phase
    m = mejorPosicion(num)
    for i in range(Po):
        while True:
            k = random.randint(0, Pf-1)  # random index such that k != m
            if k != m:
                break
        vm = np.zeros(D)  # float candidate (an int array would truncate the update)
        for j in range(D):  # the original looped range(D-1), which skips the last dimension
            r = random.uniform(0, 1)  # random in [0, 1]
            vm[j] = X[j, m] + r*(X[j, m]-X[j, k])  # vmj = xmj + r*(xmj - xkj)
        if f(num, vm[0], vm[1]) < f(num, X[0, m], X[1, m]):  # if f(vm) < f(xm)
            X[:, m] = vm  # xm = vm
            li[m] = 0  # lm = 0
        else:
            li[m] += 1  # lm = lm + 1
Example 12
def EtapaAbejasEmpleadasAlumnas(num):  # employed-bee phase
    for i in range(Pf):
        while True:
            k = random.randint(0, Pf-1)  # random index such that k != i
            if k != i:
                break
        vi = np.zeros(D)  # float candidate (an int array would truncate the update)
        if f(num, X[0, i], X[1, i]) < f(num, X[0, k], X[1, k]):  # if f(xi) < f(xk)
            for j in range(D):  # the original looped range(D-1), which skips the last dimension
                r = random.uniform(0, 1)  # random in [0, 1]
                vi[j] = X[j, i]+r*(X[j, i]-X[j, k])  # vij = xij + r*(xij - xkj)
        else:
            for j in range(D):
                r = random.uniform(0, 1)  # random in [0, 1]
                vi[j] = X[j, i]+r*(X[j, k]-X[j, i])  # vij = xij + r*(xkj - xij)
        if f(num, vi[0], vi[1]) < f(num, X[0, i], X[1, i]):  # if f(vi) < f(xi)
            X[:, i] = vi  # xi = vi
            li[i] = 0  # li = 0
        else:
            li[i] += 1  # li = li + 1
Example 13
#>>> N[a][b]		#Neighborhood membership
#1
#>>> sum(N[f])		#Degree
#3


#2-6 Weight matrix that assigns an infinite weight to nonexistent edges
a, b, c, d, e, f, g, h = range(8)
inf = float('inf')		#sys.maxint
#		a    b    c    d    e    f    g    h
W = [[  0,   2,   1,   3,   9,   4, inf, inf],	#a
	 [inf,   0,   4, inf,   3, inf, inf, inf],	#b
	 [inf, inf,   0,   8, inf, inf, inf, inf],	#c
	 [inf, inf, inf,   0,   7, inf, inf, inf],	#d
	 [inf, inf, inf, inf,   0,   5, inf, inf],	#e
	 [inf, inf,   2, inf, inf,   0,   2,   2],	#f
	 [inf, inf, inf, inf, inf,   1,   0,   6],	#g
	 [inf, inf, inf, inf, inf,   9,   8,   0]]	#h
#>>> W[a][b] < inf		#Neighborhood membership
#True
#>>> W[c][e] < inf		#Neighborhood membership
#False
#>>> sum(1 for w in W[a] if w < inf) - 1	#Degree
#5
#remember the -1 here, because the diagonal doesn't count!


#N = [[0]*10 for i in range(10)]
import numpy as np
N = np.zeros([10, 10])
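
The same membership and degree queries shown in the comments work on W; a small helper sketch (not in the original source):

def degree(W, u):
    # degree of u: count the finite off-diagonal entries in its row
    return sum(1 for v, w in enumerate(W[u]) if v != u and w < inf)

print(degree(W, a))  # 5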
Example 14
# Solving the heat equation by a Monte Carlo method
# on a "finite" bar

from math import pi
import numpy as np
from matplotlib import pyplot as plt

# Problem data

a = 0
b = 1
thetaA = 100
thetaB = 500
sigma = 0.01

xx = np.linspace(start=-a, stop=b, num=101)
tempinf = thetaA + (thetaB - thetaA)*(xx+a)/(b+a)  # steady-state profile

plt.figure(facecolor='w')
plt.plot(xx, tempinf, label="t = infinity")
plt.xlabel("Position x along the bar (m)")
plt.ylabel("Temperature (K)")
plt.title("Heat diffusion in a bar")

n1, n2 = 5, 8
theta1, theta2 = 50, 100
temp1 = np.sin(2*pi*n1*(xx+a)/2/(b+a))
temp2 = np.sin(2*pi*n2*(xx+a)/2/(b+a))
temp0 = tempinf + theta1*temp1 + theta2*temp2  # initial (t = 0) profile
plt.plot(xx, temp0, label="t = 0")
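
A hedged continuation (my assumption, not in the original snippet): for the heat equation with fixed ends, each sine mode above decays exponentially at rate sigma*(n*pi/(b+a))**2, so the profile at a later time t can be plotted the same way.

t = 5.0  # an arbitrary time, chosen only for illustration
lam1 = sigma*(n1*pi/(b+a))**2  # decay rate of mode n1
lam2 = sigma*(n2*pi/(b+a))**2  # decay rate of mode n2
tempt = tempinf + theta1*temp1*np.exp(-lam1*t) + theta2*temp2*np.exp(-lam2*t)
plt.plot(xx, tempt, label="t = 5 s")
plt.legend()
plt.show()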
 
Example 15
# In this article I'll explain how to create 1-D, 2-D, and 3-D arrays manually
# in NumPy, and how to create 1-D, 2-D, and 3-D random-number NumPy arrays.

import numpy as np

# Creating 1-D, 2-D, and 3-D arrays manually

#1D array
#Example
a = np.array([1, 2, 3])
#output
# array([1, 2, 3])

#2D array (a list of lists)
a = np.array([[1, 2, 3], [4, 5, 6]])
#output
# array([[1, 2, 3], [4, 5, 6]])

#3D array (a list of lists of lists)
#Example
a = np.array([[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]],
              [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]])
#output
# array([[[ 0,  1,  2,  3], [ 4,  5,  6,  7], [ 8,  9, 10, 11]],
#        [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]])

# Let's see how to create a random NumPy array (three ways)

# First way: using numpy.random.rand()
# (this creates an array of random numbers, all between 0 and 1)
# Syntax:
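The snippet breaks off at the syntax line; a minimal sketch of the three usual generators (assuming these are the three ways the article meant):

# np.random.rand: uniform samples in [0, 1) with the given shape
a = np.random.rand(2, 3)

# np.random.randn: samples from the standard normal distribution
b = np.random.randn(2, 3)

# np.random.randint: random integers drawn from [low, high)
c = np.random.randint(0, 10, size=(2, 3))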
Example 16
def initialize(self):
    generation = np.zeros(self.Np)
    for index, x in enumerate(generation):
        generation[index] = PPP(self.xmin, self.xmax)
    return generation
Example 17
def evaluate(glove_model, query, document, query2document, model):

    average_rank = 0
    top1 = top10 = top50 = top100 = top200 = 0
    number = 0
    q2d_ranking = {}
    true_ranking = {}

    for query_id, true_document_pmid in query2document.items():

        query_words = query[query_id]
        query_embed = getWeightMatrix(glove_model, query_words, embed_dim)
        document_candidates = {}

        for pmid, document_words in document.items():

            sim_matrix = torch.zeros(len(query_words),
                                     len(document_words),
                                     dtype=torch.float)
            document_embed = getWeightMatrix(glove_model, document_words,
                                             embed_dim)

            for i, query_word in enumerate(query_words):
                for j, document_word in enumerate(document_words):
                    sim_matrix[i][j] = wordSimilarity(glove_model, query_word,
                                                      document_word)

            if torch.cuda.is_available():
                sim_matrix = sim_matrix.cuda()

            score = model(query_embed, document_embed, sim_matrix, 0.5,
                          0.5).cpu()
            document_candidates[pmid] = score.data.item()

        # higher similarity scores should rank first, so sort in descending order
        # (the original sorted ascending, which would put the best match last)
        sorted_documents = sorted(document_candidates.items(),
                                  key=lambda x: x[1], reverse=True)
        q2d_ranking[query_id] = sorted_documents

        # getIndex is assumed to return the 1-based rank of the true document
        true_index = getIndex(sorted_documents, true_document_pmid)
        true_ranking[query_id] = true_index

        print("The true document is ranked as %dth in the list" % true_index)

        if true_index <= 1:
            top1 += 1
        if true_index <= 10:
            top10 += 1
        if true_index <= 50:
            top50 += 1
        if true_index <= 100:
            top100 += 1
        if true_index <= 200:
            top200 += 1

        average_rank += true_index
        number += 1

    # with a single relevant document per query, the reciprocal rank is 1/rank
    # (the original iterated over the dict's keys instead of its rank values)
    rr = [1.0 / rank for rank in true_ranking.values()]
    MRR = np.mean(rr)

    # average precision likewise reduces to 1/rank when only one document is relevant
    pp = [1.0 / rank for rank in true_ranking.values()]
    MAP = np.mean(pp)

    with open('../data/result_true_ranking.json', 'w') as fwrite:
        json.dump(true_ranking, fwrite, indent=4)

    with open('../data/query_to_document_ranking.json', 'w') as fwrite:
        json.dump(q2d_ranking, fwrite, indent=4)

    average_rank = float(average_rank) / number

    print("Top 1: %d, Top 10: %d, Top 50: %d, Top 100: %d, Top 200: %d" %
          (top1, top10, top50, top100, top200))

    return average_rank
Example 18

def mejorPosicion(num):  # returns the index of the best food source
    best = 0
    for i in range(Pf):
        if f(num, X[0, i], X[1, i]) < f(num, X[0, best], X[1, best]):
            best = i
    return best


RANGO = 50
D = 2

POBLACION = 50
GENERACIONES = 150
L = 20
Pf = 30  # employed bees (food sources)
Po = POBLACION - Pf  # onlooker bees

X = np.zeros((2, Pf))  # food sources
li = np.zeros(Pf)  # trial counters
apt = np.zeros(POBLACION)  # initialize the fitness array

xl = np.array([-10, -10])  # lower bound
xu = np.array([10, 10])  # upper bound


def inicializa():
    for i in range(Pf):
        X[:, i] = xl + (xu-xl)*rand()  # initialize food sources randomly
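A hedged driver sketch (not in the original snippets) tying the phases from Examples 11, 12, and 18 together; the scout step is written inline as an assumption, re-initializing any source whose trial counter exceeds L:

def abc(num):
    inicializa()
    for g in range(GENERACIONES):
        EtapaAbejasEmpleadasAlumnas(num)
        EtapaAbejasObservadorasMaestras(num)
        for i in range(Pf):
            if li[i] > L:  # scout step: abandon exhausted sources
                X[:, i] = xl + (xu - xl)*rand()
                li[i] = 0
    return mejorPosicion(num)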
def AND(z):
    print(OF(BF(np.sum(z * w))))
# Binary function
def BF(x):
    if x > 0:
        return 1
    return 0


# output function
def OF(x):
    if x:
        return True
    return False


w = np.array([-2, 1, 2])
N = 0.001
Input = [(1, 1, 1), (1, 0, 1), (0, 1, 1), (0, 0, 1)]
test_input = np.array([[1, 1, 1], [1, 0, 1], [0, 1, 1], [0, 0, 1]])
correct_output = [True, False, False, False]
# plot the dataset to see whether it is linearly separable
import matplotlib.pyplot as plt
x = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
y = [True, False, False, False]
for t in range(4):
    if y[t] == True:
        plt.scatter(x[t][0], x[t][1], alpha=0.8, c='r')
    else:
        plt.scatter(x[t][0], x[t][1], alpha=0.8, c='y')
plt.show()
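
The snippet defines a learning rate N but never trains; a minimal perceptron-update sketch (my sketch, not the original author's code) under the usual rule w += N*(target - prediction)*x, treating the constant third input as the bias:

for epoch in range(10000):
    errors = 0
    for xi, target in zip(test_input, correct_output):
        prediction = BF(np.sum(xi * w))
        error = int(target) - prediction
        if error != 0:
            w = w + N * error * xi  # reassign rather than +=: w starts as an int array
            errors += 1
    if errors == 0:  # stop once every sample is classified correctly
        break

for xi in test_input:
    print(xi[:2], OF(BF(np.sum(xi * w))))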
import pandas as pd
import numpy as np

#Load user ratings into a data frame
user_ratings_df = pd.read_csv("movie_ratings.csv")

#Convert the running list of user ratings into a matrix
user_ratings_matrix = pd.pivot_table(user_ratings_df,
                                     index='user_id',
                                     columns='movie_id',
                                     aggfunc=np.max)

#Apply matrix factorization to find the latent features
U, M = matrix_factorization_utilities.low_rank_matrix_factorization(
    user_ratings_df.as_matrix(),  # as_matrix() is deprecated in newer pandas; .to_numpy() is the modern equivalent
    num_features=15, regularization_amount=0.1)

# Find all predicted ratings by multiplying U by M
predicted_ratings = np.matmul(U, M)

# Save all predicted ratings to a csv file
predicted_ratings_df = pd.DataFrame(index=user_ratings_matrix.index,
                                    columns=user_ratings_matrix.columns,
                                    data=predicted_ratings)
predicted_ratings_df.to_csv("predicted_ratings.csv")
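
A hedged usage sketch: once predicted_ratings_df exists, recommending for a single user is just sorting that user's row (user id 1 here is hypothetical):

user_id = 1  # hypothetical user id, assumed to exist in the index
user_row = predicted_ratings_df.loc[user_id]
print(user_row.sort_values(ascending=False).head(5))  # top 5 predicted movies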
Example 22
import numpy as np

intarr = np.array([[1, 2, 4], [3, 5, 4]])  # the original was missing the comma between the rows
print("integer array")
print(intarr)
print("")

floatarr = np.array([[2, 9, 8], [7, 9, 7]], dtype=float)
print("float array")
print(floatarr)
print("")

complexarr = np.array([[1, 3, 4], [4, 3, 6]], dtype=complex)  # np.complex is deprecated
print("complex array")
print(complexarr)
print("")
Example 23
def normalize(train_data):
    train_data /= np.float32(255)  # scale pixel values to [0, 1]; expects a float array
    train_data = substract_mean_pixel_value(train_data)
    return train_data
Example 24
def flip(train_data):
    # np.fliplr flips axis 1, so this mirrors a single (H, W) or (H, W, C)
    # image; on a batched array it would flip the wrong axis
    flipped_data = np.fliplr(train_data)
    return flipped_data
Example 25
# randn(rows, cols): multi-dimensional array of normally distributed random numbers
# array(list): create a multi-dimensional array
# arange: generates the integers 0 to n-1

import numpy as np
from numpy import genfromtxt  # load a text file into an array


data = np.random.rand(3, 4)  # random numbers in a 3x4 matrix
print(data)

lotto = np.random.randint(46, size=(3, 6))  # random integers in a 3x6 matrix
print(lotto)

lst = [3, 4.1, 5, 6.3, 7, 8.6]  # flat list
arr = np.array(lst)


print('mean', arr.mean())  # descriptive statistics
print('sum', arr.sum())
print('nonzero count', np.count_nonzero(arr))  # the original label wrongly said 'sum'
print('max', arr.max())
print('min', arr.min())
print('variance', arr.var())
print('std dev', arr.std())

lst = [[9, 8, 7, 6, 5], [1, 2, 3, 4, 5]]  # nested list
arr = np.array(lst)
print(arr)
print(arr[0, 2])  # 7
print(arr[1, 3])  # 4
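
genfromtxt is imported above but never used; a quick sketch ('data.csv' is a hypothetical file name):

table = genfromtxt('data.csv', delimiter=',')  # parse a comma-separated text file into an array
print(table.shape)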
Example 26
def mueve_col():
    import numpy as np
    b = np.array([(34, 43, 73, 25, 10), (82, 22, 12, 14, 10),
                  (53, 94, 66, 84, 10), (35, 73, 24, 34, 10)])
    print(b)
Example 27
def substract_mean_image(train_data):
    # subtract the mean image (computed across the batch) from every sample
    mean_image = np.mean(train_data, axis=0)
    train_data -= mean_image
    return train_data
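
A hedged usage sketch chaining the preprocessing helpers (assuming a channels-first float batch and that the helpers from Examples 1, 23, 24, and 27 are all in scope):

batch = np.random.rand(8, 3, 32, 32).astype(np.float32) * 255  # fake NCHW image batch
batch = normalize(batch)              # scale to [0, 1] and remove per-image channel means
batch = substract_mean_image(batch)   # subtract the batch mean image
mirrored = flip(batch[0, 0])          # horizontal mirror of one (H, W) plane
print(batch.shape, mirrored.shape)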