def concatenateMatrixInList(self, matrixList, dim, axis):
    # Seed with an empty array whose fixed dimension is `dim`, so the first
    # concatenate call already sees a shape-compatible operand.
    if axis == 0:
        E = np.empty([0, dim])
    elif axis == 1:
        E = np.empty([dim, 0])
    else:
        raise ValueError('axis must be 0 or 1')
    for matrix in matrixList:
        E = np.concatenate((E, matrix), axis=axis)
    return E
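# Usage sketch (illustrative, not from the original source). The method never
# reads `self`, so it can be exercised standalone by passing None for it:
import numpy as np

a = np.ones((2, 3))
b = np.zeros((4, 3))
stacked = concatenateMatrixInList(None, [a, b], dim=3, axis=0)
print(stacked.shape)  # (6, 3) -- equivalent to np.concatenate([a, b], axis=0)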
Example #2
def forward_pass(x, hidden_sizes, wh, bh, w_out, b_out, alpha, p):
    """
    Performs the forward pass of a neural network.
    :param x: The input data of form (N x D), where N is the number of observations an D is the dimensionality.
    :param hidden_sizes: The size of each hidden layer as array.
    :param wh: The weights of each hidden layer connection as array. Each weight is a matrix of (H_i-1 ... H_i),
    where H_i-1 is the size of the previous hidden layer (or the input layer) and H_i is the size of the corresponding
    hidden layer..
    :param bh: The biases of each hidden layer as array. Each bias is a vector of the same length of the corresponding
    hidden layer.
    :param w_out: The weight of the output layer as matrix of form (H x out_size),
    where H is the size of the last hidden layer.
    :param b_out: The bias of the output layer as vector of length out_size.
    :param alpha: The factor by which negative inputs are scaled in ReLU activations. Set to 0 to avoid leaky ReLU.
    :param p: The probability of each neuron to be dropped out. Set to 1 to disable dropout.
    :return: A tuple consisting of the following values:
    * An array containing the values of each hidden layer as vector of length hidden_size[i] for every input observation.
    * An array containing the class scores of each input observation.
    * The connection weights of the last layer (output_layer).
    """
    hidden_layers = [np.empty((x.shape[0], size)) for size in hidden_sizes]
    for h in range(len(hidden_sizes)):
        # Affine transform: the first hidden layer consumes the input, every
        # later layer consumes the previous hidden layer.
        if h == 0:
            hidden = x.dot(wh[h]) + bh[h]
        else:
            hidden = hidden_layers[h - 1].dot(wh[h]) + bh[h]
        hidden = calculate_activation(hidden, alpha)
        # Inverted dropout: keep each neuron with probability p and rescale
        # by 1/p so the expected activation matches test time.
        dropout_mask = (np.random.random(hidden.shape) < p) / p
        hidden *= dropout_mask
        hidden_layers[h] = hidden
    outs = hidden_layers[-1].dot(w_out) + b_out
    return hidden_layers, outs, w_out
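# `calculate_activation` is used above but not shown in this example. A
# minimal sketch consistent with the docstring's description of `alpha`
# (an assumption about the missing helper, not its original code):
def calculate_activation(z, alpha):
    # Leaky ReLU: negatives are scaled by alpha; alpha == 0 gives plain ReLU.
    return np.where(z > 0, z, alpha * z)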
Example #3
def train(self, input, target, epochs):
    """Train network for all epochs"""
    #train_writer = SummaryWriter(summaries_dir + '/train')
    lossArray = np.empty(epochs)  # array to store loss each epoch
    deltaW_vec = np.empty(epochs)  # array to store change in weights each epoch
    for i in range(epochs):
        loss, deltaW = self.trainOneEpoch(input, target)
        lossArray[i] = loss
        deltaW_vec[i] = deltaW
        if i % PRINT_BIN == 0:
            print('Iter {}, training loss {}'.format(i, loss))
        #summary1 = summaryOps.scalarSummary('loss', loss)
        #train_writer.add_summary(summary1, i)
    #train_writer.close()
    self.deltaW_vec = deltaW_vec
    self.loss_vec = lossArray
    return lossArray
Example #4
def test_creation():
    a = np.array([1, 2, 3])  # 1-D array from a list
    print(a)
    b = np.array([[1, 2, 3], [2, 3, 4]])  # 2-D array from nested lists
    print(b)
    a = np.zeros((2, 3))  # 2x3 array of zeros
    print(a)
    b = np.ones((2, 3))  # 2x3 array of ones
    print(b)
    c = np.full((2, 3), 7)  # 2x3 array filled with the constant 7
    print(c)
    d = np.empty((2, 3))  # 2x3 array left uninitialized (arbitrary values)
    print(d)
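# Note (illustrative): unlike zeros/ones/full, np.empty only allocates the
# buffer, so the values printed above for `d` are whatever happened to sit in
# memory. Only the shape and dtype are guaranteed:
e = np.empty((2, 3))
print(e.shape, e.dtype)  # (2, 3) float64 -- the element values are arbitrary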
Example #5
def backpropagation(x, s, y, hidden_layers, wh, bh, w_out, b_out, alpha):
    """
    Performs the backpropagation of a neural network.
    :param x: The input data of form (N x D), where N is the number of observations an D is the dimensionality.
    :param s: The score matrix of form (N x K), where N is the number of observations and K is the number of classes.
    :param y: The ground truth labels for each observation.
    :param hidden_layers: An array containing the values of each hidden layer as a vector.
    :param wh: The weights of each hidden layer connection as array. Each weight is a matrix of (H_i-1 ... H_i),
    where H_i-1 is the size of the previous hidden layer (or the input layer) and H_i is the size of the corresponding
    hidden layer..
    :param bh: The biases of each hidden layer as array. Each bias is a vector of the same length of the corresponding
    hidden layer.
    :param w_out: The weight of the output layer as matrix of form (H x K),
    where H is the size of the last hidden layer and K is the number of classes.
    :param b_out: The bias of the output layer as vector of length K, where K is the number of classes.
    :param alpha: The factor by which negative inputs are scaled in ReLU activations. Set to 0 to avoid leaky ReLU.
    :return: The backpropagation returns relevant gradients as a tuple containing the following values:
    * An array containing the gradients for the connection weights of each hidden layer of the same form as `wh`.
    * An array containing the gradients for the biases of each hidden layer of the same form as `bh`.
    * An array containing the gradients for the connection weights of the output layer of the same form as `w_out`.
    * An array containing the gradients for the biases of the output layer of the same form as `b_out`.
    """
    dscores = cross_entropy_loss_gradient(s, y)
    # Gradients of the output layer.
    dw_out2 = hidden_layers[-1].T.dot(dscores)
    db_out2 = np.sum(dscores, axis=0, keepdims=True)
    dhiddens = {}
    dwh2 = [np.full(w_i.shape, 0.0) for w_i in wh]
    dbh2 = [np.empty(b_i.shape) for b_i in bh]
    # Walk backwards from the last hidden layer to the first.
    for h in range(len(hidden_layers) - 1, -1, -1):
        if h == len(hidden_layers) - 1:
            dhidden = dscores.dot(w_out.T)
        else:
            dhidden = dhiddens[h + 1].dot(wh[h + 1].T)
        # Leaky-ReLU backward: scale the gradient by alpha wherever the
        # activation was negative.
        dhidden[hidden_layers[h] < 0] *= alpha
        dhiddens[h] = dhidden
        if h == 0:
            dwh2[h] = x.T.dot(dhidden)
        else:
            dwh2[h] = hidden_layers[h - 1].T.dot(dhidden)
        dbh2[h] = np.sum(dhidden, axis=0, keepdims=True)
    dw_out2 += lambda_ * w_out  # L2 regularization (lambda_ from enclosing scope)
    return dwh2, dbh2, dw_out2, db_out2
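# `cross_entropy_loss_gradient` is used above but not shown in this example.
# A common softmax cross-entropy gradient, sketched here as an assumption
# about the missing helper (s: raw scores of shape (N x K), y: integer labels):
def cross_entropy_loss_gradient(s, y):
    exp_s = np.exp(s - s.max(axis=1, keepdims=True))  # numerically stable softmax
    probs = exp_s / exp_s.sum(axis=1, keepdims=True)
    probs[np.arange(s.shape[0]), y] -= 1  # subtract the one-hot targets
    return probs / s.shape[0]             # average over the N observations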
Example #6
def plot_SSError_per_size(teacher, sizes, simulations=12):
    ssError = np.empty(shape=(simulations, len(sizes)))
    students = studentSize_simple(teacher, sizes)
    for sim in range(simulations):
        #tf.reset_default_graph()
        print(students)
        model = NoisyStudentsTeacher(teacher, students, epochs=1000)
        model.run()
        ssError[sim, :] = model.steadyState()
    print(ssError)
    meanError = ssError.mean(0)
    outputName = '{path}{date:%Y-%m-%d_%H%M%S_Size}.png'.format(
        path=GRAPH_OUTPUT_PATH, date=datetime.datetime.now())
    fig, ax = plt.subplots()
    sizesNN = [model.sizeNNs[i] for i in range(len(sizes))]
    ax.plot(sizesNN, meanError, 'o')
    ax.set_ylabel('Average error')
    ax.set_xlabel('Number of synapses')
    fig.savefig(outputName)
    plt.close()
Example #7
import time

import numpy as np

st = time.time()

# image directory
dirTrains = ['D:\\image\\train2lt\\']

step_number_x = 41
step_number_y = 51
step_number_z = 21
# number of central moments
moments_num = 7

angle_num = 3  # number of angle labels (x, y, z)

labels = np.empty([step_number_x * step_number_y * step_number_z, angle_num],
                  dtype=float)
labels_x = np.empty([step_number_x * step_number_y * step_number_z, 1],
                    dtype=float)
labels_y = np.empty([step_number_x * step_number_y * step_number_z, 1],
                    dtype=float)
labels_z = np.empty([step_number_x * step_number_y * step_number_z, 1],
                    dtype=float)

features = np.empty(
    [step_number_x * step_number_y * step_number_z, moments_num], dtype=float)

for dirTrain in dirTrains:

    flag = 0
    # read the txt file listing the image names
    image_train_f = open(dirTrain + 'image_train.txt', 'r')
Example #8
f_mean = np.loadtxt(dirTrains[0] + "image_train_mean.txt", delimiter=' ')
picName = '644.jpg'

# read the image as grayscale
img = cv2.imread(dirTest + picName, 0)

# thresholding
#ret,thresh = cv2.threshold(img,125,255,0)
thresh = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 11, 2)
#thresh = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)

# contour detection: find the detection target in the X-ray image
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2)  # OpenCV 3.x API; 2 == cv2.CHAIN_APPROX_SIMPLE
l = len(contours)
f = np.empty([l, moments_num], dtype=float)

for i in range(l):
    cnt = contours[i]
    area = cv2.contourArea(cnt)
    if area > min_area:
        M = cv2.moments(cnt)
        feature = [
            M['nu20'], M['nu11'], M['nu02'], M['nu30'], M['nu21'], M['nu12'],
            M['nu03']
        ]
        f[i] = feature  # copy the seven normalized central moments into row i

f = f - f_mean
f = np.abs(f)
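# Caveat sketch (an alternative, not part of the original script): rows of
# `f` whose contour fails the `area > min_area` test are never written and
# keep the arbitrary contents left by np.empty. Collecting only the accepted
# rows avoids comparing uninitialized values against f_mean:
accepted = []
for cnt in contours:
    if cv2.contourArea(cnt) > min_area:
        M = cv2.moments(cnt)
        accepted.append([M['nu20'], M['nu11'], M['nu02'],
                         M['nu30'], M['nu21'], M['nu12'], M['nu03']])
f_accepted = np.abs(np.array(accepted) - f_mean)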