def project(tuplelist):
    '''Takes a list of tuples and fits a least-squares curve through them.

    Each tuple is (x1, ..., xk, b): the leading entries form one row of the
    design matrix (with a constant 1 appended for the intercept term) and
    the final entry is the target value.  Returns the normal-equation
    solution (X^T X)^-1 X^T b as a matrix.Matrix column of coefficients.
    '''
    # Format data: split each tuple into a design row and a target value.
    xrows = []
    brows = []
    for mytuple in tuplelist:
        xrow = mytuple[:-1] + (1,)  # append constant term for the intercept
        xrows.append(xrow)
        brows.append([mytuple[-1]])
    x_matrix = matrix.Matrix(xrows)
    b_matrix = matrix.Matrix(brows)
    x_transpose = x_matrix.transpose()
    # Compute inverse of X_transpose * X.
    x_matrix_2 = matrix.multiply(x_transpose, x_matrix)
    x_inv = x_matrix_2.inverse()
    # Compute X_transpose * b.
    xt_b = matrix.multiply(x_transpose, b_matrix)
    return matrix.multiply(x_inv, xt_b)
def train(self, inputs_list, targets_list):
    """Run one backpropagation step on a single (input, target) pair.

    inputs_list / targets_list are matrix objects; they are transposed
    into column vectors before the forward pass.  Updates self.weights_ih
    and self.weights_ho in place, scaled by the learning rate self.lr.
    """
    # convert inputs list to 2d column arrays
    inputs = inputs_list.transpose()
    targets = targets_list.transpose()
    # calculate signals into hidden layer
    hidden_inputs = multiply(self.weights_ih, inputs)
    hidden_outputs = self.activation_function(hidden_inputs)
    # calculate signals entering final output layer
    final_inputs = multiply(self.weights_ho, hidden_outputs)
    # calculate signals exiting final output layer
    final_outputs = self.activation_function(final_inputs)
    # output layer error is the target - actual
    output_errors = targets - final_outputs
    # hidden layer error is the output_errors, split by weights,
    # recombined at hidden nodes
    hidden_errors = multiply(transpose(self.weights_ho), output_errors)
    # update the weights for the links between the hidden and output layers.
    # BUG FIX: the sigmoid-derivative factor is out * (1 - out); the original
    # multiplied by final_inputs (pre-activation) instead of final_outputs.
    self.weights_ho += self.lr * multiply(
        (output_errors * final_outputs * (1.0 - final_outputs)),
        transpose(hidden_outputs))
    # update the weights for the links between the input and hidden layers
    self.weights_ih += self.lr * multiply(
        (hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
        transpose(inputs))
def draw_offset_mult(offset, blend, mult, mask):
    """Write a blend of two horizontally adjacent source pixels into frame f.

    For every position in the overlap of frame f and image pic, the pixels
    at x+offset and x+offset+1 are scaled by mult (weighted (1-blend) and
    blend respectively), masked, summed, and plotted.
    """
    height = min(f.height, pic.size[1])
    width = min(f.width, pic.size[0])
    for y in range(height):
        for x in range(width):
            left = matrix.cmultiply(
                matrix.multiply(pic.getpixel((x + offset, y))[:3],
                                mult * (1 - blend)), mask)
            right = matrix.cmultiply(
                matrix.multiply(pic.getpixel((x + offset + 1, y))[:3],
                                mult * blend), mask)
            f.point(x, y, matrix.add(left, right))
def addBezier(m, x1, y1, x2, y2, x3, y3, x4, y4, step):
    """Append a cubic Bezier curve through the four control points to edge matrix m.

    The Bezier basis matrix turns control-point coordinates into polynomial
    coefficients; the parametric x(t) and y(t) (z fixed at 0) are then
    sampled every `step` by addEdgesFromParam.
    """
    basis = [[-1, 3, -3, 1],
             [3, -6, 3, 0],
             [-3, 3, 0, 0],
             [1, 0, 0, 0]]
    x_coeffs = matrix.multiply(basis, matrix.transpose([[x1, x2, x3, x4]]))
    y_coeffs = matrix.multiply(basis, matrix.transpose([[y1, y2, y3, y4]]))
    fx = polyParametrize(matrix.transpose(x_coeffs)[0])
    fy = polyParametrize(matrix.transpose(y_coeffs)[0])
    fz = lambda t: 0
    addEdgesFromParam(m, fx, fy, fz, step)
def run(self):
    """Fit the model by solving the (regularized) normal equations, then report.

    Solves weight = M^-1 * (X^T y) where M comes from CreateMatrix, computes
    the fit error, and prints both.
    """
    features = self.CreateFeatures()
    normal_matrix = self.CreateMatrix(features)
    projected_targets = matrix.multiply(matrix.transpose(features), self.y)
    normal_inverse = matrix.inverse(normal_matrix)
    weight = matrix.multiply(normal_inverse, projected_targets)
    error = self.CalculateError(weight)
    self.PrintResult(weight, error)
def addHermite(m, p0x, p0y, p1x, p1y, m0x, m0y, m1x, m1y, step):
    """Append a Hermite curve (endpoints p0/p1, tangents m0/m1) to edge matrix m.

    The Hermite basis matrix maps the geometry column to polynomial
    coefficients; x(t) and y(t) (z fixed at 0) are sampled every `step`.
    """
    basis = [[2, -2, 1, 1],
             [-3, 3, -2, -1],
             [0, 0, 1, 0],
             [1, 0, 0, 0]]
    x_coeffs = matrix.multiply(basis, matrix.transpose([[p0x, p1x, m0x, m1x]]))
    y_coeffs = matrix.multiply(basis, matrix.transpose([[p0y, p1y, m0y, m1y]]))
    fx = polyParametrize(matrix.transpose(x_coeffs)[0])
    fy = polyParametrize(matrix.transpose(y_coeffs)[0])
    fz = lambda t: 0
    addEdgesFromParam(m, fx, fy, fz, step)
def test_multiply_bad_dimensions(self):
    """Multiplying two matrices with incompatible dimensions must raise."""
    rows = 4
    cols = 5
    left = self.random_matrix(rows, cols)
    right = self.random_matrix(rows, cols)
    # 4x5 times 4x5 is undefined (inner dimensions 5 and 4 differ)
    with self.assertRaises(Exception):
        matrix.multiply(left, right)
def best_fit(A, b):
    '''Least-squares fit: solve the normal equations for A x = b.

    Returns (A^T A)^-1 * A^T * b as a matrix product.
    '''
    At = A.transpose()
    # Compute the inverse of A^T * A.
    normal_inv = matrix.multiply(At, A).inverse()
    # Compute A^T * b.
    projected = matrix.multiply(At, b)
    return matrix.multiply(normal_inv, projected)
def feedforward(self, input_array):
    """Propagate input_array through the network; return output activations as a list."""
    layer_in = matrix.fromArray(input_array)
    # hidden layer: weights * input + bias, squashed by sigmoid
    hidden = matrix.multiply(self.weights_ih, layer_in)
    hidden.add(self.bias_h)
    hidden.map(sigmoid)
    # output layer: weights * hidden, squashed by sigmoid
    # (no output bias is applied here)
    out = matrix.multiply(self.weights_ho, hidden)
    out.map(sigmoid)
    return out.toArray()
def train(self, input_array, target_array):
    """Run one stochastic-gradient training step on a single example.

    input_array / target_array are plain lists converted to column
    matrices.  Updates weights_ih, weights_ho, bias_h, and bias_o in place
    using the sigmoid derivative (dsigmoid) and self.learning_rate.
    """
    # Generating the hidden outputs
    inputs = matrix.fromArray(input_array)
    hidden = matrix.multiply(self.weights_ih, inputs)
    hidden.add(self.bias_h)
    # activation function
    hidden.map(sigmoid)
    # Generating the output layer's output
    outputs = matrix.multiply(self.weights_ho, hidden)
    outputs.map(sigmoid)
    targets = matrix.fromArray(target_array)
    output_errors = matrix.subtract(targets, outputs)
    # gradient = outputs * (1 - outputs)
    # Calculate gradient
    gradients = matrix.map(outputs, dsigmoid)
    # get hadamard product
    gradients.multiply(output_errors)
    # perform scalar multiplication
    gradients.multiply(self.learning_rate)
    # Calculate deltas
    hidden_t = matrix.transpose(hidden)
    weight_ho_deltas = matrix.multiply(gradients, hidden_t)
    # Change weights by the calculated deltas
    self.weights_ho.add(weight_ho_deltas)
    # Adjust bias by the gradient
    self.bias_o.add(gradients)
    # after output errors are calculated, they are backpropagated to hidden layers for hidden layer error calculation
    # NOTE(review): hidden_errors are computed from weights_ho AFTER it was
    # updated above; textbook backprop uses the pre-update weights — confirm
    # this ordering is intentional before changing it.
    weights_ho_t = matrix.transpose(self.weights_ho)
    hidden_errors = matrix.multiply(weights_ho_t, output_errors)
    # Calculate hidden gradient
    hidden_gradient = matrix.map(hidden, dsigmoid)
    # hadamard product
    hidden_gradient.multiply(hidden_errors)
    hidden_gradient.multiply(self.learning_rate)
    # Calculate input->hidden deltas
    inputs_t = matrix.transpose(inputs)
    weight_ih_deltas = matrix.multiply(hidden_gradient, inputs_t)
    self.weights_ih.add(weight_ih_deltas)
    self.bias_h.add(hidden_gradient)
def test_multiply(self):
    """Scalar multiplication scales every entry of the matrix."""
    original = [[1, 2.2], [-3, 0]]
    scaled = matrix.multiply(2.5, original)
    self.assertEqual(scaled[0][0], 2.5)
    self.assertEqual(scaled[1][1], 0)
    self.assertEqual(scaled[1][0], -7.5)
def cam_transform(self, camera): """camera = VCam object returns Position object representing the camera's view""" # translation inversion t = self.translate(-camera.pos[0], -camera.pos[1], -camera.pos[2]) cam_pos_col = [[camera.pos[0]], [camera.pos[1]], [camera.pos[2]]] cam_upvec_col = [[camera.upvec[0]], [camera.upvec[1]], [camera.upvec[2]]] # rotation inversion # ***maybe try quaternions here instead? # 3 perpendicular unit vecs with Z pointing towards the camera Z = matrix.normalize(matrix.subtract(t.vec[0:3], cam_pos_col)) X = matrix.normalize(matrix.crossprod(cam_upvec_col, Z)) Y = matrix.crossprod(Z, X) # camera rotation inversion matrix for the object C = [ [X[0][0], X[1][0], X[2][0], 0], # [Xaxis.x, Xaxis.y, Xaxis.z, 0] [Y[0][0], Y[1][0], Y[2][0], 0], # [Yaxis.x, Yaxis.y, Yaxis.z, 0] [Z[0][0], Z[1][0], Z[2][0], 0], # [Zaxis.x, Zaxis.y, Zaxis.z, 0] [0, 0, 0, 1] ] # the camera transformation can be done with a single matrix mult vec = matrix.multiply(C, t.vec) return Position(vec[0][0], vec[1][0], vec[2][0])
def __mul__(self, mat):
    """Multiply this transform by another TransMatrix, a list of (x, y, z)
    point tuples (returning the transformed points), or a raw matrix."""
    if isinstance(mat, TransMatrix):
        return TransMatrix(matrix.multiply(self.lst, mat.lst))
    if isinstance(mat[0], tuple):
        # point list (x, y, z): apply the affine transform to each point
        transformed = []
        for pt in mat:
            # start from the translation column, then accumulate the
            # rotation/scale part row by row
            coords = [self.lst[axis][3] for axis in range(3)]
            for axis in range(3):
                for i in range(3):
                    coords[axis] += self.lst[axis][i] * pt[i]
            transformed.append(tuple(coords))
        return transformed
    # plain matrix
    return matrix.multiply(self.lst, mat)
def scale(self, factor=1, **kwargs):
    """vec = column vector of homogeneous coords
    factor = uniform scaling factor
    x = scaling factor of x (default 1)
    y = scaling factor of y (default 1)
    z = scaling factor of z (default 1)

    BUG FIX: the per-axis factors were required keyword arguments, so a
    purely uniform scale(factor=2) raised KeyError; they now default to 1.
    Returns a new Position with the scaled coordinates.
    """
    sx = factor * kwargs.get("x", 1)
    sy = factor * kwargs.get("y", 1)
    sz = factor * kwargs.get("z", 1)
    S = [[sx, 0, 0, 0],
         [0, sy, 0, 0],
         [0, 0, sz, 0],
         [0, 0, 0, 1]]
    vec = matrix.multiply(S, self.vec)
    return Position(vec[0][0], vec[1][0], vec[2][0])
def test_inverse_multiply(self):
    """Multiplying a matrix by its inverse yields the identity matrix."""
    original = [bitarray('11'), bitarray('10')]
    inverted = matrix.inverse(original)
    product = matrix.multiply(original, inverted)
    expected = matrix.identity(2)
    # Every row of the product must match the identity row-for-row
    for row in xrange(len(expected)):
        self.assertTrue(product[row] == expected[row])
def test_good_dimensions(self):
    """The product of a 4x5 and a 5x4 matrix must be 4x4."""
    outer = 4
    inner = 5
    left = self.random_matrix(outer, inner)
    right = self.random_matrix(inner, outer)
    product = matrix.multiply(left, right)
    self.assertTrue(len(product) == outer)
    for row in product:
        self.assertTrue(len(row) == outer)
def calculate_i_symbols_hard(self):
    """Calculate the list of intermediate symbols the slow way.

    Inverts the constraint matrix A outright and multiplies A^-1 by d.
    This is inefficient and WILL take a long time for larger symbol sizes.

    Returns a list of bit arrays representing the intermediate symbols.
    """
    constraint = self.a()
    constraint_inverse = matrix.inverse(constraint)
    encoded = self.calculate_d()
    return matrix.multiply(constraint_inverse, encoded)
def fit_curve(tuplelist):
    '''Takes a list of (x, y) tuples and fits a polynomial through them.

    Builds the Vandermonde matrix of the x values (degree = number of
    points) and solves exactly by inversion, returning the coefficient
    column as a matrix.Matrix.
    '''
    degree = len(tuplelist)
    # One Vandermonde row per point: [x^0, x^1, ..., x^(degree-1)]
    xrows = [[math.pow(pt[0], n) for n in range(degree)] for pt in tuplelist]
    yrows = [[pt[1]] for pt in tuplelist]
    vandermonde = matrix.Matrix(xrows)
    targets = matrix.Matrix(yrows)
    return matrix.multiply(vandermonde.inverse(), targets)
def project(self, **frustum):
    """parameter frustum takes in a frustum defined by:
    l = left of projection plane
    r = right of projection plane
    t = top of projection plane
    b = bottom of projection plane
    n = near (focal pt to proj plane in abs dist)
    f = far (abs dist)
    returns vec3 representing NDC

    Builds a standard perspective projection matrix, multiplies it into
    self.vec, then performs the perspective divide by w.
    """
    r4 = [0, 0, -1, 0]  # copy -z to w to convert clip to NDC
    l = frustum['l']
    r = frustum['r']
    t = frustum['t']
    b = frustum['b']
    n = frustum['n']
    f = frustum['f']
    r1 = [2 * n / (r - l), 0, (r + l) / (r - l), 0]  # converts x from eye to clip coords
    # (linear relationship between x proj and x ndc)
    r2 = [0, 2 * n / (t - b), (t + b) / (t - b), 0]  # converts y from eye to clip coords, as x
    r3 = [0, 0, -(f + n) / (f - n), -2 * f * n / (f - n)]  # converts z from eye to clip coords
    # although z always projects to -n,
    # each z in eye space has to have
    # a unique value in clip space to
    # know what's nearer and what's further
    P = [r1, r2, r3, r4]
    vec = matrix.multiply(
        P, self.vec)  # transform from eye space to clip space
    w = vec[3][0]
    vec = [[vec[0][0] / w], [vec[1][0] / w], [
        vec[2][0] / w
    ]]  # convert from clip space (homogeneous coords) to NDC (cartesian)
    return vec
def animate_scramble_in_one_step(self, scramble, steps_per_turn = 20):
    """Apply a whole scramble to the cube at once, then animate every
    affected tile rotating to its final position in one combined motion.

    scramble = whitespace-separated move tokens (upper-cased before use)
    steps_per_turn = animation frames; if None, config.STEPS_PER_TURN is used
    """
    if steps_per_turn == None:
        steps = config.STEPS_PER_TURN
    else:
        steps = steps_per_turn
    scramble = scramble.upper()
    scramble_list = scramble.split()
    # per-tile accumulated rotation matrix
    transform_dict = {}
    for s in scramble_list:
        affected_tiles = self.cube.get_affected_tiles(s)
        # update the logical cube state immediately; only the animation is deferred
        self.cube.transform_using_string(s)
        axis, theta = self.get_trans_from_string(s)
        axis = axis.lower()
        r = None
        if axis == 'x':
            r = matrix.rotx(theta)
        elif axis == 'y':
            r = matrix.roty(theta)
        elif axis == 'z':
            r = matrix.rotz(theta)
        if r == None:
            print "Error: rotation about invalid axis: ", axis
            return
        # compose this turn's rotation onto each affected tile's running total
        for t in affected_tiles:
            if t not in transform_dict.keys():
                transform_dict[t] = matrix.Matrix(list(r.data))
            else:
                transform_dict[t] = matrix.multiply(matrix.Matrix(list(r.data)), transform_dict[t])
    # reduce each tile's total rotation to a single axis/angle, then divide
    # the angle into per-frame increments
    for tile in transform_dict.keys():
        total_transform = transform_dict[tile]
        axis, angle = matrix.get_axis_and_angle_from_rot(total_transform)
        dtheta = angle / steps
        transform_dict[tile] = matrix.rotv(axis, dtheta)
    # play back the animation frame by frame
    for s in range(steps):
        self.pvc.erase()
        for tile in transform_dict.keys():
            self.pvc.views[tile].transform(transform_dict[tile], self.origin)
        self.display()
def mtxTest1():
    """Printed sanity checks for the matrix helpers: identities, products,
    scalar multiplication, size-mismatch errors, and an edge-matrix demo
    rendered to a 500x500 image."""
    m1 = [[2, 2, 3], [3, 2, 2]]
    m2 = [[1, 5], [6.5, 4], [1, -0.7]]
    m3 = [[1, 2, 3, 1], [5, 2, -1, 3], [-1, -5, 3, 6], [2, 4, -7, 2]]
    k1 = 2.5  # NOTE(review): k1/k2 appear unused in this test
    k2 = 3.5
    id3 = matrix.id(3)
    id2 = matrix.id(2)
    print 'identity 3x3'
    print matrix.toStr(id3)
    print 'identity 2x2'
    print matrix.toStr(id2)
    print 'm1 2x3'
    print matrix.toStr(m1)
    print 'sanity checks: m1 * id3 = m1, id2 * m1 = m1'
    m1again = matrix.multiply(m1, id3)
    m1evenmore = matrix.multiply(id2, m1)
    print matrix.toStr(m1again)
    print matrix.toStr(m1evenmore)
    print 'testing size mismatch id3 * m1:'
    try:
        matrix.multiply(id3, m1)
    except ArithmeticError:
        print 'it errored, that\'s good'
    print 'm2 3x2'
    print matrix.toStr(m2)
    m12 = matrix.multiply(m1, m2)
    print 'm1 * m2, should be a 2x2'
    print matrix.toStr(m12)
    m21 = matrix.multiply(m2, m1)
    print 'm2 * m1, should be a 3x3'
    print matrix.toStr(m21)
    # scalar multiplication on either side, and scalar * scalar
    print '10 * (m2 * m1)'
    print matrix.toStr(matrix.multiply(10, m21))
    print '(m2 * m1) * 10'
    print matrix.toStr(matrix.multiply(m21, 10))
    print '10 * 10'
    print matrix.multiply(10, 10)
    print 'Adding edge (1, 1, 1), (2, 3, 2.5)'
    m = edgemtx()
    addEdge(m, 1, 1, 1, 2, 3, 2.5)
    print matrix.toStr(m)
    print 'm3'
    print matrix.toStr(m3)
    print 'Transforming edge matrix'
    print matrix.toStr(matrix.multiply(m3, m))
    # draw nested rotating squares into an image
    img = Image(500, 500)
    for loc in range(0, 500, 4):
        edges = edgemtx()
        addEdge(edges, 125, loc, 100, loc + 1, 375, 100)
        addEdge(edges, loc + 1, 375, 100, 375, 500 - loc - 2, 100)
        addEdge(edges, 375, 500 - loc - 2, 100, 500 - loc - 3, 125, 100)
        addEdge(edges, 500 - loc - 3, 125, 100, 125, loc + 4, 100)
        drawEdges(edges, img, (255 - loc / 2, loc / 2, 127))  # crossfade r + g
    img.display()
def __NewtonMethod(self, hesion, gradient, weight):
    """One Newton update step: weight - hesion * gradient.

    NOTE(review): `hesion` is presumably the inverse Hessian (cf. the
    class's __HessionInverse helper) — confirm at the call site.
    """
    step = matrix.multiply(hesion, gradient)
    return matrix.subtraction(weight, step)
if __name__ == '__main__':
    # Script-level sanity checks for the matrix helpers (identities,
    # products, size-mismatch handling).  Appears to duplicate the start
    # of mtxTest1 and may be truncated in this chunk.
    m1 = [[2, 2, 3], [3, 2, 2]]
    m2 = [[1, 5], [6.5, 4], [1, -0.7]]
    m3 = [[1, 2, 3, 1], [5, 2, -1, 3], [-1, -5, 3, 6], [2, 4, -7, 2]]
    k1 = 2.5  # NOTE(review): k1/k2 appear unused here
    k2 = 3.5
    id3 = matrix.id(3)
    id2 = matrix.id(2)
    print 'identity 3x3'
    print matrix.toStr(id3)
    print 'identity 2x2'
    print matrix.toStr(id2)
    print 'm1 2x3'
    print matrix.toStr(m1)
    print 'sanity checks: m1 * id3 = m1, id2 * m1 = m1'
    m1again = matrix.multiply(m1, id3)
    m1evenmore = matrix.multiply(id2, m1)
    print matrix.toStr(m1again)
    print matrix.toStr(m1evenmore)
    print 'testing size mismatch id3 * m1:'
    try:
        matrix.multiply(id3, m1)
    except ArithmeticError:
        print 'it errored, that\'s good'
    print 'm2 3x2'
    print matrix.toStr(m2)
    m12 = matrix.multiply(m1, m2)
    print 'm1 * m2, should be a 2x2'
    print matrix.toStr(m12)
    m21 = matrix.multiply(m2, m1)
    print 'm2 * m1, should be a 3x3'
def translate(self, x, y, z):
    """Translate this position by (x, y, z) via a homogeneous translation
    matrix; returns a new Position."""
    T = [
        [1, 0, 0, x],
        [0, 1, 0, y],
        [0, 0, 1, z],
        [0, 0, 0, 1],
    ]
    moved = matrix.multiply(T, self.vec)
    return Position(moved[0][0], moved[1][0], moved[2][0])
def __matmul__(self, other):
    """Implement the @ operator via the module-level multiply helper,
    which takes both operands' raw data and their dimensions."""
    product = matrix.multiply(self.matrix, self.rows, self.columns,
                              other.matrix, other.columns)
    return product
def main(x, hidden, b, epochs, test, w, g, n_d, m, p_m):
    """Train a one-hidden-layer network with hand-rolled backprop.

    x = training inputs (row-major lists), hidden = hidden-neuron count,
    b = bias value, epochs = iterations, test = target values, w = initial
    weight range, g = learning rate, n_d = noised/test inputs, m = mode
    ("sin" plots results, "xor" prints them), p_m = save plot instead of
    showing it.  Uses module-level `iterations`, loading globals, and
    matplotlib state.
    """
    random.seed(1)
    training_data = x[:]
    noise_data = n_d[:]
    # Adding bias to training data
    training_data.append([])
    noise_data.append([])
    for _ in x[0]:
        training_data[len(training_data)-1].append(b)
        noise_data[len(noise_data)-1].append(b)
    # Random weights for synapses
    synapses0 = []
    synapses1 = []
    for f in range(hidden):
        synapses0.append([])
        for _ in range(len(training_data)):
            synapses0[f].append(random.uniform(w, -w))
    # second rand for bias synapses
    for j in range(hidden + 1):  # +1 for bias
        synapses1.append([random.uniform(w, -w)])
    sig_layer2 = []
    error_log = []
    error_log2 = []
    gamma_log = []
    global loading_message
    global loading_progress
    # learning loop (learning = iterations)
    for i in xrange(epochs):
        # NOTE(review): progress is computed against the module-level
        # `iterations`, not the `epochs` argument — confirm they agree.
        loading_progress = round((float(i) / float(iterations)) * 100, 1)
        # # # Forward pass
        # # Input Layer
        layer1 = matrix.multiply(synapses0, training_data)
        # Activation level
        sig_layer1 = matrix.sig(layer1)
        # # Hidden Layer
        # Adding bias to layer1
        b_sig_layer1 = sig_layer1[:]
        b_sig_layer1.append([])
        for _ in b_sig_layer1[0]:
            b_sig_layer1[len(b_sig_layer1) - 1].append(b)
        layer2 = matrix.multiply(matrix.transpose(synapses1), b_sig_layer1)
        sig_layer2 = matrix.sig(layer2)
        # # # ----------------
        # # Calculate net error
        error = [matrix.subtract(test, matrix.transpose(sig_layer2))]
        # error = [matrix.error(test, matrix.transpose(sig_layer2))]
        # if i % 5000 == 0:
        #     print(error)
        temp = 0
        for j in range(len(error)):
            temp += temp + error[0][j]
        error_log.append(temp/len(error))
        # Test with test data
        sig_noise = []
        l1 = matrix.multiply(synapses0, noise_data)
        sig_l1 = matrix.sig(l1)
        b_sig_l1 = sig_l1[:]
        b_sig_l1.append([])
        for _ in b_sig_l1[0]:
            b_sig_l1[len(b_sig_l1) - 1].append(b)
        l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)
        sig_noise = matrix.sig(l2)
        error2 = [matrix.subtract(test, matrix.transpose(sig_noise))]
        temp2 = 0
        for j in range(len(error2)):
            temp2 += temp2 + error2[0][j]
        error_log2.append(temp2 / len(error2))
        # # # ----------------
        # # Calculating weight updates
        # Delta for neuron in output layer (1 for each training data)
        deriv_sig_layer2 = matrix.derivative(sig_layer2)
        delta_layer2 = [[]]
        # temp_g = (g/(i+1))
        # gamma_log.append(temp_g)
        for j in range(len(error[0])):
            delta_layer2[0].append(deriv_sig_layer2[0][j] * error[0][j] * g)
        # Delta for neurons in hidden layer
        deriv_sig_layer1 = matrix.derivative(sig_layer1)
        delta_layer1 = []
        delta_weight_sum = []
        for k in range(len(synapses1)):
            delta_weight_sum.append([])
            for j in range(len(delta_layer2[0])):
                delta_weight_sum[k].append(synapses1[k][0] * delta_layer2[0][j])
        for k in range(len(deriv_sig_layer1)):
            delta_layer1.append([])
            for j in range(len(deriv_sig_layer1[0])):
                delta_layer1[k].append(deriv_sig_layer1[k][j] * delta_weight_sum[k][j] * g)
        delta_w_oh = matrix.multiply(delta_layer2, matrix.transpose(b_sig_layer1))
        delta_w_hi = matrix.multiply(delta_layer1, matrix.transpose(training_data))
        # # # Backwards pass
        # # Update weights
        synapses1 = matrix.add(synapses1, matrix.transpose(delta_w_oh))
        synapses0 = matrix.add(synapses0, delta_w_hi)
        if i > epochs * 0.5:
            if i > epochs * 0.95:
                loading_message = "I'm nearly done, good training."
            else:
                loading_message = "Well, I'm halfway through."
    # # # End of learning
    # Testing net with noised/test data
    sig_noise = []
    l1 = matrix.multiply(synapses0, noise_data)
    sig_l1 = matrix.sig(l1)
    b_sig_l1 = sig_l1[:]
    b_sig_l1.append([])
    for _ in b_sig_l1[0]:
        b_sig_l1[len(b_sig_l1) - 1].append(b)
    l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)
    sig_noise = matrix.sig(l2)
    # formatting net output for plot
    result1 = []  # training data
    result2 = []  # noised data
    for i in range(len(sig_layer2[0])):
        result1.append(sig_layer2[0][i] * 2 - 1)
        result2.append(sig_noise[0][i] * 2 - 1)
    if m == "sin":
        # Plot
        # Some code lines from: https://matplotlib.org/users/legend_guide.html
        neuron_patch = mpatches.Patch(label='Neurons: ' + str(hidden))
        bias_patch = mpatches.Patch(label='Bias: ' + str(b))
        iteration_patch = mpatches.Patch(label='Iterations: ' + str(epochs))
        epsilon_patch = mpatches.Patch(label='Gamma: ' + str(g))
        weight_patch = mpatches.Patch(label='Weight range: +/- ' + str(w))
        time_patch = mpatches.Patch(label=str(round((time.time() - start_time) / 60, 2)) + " min")
        first_legend = plt.legend(
            handles=[bias_patch, time_patch, epsilon_patch, neuron_patch, iteration_patch, weight_patch],
            bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0.)
        line4, = plt.plot(error_axis[0], error_log, label="Error", linewidth=0.5)
        line6, = plt.plot(error_axis[0], error_log2, label="Error2", linewidth=0.5)
        line1, = plt.plot(inputData[0], result1, label="Training Data", linewidth=0.75)
        line2, = plt.plot(inputData[0], result2, label="Test Data", linestyle=':', linewidth=0.75)
        line3, = plt.plot(x_data, y_data, label="sin(x)", linestyle='--', linewidth=0.75)
        line5, = plt.plot(x_axis, y_axis, label="Axis", linewidth=0.5)
        ax = plt.gca().add_artist(first_legend)
        plt.legend(handles=[line4, line1, line2, line3, line5, line6])
        if p_m:
            plt.savefig('./plots/' + str(time.time())[2:10] + '.png')
        else:
            plt.show()
        plt.clf()
        plt.cla()
        plt.close()
    elif m == "xor":
        print("-----")
        for i in range(len(sig_noise[0])):
            print "Input: " + str(round(noise_data[0][i], 0)) + " & " \
                + str(round(noise_data[1][i], 0)) + " = " + str(round(sig_noise[0][i], 0)) + " (" \
                + str(round(sig_noise[0][i] * 100, 4)) + "% for True)"
def update(time, delta):
    """Spin the scene camera one degree (2*pi/360 radians) about the y axis per call."""
    one_degree = math.pi * 2 / 360.0
    rotation = matrix.y_rotation(one_degree)
    scene['camera'] = matrix.multiply(scene['camera'], rotation)
def __Gradient(self, features, weight):
    """Gradient of the squared-error objective: A^T A w - A^T b.

    features = design matrix A, weight = current weight column w,
    self.y = target column b.  Returns the gradient column.
    """
    AtAx = matrix.multiply(
        matrix.multiply(matrix.transpose(features), features), weight)
    Atb = matrix.multiply(matrix.transpose(features), self.y)
    # NOTE(review): the original also called matrix.addition(AtAx, Atb) and
    # discarded the result; matrix.addition is used for its return value in
    # CreateMatrix, so that call was dead code and has been removed.
    return matrix.subtraction(AtAx, Atb)
def __mul__(self, mat):
    """Multiply by another TransMatrix (wrapping the result) or a raw matrix."""
    if isinstance(mat, TransMatrix):
        product = matrix.multiply(self.lst, mat.lst)
        return TransMatrix(product)
    return matrix.multiply(self.lst, mat)
def main():
    """Interactive terminal pixel editor: arrow keys move the cursor, space
    toggles a pixel, w/a/s/d shift the canvas, r/g/b toggle colour channels.
    Loads/saves the image matrix at the path given in argv[1] (default
    'image.mx') and redraws with a pulsing cursor at FRAME_RATE fps.
    """
    # NOTE(review): line breaks inside this help string were reconstructed
    # from a whitespace-mangled source — verify against the original file.
    print """arrow keys - move around
space - toggle current pixel
w/a/s/d - shift entire canvas"""
    if len(sys.argv) > 1:
        fn = sys.argv[1]
    else:
        fn = 'image.mx'
    FRAME_RATE = 50
    x = y = 0
    colour = [255, 255, 255]
    if os.path.exists(fn):
        image = matrix.load(fn)
    else:
        image = matrix.Frame()
    f = matrix.Frame()
    frame = 0
    input = []
    kb.capture()  # switch to raw mode
    while 1:
        frame += 1
        # drain all pending keystrokes without blocking
        while 1:
            c = kb.read(blocking=0)
            if c is None:
                break
            input.append(c)
        if len(input):
            #print "%s\r" % `input`
            c = input.pop(0)
            if c == chr(27):
                # bare ESC exits; ESC [ X is an ANSI arrow-key sequence
                if not len(input):
                    print "exit\r"
                    break
                if input.pop(0) == '[':
                    c = input.pop(0)
                    if c == 'A':
                        print "up\r"
                        y = (y + f.height - 1) % f.height
                    elif c == 'B':
                        print "down\r"
                        y = (y + 1) % f.height
                    elif c == 'C':
                        print "right\r"
                        x = (x + 1) % f.width
                    elif c == 'D':
                        print "left\r"
                        x = (x + f.width - 1) % f.width
                    else:
                        print "unknown magic: %s\r" % c
                else:
                    print "unknown special\r"
            elif c == ' ':
                # toggle the pixel under the cursor and persist immediately
                image.point(x, y, matrix.BLACK if image.get(x, y) == tuple(colour) else tuple(colour))
                image.save(fn)
            elif c.lower() == 'w':  # roll up
                image.translate(0, -1)
            elif c.lower() == 'a':  # roll left
                image.translate(-1, 0)
            elif c.lower() == 's':  # roll right
                image.translate(0, 1)
            elif c.lower() == 'd':  # roll down
                image.translate(1, 0)
            elif c.lower() == 'r':
                colour[0] = 0 if colour[0] else 255
            elif c.lower() == 'g':
                colour[1] = 0 if colour[1] else 255
            elif c.lower() == 'b':
                colour[2] = 0 if colour[2] else 255
            else:
                print c,"\r"
        f.copy(image)
        # draw cursor (brightness pulses with a sine wave over time)
        f.point(x, y, matrix.multiply(colour, 0.5 * math.sin(float(frame) / FRAME_RATE * math.pi) + 0.5))
        f.show()
        time.sleep(1.0 / FRAME_RATE)
def CreateMatrix(self, features):
    """Return the regularized normal-equation matrix A^T A + diag(lambda)."""
    gram = matrix.multiply(matrix.transpose(features), features)
    regularizer = matrix.diagonal(self.degree, self.lm)
    return matrix.addition(gram, regularizer)
def main(x, hidden, b, learning, test, w, g, n_d):
    """Train a one-hidden-layer network with hand-rolled backprop and plot.

    x = training inputs, hidden = hidden-neuron count, b = bias value,
    learning = iteration count, test = targets, w = initial weight range,
    g = learning rate, n_d = noised inputs.  Uses module-level
    `iterations`, `inputData`, plotting data, and loading globals; saves a
    matplotlib figure at the end.
    """
    random.seed(1)
    training_data = x[:]
    noise_data = n_d[:]
    # Adding bias to training data
    training_data.append([])
    noise_data.append([])
    for _ in x[0]:
        training_data[1].append(b)
        noise_data[1].append(b)
    # Random weights for synapses
    synapses0 = []
    synapses1 = []
    for _ in range(hidden):
        synapses0.append([random.uniform(w, -w), random.uniform(w, -w)])
    # second rand for bias synapses
    for j in range(hidden + 1):  # +1 for bias
        synapses1.append([random.uniform(w, -w)])
    sig_layer2 = []
    global loading_message
    global loading_progress
    # learning loop (learning = iterations)
    for i in xrange(learning):
        temp = i+1
        # NOTE(review): progress uses module-level `iterations`, not the
        # `learning` argument — confirm they agree.
        loading_progress = round((float(temp) / float(iterations)) * 100, 1)
        # loading_progress = (temp / learning) * 100
        # # # Forward pass
        # # Input Layer
        layer1 = matrix.multiply(synapses0, training_data)
        # Activation level
        sig_layer1 = matrix.sig(layer1)
        # # Hidden Layer
        # Adding bias to layer1
        b_sig_layer1 = sig_layer1[:]
        b_sig_layer1.append([])
        for _ in b_sig_layer1[0]:
            b_sig_layer1[len(b_sig_layer1) - 1].append(b)
        layer2 = matrix.multiply(matrix.transpose(synapses1), b_sig_layer1)
        sig_layer2 = matrix.sig(layer2)
        # Calculate net error
        error = [matrix.subtract(test, matrix.transpose(sig_layer2))]
        # if i % 25000 == 0:
        #     temp = 0
        #     for j in range(len(error)):
        #         temp += temp + error[0][j]
        #     print i, temp
        # Delta for neuron in output layer (1 for each training data)
        deriv_sig_layer2 = matrix.derivative(sig_layer2)
        delta_layer2 = [[]]
        for j in range(len(error[0])):
            delta_layer2[0].append(deriv_sig_layer2[0][j] * error[0][j] * g)
        # Delta for neurons in hidden layer
        deriv_sig_layer1 = matrix.derivative(sig_layer1)
        delta_layer1 = []
        delta_weight_sum = []
        for k in range(len(synapses1)):
            delta_weight_sum.append([])
            for j in range(len(delta_layer2[0])):
                delta_weight_sum[k].append(synapses1[k][0] * delta_layer2[0][j])
        for k in range(len(deriv_sig_layer1)):
            delta_layer1.append([])
            for j in range(len(deriv_sig_layer1[0])):
                delta_layer1[k].append(deriv_sig_layer1[k][j] * delta_weight_sum[k][j] * g)
        delta_w_oh = matrix.multiply(delta_layer2, matrix.transpose(b_sig_layer1))
        delta_w_hi = matrix.multiply(delta_layer1, matrix.transpose(training_data))
        # # Update weights
        synapses1 = matrix.add(synapses1, matrix.transpose(delta_w_oh))
        synapses0 = matrix.add(synapses0, delta_w_hi)
        if i > learning * 0.5:
            if i > learning * 0.95:
                loading_message = "I'm nearly done, good training."
            else:
                loading_message = "Well, I'm halfway through."
    # Testing net with noised data
    sig_noise = []
    if len(n_d) > 0:
        # print "testing with noise data"
        l1 = matrix.multiply(synapses0, noise_data)
        sig_l1 = matrix.sig(l1)
        b_sig_l1 = sig_l1[:]
        b_sig_l1.append([])
        for _ in b_sig_l1[0]:
            b_sig_l1[len(b_sig_l1) - 1].append(b)
        l2 = matrix.multiply(matrix.transpose(synapses1), b_sig_l1)
        sig_noise = matrix.sig(l2)
    # formatting net output for plot
    result1 = []  # training data
    result2 = []  # noised data
    for i in range(len(sig_layer2[0])):
        result1.append(sig_layer2[0][i] * 2 - 1)
        result2.append(sig_noise[0][i] * 2 - 1)
    # Plot
    # Some code lines from: https://matplotlib.org/users/legend_guide.html
    neuron_patch = mpatches.Patch(label='Neurons: ' + str(hidden))
    bias_patch = mpatches.Patch(label='Bias: ' + str(b))
    iteration_patch = mpatches.Patch(label='Iterations: ' + str(learning))
    epsilon_patch = mpatches.Patch(label='Epsilon: ' + str(g))
    weight_patch = mpatches.Patch(label='Weight range (0 +/-): ' + str(w))
    time_patch = mpatches.Patch(label=str(round((time.time() - start_time) / 60, 2)) + " min")
    first_legend = plt.legend(
        handles=[bias_patch, time_patch, epsilon_patch, neuron_patch, iteration_patch, weight_patch],
        bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0.)
    line1, = plt.plot(inputData[0], result1, label="Training Data", linewidth=0.75)
    line2, = plt.plot(inputData[0], result2, label="Test Data", linestyle=':', linewidth=0.75)
    line3, = plt.plot(x_data, y_data, label="sin(x)", linestyle='--', linewidth=0.75)
    ax = plt.gca().add_artist(first_legend)
    plt.legend(handles=[line1, line2, line3])
    plt.savefig('./plots/plot' + str(time.time())[2:10] + '.png')
    plt.clf()
    plt.cla()
    plt.close()
def __HessionInverse(self, features):
    """Return the inverse of A^T A (used as the inverse Hessian; the
    'Hession' spelling is kept for compatibility with existing callers)."""
    gram = matrix.multiply(matrix.transpose(features), features)
    return matrix.inverse(gram)