import tensor.Variable as var
import tensor.Operator as op
from layers.base_conv import Conv2D
from layers.pooling import MaxPooling
from layers.fc import FullyConnect
from layers.relu import Relu
from layers.softmax import Softmax
import cv2
import numpy as np
import random

img = cv2.imread('layers/test.jpg')
img = img[np.newaxis, :]  # add a batch dimension: (H, W, C) -> (1, H, W, C)

a = var.Variable((1, 128, 128, 3), 'a')
label = var.Variable([1, 1], 'label')
label.data = np.array([random.randint(1, 9)]).astype(int)  # random class index in [1, 9]

# forward graph: conv -> relu -> max-pool -> fc -> softmax loss
conv1_out = op.Conv2D((3, 3, 3, 3), input_variable=a, name='conv1', padding='VALID').output_variables
relu1_out = op.Relu(input_variable=conv1_out, name='relu1').output_variables
pool1_out = op.MaxPooling(ksize=2, input_variable=relu1_out, name='pool1').output_variables
fc1_out = op.FullyConnect(output_num=10, input_variable=pool1_out, name='fc1').output_variables
sf_out = op.SoftmaxLoss(predict=fc1_out, label=label, name='sf').loss
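# A quick sanity check of the shapes the graph above should produce, assuming
# stride-1 'VALID' convolution and a non-overlapping 2x2 max-pool (assumptions
# about this framework's defaults, not confirmed by it):
#   conv1: (128 - 3) + 1 = 126  -> (1, 126, 126, 3)
#   relu1: shape-preserving     -> (1, 126, 126, 3)
#   pool1: 126 / 2 = 63         -> (1, 63, 63, 3)
#   fc1:   flatten to 10 logits -> (1, 10)
def valid_conv_size(in_size, ksize, stride=1):
    # output spatial size of a padding-free ('VALID') convolution
    return (in_size - ksize) // stride + 1

assert valid_conv_size(128, 3) == 126       # conv1 output side
assert valid_conv_size(128, 3) // 2 == 63   # after the 2x2 max-pool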
    # dropout2_out = op.DropOut(input_variable=relu2_out, name='dropout2', phase='train', prob=0.7).output_variables
    pool2_out = op.MaxPooling(input_variable=relu1_out, ksize=2, name='pool2').output_variables
    fc_out = op.FullyConnect(input_variable=pool2_out, output_num=output_num, name='fc').output_variables
    return fc_out


batch_size = 64
global_step = 0

img_placeholder = var.Variable((batch_size, 28, 28, 1), 'input')
label_placeholder = var.Variable([batch_size, 1], 'label')

# set train_op
prediction = inference(img_placeholder, 10)
sf = op.SoftmaxLoss(prediction, label_placeholder, 'sf')

# set method: switch every learnable variable to Adam updates
# (this must run after the graph is built, so the conv/fc parameters
# created inside inference() are already registered in the scope)
for k in var.GLOBAL_VARIABLE_SCOPE:
    s = var.GLOBAL_VARIABLE_SCOPE[k]
    if isinstance(s, var.Variable) and s.learnable:
        s.set_method_adam()

images, labels = load_mnist('./data/mnist')
test_images, test_labels = load_mnist('./data/mnist', 't10k')

# save train curve config
loss_collect = []
acc_collect = []

print('new')
with open('logs/%s_log.txt' % VERSION, 'w') as logf:
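# For reference, a minimal numpy sketch of the Adam update that
# set_method_adam() above selects; the hyperparameters are the usual
# defaults from the Adam paper, not values taken from this repo.
import numpy as np

def adam_step(w, grad, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # update biased first/second moment estimates
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    # bias-correct them (t is the 1-based step count)
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    # take the parameter step
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, m, v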
import tensor.Variable as var
import tensor.Operator as op
import numpy as np
# from layers.base_conv import Conv2D
# from layers.pooling import MaxPooling
# from layers.fc import FullyConnect
# from layers.relu import Relu
# from layers.softmax import Softmax
# import cv2

# img = cv2.imread('layers/test.jpg')
# img = img[np.newaxis, :]

# gradient check: perturb one input element by +e / -e
e = 1e-3
a = var.Variable((1, 128, 128, 3), 'a')
b = var.Variable((1, 128, 128, 3), 'b')
b.data = a.data.copy()
a.data[0, 0, 0, 1] += e
b.data[0, 0, 0, 1] -= e

# label = var.Variable([1, 1], 'label')
# import random
# label.data = np.array([random.randint(1, 9)])
# label.data = label.data.astype(int)

conv1_out = op.Conv2D((3, 3, 3, 3), input_variable=a, name='conv1', padding='VALID').output_variables
conv2_out = op.Conv2D((3, 3, 3, 3), input_variable=b, name='conv2', padding='VALID').output_variables
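# The +e / -e perturbation of a single element above sets up a
# central-difference gradient estimate. The formula it instantiates, shown
# on a hypothetical scalar function f (the two conv passes play the role
# of f(x + e) and f(x - e) here):
def numeric_grad(f, x, e=1e-3):
    # central difference: (f(x + e) - f(x - e)) / (2 * e)
    return (f(x + e) - f(x - e)) / (2 * e)

# example: d/dx x^2 at x = 3 is 6
assert abs(numeric_grad(lambda x: x ** 2, 3.0) - 6.0) < 1e-6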
import numpy as np

from tensor import Graph, Variable, placeholder, Session
from tensor.Activation import sigmoid, softmax

if __name__ == '__main__':
    from base_tests import add, matmul

    # create a new graph
    Graph().as_default()

    x = placeholder()
    w = Variable([1, 1])
    b = Variable(0)
    p = sigmoid(add(matmul(w, x), b))

    session = Session()
    print(session.run(p, {x: [3, 2]}))

    # softmax
    # create some red points centered at (-2, -2)
    red_points = np.random.randn(50, 2) - 2 * np.ones((50, 2))
    # create some blue points centered at (2, 2)
    blue_points = np.random.randn(50, 2) + 2 * np.ones((50, 2))

    # create a new graph
    Graph().as_default()

    X = placeholder()

    # weight matrix for the two output classes:
    # the blue weight vector is (1, 1), the red one is (-1, -1)
    W = Variable([[1, -1], [1, -1]])
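    # With W as above, a point's class scores are x . W: a point near (2, 2)
    # scores roughly [4, -4], so softmax should put almost all mass on the
    # "blue" class. A quick plain-numpy check of that, independent of the
    # framework (softmax_np is a local helper, not tensor.Activation.softmax):
    def softmax_np(z):
        z = z - np.max(z)  # shift for numerical stability
        ez = np.exp(z)
        return ez / ez.sum()

    p_blue = softmax_np(np.array([2.0, 2.0]).dot([[1, -1], [1, -1]]))
    assert p_blue[0] > 0.99  # the blue class wins decisively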
    Args:
        a_value: First matrix value
        b_value: Second matrix value
    """
    return a_value.dot(b_value)


if __name__ == '__main__':
    # Create a new graph
    Graph().as_default()
    # graph_init()
    print(get_default())

    # Create variables
    A = Variable([[1, 0], [0, -1]])
    b = Variable([1, 1])

    # Create placeholder
    x = placeholder()

    # Create hidden node y
    y = matmul(A, x)

    # Create output node z
    z = add(y, b)

    session = Session()
    output = session.run(z, {x: [1, 2]})
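    # Sanity check with plain numpy, independent of the Graph/Session
    # machinery: A . x + b = [1, -2] + [1, 1] = [2, -1].
    import numpy as np
    expected = np.array([[1, 0], [0, -1]]).dot([1, 2]) + np.array([1, 1])
    print(output, expected)  # session.run(z, {x: [1, 2]}) should reproduce [ 2 -1]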
import tensor.Operator as op
import tensor.Variable as var
import tensor.Activation as activation
import numpy as np

### grad_check
e = 1e-3
a = var.Variable((1, 28, 28, 3), 'a')
b = var.Variable((1, 28, 28, 3), 'b')
c = var.Variable((1, 28, 28, 3), 'c')
b.data = a.data.copy()
c.data = a.data.copy()
a.data += e
b.data -= e

## conv2d
conv1_out = op.Conv2D(a, (3, 3, 3, 3), name='conv1', stride=1, padding=1).output_variables
conv2_out = op.Conv2D(b, (3, 3, 3, 3), name='conv2', stride=1, padding=1).output_variables
conv3_out = op.Conv2D(c, (3, 3, 3, 3), name='conv3', stride=1, padding=1).output_variables

conv1 = var.GLOBAL_VARIABLE_SCOPE['conv1']
conv2 = var.GLOBAL_VARIABLE_SCOPE['conv2']
conv3 = var.GLOBAL_VARIABLE_SCOPE['conv3']

# share one set of parameters across the convolutions so their outputs
# differ only through the +e / -e input perturbation
conv1.weights.data = conv2.weights.data
conv1.bias.data = conv2.bias.data
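# Note: unlike the single-element check earlier, perturbing every element of
# the input (a.data += e, b.data -= e) yields a directional derivative along
# the all-ones direction, i.e. the sum of all partials, not one element's
# gradient. A small self-contained illustration, with a hypothetical
# stand-in f for the conv forward pass:
def f(x):
    return (x ** 2).sum()

x = np.arange(4, dtype=float)
directional = (f(x + e) - f(x - e)) / (2 * e)
assert abs(directional - (2 * x).sum()) < 1e-6  # equals sum of df/dx_i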