# Tests and training examples for the autodiff (ad) module.
# The imports below assume a dlsys-style layout (`autodiff` for the graph
# ops/Executor, `ndarray` for the device array wrapper); adjust the module
# paths if this repo names them differently.
import random

import numpy as np

import autodiff as ad
import ndarray


def test_grad_of_grad():
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = x2 * x2 + x2 * x3

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
    grad_x2_x2, grad_x2_x3 = ad.gradients(grad_x2, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3, grad_x2_x2, grad_x2_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val, grad_x2_x2_val, grad_x2_x3_val = executor.run(
        feed_dict={x2: x2_val, x3: x3_val})

    expected_yval = x2_val * x2_val + x2_val * x3_val
    expected_grad_x2_val = 2 * x2_val + x3_val
    expected_grad_x3_val = x2_val
    expected_grad_x2_x2_val = 2 * np.ones_like(x2_val)
    expected_grad_x2_x3_val = 1 * np.ones_like(x2_val)

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, expected_yval)
    assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    assert np.array_equal(grad_x3_val, expected_grad_x3_val)
    assert np.array_equal(grad_x2_x2_val, expected_grad_x2_x2_val)
    assert np.array_equal(grad_x2_x3_val, expected_grad_x2_x3_val)

def test_add_mul_mix_3():
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    z = x2 * x2 + x2 + x3 + 3
    y = z * z + x3

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val = executor.run(
        feed_dict={x2: x2_val, x3: x3_val})

    z_val = x2_val * x2_val + x2_val + x3_val + 3
    expected_yval = z_val * z_val + x3_val
    expected_grad_x2_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) * (2 * x2_val + 1)
    expected_grad_x3_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) + 1

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, expected_yval)
    assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    assert np.array_equal(grad_x3_val, expected_grad_x3_val)

def test_full_forward_op():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # Initialize input data on the GPU.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 100).reshape((5, 1, 20))
    filters_val = np.ones((1, 1, 20)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    # conv1d -> max-pool -> relu -> flatten -> fully-connected relu
    outputs = ad.convolution_1d_forward_op(inputs, filters, "NCHW", "VALID", 1)
    outputs_pool = ad.pooling_1d_forward_op(outputs, "NCHW", "max", 0, 1, 1)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_f = ad.flatten_op(outputs_relu)
    output = ad.fullyactivation_forward_op(outputs_f, "NCHW", "relu")
    loss = ad.matmul_op(output, output, trans_A=True) * (1 / 5)

    grad_f = ad.gradients(loss, [filters])  # ad.gradients returns a list
    executor = ad.Executor([grad_f[0]], ctx=ctx)
    g_val = executor.run(feed_dict={inputs: x_val, filters: filters_val})  # returns a list
    print("g_val:", g_val[0].asnumpy())

def test_matmul_two_vars():
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = ad.matmul_op(x2, x3)

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = np.array([[1, 2], [3, 4], [5, 6]])   # 3x2
    x3_val = np.array([[7, 8, 9], [10, 11, 12]])  # 2x3
    y_val, grad_x2_val, grad_x3_val = executor.run(
        feed_dict={x2: x2_val, x3: x3_val})

    expected_yval = np.matmul(x2_val, x3_val)
    expected_grad_x2_val = np.matmul(np.ones_like(expected_yval), np.transpose(x3_val))
    expected_grad_x3_val = np.matmul(np.transpose(x2_val), np.ones_like(expected_yval))

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, expected_yval)
    assert np.array_equal(grad_x2_val, expected_grad_x2_val)
    assert np.array_equal(grad_x3_val, expected_grad_x3_val)

def test_add_mul_mix_2():
    x1 = ad.Variable(name="x1")
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    x4 = ad.Variable(name="x4")
    y = x1 + x2 * x3 * x4

    grad_x1, grad_x2, grad_x3, grad_x4 = ad.gradients(y, [x1, x2, x3, x4])

    executor = ad.Executor([y, grad_x1, grad_x2, grad_x3, grad_x4])
    x1_val = 1 * np.ones(3)
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    x4_val = 4 * np.ones(3)
    y_val, grad_x1_val, grad_x2_val, grad_x3_val, grad_x4_val = executor.run(
        feed_dict={x1: x1_val, x2: x2_val, x3: x3_val, x4: x4_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x1_val + x2_val * x3_val * x4_val)
    assert np.array_equal(grad_x1_val, np.ones_like(x1_val))
    assert np.array_equal(grad_x2_val, x3_val * x4_val)
    assert np.array_equal(grad_x3_val, x2_val * x4_val)
    assert np.array_equal(grad_x4_val, x2_val * x3_val)

def test_exp_grad():
    x = ad.Variable("x")
    y = ad.exp_op(x)
    x_grad, = ad.gradients(y, [x])

    executor = ad.Executor([y, x_grad])
    x_val = 1
    y_val, x_grad_val = executor.run(feed_dict={x: x_val})
    print(y_val)
    print(x_grad_val)

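# Reference for test_exp_grad (not asserted above): since d/dx exp(x) = exp(x),
# both printed values at x = 1 should be approximately e ≈ 2.71828.
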
def test_identity():
    x2 = ad.Variable(name="x2")
    y = x2

    grad_x2, = ad.gradients(y, [x2])

    executor = ad.Executor([y, grad_x2])
    x2_val = 2 * np.ones(3)
    y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x2_val)
    assert np.array_equal(grad_x2_val, np.ones_like(x2_val))

def test_exp():
    x1 = ad.Variable("x1")
    x2 = ad.exp_op(x1)
    x3 = x2 + 1
    x4 = x2 * x3
    x1_grad, = ad.gradients(x4, [x1])

    # Fetch both the output and the gradient, so the two-value unpacking
    # below matches (the original built the Executor with [x4] only).
    executor = ad.Executor([x4, x1_grad])
    x1_val = 1
    x4_val, x1_grad_val = executor.run(feed_dict={x1: x1_val})
    print(x4_val)
    print(x1_grad_val)

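# Reference for test_exp (not asserted above): x4 = exp(x)*(exp(x) + 1)
# = exp(2x) + exp(x), so at x = 1 the output is about 10.1073 and the
# gradient d(x4)/dx = 2*exp(2x) + exp(x) is about 17.4964.
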
def test_lr():
    # Fit y = x + 0.5 with a single-unit linear model via gradient descent.
    W = ad.Variable(name="W")
    b = ad.Variable(name="b")
    X = ad.Variable(name="X")
    y_ = ad.Variable(name="y_")

    # Initialize data and parameters on the GPU.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 1, 100).reshape((100, 1))
    y_val = x_val + 0.5
    W_val = np.array([[0.1]])
    b_val = np.array([0.1])
    x_val = ndarray.array(x_val, ctx)
    W_val = ndarray.array(W_val, ctx)
    b_val = ndarray.array(b_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    z = ad.matmul_op(X, W)           # z.shape = (100, 1)
    y = z + ad.broadcastto_op(b, z)  # b.shape = (1, 1) -> y.shape = (100, 1)
    y = ad.fullyactivation_forward_op(y, "NCHW", "relu")
    # Mean squared error: (y - y_)^T (y - y_) / 100
    loss = ad.matmul_op(y + (-1) * y_, y + (-1) * y_, trans_A=True) * (1 / 100)
    # loss = ad.softmaxcrossentropy_op(y, y_)

    grad_W, grad_b = ad.gradients(loss, [W, b])
    executor = ad.Executor([loss, grad_W, grad_b], ctx)

    lr = 1e-6  # learning rate
    for i in range(100):
        loss_val, grad_W_val, grad_b_val = executor.run(
            feed_dict={X: x_val, b: b_val, W: W_val, y_: y_val})

        # SGD update via a round trip through numpy.
        grad_W_val = grad_W_val.asnumpy()
        W_val = W_val.asnumpy()
        W_val = W_val - lr * grad_W_val
        W_val = ndarray.array(W_val, ctx)

        grad_b_val = grad_b_val.asnumpy()
        b_val = b_val.asnumpy()
        b_val = b_val - lr * grad_b_val
        b_val = ndarray.array(b_val, ctx)
        print(W_val.asnumpy(), b_val.asnumpy())

def test_l1_l2_regular():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # Initialize input data on the GPU.
    ctx = ndarray.gpu(0)
    x_val = np.ones((5, 2)) * 0.5
    x_val = ndarray.array(x_val, ctx)

    loss = ad.l2regular_op(inputs)
    # loss = ad.l1regular_op(inputs)
    grad_f = ad.gradients(loss, [inputs])  # ad.gradients returns a list

    executor = ad.Executor([loss, grad_f[0]], ctx=ctx)
    g_val = executor.run(feed_dict={inputs: x_val})  # returns a list
    print("loss:", g_val[0].asnumpy())
    print("grad:", g_val[1].asnumpy())

def test_reduce_mean():
    inputs = ad.Variable("inputs")
    ctx = ndarray.gpu(0)
    shape = (2, 2, 3)
    x = np.random.uniform(0, 20, shape).astype(np.float32)
    arr_x = ndarray.array(x, ctx=ctx)

    outputs = ad.reduce_mean_op(inputs, 1)  # mean over axis 1
    f_out = ad.pow_op(outputs, 2)
    grad_out = ad.gradients(f_out, [inputs])

    executor = ad.Executor([outputs, f_out, grad_out[0]], ctx=ctx)
    result = executor.run(feed_dict={inputs: arr_x})
    print(arr_x.asnumpy())
    print(result[0].asnumpy())
    print(result[1].asnumpy())
    print(result[2].asnumpy())

def test_mul_two_vars():
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = x2 * x3

    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])

    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val = executor.run(
        feed_dict={x2: x2_val, x3: x3_val})

    assert isinstance(y, ad.Node)
    assert np.array_equal(y_val, x2_val * x3_val)
    assert np.array_equal(grad_x2_val, x3_val)
    assert np.array_equal(grad_x3_val, x2_val)

def test_l1_l2_cross_loss():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # Initialize input data on the GPU.
    ctx = ndarray.gpu(0)
    x_val = np.ones((5, 2)) * 0.5
    filters_val = np.ones((2, 2, 10)) * 0.001
    y_val = np.ones((5, 2))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    # loss = ad.crossEntropy_op(inputs, y_)
    loss = ad.l1loss_op(inputs, y_)
    grad_f = ad.gradients(loss, [inputs, y_])  # ad.gradients returns a list

    executor = ad.Executor([loss, grad_f[0], grad_f[1]], ctx=ctx)
    g_val = executor.run(feed_dict={inputs: x_val, y_: y_val})  # returns a list
    print("loss:", g_val[0].asnumpy())
    print("grad wrt inputs:", g_val[1].asnumpy())
    print("grad wrt y_:", g_val[2].asnumpy())

def test_convolution_3d_forward_op():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # Initialize input data on the GPU.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 135).reshape((5, 1, 3, 3, 3))
    filters_val = np.ones((1, 1, 2, 2, 2)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    # conv3d -> max-pool -> relu -> dropout -> flatten
    outputs = ad.convolution_3d_forward_op(inputs, filters, "NCHW", "VALID", 1, 1, 1)
    outputs_pool = ad.pooling_3d_forward_op(outputs, "NCHW", "max", 0, 0, 0, 1, 1, 1, 2, 2, 2)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_dro = ad.dropout_forward_op(outputs_relu, "NCHW", 0.5, 0)
    outputs_f = ad.flatten_op(outputs_dro)
    loss = ad.matmul_op(outputs_f, outputs_f, trans_A=True) * (1 / 5)

    grad_inputs, grad_f = ad.gradients(loss, [inputs, filters])
    executor = ad.Executor([loss, grad_f], ctx=ctx)

    lr = 1.0e-6  # learning rate
    for i in range(20):
        loss_val, filters_grad_val = executor.run(
            feed_dict={inputs: x_val, filters: filters_val})

        # SGD update via a round trip through numpy.
        filters_val = filters_val.asnumpy()
        filters_grad_val = filters_grad_val.asnumpy()
        filters_val = filters_val - lr * filters_grad_val
        filters_val = ndarray.array(filters_val, ctx)
        print("loss_val:", loss_val.asnumpy())
        print("filters_val:", filters_val.asnumpy())

def test_exp_log_reverse_pow():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # Initialize input data on the GPU.
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 80).reshape((5, 1, 4, 4))
    filters_val = np.ones((1, 1, 3, 3)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    # Swap in any of the unary ops below to test its gradient.
    # outputs = ad.exp_op(inputs)
    # outputs = ad.log_op(inputs)
    # outputs = ad.reverse_op(inputs)
    outputs = ad.pow_op(inputs, 2)
    grad_out = ad.gradients(outputs, [inputs])

    executor = ad.Executor([outputs, grad_out[0]], ctx=ctx)
    result = executor.run(feed_dict={inputs: filters_val})
    print(result[0].asnumpy())
    print(result[1].asnumpy())

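# ---------------------------------------------------------------------------
# Helper sketches. The MNIST/VGG examples below call load_mnist_data,
# convert_to_one_hot and sgd_update_gpu, and mnist_mlp reads a module-level
# executor_ctx_cpu, none of which are defined in this file. The definitions
# here are minimal sketches of what those helpers are assumed to look like;
# use the repo's real versions if they differ.
# ---------------------------------------------------------------------------
executor_ctx_cpu = ndarray.cpu(0)  # assumed module-level CPU context


def load_mnist_data(dataset):
    """Load the (train, valid, test) splits from the pickled MNIST file."""
    import gzip
    import pickle
    with gzip.open(dataset, "rb") as f:
        train_set, valid_set, test_set = pickle.load(f, encoding="latin1")
    return train_set, valid_set, test_set


def convert_to_one_hot(vals):
    """Convert a vector of integer class labels to a one-hot matrix."""
    one_hot_vals = np.zeros((vals.size, vals.max() + 1))
    one_hot_vals[np.arange(vals.size), vals] = 1
    return one_hot_vals


def sgd_update_gpu(param, grad_param, learning_rate):
    """In-place SGD step on device NDArrays: param -= learning_rate * grad.

    Sketch only: this round-trips through numpy; a real implementation
    would call an element-wise kernel on the device instead.
    """
    new_val = param.asnumpy() - learning_rate * grad_param.asnumpy()
    ndarray.array(new_val, param.ctx).copyto(param)
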
def mnist_mlp(executor_ctx=None, num_epochs=10, print_loss_val_each_epoch=False):
    # Train a 3-layer multilayer perceptron on MNIST.
    print("Build 3-layer MLP model...")

    W1 = ad.Variable(name="W1")
    W2 = ad.Variable(name="W2")
    W3 = ad.Variable(name="W3")
    b1 = ad.Variable(name="b1")
    b2 = ad.Variable(name="b2")
    b3 = ad.Variable(name="b3")
    X = ad.Variable(name="X")
    y_ = ad.Variable(name="y_")

    # Three layers: two relu activations and one softmax (inside the loss).
    # relu(X W1 + b1)
    z1 = ad.matmul_op(X, W1)
    z2 = z1 + ad.broadcastto_op(b1, z1)
    z3 = ad.relu_op(z2)
    # relu(z3 W2 + b2)
    z4 = ad.matmul_op(z3, W2)
    z5 = z4 + ad.broadcastto_op(b2, z4)
    z6 = ad.relu_op(z5)
    # softmax(z6 W3 + b3)
    z7 = ad.matmul_op(z6, W3)
    y = z7 + ad.broadcastto_op(b3, z7)

    loss = ad.softmaxcrossentropy_op(y, y_)

    grad_W1, grad_W2, grad_W3, grad_b1, grad_b2, grad_b3 = ad.gradients(
        loss, [W1, W2, W3, b1, b2, b3])

    # Everything up to here is symbolic graph definition only;
    # nothing is computed until executor.run is called.
    executor = ad.Executor(
        [loss, grad_W1, grad_W2, grad_W3, grad_b1, grad_b2, grad_b3, y],
        ctx=executor_ctx)

    # Read input data
    datasets = load_mnist_data("mnist.pkl.gz")
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # Set up minibatch
    batch_size = 1000
    n_train_batches = train_set_x.shape[0] // batch_size
    n_valid_batches = valid_set_x.shape[0] // batch_size

    print("Start training loop...")

    # Randomly initialize the network's weights and biases.
    rand = np.random.RandomState(seed=123)
    W1_val = rand.normal(scale=0.1, size=(784, 256))
    W2_val = rand.normal(scale=0.1, size=(256, 100))
    W3_val = rand.normal(scale=0.1, size=(100, 10))
    b1_val = rand.normal(scale=0.1, size=(256))
    b2_val = rand.normal(scale=0.1, size=(100))
    b3_val = rand.normal(scale=0.1, size=(10))
    X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
    y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
    valid_X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
    valid_y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)

    # TODO: parameters are kept as CPU-resident NDArrays here; they are
    # copied to the GPU only for the SGD update (see sgd_update_cpu below).
    W1_val = ndarray.array(W1_val, ctx=executor_ctx_cpu)
    W2_val = ndarray.array(W2_val, ctx=executor_ctx_cpu)
    W3_val = ndarray.array(W3_val, ctx=executor_ctx_cpu)
    b1_val = ndarray.array(b1_val, ctx=executor_ctx_cpu)
    b2_val = ndarray.array(b2_val, ctx=executor_ctx_cpu)
    b3_val = ndarray.array(b3_val, ctx=executor_ctx_cpu)
    X_val = ndarray.array(X_val, ctx=executor_ctx_cpu)
    y_val = ndarray.array(y_val, ctx=executor_ctx_cpu)
    # Everything above is now converted into NDArray format.

    # TODO: SGD update for CPU-resident parameters:
    # copy to GPU, update there, then copy back.
    def sgd_update_cpu(param, grad, lr):
        param_gpu = ndarray.empty(param.shape, executor_ctx)
        param.copyto(param_gpu)
        grad_gpu = ndarray.empty(grad.shape, executor_ctx)
        grad.copyto(grad_gpu)
        sgd_update_gpu(param_gpu, grad_gpu, lr)
        param_gpu.copyto(param)
        grad_gpu.copyto(grad)

    lr = 1.0e-3
    for i in range(num_epochs):
        print("epoch %d" % i)
        for minibatch_index in range(n_train_batches):
            minibatch_start = minibatch_index * batch_size
            minibatch_end = (minibatch_index + 1) * batch_size
            X_val[:] = train_set_x[minibatch_start:minibatch_end]
            y_val[:] = convert_to_one_hot(
                train_set_y[minibatch_start:minibatch_end])

            # Compute the loss and gradients for a single step.
            loss_val, grad_W1_val, grad_W2_val, grad_W3_val, \
                grad_b1_val, grad_b2_val, grad_b3_val, _ = executor.run(
                    feed_dict={
                        X: X_val,
                        y_: y_val,
                        W1: W1_val,
                        W2: W2_val,
                        W3: W3_val,
                        b1: b1_val,
                        b2: b2_val,
                        b3: b3_val})

            sgd_update_cpu(W1_val, grad_W1_val, lr)
            sgd_update_cpu(W2_val, grad_W2_val, lr)
            sgd_update_cpu(W3_val, grad_W3_val, lr)
            sgd_update_cpu(b1_val, grad_b1_val, lr)
            sgd_update_cpu(b2_val, grad_b2_val, lr)
            sgd_update_cpu(b3_val, grad_b3_val, lr)
            # Alternative when the parameters live on the GPU:
            # sgd_update_gpu(W1_val, grad_W1_val, lr)
            # sgd_update_gpu(W2_val, grad_W2_val, lr)
            # sgd_update_gpu(W3_val, grad_W3_val, lr)
            # sgd_update_gpu(b1_val, grad_b1_val, lr)
            # sgd_update_gpu(b2_val, grad_b2_val, lr)
            # sgd_update_gpu(b3_val, grad_b3_val, lr)
        if print_loss_val_each_epoch:
            if isinstance(loss_val, ndarray.NDArray):
                print(loss_val.asnumpy())
            else:
                print(loss_val)

    correct_predictions = []
    for minibatch_index in range(n_valid_batches):
        minibatch_start = minibatch_index * batch_size
        minibatch_end = (minibatch_index + 1) * batch_size
        valid_X_val[:] = valid_set_x[minibatch_start:minibatch_end]
        valid_y_val[:] = convert_to_one_hot(
            valid_set_y[minibatch_start:minibatch_end])
        _, _, _, _, _, _, _, valid_y_predicted = executor.run(
            feed_dict={
                X: valid_X_val,
                y_: valid_y_val,
                W1: W1_val,
                W2: W2_val,
                W3: W3_val,
                b1: b1_val,
                b2: b2_val,
                b3: b3_val},
            convert_to_numpy_ret_vals=True)
        correct_prediction = np.equal(
            np.argmax(valid_y_val, 1),
            np.argmax(valid_y_predicted, 1)).astype(np.float32)
        correct_predictions.extend(correct_prediction)
    accuracy = np.mean(correct_predictions)
    # validation set accuracy=0.970800
    print("validation set accuracy=%f" % accuracy)

def mnist_logreg(executor_ctx=None, num_epochs=10, print_loss_val_each_epoch=False):
    # Train a logistic regression model on MNIST.
    print("Build logistic regression model...")

    W1 = ad.Variable(name="W1")
    b1 = ad.Variable(name="b1")
    X = ad.Variable(name="X")
    y_ = ad.Variable(name="y_")

    z1 = ad.matmul_op(X, W1)
    y = z1 + ad.broadcastto_op(b1, z1)

    loss = ad.softmaxcrossentropy_op(y, y_)

    grad_W1, grad_b1 = ad.gradients(loss, [W1, b1])
    executor = ad.Executor([loss, grad_W1, grad_b1, y], ctx=executor_ctx)

    # Read input data
    datasets = load_mnist_data("mnist.pkl.gz")
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # Set up minibatch
    batch_size = 1000
    n_train_batches = train_set_x.shape[0] // batch_size
    n_valid_batches = valid_set_x.shape[0] // batch_size

    print("Start training loop...")

    # Initialize parameters
    W1_val = np.zeros((784, 10))
    b1_val = np.zeros((10))
    X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
    y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
    valid_X_val = np.empty(shape=(batch_size, 784), dtype=np.float32)
    valid_y_val = np.empty(shape=(batch_size, 10), dtype=np.float32)
    if ndarray.is_gpu_ctx(executor_ctx):
        W1_val = ndarray.array(W1_val, ctx=executor_ctx)
        b1_val = ndarray.array(b1_val, ctx=executor_ctx)
        X_val = ndarray.array(X_val, ctx=executor_ctx)
        y_val = ndarray.array(y_val, ctx=executor_ctx)

    lr = 1e-3
    for i in range(num_epochs):
        print("epoch %d" % i)
        for minibatch_index in range(n_train_batches):
            minibatch_start = minibatch_index * batch_size
            minibatch_end = (minibatch_index + 1) * batch_size
            X_val[:] = train_set_x[minibatch_start:minibatch_end]
            y_val[:] = convert_to_one_hot(
                train_set_y[minibatch_start:minibatch_end])
            loss_val, grad_W1_val, grad_b1_val, _ = executor.run(
                feed_dict={X: X_val, y_: y_val, W1: W1_val, b1: b1_val})
            # SGD update
            if executor_ctx is None:
                W1_val = W1_val - lr * grad_W1_val
                b1_val = b1_val - lr * grad_b1_val
            else:
                sgd_update_gpu(W1_val, grad_W1_val, lr)
                sgd_update_gpu(b1_val, grad_b1_val, lr)
        if print_loss_val_each_epoch:
            if isinstance(loss_val, ndarray.NDArray):
                print(loss_val.asnumpy())
            else:
                print(loss_val)

    correct_predictions = []
    for minibatch_index in range(n_valid_batches):
        minibatch_start = minibatch_index * batch_size
        minibatch_end = (minibatch_index + 1) * batch_size
        valid_X_val[:] = valid_set_x[minibatch_start:minibatch_end]
        valid_y_val[:] = convert_to_one_hot(
            valid_set_y[minibatch_start:minibatch_end])
        _, _, _, valid_y_predicted = executor.run(
            feed_dict={X: valid_X_val, y_: valid_y_val, W1: W1_val, b1: b1_val},
            convert_to_numpy_ret_vals=True)
        correct_prediction = np.equal(
            np.argmax(valid_y_val, 1),
            np.argmax(valid_y_predicted, 1)).astype(np.float32)
        correct_predictions.extend(correct_prediction)
    accuracy = np.mean(correct_predictions)
    # validation set accuracy=0.928200
    print("validation set accuracy=%f" % accuracy)

def vgg16():
    # Build a VGG-16 graph (conv + batch-norm + relu blocks, three FC layers)
    # on synthetic data and take a few SGD steps on the filters.
    n = 10
    n_class = 10
    inputs = ad.Variable("inputs")
    filters1_1 = ad.Variable("filters1_1")
    filters1_2 = ad.Variable("filters1_2")
    filters2_1 = ad.Variable("filters2_1")
    filters2_2 = ad.Variable("filters2_2")
    filters3_1 = ad.Variable("filters3_1")
    filters3_2 = ad.Variable("filters3_2")
    filters3_3 = ad.Variable("filters3_3")
    filters4_1 = ad.Variable("filters4_1")
    filters4_2 = ad.Variable("filters4_2")
    filters4_3 = ad.Variable("filters4_3")
    filters5_1 = ad.Variable("filters5_1")
    filters5_2 = ad.Variable("filters5_2")
    filters5_3 = ad.Variable("filters5_3")
    filters6 = ad.Variable("filters6")
    filters7 = ad.Variable("filters7")
    filters8 = ad.Variable("filters8")
    biases6 = ad.Variable("biases6")
    biases7 = ad.Variable("biases7")
    biases8 = ad.Variable("biases8")
    y_ = ad.Variable(name="y_")

    x_val = np.linspace(0, 0.001, 10 * 3 * 224 * 224).reshape((10, 3, 224, 224))
    filters_val = [np.ones((64, 3, 3, 3)) * 0.001]
    filters_val.append(np.ones((64, 64, 3, 3)) * 0.001)
    filters_val.append(np.ones((128, 64, 3, 3)) * 0.001)
    filters_val.append(np.ones((128, 128, 3, 3)) * 0.001)
    filters_val.append(np.ones((256, 128, 3, 3)) * 0.001)
    filters_val.append(np.ones((256, 256, 3, 3)) * 0.001)
    filters_val.append(np.ones((256, 256, 3, 3)) * 0.001)
    filters_val.append(np.ones((512, 256, 3, 3)) * 0.001)
    filters_val.append(np.ones((512, 512, 3, 3)) * 0.001)
    filters_val.append(np.ones((512, 512, 3, 3)) * 0.001)
    filters_val.append(np.ones((512, 512, 3, 3)) * 0.001)
    filters_val.append(np.ones((512, 512, 3, 3)) * 0.001)
    filters_val.append(np.ones((512, 512, 3, 3)) * 0.001)
    filters_val.append(np.ones((512 * 7 * 7, 4096)) * 0.001)
    filters_val.append(np.ones((4096, 4096)) * 0.001)
    filters_val.append(np.ones((4096, n_class)) * 0.001)
    biases_val = [np.ones((1, 4096)) * 0.001]
    biases_val.append(np.ones((1, 4096)) * 0.001)
    biases_val.append(np.ones((1, n_class)) * 0.001)
    y_val = np.zeros((10, n_class))

    ctx = ndarray.gpu(0)
    for i in range(16):
        filters_val[i] = ndarray.array(filters_val[i], ctx)

    # conv 1
    conv1_1 = ad.convolution_2d_forward_op(inputs, filters1_1, "NCHW", "SAME", 1, 1)
    bn1_1 = ad.bn_forward_op(conv1_1, "NCHW", "pre_activation")
    act1_1 = ad.activation_forward_op(bn1_1, "NCHW", "relu")
    conv1_2 = ad.convolution_2d_forward_op(act1_1, filters1_2, "NCHW", "SAME", 1, 1)
    bn1_2 = ad.bn_forward_op(conv1_2, "NCHW", "pre_activation")
    act1_2 = ad.activation_forward_op(bn1_2, "NCHW", "relu")
    pool1 = ad.pooling_2d_forward_op(act1_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 2
    conv2_1 = ad.convolution_2d_forward_op(pool1, filters2_1, "NCHW", "SAME", 1, 1)
    bn2_1 = ad.bn_forward_op(conv2_1, "NCHW", "pre_activation")
    act2_1 = ad.activation_forward_op(bn2_1, "NCHW", "relu")
    conv2_2 = ad.convolution_2d_forward_op(act2_1, filters2_2, "NCHW", "SAME", 1, 1)
    bn2_2 = ad.bn_forward_op(conv2_2, "NCHW", "pre_activation")
    act2_2 = ad.activation_forward_op(bn2_2, "NCHW", "relu")
    pool2 = ad.pooling_2d_forward_op(act2_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 3
    conv3_1 = ad.convolution_2d_forward_op(pool2, filters3_1, "NCHW", "SAME", 1, 1)
    bn3_1 = ad.bn_forward_op(conv3_1, "NCHW", "pre_activation")
    act3_1 = ad.activation_forward_op(bn3_1, "NCHW", "relu")
    conv3_2 = ad.convolution_2d_forward_op(act3_1, filters3_2, "NCHW", "SAME", 1, 1)
    bn3_2 = ad.bn_forward_op(conv3_2, "NCHW", "pre_activation")
    act3_2 = ad.activation_forward_op(bn3_2, "NCHW", "relu")
    conv3_3 = ad.convolution_2d_forward_op(act3_2, filters3_3, "NCHW", "SAME", 1, 1)
    bn3_3 = ad.bn_forward_op(conv3_3, "NCHW", "pre_activation")
    act3_3 = ad.activation_forward_op(bn3_3, "NCHW", "relu")
    pool3 = ad.pooling_2d_forward_op(act3_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 4
    conv4_1 = ad.convolution_2d_forward_op(pool3, filters4_1, "NCHW", "SAME", 1, 1)
    bn4_1 = ad.bn_forward_op(conv4_1, "NCHW", "pre_activation")
    act4_1 = ad.activation_forward_op(bn4_1, "NCHW", "relu")
    conv4_2 = ad.convolution_2d_forward_op(act4_1, filters4_2, "NCHW", "SAME", 1, 1)
    bn4_2 = ad.bn_forward_op(conv4_2, "NCHW", "pre_activation")
    act4_2 = ad.activation_forward_op(bn4_2, "NCHW", "relu")
    conv4_3 = ad.convolution_2d_forward_op(act4_2, filters4_3, "NCHW", "SAME", 1, 1)
    bn4_3 = ad.bn_forward_op(conv4_3, "NCHW", "pre_activation")
    act4_3 = ad.activation_forward_op(bn4_3, "NCHW", "relu")
    pool4 = ad.pooling_2d_forward_op(act4_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 5
    conv5_1 = ad.convolution_2d_forward_op(pool4, filters5_1, "NCHW", "SAME", 1, 1)
    bn5_1 = ad.bn_forward_op(conv5_1, "NCHW", "pre_activation")
    act5_1 = ad.activation_forward_op(bn5_1, "NCHW", "relu")
    conv5_2 = ad.convolution_2d_forward_op(act5_1, filters5_2, "NCHW", "SAME", 1, 1)
    bn5_2 = ad.bn_forward_op(conv5_2, "NCHW", "pre_activation")
    act5_2 = ad.activation_forward_op(bn5_2, "NCHW", "relu")
    conv5_3 = ad.convolution_2d_forward_op(act5_2, filters5_3, "NCHW", "SAME", 1, 1)
    bn5_3 = ad.bn_forward_op(conv5_3, "NCHW", "pre_activation")
    act5_3 = ad.activation_forward_op(bn5_3, "NCHW", "relu")
    pool5 = ad.pooling_2d_forward_op(act5_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # fc6
    pool5_flat = ad.flatten_op(pool5)
    mul6 = ad.matmul_op(pool5_flat, filters6)
    add6 = ad.add_op(mul6, biases6)
    bn6 = ad.fullybn_forward_op(add6, "NCHW")
    fc6 = ad.fullyactivation_forward_op(bn6, "NCHW", "relu")
    drop6 = ad.fullydropout_forward_op(fc6, "NCHW", 0.5)

    # fc7
    mul7 = ad.matmul_op(drop6, filters7)
    add7 = ad.add_op(mul7, biases7)
    bn7 = ad.fullybn_forward_op(add7, "NCHW")
    fc7 = ad.fullyactivation_forward_op(bn7, "NCHW", "relu")
    drop7 = ad.fullydropout_forward_op(fc7, "NCHW", 0.5)

    # fc8
    mul8 = ad.matmul_op(drop7, filters8)
    add8 = ad.add_op(mul8, biases8)
    fc8 = ad.fullyactivation_forward_op(add8, "NCHW", "softmax")

    loss = ad.l2loss_op(fc8, y_)
    grad = ad.gradients(loss, [filters1_1, filters1_2, filters2_1, filters2_2,
                               filters3_1, filters3_2, filters3_3,
                               filters4_1, filters4_2, filters4_3,
                               filters5_1, filters5_2, filters5_3,
                               filters6, filters7])
    executor = ad.Executor(grad + [loss, y_], ctx=ctx)

    lr = 1.0e-6  # learning rate
    for step in range(20):
        # Pick one random sample per step.
        select = random.randint(0, n - 1)
        tmp_x_val = np.expand_dims(x_val[select], 0)
        tmp_y_val = np.expand_dims(y_val[select], 0)
        grad_val = executor.run(
            feed_dict={
                inputs: tmp_x_val, y_: tmp_y_val,
                filters1_1: filters_val[0], filters1_2: filters_val[1],
                filters2_1: filters_val[2], filters2_2: filters_val[3],
                filters3_1: filters_val[4], filters3_2: filters_val[5],
                filters3_3: filters_val[6],
                filters4_1: filters_val[7], filters4_2: filters_val[8],
                filters4_3: filters_val[9],
                filters5_1: filters_val[10], filters5_2: filters_val[11],
                filters5_3: filters_val[12],
                filters6: filters_val[13], filters7: filters_val[14],
                filters8: filters_val[15],
                biases6: biases_val[0], biases7: biases_val[1],
                biases8: biases_val[2]})
        # Update the 13 conv filters and the fc6 weights (as in the original,
        # fc7/fc8 weights are left fixed here).
        for i in range(14):
            sgd_update_gpu(filters_val[i], grad_val[i], lr)
        print(filters_val[0].asnumpy())
    return filters_val
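

if __name__ == "__main__":
    # Assumed entry point (not part of the original file): run the pure-numpy
    # autodiff tests first; the GPU-backed examples such as test_lr(),
    # mnist_mlp(ndarray.gpu(0)) or vgg16() need a CUDA device and the MNIST
    # data file, so they are not invoked here.
    test_identity()
    test_mul_two_vars()
    test_add_mul_mix_2()
    test_add_mul_mix_3()
    test_matmul_two_vars()
    test_grad_of_grad()
    print("CPU autodiff tests passed")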