def evaluate(self, flag, x, y):
    """Compute the averaged squared error between two tensors.

    Args:
        flag: unused here; kept for interface compatibility with callers.
        x: predicted tensor.
        y: target tensor (same shape as ``x`` — assumed, confirm with callers).

    Returns:
        A float value: 0.5 * sum((x - y)^2) averaged over the number of
        elements in ``x``.
    """
    diff = x - y
    # Half-sum-of-squares, normalized by the element count of x.
    total = tensor.sum(tensor.square(diff) * 0.5)
    return total / x.size()
    # Tail of the enclosing layer-construction helper (its `def` line is
    # outside this chunk): apply the optional activation to the affine
    # output `Wx_plus_b`, otherwise return it linearly.
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs


def add_hide_layer(inputs, in_size, out_size, activation_function=None):
    """Build a hidden layer: multiply `inputs` by an [in_size, out_size]
    weight variable and apply `activation_function` if one is given.

    NOTE(review): the weights start as all zeros, so every unit computes
    the same value and gradients stay symmetric — presumably a random
    initialization was intended; confirm.
    """
    Weights = tensor.Variable(np.zeros([in_size, out_size]))
    # NOTE(review): `Weights * inputs` uses whatever `*` means in this
    # tensor library — the [in_size, out_size] shape suggests a matrix
    # product was intended; verify against the `tensor` API.
    Wx = Weights * inputs
    if activation_function is None:
        outputs = Wx
    else:
        outputs = activation_function(Wx)
    return outputs


# Network assembly: one hidden layer with ReLU, then a linear output layer.
l1 = add_input_layer(x, 1, 10, activation_function=tensor.relu)
# l1 = add_input_layer(x, 1, 10)
# l1.derivative()
# add output layer
y = add_hide_layer(l1, 10, 1, activation_function=None)
# Sum-of-squares loss between the prediction `y` and the target
# placeholder `tf_y`; `train` is the resulting minimization op.
loss = tensor.sum((y - tf_y)**2)
train = tensor.minimize(loss)
for step in range(100):
    # NOTE(review): the feed dict maps `tf_y` to `y`, which is the model's
    # own output tensor, not target data — this looks like a bug; confirm
    # which variable holds the training targets.
    print('loss = ', tensor.run(train, {tf_x: x, tf_y: y}))