def off_partial_backward(node: Node, X_batch, Y_batch=None):
    """Accumulate the batch mean absolute difference between X and Y into
    buffer "b", mutating the buffer in place.

    NOTE(review): assumes Y_batch is provided despite the None default —
    np.abs(X_batch - None) would raise; presumably callers always pass Y.
    """
    delta = np.mean(np.abs(X_batch - Y_batch))
    buffer = node.get_buffer("b")
    # In-place add so the stored buffer object itself is updated.
    buffer += delta
def inv_initialize(node: Node, x=None, **kwargs):
    """Initialize the node's input/output dims from x's second axis, if given."""
    if x is None:
        return
    dim = x.shape[1]
    node.set_input_dim(dim)
    node.set_output_dim(dim)
def plus_forward(node: Node, x: np.ndarray):
    """Forward pass: add params c and h and the node's current state to x."""
    result = x + node.c
    result = result + node.h
    return result + node.state()
def fb_initialize(node: Node, x=None, **kwargs):
    """Set input and output dims to x's feature dimension.

    NOTE(review): no None-guard here (unlike on_initialize) — presumably
    this initializer is always called with a sample; confirm against callers.
    """
    dim = x.shape[1]
    node.set_input_dim(dim)
    node.set_output_dim(dim)
def fb_initialize_fb(node: Node, fb=None):
    """Set the node's feedback dim from the feedback sample's second axis."""
    feedback_dim = fb.shape[1]
    node.set_feedback_dim(feedback_dim)
def minus_initialize(node: Node, x=None, **kwargs):
    """Set input/output dims from x and initialize param "c" to 1."""
    dim = x.shape[1]
    node.set_input_dim(dim)
    node.set_output_dim(dim)
    node.set_param("c", 1)
def fb_forward(node: Node, x):
    """Forward pass: add the feedback signal and a constant 1 to x."""
    fb = node.feedback()
    return fb + x + 1
def on_initialize(node: Node, x=None, y=None):
    """Initialize input/output dims from x's second axis, if a sample is given."""
    if x is None:
        return
    dim = x.shape[1]
    node.set_input_dim(dim)
    node.set_output_dim(dim)
def minus_forward(node: Node, x):
    """Forward pass: subtract params c and h and the current state from x."""
    out = x - node.c
    out = out - node.h
    return out - node.state()
def unsupervised_initialize_buffers(node: Node):
    """Create a length-1 buffer named "b" on the node."""
    buffer_shape = (1,)
    node.create_buffer("b", buffer_shape)
def on_train(node: Node, x, y=None):
    """Online training step: add the mean of x (or of x + y when a target is
    given) to param "b"."""
    if y is None:
        increment = np.mean(x)
    else:
        increment = np.mean(x + y)
    node.set_param("b", node.b + increment)
def unsupervised_backward(node: Node, X=None, Y=None):
    """Finalize training: copy the accumulated buffer "b" into param "b"."""
    accumulated = node.get_buffer("b")
    # Copy so the param does not alias the (possibly reused) buffer storage.
    node.set_param("b", np.array(accumulated).copy())
def unsupervised_partial_backward(node: Node, X_batch, Y_batch=None):
    """Accumulate the batch mean of X into buffer "b" via set_buffer."""
    batch_mean = np.mean(X_batch)
    current = node.get_buffer("b")
    node.set_buffer("b", current + batch_mean)
def sum_initialize(node: Node, x=None, **kwargs):
    """Initialize input/output dims from x, concatenating first when x is a
    list of arrays (multiple incoming inputs)."""
    if x is None:
        return
    if isinstance(x, list):
        x = np.concatenate(x, axis=0)
    dim = x.shape[1]
    node.set_input_dim(dim)
    node.set_output_dim(dim)
def off2_initialize_buffers(node: Node):
    """Create a length-1 buffer named "b" on the node."""
    shape = (1,)
    node.create_buffer("b", shape)
def off_backward_basic(node: Node, X=None, Y=None):
    """Finalize training: set param "b" to the mean of the node's stored
    inputs (node._X); the X/Y arguments are ignored here."""
    mean_of_inputs = np.mean(node._X)
    node.set_param("b", np.array(mean_of_inputs).copy())