def layering(self, activator=tfe.Activator.ReLU.value):
    """Build a two-layer network: Affine -> activation -> Affine -> activation -> loss.

    :param activator: activation layer class applied after each affine transform
                      (default: ReLU taken from the tfe.Activator enum).
    """
    self.activator = activator
    # Hidden layer: affine transform on the input node, then activation.
    self.affine0 = tfl.Affine(self.params['W0'], self.input_node, self.params['b0'], name="A0", graph=self)
    self.activation0 = activator(self.affine0, name="O0", graph=self)
    # Output layer: second affine consumes the hidden activation, not the raw input.
    self.affine1 = tfl.Affine(self.params['W1'], self.activation0, self.params['b1'], name="A1", graph=self)
    self.output = activator(self.affine1, name="O1", graph=self)
    # Squared-error loss against the target node.
    # BUG FIX: the original body executed this whole construction sequence twice,
    # registering every node a second time (same names) in the graph.
    self.error = tfl.SquaredError(self.output, self.target_node, name="SE", graph=self)
def layering(self, activator=tfe.Activator.ReLU.value):
    """Wire up a single-layer network: one affine transform, one activation,
    and a squared-error loss against the target node.

    :param activator: activation layer class applied after the affine transform
                      (default: ReLU taken from the tfe.Activator enum).
    """
    self.activator = activator
    weight, bias = self.params['W0'], self.params['b0']
    self.affine = tfl.Affine(weight, self.input_node, bias, name="A", graph=self)
    self.output = activator(self.affine, name="O", graph=self)
    self.error = tfl.SquaredError(self.output, self.target_node, name="SE", graph=self)
def layering(self, activator=tfe.Activator.ReLU.value):
    """Build a single-layer network and, when this object is also an nx.Graph,
    record the node connectivity as explicit networkx edges.

    :param activator: activation layer class applied after the affine transform
                      (default: ReLU taken from the tfe.Activator enum).
    """
    self.activator = activator
    affine = tfl.Affine(self.params['W0'], self.input_node, self.params['b0'], name="A")
    self.output = activator(affine, name="O")
    self.error = tfl.SquaredError(self.output, self.target_node, name="SE")
    if isinstance(self, nx.Graph):
        # NOTE(review): the last edge runs error -> target_node, opposite to the
        # data-flow direction of the others; kept as-is to match the sibling
        # two-layer implementation.
        connections = [
            (self.params['W0'], affine),
            (self.input_node, affine),
            (self.params['b0'], affine),
            (affine, self.output),
            (self.output, self.error),
            (self.error, self.target_node),
        ]
        for src, dst in connections:
            self.add_edge(src, dst)
def layering(self, activator=tfe.Activator.ReLU.value):
    """Build a multi-layer network: ``hidden_layer_num`` hidden (affine + activation)
    pairs, an output affine + activation, and a squared-error loss.

    :param activator: activation layer class applied after every affine transform
                      (default: ReLU taken from the tfe.Activator enum).
    """
    self.activator = activator
    # Feed each layer with the previous layer's activation; the first layer
    # consumes the graph's input node.
    prev_node = self.input_node
    for idx in range(self.hidden_layer_num):
        affine_name = 'affine' + str(idx)
        self.layers[affine_name] = tfl.Affine(
            self.params['W' + str(idx)],
            # BUG FIX: was self.input_node for every layer, so hidden layers
            # were never chained together.
            prev_node,
            self.params['b' + str(idx)],
            name=affine_name,
            graph=self
        )
        activation_name = 'activation' + str(idx)
        self.layers[activation_name] = activator(
            self.layers[affine_name], name=activation_name, graph=self
        )
        prev_node = self.layers[activation_name]

    # Output layer: its affine takes the last hidden activation as input.
    idx = self.hidden_layer_num
    self.layers['affine' + str(idx)] = tfl.Affine(
        self.params['W' + str(idx)],
        prev_node,
        self.params['b' + str(idx)],
        name='affine' + str(idx),
        graph=self
    )
    self.output = activator(self.layers['affine' + str(idx)], name='output', graph=self)
    self.error = tfl.SquaredError(self.output, self.target_node, name="SE", graph=self)
def layering(self, activator=tfe.Activator.ReLU.value):
    """Build a two-layer network (affine -> activation, twice, then squared-error
    loss) and, when this object is also an nx.Graph, record the connectivity as
    explicit networkx edges.

    :param activator: activation layer class applied after each affine transform
                      (default: ReLU taken from the tfe.Activator enum).
    """
    self.activator = activator
    hidden_affine = tfl.Affine(self.params['W0'], self.input_node, self.params['b0'], name="A0")
    hidden_out = activator(hidden_affine, name="O0")
    out_affine = tfl.Affine(self.params['W1'], hidden_out, self.params['b1'], name="A1")
    self.output = activator(out_affine, name="O1")
    self.error = tfl.SquaredError(self.output, self.target_node, name="SE")
    if isinstance(self, nx.Graph):
        # NOTE(review): the last edge runs error -> target_node, opposite to the
        # data-flow direction of the others; preserved as in the original.
        for src, dst in (
            (self.params['W0'], hidden_affine),
            (self.input_node, hidden_affine),
            (self.params['b0'], hidden_affine),
            (hidden_affine, hidden_out),
            (self.params['W1'], out_affine),
            (hidden_out, out_affine),
            (self.params['b1'], out_affine),
            (out_affine, self.output),
            (self.output, self.error),
            (self.error, self.target_node),
        ):
            self.add_edge(src, dst)