예제 #1
0
    def __init__(self, numneurons_list, activationType_allLayers, input_size):
        """
        Build the network's layer chain.

        Parameters:
        - numneurons_list: list[int]
            Number of neurons in each successive layer.
        - activationType_allLayers: ActivationType
            Activation applied to every hidden layer; the final layer
            always uses SOFTMAX.
        - input_size: int
            Size of the network's input.
        """
        # Training/configuration defaults; expected to be set elsewhere.
        self.batchnorm = False
        self.drop_out = False
        self.optimizer = ''
        self.learning_rate = 0.0
        self.epochs = 0
        self.drop_percent = 0.2

        last = len(numneurons_list) - 1
        self.layer_list = []
        fan_in = input_size
        for idx, width in enumerate(numneurons_list):
            if idx == last:
                # Output layer: softmax activation, flagged as last layer.
                new_layer = Layer(fan_in, width, ActivationType.SOFTMAX, True)
            else:
                new_layer = Layer(fan_in, width, activationType_allLayers,
                                  False)
            self.layer_list.append(new_layer)
            fan_in = width  # next layer's input size is this layer's width
예제 #2
0
def generateDoublet(centroid, width, moduleType="tripletPixel", color=6):
    """Create a pair of layers straddling *centroid*, separated by *width*."""
    half = width / 2.0
    inner = Layer(centroid - half, color, moduleType, layerNumber=1)
    outer = Layer(centroid + half, color, moduleType, layerNumber=2)
    return [inner, outer]
예제 #3
0
 def __init__(self, num_inputs, num_hidden_neurons, num_outputs):
     """Set up a two-layer network: one hidden and one output layer."""
     self.layers = [
         Layer("Hidden Layer", num_inputs, num_hidden_neurons),
         Layer("Output Layer", num_hidden_neurons, num_outputs),
     ]
     # Hyperparameters; expected to be configured before training.
     self.learning_rate = 0
     self.alpha = 0
예제 #4
0
    def __init__(self, n_in, n_out, hidStruct, hidAct, input=None, pDrop=0.0):
        """
        Assemble a multi-layer network from the given hidden structure.

        - n_in / n_out: input dimension and number of output labels.
        - hidStruct: list with the node count of each hidden layer.
        - hidAct: activation selector (aFnt_No) for each hidden layer.
        - pDrop: dropout probability applied to hidden layers.
        """
        self.n_in = n_in
        self.n_out = n_out
        self.hidStruct = hidStruct
        self.hidAct = hidAct
        self.pDrop = pDrop

        # Node counts of every layer, input through output.
        self.struct = [self.n_in] + self.hidStruct + [self.n_out]

        # Hidden layers: one per adjacent pair in self.struct (minus output).
        self.layers = [
            Layer(name=['layer ', str(k + 1)],
                  n_in=self.struct[k],
                  n_out=self.struct[k + 1],
                  pDrop=pDrop,
                  aFnt_No=self.hidAct[k])
            for k in range(len(self.struct) - 2)
        ]
        # Output layer; aFnt_No=-1 selects the output activation.
        self.layers.append(
            Layer(name='output layer',
                  n_in=self.struct[-2],
                  n_out=self.struct[-1],
                  aFnt_No=-1))

        # Collect trainable parameters from every layer.
        self.params = []
        for lyr in self.layers:
            self.params += lyr.params
예제 #5
0
def rollout(model_, i):
    """
    Perform one architecture-search rollout on model_ (rollout index i).

    Wraps each feature/classifier submodule in a Layer, samples actions,
    builds a candidate child model, trains it, and computes its reward.
    Relies on module-level globals (b, dataset, baseline_acc, parentSize,
    rewardsPerModel, accsPerModel, paramsPerModel).
    Returns (reward, actions, newModel); newModel may be None on failure.
    """
    global b
    global R_sum  # NOTE(review): declared global but never read or written here
    # Keep only layers whose .type is in the whitelist — presumably the
    # prunable layer kinds; confirm against Layer's type encoding.
    featureLayers = [Layer(l) for l in model_.features._modules.values()]
    featureLayers = list(filter(lambda x: x.type in [1, 2, 8], featureLayers))
    classifierLayers = [Layer(l) for l in model_.classifier._modules.values()]
    classifierLayers = list(filter(lambda x: x.type in [5], classifierLayers))
    actions = rolloutActions(featureLayers, classifierLayers)
    newModel = build_child_model(featureLayers, classifierLayers, [a.data.numpy()[0] for a in actions])
    if newModel is None:
        # Invalid architecture: fixed negative reward, no bookkeeping.
        R = -1
    else:
        print(newModel)
        acc = train(dataset, newModel)
        R = Reward(acc, numParams(newModel), baseline_acc, parentSize)
        rewardsPerModel[i] = R
        accsPerModel[i] = acc
        paramsPerModel[i] = numParams(newModel)
        #torch.save(newModel, modelSavePath + '%f.net' % i)
        #print('Val accuracy: %f' % acc)
        print('Compression: %f' % (1.0 - (float(numParams(newModel))/parentSize)))
        print('Reward achieved %f' % R)
        print('Reward after baseline %f' % (R-b))
        # Update reward and baseline after each rollout
    return (R, actions, newModel)
예제 #6
0
 def __init__(self, learning_rate, ml_lambda, input_data, targets, max_accepted_error, output_layer_size,
              layer_size_arr):
     """Build the layer chain and the train/test sample splits."""
     from Layer import Layer
     from TestNetwork import TestNetwork

     # Layer(num_neurons, previous_layer, inputs_per_neuron,
     #       outputs_per_neuron, learning_rate, ml_lambda, layer_type)
     last = len(layer_size_arr) - 1
     self.layers = []
     for idx in range(len(layer_size_arr)):
         if idx == 0:
             # Input layer: no previous layer, one input per neuron.
             layer = Layer(layer_size_arr[idx], 0, 1, layer_size_arr[idx + 1],
                           learning_rate, ml_lambda, "input")
         elif idx < last:
             layer = Layer(layer_size_arr[idx], self.layers[idx - 1], 0,
                           layer_size_arr[idx + 1], learning_rate, ml_lambda,
                           "hidden")
         else:
             layer = Layer(layer_size_arr[idx], self.layers[idx - 1], 0,
                           output_layer_size, learning_rate, ml_lambda,
                           "output")
         self.layers.append(layer)

     self.learning_rate = learning_rate
     self.ml_lambda = ml_lambda
     self.input_data = input_data
     self.targets = targets
     self.max_accepted_error = max_accepted_error
     self.correct_guesses = 0
     self.count = 0
     # 141 training samples out of 213 total in the JAFFE data set.
     self.x_sample = self.get_x_sample(141)
     self.y_sample = self.get_y_sample(141)
     self.x_test_sample = self.get_test_x_sample(72)  # remaining test samples
     self.y_test_sample = self.get_test_y_sample(72)
     self.test_percentage = [0]
     self.sample_correct_guesses = 0
     self.sample_count = len(targets)
     self.test_network = TestNetwork(self.layers, self.x_test_sample,
                                     self.y_test_sample,
                                     self.max_accepted_error)
예제 #7
0
def main():
    """Build a tiny 3-2-1 network, train on one pattern, and plot the error history."""
    Log.i('Sphinx is starting....')

    input_layer = Layer(neurons=[Neuron(), Neuron(), Neuron()],
                        layer_type=LayerType.INPUT)

    hidden_layer = Layer(
        [Neuron(bias=-0.4), Neuron(bias=0.2)],
        [Synapse([0.2, 0.4, -0.5]),
         Synapse([-0.3, 0.1, 0.2])], LayerType.HIDDEN)

    output_layer = Layer([Neuron(bias=0.1)], [Synapse([-0.3, -0.2])],
                         LayerType.OUTPUT)

    network = Network([input_layer, hidden_layer, output_layer], 0.1)

    # Utils.save(network)
    network.print_outputs()
    network.train([TrainingPattern([1, 0, 1], [1])], 1000)
    network.print_errors()

    # Visualize the training error curve.
    pyp.plot(network.errors_history)
    pyp.show()
예제 #8
0
    def __init__(self):
        """Create a 3-8-3 network: one hidden layer and one output layer."""
        self.network = [
            Layer(3, 8),  # hidden layer
            Layer(8, 3),  # output layer
        ]
예제 #9
0
def generateModel(learningRate):
    """Build a fixed 4-3-3 MLP: linear input layer, two sigmoid layers."""
    return MLP([
        Layer(4, 0, 4, ActivationFunction.linear),
        Layer(3, 1, 4, ActivationFunction.sigmoid),
        Layer(3, 2, 3, ActivationFunction.sigmoid),
    ], learningRate)
예제 #10
0
파일: ANN.py 프로젝트: Clientrace/BasicANN
def init():
    """Initialize the module-level layers of the network."""
    global trainingFile
    global inputLayer
    global hiddenLayer
    global outputLayer

    inputLayer = Layer("InputLayer", 2)    # 2 input neurons
    hiddenLayer = Layer("HiddenLayer", 3)  # 3 hidden neurons
    outputLayer = Layer("OutputLayer", 3)  # 3 output neurons
예제 #11
0
 def add_layer(self, units):
     """Append a Layer with *units* neurons, wired to the previous layer's width."""
     if self._layers.size == 0:
         # First layer: its input dimension is the network's input dimension.
         layer = Layer(input_dim = self._input_dim, units = units)
     else:
         prev_units = self._layers[self._layers.size - 1].units
         layer = Layer(input_dim = prev_units, units = units)
     self._num_layers += 1
     self._output_dim = units  # network output now matches the newest layer
     self._layers = np.append(self._layers, layer)
예제 #12
0
    def run2(self):
        """
        Train a 6-15-3 network on "data.txt" for 100 epochs and plot the
        per-epoch loss.

        NOTE(review): several points look suspect and should be confirmed:
        - weight updates use dot(l2, l2_delta) / dot(l1, l1_delta), where
          standard backprop would use the layer *inputs*;
        - `if j % 100 == 99` can only fire on the final epoch since j < 100;
        - final_loss is computed from the last sample only, outside the
          inner loop.
        """
        input, output = self.readFromFile("data.txt")
        final_loss = []   # one loss value per epoch (last sample only)
        loss_x = []       # per-sample squared-error sums (final epoch only)
        iteration = []    # epoch indices for plotting

        # initialize nn: layer0 maps 6 inputs -> 15 units, layer1 maps 15 -> 3
        layer0Weights = []
        layer0 = Layer(15, 6)
        for neuron in layer0.neurons:
            layer0Weights.append(neuron.weights)

        layer1Weights = []
        layer1 = Layer(3, 15)
        for neuron in layer1.neurons:
            layer1Weights.append(neuron.weights)

        for j in range(0, 100):
            iteration.append(j)

            for i in range(0, len(input)):
                    input[i] = self.normalizeData(input[i])

                    # Forward pass through both layers.
                    layer0WeightsT = array(layer0Weights).T
                    layer1WeightsT = array(layer1Weights).T

                    d0 = dot(input[i], layer0WeightsT)
                    l1 = self.sigmoid(d0)

                    d1 = dot(l1, layer1WeightsT)
                    l2 = self.sigmoid(d1)

                    # Backward pass: output error and deltas.
                    l2_error = output[i] - l2
                    l2_delta = l2_error * self.sigmoid_derivative(l2)

                    l1_error = dot(l2_delta, layer1Weights)
                    l1_delta = l1_error * self.sigmoid_derivative(l1)

                    # Weight update (no learning rate applied).
                    layer1Weights += dot(l2, l2_delta)
                    layer0Weights += dot(l1, l1_delta)

                    if j % 100 == 99:
                        x = output[i] - l2_error
                        loss_x.append(sum(x**2))
                        # print("Diagnosis " + str(j + 1) + ": " + self.decode(l2_error[0], l2_error[1], l2_error[2]) + "err: [ " + str(l2_error[0]) + str(l2_error[1]) + str(l2_error[2]) + "]" + " sum= " + str(loss_x[-1]))
                        print(
                            "Diagnosis " + str(j + 1) + ": " + self.decode(l2_error[0], l2_error[1], l2_error[2]) + " err: [ " + str(
                                l2_error[0]) + " " + str(l2_error[1]) + " " + str(l2_error[2]) + "]") # + " sum= " + str(loss_x[-1]))

            final_loss.append(sum((output[i] - l2_error)**2))

        mpl.plot(iteration, final_loss, label='loss value vs iteration')
        mpl.xlabel('Iterations')
        mpl.ylabel('loss function')
        mpl.legend()
        mpl.show()
예제 #13
0
def test_triangle(n, norm_vect):
    """
    Fold the plane n times along *norm_vect*, classify points with a
    threshold net, and scatter-plot the predicted classes.

    Returns (sym, folding_net, compute_net) for further inspection.
    """
    # Folding symmetries: one along norm_vect, then n reflections back.
    sym = [(norm_vect, 0)]
    sym.extend((-norm_vect, 10 * 2**(n - i - 1)) for i in range(n))

    folding_net = NeuralNet.folding_net(sym, optimize=True)

    # Rotate into the norm_vect frame, then threshold with a step function.
    layers = [
        Layer(2, 2,
              weights=compute_rot(norm_vect),
              bias=nd.zeros(2),
              function=nd.identity),
        Layer(1, 2,
              weights=nd.array([[-10, 1]]),
              bias=nd.array([0]),
              function=echelon),
    ]
    compute_net = NeuralNet([2, 2, 1], layers)

    # Uniform random sample of the input square, sized with n.
    size = 2**(n + 12)
    inputs = nd.zeros((2, size))
    inputs[0] = nd.random.uniform(-2**(n + 1), 2**(n + 1), size)
    inputs[1] = nd.random.uniform(-2**(n + 1), 2**(n + 1), size)

    outputs = compute_net.compute(folding_net.compute(inputs))

    xs = list(inputs[0].asnumpy())
    ys = list(inputs[1].asnumpy())
    results = list(outputs.asnumpy()[0])

    # Color each point by its predicted class.
    plt.scatter(xs,
                ys,
                c=results,
                cmap=matplotlib.colors.ListedColormap(['red', 'green']),
                marker='.')
    plt.show()

    return sym, folding_net, compute_net
예제 #14
0
    def load(self, file_path):
        """
        Restore a two-layer network from an .npz file.

        Expects arrays 'a1' and 'a2' holding the weight matrices, each of
        shape (units, input_dim). Rebuilds both layers, injects the saved
        weights, and refreshes the cached dimension/count attributes.
        """
        npzfile = np.load(file_path)
        # BUG FIX: the original assigned into an empty np.array([]), which
        # raises IndexError. Preallocate a 2-slot object array instead.
        self._layers = np.empty(2, dtype=object)
        self._layers[0] = Layer(input_dim = npzfile['a1'].shape[1], units = npzfile['a1'].shape[0])
        self._layers[1] = Layer(input_dim = npzfile['a2'].shape[1], units = npzfile['a2'].shape[0])
        self._layers[0]._weights = npzfile['a1']
        self._layers[1]._weights = npzfile['a2']

        self._input_dim = self._layers[0]._weights.shape[1]
        self._output_dim = self._layers[1]._weights.shape[0]
        self._num_layers = 2
예제 #15
0
def multi_layer(X_train, X_test, y_train, y_test, verbose=False):
    """
    Train and evaluate a 784-700-10 network (ReLU hidden, softmax output)
    and print train/test accuracy percentages.
    """
    batch_size = 128
    learning_rate = 1e-2
    momentum_rate = 0.25
    m = Model(Error(), learning_rate, momentum_rate, batch_size)
    # 'fanin' selects fan-in based weight initialization.
    m.add_layer(Layer((784, 700), ReLU(), 'fanin'))
    m.add_layer(Layer((700, 10), SoftMax(), 'fanin'))
    # train/test return error rates; convert to accuracy percentages.
    t_acc = (1 - m.train(X_train, y_train, verbose)) * 100
    # FIX: converted Python 2 print statements to Python 3 print calls
    # (the rest of the file uses Python 3 syntax, e.g. f-strings).
    print("Train accuracy", t_acc, "%")
    print("Test accuracy", (1 - m.test(X_test, y_test)) * 100, "%")
예제 #16
0
 def __init__(self, function, inputsCount, neuronsCountPerLayer):
     """
     Build one Layer per entry of *neuronsCountPerLayer* (a list of ints).
     The first layer is fed by the network inputs; each later layer is fed
     by the neuron count of the layer before it.
     """
     self.inputsCount = max(1, inputsCount)  # guard non-positive input counts
     self.layersCount = len(neuronsCountPerLayer)
     self.layers = [0] * self.getLayersCount()
     for i in range(self.getLayersCount()):
         fan_in = inputsCount if i == 0 else neuronsCountPerLayer[i - 1]
         self.layers[i] = Layer(neuronsCountPerLayer[i], fan_in, function)
예제 #17
0
 def add_layer(self, states=2, activation=None, fixed_weights=False):
     """Append a layer; input size comes from the previous layer's states, or the network input for the first layer."""
     if self.layers:
         in_size = len(self.layers[-1].states)
     else:
         in_size = self.input_size
     self.layers.append(
         Layer(layer_size=states,
               input_size=in_size,
               activation=activation,
               fixed_weights=fixed_weights))
예제 #18
0
def main():
    """Train a small dense network on the XOR mapping; show predictions before/after."""
    model = Sequential()
    model.add(Layer(size = 2))                         # input layer
    model.add(Layer(size = 6,activation = 'relu'))     # hidden layer, 6 neurons
    model.add(Layer(size = 6,activation = 'relu'))     # hidden layer, 6 neurons
    model.add(Layer(size = 2,activation = 'softmax'))  # output layer
    model.compile(learning_rate = 0.1)

    # XOR truth table, targets one-hot encoded.
    X = np.array([[1, 0], [0, 1], [0, 0], [1, 1]])
    Y = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])

    print(model.predict(X))  # before training
    model.fit(X, Y, iterations =10000)
    print(model.predict(X))  # after training
예제 #19
0
 def initialize_model(self, list_of_innovations):
     """Create and register the input and output layers of the model."""
     # Shared bookkeeping collections passed to every Layer.
     layers = self.list_of_all_layers
     nodes = self.list_of_all_nodes
     conns = self.list_of_all_connections

     self.input_layer = Layer(len(layers), Layer.INPUT_LAYER,
                              self.number_of_inputs, nodes, conns,
                              layers, list_of_innovations)
     layers.append(self.input_layer)

     # The output layer additionally fixes its node activation to sigmoid.
     self.output_layer = Layer(len(layers), Layer.OUTPUT_LAYER,
                               self.number_of_outputs, nodes, conns,
                               layers, list_of_innovations,
                               Node.SIGMOID_ACTIVATION_FUNCTION)
     layers.append(self.output_layer)
예제 #20
0
 def __init__(self, n_input_neurons, n_hidden_layers_same_size,
              size_of_a_hidden_layer, n_output_neurons, learning_rate):
     """
     Build a net with n_hidden_layers_same_size equally sized hidden
     layers plus an output layer. learning_rate must lie in [0.1, 1).
     Invalid arguments only print a message (no exception raised).
     """
     valid = (n_input_neurons > 0 and n_hidden_layers_same_size > 1
              and size_of_a_hidden_layer > 0 and n_output_neurons > 0
              and 0.1 <= learning_rate < 1)
     if not valid:
         print("Invalid Construction of the neural network!")
         return
     self.n_input_neurons = n_input_neurons
     self.first_hidden_layer = Layer(size_of_a_hidden_layer,
                                     n_input_neurons)
     # Remaining hidden layers all feed from an equally sized layer.
     self.other_hidden_layers_list = [
         Layer(size_of_a_hidden_layer, size_of_a_hidden_layer)
         for _ in range(n_hidden_layers_same_size - 1)
     ]
     self.output_layer = Layer(n_output_neurons, size_of_a_hidden_layer)
     self.learning_rate = learning_rate
예제 #21
0
    def add(self, num_neuron=None, input_shape=None, activation='linear'):
        """
        Append a layer to the computation graph.

        The first layer must receive input_shape; subsequent layers get a
        weight matrix connecting them to the previous layer.
        """
        if not self.graph:
            if input_shape is None:
                raise Exception(
                    "Please specify input_shape for the input layer")
            # NOTE(review): the input layer is built as (num_neuron,
            # input_shape) while later layers use (num_neuron, activation);
            # confirm Layer's constructor accepts both forms.
            layer = Layer(num_neuron, input_shape)
        else:
            layer = Layer(num_neuron, activation)
            self.graph['weights'].append(self.create_Weights())

        self.graph['layer'].append(layer)
예제 #22
0
	def __init__(self):
		"""Initialize LCD text buffer, drawing layers, pixel set, LCD plate, and the game."""
		# Blank character grid matching the layer dimensions.
		self.lcdString = [[' '] * Layer.WIDTH for _ in range(Layer.HEIGHT)]
		# Drawing layers composited each frame.
		self.canvas  = Layer()
		self.runner  = Layer()
		self.barrier = Layer()
		# Custom-character pixel rows, all empty to start.
		self.pixelSet = [list(Layer.EMPTY) for _ in range(8)]
		# 16x2 character LCD with backlight on.
		self.lcd = Adafruit_CharLCDPlate()
		self.lcd.begin(16, 2)
		self.lcd.backlight(Adafruit_CharLCDPlate.ON)
		self.game = Game(self.lcd)
예제 #23
0
 def add(self, input, output, activation='tanh'):
     """
     Append a Layer(input, output, activation) after validating the
     arguments. Invalid calls return silently without adding a layer.

     Validation: sizes must be non-negative, the activation must be one
     of the supported names, and (after the first layer) *input* must
     match the previous layer's output size.
     """
     supported = ('relu', 'tanh', 'sigmoid', 'leaky relu', 'softmax')
     if input < 0 or output < 0 or activation not in supported:
         return
     # Layers after the first must chain onto the previous layer's output.
     if self.layers and input != self.layers[-1].output:
         return
     # FIX: removed leftover debug print('ok') from the original.
     self.layers.append(Layer(input, output, activation))
예제 #24
0
def main():
    """
    Load a trained network's weights and predict the class of one iris
    sample, after parsing iris.csv into TrainingPatterns.
    """
    Log.i('Sphinx is starting....')

    # Network topology: 4 inputs, 2 hidden neurons, 3 output classes.
    input_layer = Layer(neuron_count=4, layer_type=LayerType.INPUT)
    hidden_layer = Layer(neuron_count=2, synapse_count=2, weight_per_synapse=4, layer_type=LayerType.HIDDEN)
    output_layer = Layer(neuron_count=3, synapse_count=3, weight_per_synapse=2, layer_type=LayerType.OUTPUT)

    network = Network([input_layer, hidden_layer, output_layer], 0.3)

    # Build training patterns from the csv file.
    # BUG FIX: in the original the per-row parsing was dedented out of this
    # loop, so only the final csv row ever produced a TrainingPattern.
    patterns = []
    csv_reader = DictReader(f=open("iris.csv"))
    class_label = ["Iris-virginica", "Iris-versicolor", "Iris-setosa"]
    for row in csv_reader:
        values = [float(list(row.values())[j]) for j in range(0, 4)]

        c = list(row.values())[4]
        if c == "Iris-virginica":
            c_label = [0, 0, 1]
        elif c == "Iris-versicolor":
            c_label = [0, 1, 0]
        elif c == "Iris-setosa":
            c_label = [1, 0, 0]
        else:
            c_label = []
        patterns.append(TrainingPattern(values, c_label))

    # network.train(patterns, 1000)
    network.load_weight('weights_iter=999_iter_mode.txt')
    i = network.predict([5.4, 3.4, 1.7, 0.2])
    print(class_label[i])
예제 #25
0
def test_triangle_horiz(n):
    """
    Fold the x-axis n times, classify points against a unit triangle wave,
    and print a comparison with the analytic boundary (layer/parameter
    counts and the error rate).
    """
    norm_vect = nd.array([1, 0])

    # Folding symmetries: one along x, then n reflections back.
    sym = [(norm_vect, 0)]
    sym.extend((-norm_vect, 2**(n - i - 1)) for i in range(n))

    folding_net = NeuralNet.folding_net(sym, optimize=True)

    # Rotate into the fold frame, then threshold y - x with a step function.
    layers = [
        Layer(2, 2,
              weights=compute_rot(norm_vect),
              bias=nd.zeros(2),
              function=nd.identity),
        Layer(1, 2,
              weights=nd.array([[-1, 1]]),
              bias=nd.array([0]),
              function=echelon),
    ]
    compute_net = NeuralNet([2, 2, 1], layers)

    # Random sample: x spans the folded range, y spans just past [0, 1].
    size = 2**(n + 12)
    inputs = nd.zeros((2, size))
    inputs[0] = nd.random.uniform(-2**(n), 2**(n), size)
    inputs[1] = nd.random.uniform(-0.1, 1.1, size)

    outputs = compute_net.compute(folding_net.compute(inputs))

    def triangle(x, y):
        # Analytic triangle wave: reflect x into [0, 1), compare with y.
        x = nd.abs(x)
        x_floor = nd.floor(x)
        x = nd.where(nd.modulo(x_floor, 2), 1 - x + x_floor, x - x_floor)
        return y - x > 0

    true_outputs = triangle(inputs[0], inputs[1])
    errors = nd.sum(nd.abs(true_outputs - outputs))

    print("MODEL PROPERTY :")
    print("--------------------------------------")
    print("Number of layers :",
          folding_net.layersNumber + compute_net.layersNumber)
    print("Number of parameters :", folding_net.size() + compute_net.size())
    print("Errors :", errors, "/", size, "=", errors / size)
    print("--------------------------------------")
예제 #26
0
 def __init__(self,
              director,
              pausedScene,
              title='Warning!',
              message='Message',
              tooltip='Press here to continue'):
     """Modal message-box scene displayed over *pausedScene*."""
     Scene.__init__(self, director)
     self.bluredBackground = None  # rendered lazily elsewhere
     self.b = 1
     self.pausedScene = pausedScene

     # Build the message box centred on screen (button width 372 px).
     backgroundImage = Resources.load_image("message_box.png")
     buttonImage = Resources.load_image("message_box_button.png")
     style = MessageBox.createMessageBoxStyle(backgroundImage, buttonImage,
                                              372)
     position = (SCREEN_W / 2 - style['bg'].get_width() / 2,
                 SCREEN_H / 2 - style['bg'].get_height() / 2)
     self.messageBox = MessageBox.MessageBox(self.director, position,
                                             style, True)
     self.messageBox.button.onMouseDown = lambda: self.popScene()
     self.messageBox.title = title
     self.messageBox.message = message
     self.messageBox.tooltip = tooltip

     layer = Layer(director)
     layer.append(self.messageBox)
     self.layers.append(layer)
 def add_layer(self, num_neurons):
     """
     Add a hidden layer to the ANN.
     :param num_neurons: Number of neurons in the layer
     """
     self.hidden_layers.append(Layer(num_neurons))
예제 #28
0
 def __init__(self, layers):
     '''
     Initialize a fully connected neural network.

     :param layers: a list of ints; len(layers) is the number of layers
     and layers[i] is the number of nodes in the i-th layer.
     Builds one Layer per entry, then fully connects each adjacent pair
     of layers and registers every connection on both endpoints.
     '''
     self.connections = Connections()
     self.layers = []
     layer_count = len(layers)  # number of layers
     node_count = 0  # NOTE(review): never used after initialization
     for i in range(layer_count):
         self.layers.append(Layer(
             i, layers[i]))  # layers[i] is the node count of the i-th layer
     for layer in range(layer_count -
                        1):  # one connection block between adjacent layers
         connections = [
             Connection(upstream_node, downstream_node) for upstream_node in
             self.layers[layer].nodes  # every node of the upstream layer
             # [:-1] skips the downstream layer's last node — presumably a
             # bias node with no incoming connections; confirm in Layer.
             for downstream_node in self.layers[layer + 1].nodes[:-1]
         ]  # downstream_node at this iteration
         for conn in connections:
             self.connections.add_connection(conn)
             conn.downstream_node.append_upstream_connection(conn)
             conn.upstream_node.append_downstream_connection(conn)
예제 #29
0
	def run(self):
		"""Main loop: advance the game, draw the frame, then reset the layers."""
		while True:
			self.game.tick()
			if self.game.state == Game.STATE_RUNNING:
				self.drawBarriers()
				self.drawRunner()
				self.mergeLayers()
				self.updateLcdString()

			# Collision test between the barrier and runner bitmaps.
			self.game.gameOver(self.barrier.bitmap[1][1], self.runner.bitmap[1][1])
			self.draw()

			# Fresh layers for the next frame.
			self.canvas  = Layer()
			self.runner  = Layer()
			self.barrier = Layer()
			sleep(.03)  # ~30 ms frame delay
예제 #30
0
def main():
    """Train a tiny network on XOR and report one forward pass plus error/loss."""
    np.random.seed(10)  # reproducible weight initialization

    # XOR truth table.
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    Y = np.array([[0], [1], [1], [0]])

    network = Network()
    # Hidden ReLU layer sized via a sample input; sigmoid output layer.
    network.add(Layer(16, Activations.relu), X[1])
    network.add(Layer(1, Activations.sigmoid))

    network.fit(X, Y, 0.01, verbose=True)

    print(f"\nf(0, 1) = {network.forward()[0][0]}")
    print(f"Error: {network.calcError(X, Y)}\nLoss: {network.calcLoss(X, Y)}",
          end="")