def calc_objective_value(weight, input_shape, x_train, y_train, x_test, y_test,
                         num_classes, epochs):
    """ train and evaluate the network given specified weights
    :param:  - weight: weighting for the two objectives
             - input_shape: training data input shapes, e.g. amount of pixel
             - x_train: training data input (pictures)
             - y_train: training data groundtruth (classification number)
             - x_test: test data input (pictures)
             - y_test: test data groundtruth (classification number)
             - num_classes: amount of classes (e.g. 10 digits (0-9))
             - epochs: how long the training will last
    :return: - test_loss1: loss value on testing data (after training) from objective loss1 (e.g. Crossentropy)
             - test_loss2: loss value on testing data (after training) from objective loss2 (e.g. L1 loss)
             - L0ges: amount of nonzero weights in the dense layers of the model
             - L1ges: L1 values of all weights in the dense layers of the model
             - test_accuracy: accuracy value on the testing data
    """
    ### define and compile the model used for the training
    weight_decay = 1e-4
    model = lenet5multimodel(input_shape=input_shape,
                             weight_decay=weight_decay)
    # note: learning_rate is expected to be defined at module scope
    model.mcompile(optimizer=MAdam(multi=False,
                                   learning_rate=learning_rate,
                                   descent_weight1=weight[0],
                                   descent_weight2=weight[1]),
                   loss1='sparse_categorical_crossentropy',
                   loss2=L1lossDense(model),
                   metrics=['accuracy'])

    ### track the number of nonzero kernel entries per dense layer over the epochs
    nonzero_weights1 = []
    nonzero_weights2 = []
    nonzero_weights3 = []

    ### perform a pruning step before the training starts
    weights1 = model.get_layer('denselayer1').get_weights()
    weights2 = model.get_layer('denselayer2').get_weights()
    weights3 = model.get_layer(
        'denselayer3').get_weights()  # weights and biases of the last dense layer

    sparsified_weights1 = update_weights(weights1, 0.001)
    sparsified_weights2 = update_weights(weights2, 0.001)
    sparsified_weights3 = update_weights(weights3, 0.001)

    model.get_layer('denselayer1').set_weights(sparsified_weights1)
    model.get_layer('denselayer2').set_weights(sparsified_weights2)
    model.get_layer('denselayer3').set_weights(sparsified_weights3)
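
    # Hedged sketch (assumption): update_weights is defined elsewhere in the repository;
    # it is assumed to perform simple magnitude pruning, i.e. to zero every entry whose
    # absolute value falls below the given threshold. The local helper below is purely
    # illustrative and is never called here.
    def _update_weights_sketch(weights, threshold):
        return [np.where(np.abs(w) < threshold, 0.0, w) for w in weights]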

    nonzero_weights1.append(
        [np.count_nonzero(model.get_layer('denselayer1').get_weights()[0])])
    nonzero_weights2.append(
        [np.count_nonzero(model.get_layer('denselayer2').get_weights()[0])])
    nonzero_weights3.append(
        [np.count_nonzero(model.get_layer('denselayer3').get_weights()[0])])

    ### define a callback that repeats the pruning step after every training batch
    weight_callback_batch = LambdaCallback(on_batch_end=lambda batch, logs: [
        model.get_layer(f"{name}").set_weights(
            update_weights(model.get_layer(f"{name}").get_weights(), 0.001))
        for name in ['denselayer1', 'denselayer2', 'denselayer3']
    ])

    ### record the number of nonzero kernel entries after each epoch
    save_nonzeroweights1 = LambdaCallback(on_epoch_end=lambda epoch, logs: [
        nonzero_weights1.append([
            np.count_nonzero(model.get_layer('denselayer1').get_weights()[0])
        ])
    ])
    save_nonzeroweights2 = LambdaCallback(on_epoch_end=lambda epoch, logs: [
        nonzero_weights2.append([
            np.count_nonzero(model.get_layer('denselayer2').get_weights()[0])
        ])
    ])
    save_nonzeroweights3 = LambdaCallback(on_epoch_end=lambda epoch, logs: [
        nonzero_weights3.append([
            np.count_nonzero(model.get_layer('denselayer3').get_weights()[0])
        ])
    ])
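
    # Design note (hedged sketch): the three epoch-end LambdaCallbacks above could be
    # consolidated into a single Callback subclass that records the nonzero counts of
    # all dense layers at once. The class below is illustrative only and is not
    # registered with the training run; the import path assumes the tf.keras API.
    from tensorflow.keras.callbacks import Callback

    class _NonzeroWeightLogger(Callback):
        def __init__(self, layer_names, storage):
            super().__init__()
            self.layer_names = layer_names
            self.storage = storage  # dict mapping layer name -> list of counts

        def on_epoch_end(self, epoch, logs=None):
            for name in self.layer_names:
                kernel = self.model.get_layer(name).get_weights()[0]
                self.storage[name].append(np.count_nonzero(kernel))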

    ### start the training process
    history = model.mfit(x_train,
                         y_train,
                         epochs=epochs,
                         validation_data=(x_test, y_test),
                         callbacks=[
                             weight_callback_batch, save_nonzeroweights1,
                             save_nonzeroweights2, save_nonzeroweights3
                         ])

    ### calculate some information about the weights in the trained model
    weights1 = model.get_layer("denselayer1").get_weights()
    L1w1 = sum(np.sum(np.abs(w)) for w in weights1)
    L0w1 = np.count_nonzero(weights1[0]) + np.count_nonzero(weights1[1])

    weights2 = model.get_layer("denselayer2").get_weights()
    L1w2 = sum(np.sum(np.abs(w)) for w in weights2)
    L0w2 = np.count_nonzero(weights2[0]) + np.count_nonzero(weights2[1])

    weights3 = model.get_layer("denselayer3").get_weights()
    L1w3 = sum(np.sum(np.abs(w)) for w in weights3)
    L0w3 = np.count_nonzero(weights3[0]) + np.count_nonzero(weights3[1])

    L0ges = L0w1 + L0w2 + L0w3  # total number of nonzero dense-layer weights
    L1ges = L1w1 + L1w2 + L1w3  # total L1 norm of the dense-layer weights
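    # Note (sketch): the same totals could be computed in one pass over the layer names,
    # e.g. L0ges = sum(np.count_nonzero(w) for name in ('denselayer1', 'denselayer2',
    # 'denselayer3') for w in model.get_layer(name).get_weights()); the explicit
    # per-layer form above is kept so the individual L0/L1 terms remain visible.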
    ### evaluate the performance of the trained model on the test data
    [test_loss1, test_loss2,
     test_accuracy] = model.evaluate_multi(x_test, y_test)

    return [
        test_loss1, test_loss2, L0ges, L1ges, test_accuracy, nonzero_weights1,
        nonzero_weights2, nonzero_weights3, history
    ]
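
# Example usage (hedged sketch): the call below assumes MNIST-style data that has already
# been loaded, reshaped to `input_shape`, and normalised, and that `learning_rate` is
# defined at module level as calc_objective_value expects. Names such as x_train/x_test
# and the shape (28, 28, 1) are placeholders, not values provided by this module.
#
#     results = calc_objective_value(weight=[0.5, 0.5],
#                                    input_shape=(28, 28, 1),
#                                    x_train=x_train, y_train=y_train,
#                                    x_test=x_test, y_test=y_test,
#                                    num_classes=10, epochs=5)
#     test_loss1, test_loss2, L0ges, L1ges, test_accuracy = results[:5]
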
        # fix the random seeds for reproducibility
        seed(1)
        set_random_seed(2)
        # containers for values collected during training (in evaluation mode)
        loss1_values = []
        loss2_values = []
        loss_values = []
        val_loss_values = []
        accuracy_values = []
        val_accuracy_values = []
        nonzero_weights1 = []
        nonzero_weights2 = []
        nonzero_weights3 = []

        if opt == 'multiadam':
            lambdaconv = 1e-4
            model = lenet5multimodel(input_shape=input_shape,
                                     weight_decay=lambdaconv)
            model.mcompile(optimizer=MAdam(multi=True,
                                           split=False,
                                           learning_rate=learning_rate),
                           loss1='sparse_categorical_crossentropy',
                           loss2=L1lossDense(model),
                           metrics=['accuracy'])
            nonzero_weights1.append([
                np.count_nonzero(
                    model.get_layer('denselayer1').get_weights()[0])
            ])
            nonzero_weights2.append([
                np.count_nonzero(
                    model.get_layer('denselayer2').get_weights()[0])
            ])
            nonzero_weights3.append([