def perform_experiments(n_runs=10,
                        n_points=1000,
                        n_epochs=200,
                        run_best=False,
                        verbose=False):
    """
    Perform experiments for 5 different neural network architectures and losses.
    
    To run all the experiments, call this function with the default parameters.

    :param n_runs: number of times each experiment is repeated
    :param n_points: number of training and testing data points used in the experiments
    :param n_epochs: number of epochs each architecture is trained for
    :param run_best: If True, only the best architecture (Siamese network with auxiliary loss) is trained
    :param verbose: If True, print the training and validation loss every epoch
    :returns: dictionary containing the training history (training/validation loss and accuracy) of every architecture
    """
    history_mlp_net = []
    history_conv_net = []
    history_conv_net_aux = []
    history_siamese = []
    history_siamese_aux = []

    for n_run in range(n_runs):
        data_set = generate_pair_sets(n_points)
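        # data_set is a tuple: (train_input, train_target, train_classes,
        #                       test_input, test_target, test_classes)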
        MAX_VAL = 255.0

        TRAIN_INPUT = Variable(data_set[0]) / MAX_VAL
        TRAIN_TARGET = Variable(data_set[1])
        TRAIN_CLASSES = Variable(data_set[2])

        TEST_INPUT = Variable(data_set[3]) / MAX_VAL
        TEST_TARGET = Variable(data_set[4])
        TEST_CLASSES = Variable(data_set[5])

        if not run_best:
            ##############################################################################
            # Create Multilayer Perceptron Network with ReLU activations
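            # in_features = 392 corresponds to a flattened image pair (2 * 14 * 14 pixels)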
            mlp_net = MLPNet(in_features=392,
                             out_features=2,
                             n_layers=3,
                             n_hidden=16)

            # Set train flag on (for dropouts)
            mlp_net.train()

            # Train the model and append the history
            history_mlp_net.append(
                train_model(mlp_net,
                            train_input=TRAIN_INPUT.view((n_points, -1)),
                            train_target=TRAIN_TARGET,
                            val_input=TEST_INPUT.view((n_points, -1)),
                            val_target=TEST_TARGET,
                            n_epochs=n_epochs,
                            verbose=verbose))

            # Set train flag to False for getting accuracies on validation data
            mlp_net.eval()
            acc = get_accuracy(mlp_net, TEST_INPUT.view(
                (n_points, -1)), TEST_TARGET) * 100.0
            print("Run: {}, Mlp_net Test Accuracy: {:.3f} %".format(
                n_run, acc))

            ##############################################################################
            # Create ConvNet without auxiliary outputs
            conv_net = ConvNet(n_classes=2, n_layers=3, n_features=16)

            # Set train flag on (for dropouts)
            conv_net.train()

            # Train the model and append the history
            history_conv_net.append(
                train_model(conv_net,
                            train_input=TRAIN_INPUT,
                            train_target=TRAIN_TARGET,
                            val_input=TEST_INPUT,
                            val_target=TEST_TARGET,
                            n_epochs=n_epochs,
                            verbose=verbose))

            # Set train flag to False for getting accuracies on validation data
            conv_net.eval()
            acc = get_accuracy(conv_net, TEST_INPUT, TEST_TARGET) * 100.0
            print("Run: {}, ConvNet Test Accuracy: {:.3f} %".format(
                n_run, acc))

            ##############################################################################
            # Create ConvNet with auxiliary outputs
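            # n_classes = 22 presumably covers the 2 comparison outputs plus
            # 2 x 10 auxiliary digit-class outputs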
            conv_net_aux = ConvNet(n_classes=22, n_layers=3, n_features=16)

            # Set train flag on (for dropouts)
            conv_net_aux.train()

            # Train the model and append the history
            history_conv_net_aux.append(
                train_model(conv_net_aux,
                            train_input=TRAIN_INPUT,
                            train_target=TRAIN_TARGET,
                            aux_param=1.0,
                            train_classes=TRAIN_CLASSES,
                            val_input=TEST_INPUT,
                            val_target=TEST_TARGET,
                            val_classes=TEST_CLASSES,
                            n_epochs=n_epochs,
                            verbose=verbose))

            # Set train flag to False for getting accuracies on validation data
            conv_net_aux.eval()
            acc = get_accuracy(conv_net_aux, TEST_INPUT, TEST_TARGET) * 100.0
            print("Run: {}, ConvNet Auxilary Test Accuracy: {:.3f} %".format(
                n_run, acc))

            ##############################################################################
            # Create Siamese Network without auxiliary outputs
            conv_net = BlockConvNet()
            conv_net_siamese = DeepSiameseNet(conv_net)

            # Set train flag on (for dropouts)
            conv_net.train()
            conv_net_siamese.train()

            # Train the model and append the history
            history_siamese.append(
                train_model(conv_net_siamese,
                            train_input=TRAIN_INPUT,
                            train_target=TRAIN_TARGET,
                            val_input=TEST_INPUT,
                            val_target=TEST_TARGET,
                            n_epochs=n_epochs,
                            verbose=verbose))

            # Set train flag to False for getting accuracies on validation data
            conv_net.eval()
            conv_net_siamese.eval()

            acc = get_accuracy(conv_net_siamese, TEST_INPUT,
                               TEST_TARGET) * 100.0
            print("Run: {}, Siamese Test Accuracy: {:.3f} %".format(
                n_run, acc))

        ##############################################################################
        # Create Siamese Network with auxiliary outputs
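        # aux_param = 3.0 below presumably weights the auxiliary digit-classification
        # loss against the main comparison loss inside train_model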
        conv_net = BlockConvNet()
        conv_net_siamese_aux = DeepSiameseNet(conv_net)

        # Set train flag on (for dropouts)
        conv_net.train()
        conv_net_siamese_aux.train()

        # Train the model and append the history
        history_siamese_aux.append(
            train_model(conv_net_siamese_aux,
                        train_input=TRAIN_INPUT,
                        train_target=TRAIN_TARGET,
                        train_classes=TRAIN_CLASSES,
                        val_input=TEST_INPUT,
                        val_target=TEST_TARGET,
                        val_classes=TEST_CLASSES,
                        aux_param=3.0,
                        n_epochs=n_epochs,
                        verbose=verbose))

        # Set train flag to False for getting accuracies on validation data
        conv_net.eval()
        conv_net_siamese_aux.eval()

        acc = get_accuracy(conv_net_siamese_aux, TEST_INPUT,
                           TEST_TARGET) * 100.0
        print("Run: {}, Siamese Auxilary Test Accuracy: {:.3f} %".format(
            n_run, acc))
        ##############################################################################

    return {
        'history_mlp_net': history_mlp_net,
        'history_conv_net': history_conv_net,
        'history_conv_net_aux': history_conv_net_aux,
        'history_siamese': history_siamese,
        'history_siamese_aux': history_siamese_aux
    }
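

# Minimal usage sketch: run a shortened version of the experiments above and
# report how many runs were recorded for each architecture. The structure of the
# individual history entries depends on what train_model returns, so only the
# list lengths are inspected here.
if __name__ == '__main__':
    results = perform_experiments(n_runs=2, n_epochs=25, run_best=True)
    for name, histories in results.items():
        print("{}: {} run(s) recorded".format(name, len(histories)))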
                                   frame_step,
                                   num_mel_bins=40,
                                   lower_frequency=20,
                                   upper_frequency=4000,
                                   num_coefficients=10,
                                   mfcc=True)
strides = [2, 1]

train_ds = signal_generator.make_dataset(train_data, True)
test_ds = signal_generator.make_dataset(test_data, False)
val_ds = signal_generator.make_dataset(val_data, False)

# Train the Multilayer Perceptron
mlp = MLP()
mlp.train(train_ds, val_ds, 20)
filename = 'Model_mlp'
mlp._model().save(filename)

# Train the Convolutional NN
cnn = ConvNet(strides)
cnn.train(train_ds, val_ds, 20)
filename = 'Model_cnn'
cnn._model().save(filename)

# Train the Depthwise-Separable Convolutional NN (DS-CNN)
dscnn = DS_CNN(strides)
dscnn.train(train_ds, val_ds, 20)
filename = 'Model_dscnn'
dscnn._model().save(filename)
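
# A hedged sketch of reloading the saved models for a quick sanity check. It
# assumes `_model()` returns a tf.keras.Model, so the directories written by
# `.save(...)` above are standard Keras SavedModel folders; adjust the names if
# the wrapper classes save their models differently.
import tensorflow as tf

restored_models = {name: tf.keras.models.load_model(name)
                   for name in ('Model_mlp', 'Model_cnn', 'Model_dscnn')}
for name, model in restored_models.items():
    print('Reloaded {}'.format(name))
    model.summary()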