def run_k_fold_cv(model_path, validation_config, train_config, cnn_config,
                  df_dataset, target, data_config):
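    """Run k-fold cross-validation over a family of CNN architectures.

    When cnn_config.conv_base is None, the i-th architecture stacks i extra
    convolutional-pooling layers on a base ConvNet; otherwise a pre-trained
    conv base is used, via feature extraction or end-to-end training with
    optional fine-tuning. For every fold, the train/validation MSE at the
    epoch with the lowest validation loss is recorded, and the
    per-architecture means are saved as CSVs under model_path.
    """
    # project-specific helper; presumably configures memory limits up front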
    sms.set_memory_size()
    # number of CNN architectures to evaluate; the i-th architecture adds
    # i extra stacked convolutional-pooling layers (i = 0 .. 4)
    max_n_extra_layers = 5
    # batch size for generators
    batch_size = train_config.batch_size

    if train_config.outlier_detect:
        df_dataset = remove_outliers(df_dataset, target)

    if data_config.animal_weight:
        # animal weights converted from kilograms to metric tons
        normalized_animal_weights = df_dataset['Weight'].to_numpy() / 1000

    # convert the dataset to a NumPy array so sklearn's KFold can split it
    samples = df_dataset.to_numpy()

    # k-fold cross-validation with k = validation_config.nr_splits folds
    kf = KFold(n_splits=validation_config.nr_splits,
               shuffle=validation_config.shuffle)
    mean_tr_mse_vec = []
    mean_vl_mse_vec = []
    cnn_counter = 0

    if train_config.data_augmentation:
        # rescale pixel values to the 0-1 range and apply random augmentations
        train_data_gen = ImageDataGenerator(rescale=1. / 255,
                                            rotation_range=40,
                                            width_shift_range=0.2,
                                            height_shift_range=0.2,
                                            shear_range=0.2,
                                            zoom_range=0.2,
                                            horizontal_flip=True)
    else:
        train_data_gen = ImageDataGenerator(rescale=1. / 255)

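    # validation images are only rescaled, never augmented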
    val_data_gen = ImageDataGenerator(rescale=1. / 255)

    # loop over cnn architectures
    for i in range(max_n_extra_layers):
        k_fold_counter = 1
        tr_mse_vec = []
        vl_mse_vec = []
        # create current cnn folder
        cnn_path = os.path.join(model_path, f'cnn_{cnn_counter}')
        os.makedirs(cnn_path, exist_ok=True)

        # loop over each fold
        for train, validation in kf.split(samples):
            print('--------------------------------------------')
            print(f'CNN ID: {cnn_counter} -- K-Fold: {k_fold_counter}')
            print('--------------------------------------------')
            # create k-fold cross-validation folder
            k_fold_path = os.path.join(cnn_path, f'K_{k_fold_counter}')
            os.makedirs(k_fold_path, exist_ok=True)

            # split dataset into training and validation folds; KFold yields
            # positional indices, so use iloc (loc breaks once outlier
            # removal leaves gaps in the index)
            df_train = df_dataset.iloc[train]
            df_validation = df_dataset.iloc[validation]

            if data_config.animal_weight:
                # per-fold animal weights as column vectors, computed up
                # front so the prediction step below can use them regardless
                # of which training branch runs
                train_animal_weights = np.reshape(
                    normalized_animal_weights[train], (len(train), 1))
                val_animal_weights = np.reshape(
                    normalized_animal_weights[validation],
                    (len(validation), 1))
            else:
                train_animal_weights = None
                val_animal_weights = None
            # training image generator
            train_gen = train_data_gen.flow_from_dataframe(
                dataframe=df_train,
                x_col='IMAGE',
                y_col=target,
                target_size=(180, 240),
                class_mode='raw',
                batch_size=batch_size)

            # validation image generator
            val_gen = val_data_gen.flow_from_dataframe(dataframe=df_validation,
                                                       x_col='IMAGE',
                                                       y_col=target,
                                                       target_size=(180, 240),
                                                       class_mode='raw',
                                                       batch_size=batch_size)

            if cnn_config.conv_base is None:
                # create the base cnn model
                conv_net = ConvNet()
                # add additional layers
                conv_net.add_conv_layers(i)
                conv_net.add_dense_layers(
                    drop_out=cnn_config.drop_out,
                    nr_hidden_neurons=cnn_config.nr_hidden_neurons,
                    regularizer=cnn_config.regularizers)
                conv_net.configure(cnn_config.learning_rate)
                conv_net.train(
                    train_gen, val_gen,
                    train_config.steps_per_epoch(len(train)),
                    train_config.nr_epochs,
                    validation_config.validation_steps(len(validation)))
            else:
                conv_net = ConvNet(cnn_config.conv_base,
                                   train_config.data_augmentation,
                                   train_config.fine_tuning)

                if not train_config.data_augmentation:
                    train_features, train_labels = conv_net.extract_features(
                        train_gen, batch_size, len(train))
                    validation_features, validation_labels = conv_net.extract_features(
                        val_gen, batch_size, len(validation))
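                    # flatten the conv-base feature maps for the dense head;
                    # 5 * 7 * 512 assumes a VGG16-style base on 180x240 inputs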
                    train_features = np.reshape(train_features,
                                                (len(train), 5 * 7 * 512))
                    validation_features = np.reshape(
                        validation_features, (len(validation), 5 * 7 * 512))
                    if data_config.animal_weight:
                        # append the animal weight as an extra input feature
                        train_features = np.concatenate(
                            [train_features, train_animal_weights], axis=1)
                        validation_features = np.concatenate(
                            [validation_features, val_animal_weights], axis=1)

                    conv_net.add_dense_layers(
                        drop_out=cnn_config.drop_out,
                        nr_hidden_neurons=cnn_config.nr_hidden_neurons,
                        regularizer=cnn_config.regularizers)
                    conv_net.configure(cnn_config.learning_rate)
                    conv_net.train(train_features, train_labels,
                                   validation_features, validation_labels,
                                   train_config.nr_epochs, batch_size)
                else:
                    if not train_config.fine_tuning:
                        conv_net.add_dense_layers(
                            drop_out=cnn_config.drop_out,
                            nr_hidden_neurons=cnn_config.nr_hidden_neurons,
                            regularizer=cnn_config.regularizers)

                    conv_net.configure(cnn_config.learning_rate)
                    conv_net.train(
                        train_gen, val_gen,
                        train_config.steps_per_epoch(len(train)),
                        train_config.nr_epochs,
                        validation_config.validation_steps(len(validation)),
                        batch_size)

            # val_animal_weights is None when animal weights are not in use
            predictions = predict(val_gen, conv_net, len(validation),
                                  batch_size, val_animal_weights,
                                  train_config.data_augmentation)

            df_outputs = pd.DataFrame.from_records(
                np.array(predictions).T, columns=['PREDICTION', 'TARGET'])
            df_outputs.to_csv(k_fold_path + '/cnn_outputs_vs_val_targets.csv')
            train_loss = conv_net.history.history['mean_squared_error']
            val_loss = conv_net.history.history['val_mean_squared_error']
            # save training loss
            save_loss_vec(k_fold_path, '/training_loss_vec.csv', train_loss)
            # save validation loss
            save_loss_vec(k_fold_path, '/validation_loss_vec.csv', val_loss)
            # save model summary
            save_model_summary(cnn_path, conv_net.model)
            # save architecture
            save_model(cnn_path, conv_net.model)
            plot_mse(train_loss, val_loss, k_fold_path)

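            # keep the train/val MSE from the epoch with the lowest
            # validation loss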
            min_val_mse_id = np.argmin(val_loss)
            tr_mse_vec.append(train_loss[min_val_mse_id])
            vl_mse_vec.append(val_loss[min_val_mse_id])
            k_fold_counter += 1

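        # average the best-epoch MSEs across folds for this architecture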
        mean_tr_mse = np.array(tr_mse_vec).mean()
        mean_vl_mse = np.array(vl_mse_vec).mean()
        mean_tr_mse_vec.append(mean_tr_mse)
        mean_vl_mse_vec.append(mean_vl_mse)
        cnn_counter += 1

    save_loss_vec(model_path, '/mean_train_mse_vec.csv', mean_tr_mse_vec)
    save_loss_vec(model_path, '/mean_val_mse_vec.csv', mean_vl_mse_vec)
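
# Minimal usage sketch (illustrative only: the concrete config objects, file
# names, and the target column are assumptions, not part of this module; the
# attributes shown are the ones run_k_fold_cv actually reads):
#
#     df = pd.read_csv('labeled_images.csv')  # 'IMAGE', target and, optionally,
#                                             # 'Weight' columns are expected
#     run_k_fold_cv(model_path='models/run_01',
#                   validation_config=val_cfg,  # .nr_splits, .shuffle,
#                                               # .validation_steps(n)
#                   train_config=train_cfg,     # .batch_size, .nr_epochs,
#                                               # .steps_per_epoch(n), flags
#                   cnn_config=cnn_cfg,         # .conv_base, .drop_out,
#                                               # .nr_hidden_neurons, ...
#                   df_dataset=df,
#                   target='TARGET',
#                   data_config=data_cfg)       # .animal_weight flag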