Example #1
def run_opt_sim(args, inputfile, usernamespace):
    """Run a simulation using Taguchi's optmisation process.

    Args:
        args (Namespace): Command line arguments.
        inputfile (object): File object for the input file.
        usernamespace (dict): Namespace that can be accessed by user
                in any Python code blocks in input file.
    """

    tsimstart = perf_counter()

    if args.n > 1:
        raise CmdInputError(
            'When a Taguchi optimisation is being carried out the number of model runs argument is not required'
        )

    inputfileparts = os.path.splitext(inputfile.name)

    # Default maximum number of iterations of optimisation to perform (used
    # if the stopping criterion is not achieved)
    maxiterations = 20

    # Process Taguchi code blocks in the input file; pass in ordered
    # dictionary to hold parameters to optimise
    tmp = usernamespace.copy()
    tmp.update({'optparams': OrderedDict()})
    taguchinamespace = taguchi_code_blocks(inputfile, tmp)

    # Extract dictionaries and variables containing initialisation parameters
    optparams = taguchinamespace['optparams']
    fitness = taguchinamespace['fitness']
    if 'maxiterations' in taguchinamespace:
        maxiterations = taguchinamespace['maxiterations']

    # Store initial parameter ranges
    optparamsinit = list(optparams.items())

    # Dictionary to hold history of optimised values of parameters
    optparamshist = OrderedDict((key, list()) for key in optparams)

    # Import specified fitness function
    fitness_metric = getattr(
        import_module('user_libs.optimisation_taguchi.fitness_functions'),
        fitness['name'])

    # Select OA
    OA, N, cols, k, s, t = construct_OA(optparams)

    taguchistr = '\n--- Taguchi optimisation'
    print('{} {}\n'.format(taguchistr,
                           '-' * (get_terminal_width() - 1 - len(taguchistr))))
    print(
        'Orthogonal array: {:g} experiments per iteration, {:g} parameters ({:g} will be used), {:g} levels, and strength {:g}'
        .format(N, cols, k, s, t))
    tmp = [(k, v) for k, v in optparams.items()]
    print('Parameters to optimise with ranges: {}'.format(
        str(tmp).strip('[]')))
    print('Output name(s) from model: {}'.format(fitness['args']['outputs']))
    print('Fitness function "{}" with stopping criterion {:g}'.format(
        fitness['name'], fitness['stop']))
    print('Maximum iterations: {:g}'.format(maxiterations))

    # Initialise arrays and lists to store parameters required throughout optimisation
    # Lower, central, and upper values for each parameter
    levels = np.zeros((s, k), dtype=floattype)
    # Optimal lower, central, or upper value for each parameter
    levelsopt = np.zeros(k, dtype=np.uint8)
    # Difference used to set values for levels
    levelsdiff = np.zeros(k, dtype=floattype)
    # History of fitness values from each confirmation experiment
    fitnessvalueshist = []

    iteration = 0
    while iteration < maxiterations:
        # Reset number of model runs to number of experiments
        args.n = N
        usernamespace['number_model_runs'] = N

        # Fitness values for each experiment
        fitnessvalues = []

        # Set parameter ranges and define experiments
        optparams, levels, levelsdiff = calculate_ranges_experiments(
            optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k,
            s, iteration)

        # Run model for each experiment
        # Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with
        # each model parallelised with OpenMP (CPU) or CUDA (GPU)
        if args.mpi:
            run_mpi_sim(args, inputfile, usernamespace, optparams)
        # Standard behaviour - models run serially with each model parallelised
        # with OpenMP (CPU) or CUDA (GPU)
        else:
            run_std_sim(args, inputfile, usernamespace, optparams)

        # Calculate fitness value for each experiment
        for experiment in range(1, N + 1):
            outputfile = inputfileparts[0] + str(experiment) + '.out'
            fitnessvalues.append(fitness_metric(outputfile, fitness['args']))
            os.remove(outputfile)

        taguchistr = '\n--- Taguchi optimisation, iteration {}: {} initial experiments with fitness values {}.'.format(
            iteration + 1, N, fitnessvalues)
        print('{} {}\n'.format(
            taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))

        # Calculate optimal levels from fitness values by building a response
        # table; update dictionary of parameters with optimal values
        optparams, levelsopt = calculate_optimal_levels(
            optparams, levels, levelsopt, fitnessvalues, OA, N, k)

        # Update dictionary with history of parameters with optimal values
        for key, value in optparams.items():
            optparamshist[key].append(value[0])

        # Run a confirmation experiment with optimal values
        args.n = 1
        usernamespace['number_model_runs'] = 1
        # Mixed mode MPI with OpenMP or CUDA - MPI task farm for models with
        # each model parallelised with OpenMP (CPU) or CUDA (GPU)
        if args.mpi:
            run_mpi_sim(args, inputfile, usernamespace, optparams)
        # Standard behaviour - models run serially with each model parallelised
        # with OpenMP (CPU) or CUDA (GPU)
        else:
            run_std_sim(args, inputfile, usernamespace, optparams)

        # Calculate fitness value for confirmation experiment
        outputfile = inputfileparts[0] + '.out'
        fitnessvalueshist.append(fitness_metric(outputfile, fitness['args']))

        # Rename confirmation experiment output file so that it is retained for each iteration
        os.rename(
            outputfile,
            os.path.splitext(outputfile)[0] + '_final' + str(iteration + 1) +
            '.out')

        taguchistr = '\n--- Taguchi optimisation, iteration {} completed. History of optimal parameter values {} and of fitness values {}'.format(
            iteration + 1, dict(optparamshist), fitnessvalueshist)
        print('{} {}\n'.format(
            taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
        iteration += 1

        # Stop optimisation if stopping criterion has been reached
        if fitnessvalueshist[iteration - 1] > fitness['stop']:
            taguchistr = '\n--- Taguchi optimisation stopped as fitness criteria reached: {:g} > {:g}'.format(
                fitnessvalueshist[iteration - 1], fitness['stop'])
            print('{} {}\n'.format(
                taguchistr,
                '-' * (get_terminal_width() - 1 - len(taguchistr))))
            break

        # Stop optimisation if successive fitness values are within a percentage threshold
        fitnessvaluesthres = 0.1
        if iteration > 2:
            fitnessvaluesclose = (np.abs(fitnessvalueshist[iteration - 2] -
                                         fitnessvalueshist[iteration - 1]) /
                                  fitnessvalueshist[iteration - 1]) * 100
            if fitnessvaluesclose < fitnessvaluesthres:
                taguchistr = '\n--- Taguchi optimisation stopped as successive fitness values within {}%'.format(
                    fitnessvaluesthres)
                print('{} {}\n'.format(
                    taguchistr,
                    '-' * (get_terminal_width() - 1 - len(taguchistr))))
                break

    tsimend = perf_counter()

    # Save optimisation parameters history and fitness values history to file
    opthistfile = inputfileparts[0] + '_hist.pickle'
    with open(opthistfile, 'wb') as f:
        pickle.dump(optparamshist, f)
        pickle.dump(fitnessvalueshist, f)
        pickle.dump(optparamsinit, f)

    taguchistr = '\n=== Taguchi optimisation completed in [HH:MM:SS]: {} after {} iteration(s)'.format(
        datetime.timedelta(seconds=int(tsimend - tsimstart)), iteration)
    print('{} {}\n'.format(taguchistr,
                           '=' * (get_terminal_width() - 1 - len(taguchistr))))
    print('History of optimal parameter values {} and of fitness values {}\n'.
          format(dict(optparamshist), fitnessvalueshist))
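The fitness function is looked up by name in user_libs.optimisation_taguchi.fitness_functions and is called as fitness_metric(outputfile, fitness['args']), so any metric with that call signature can be plugged in. Below is a minimal, hypothetical sketch of such a function; the HDF5 layout of the .out file (/rxs/rx1/<output>) follows gprMax conventions and is an assumption, not something shown in the example above.

import h5py
import numpy as np


def max_abs_amplitude(outputfile, args):
    """Hypothetical fitness metric: peak absolute amplitude of the requested outputs.

    Args:
        outputfile (str): Path to a model .out file (assumed to be HDF5).
        args (dict): Expects 'outputs', a list of output names, e.g. ['Ez'].
    """
    with h5py.File(outputfile, 'r') as f:
        # The receiver path below is an assumed gprMax layout.
        peaks = [np.max(np.abs(f['/rxs/rx1/' + output][:])) for output in args['outputs']]
    return float(max(peaks))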
Example #2
def run_opt_sim(args, numbermodelruns, inputfile, usernamespace):
    """Run a simulation using Taguchi's optmisation process.
        
    Args:
        args (Namespace): Command line arguments.
        numbermodelruns (int): Total number of model runs.
        inputfile (str): Name of the input file to open.
        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.
    """

    if numbermodelruns > 1:
        raise CmdInputError('When a Taguchi optimisation is being carried out the number of model runs argument is not required')
    
    inputfileparts = os.path.splitext(inputfile)

    # Default maximum number of iterations of optimisation to perform (used if the stopping criterion is not achieved)
    maxiterations = 20
    
    # Process Taguchi code blocks in the input file; pass in ordered dictionary to hold parameters to optimise
    tmp = usernamespace.copy()
    tmp.update({'optparams': OrderedDict()})
    taguchinamespace = taguchi_code_blocks(inputfile, tmp)
    
    # Extract dictionaries and variables containing initialisation parameters
    optparams = taguchinamespace['optparams']
    fitness = taguchinamespace['fitness']
    if 'maxiterations' in taguchinamespace:
        maxiterations = taguchinamespace['maxiterations']

    # Store initial parameter ranges
    optparamsinit = list(optparams.items())

    # Dictionary to hold history of optimised values of parameters
    optparamshist = OrderedDict((key, list()) for key in optparams)
    
    # Import specified fitness function
    fitness_metric = getattr(importlib.import_module('user_libs.optimisation_taguchi_fitness'), fitness['name'])

    # Select OA
    OA, N, cols, k, s, t = construct_OA(optparams)
    print('\n{}\n\nTaguchi optimisation: orthogonal array with {} experiments, {} parameters ({} used), {} levels, and strength {} will be used.'.format(68*'*', N, cols, k, s, t))
    
    # Initialise arrays and lists to store parameters required throughout optimisation
    # Lower, central, and upper values for each parameter
    levels = np.zeros((s, k), dtype=floattype)
    # Optimal lower, central, or upper value for each parameter
    levelsopt = np.zeros(k, dtype=floattype)
    # Difference used to set values for levels
    levelsdiff = np.zeros(k, dtype=floattype)
    # History of fitness values from each confirmation experiment
    fitnessvalueshist = []

    iteration = 0
    while iteration < maxiterations:
        # Reset number of model runs to number of experiments
        numbermodelruns = N
        usernamespace['number_model_runs'] = numbermodelruns
        
        # Fitness values for each experiment
        fitnessvalues = []

        # Set parameter ranges and define experiments
        optparams, levels, levelsdiff = calculate_ranges_experiments(optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k, s, iteration)

        # Run model for each experiment
        if args.mpi: # Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
            run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams)
        else: # Standard behaviour - models run serially with each model parallelised with OpenMP
            run_std_sim(args, numbermodelruns, inputfile, usernamespace, optparams)

        # Calculate fitness value for each experiment
        for experiment in range(1, numbermodelruns + 1):
            outputfile = inputfileparts[0] + str(experiment) + '.out'
            fitnessvalues.append(fitness_metric(outputfile, fitness['args']))
            os.remove(outputfile)

        print('\nTaguchi optimisation, iteration {}: {} initial experiments with fitness values {}.'.format(iteration + 1, numbermodelruns, fitnessvalues))
        
        # Calculate optimal levels from fitness values by building a response table; update dictionary of parameters with optimal values
        optparams, levelsopt = calculate_optimal_levels(optparams, levels, levelsopt, fitnessvalues, OA, N, k)

        # Update dictionary with history of parameters with optimal values
        for key, value in optparams.items():
            optparamshist[key].append(value[0])

        # Run a confirmation experiment with optimal values
        numbermodelruns = 1
        usernamespace['number_model_runs'] = numbermodelruns
        run_std_sim(args, numbermodelruns, inputfile, usernamespace, optparams)

        # Calculate fitness value for confirmation experiment
        outputfile = inputfileparts[0] + '.out'
        fitnessvalueshist.append(fitness_metric(outputfile, fitness['args']))

        # Rename confirmation experiment output file so that it is retained for each iteration
        os.rename(outputfile, os.path.splitext(outputfile)[0] + '_final' + str(iteration + 1) + '.out')
        
        print('\nTaguchi optimisation, iteration {} completed. History of optimal parameter values {} and of fitness values {}'.format(iteration + 1, dict(optparamshist), fitnessvalueshist))
        iteration += 1

        # Stop optimisation if stopping criterion has been reached
        if fitnessvalueshist[iteration - 1] > fitness['stop']:
            print('\nTaguchi optimisation stopped as fitness criteria reached')
            break

        # Stop optimisation if successive fitness values are within a percentage threshold
        if iteration > 2:
            fitnessvaluesclose = (np.abs(fitnessvalueshist[iteration - 2] - fitnessvalueshist[iteration - 1]) / fitnessvalueshist[iteration - 1]) * 100
            fitnessvaluesthres = 0.1
            if fitnessvaluesclose < fitnessvaluesthres:
                print('\nTaguchi optimisation stopped as successive fitness values within {}%'.format(fitnessvaluesthres))
                break

    # Save optimisation parameters history and fitness values history to file
    opthistfile = inputfileparts[0] + '_hist'
    np.savez(opthistfile, dict(optparamshist), fitnessvalueshist)

    print('\n{}\nTaguchi optimisation completed after {} iteration(s).\nHistory of optimal parameter values {} and of fitness values {}\n{}\n'.format(68*'*', iteration, dict(optparamshist), fitnessvalueshist, 68*'*'))

    # Plot the history of fitness values and each optimised parameter values for the optimisation
    plot_optimisation_history(fitnessvalueshist, optparamshist, optparamsinit)
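Example #2 stores the histories with np.savez, passing the dictionary positionally, so it ends up under the default name arr_0 as a zero-dimensional object array. A sketch of reading it back (the file name is hypothetical; allow_pickle=True is needed because the dictionary is stored as a pickled object array):

import numpy as np

# Positional arguments to np.savez are saved as arr_0, arr_1, ...
with np.load('mymodel_hist.npz', allow_pickle=True) as data:
    optparamshist = data['arr_0'].item()  # recover the dict from the 0-d object array
    fitnessvalueshist = data['arr_1']     # confirmation-experiment fitness values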
Example #3
def run_opt_sim(args, numbermodelruns, inputfile, usernamespace):
    """Run a simulation using Taguchi's optmisation process.

    Args:
        args (Namespace): Command line arguments.
        numbermodelruns (int): Total number of model runs.
        inputfile (str): Name of the input file to open.
        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.
    """

    tsimstart = perf_counter()

    if numbermodelruns > 1:
        raise CmdInputError('When a Taguchi optimisation is being carried out the number of model runs argument is not required')

    inputfileparts = os.path.splitext(inputfile)

    # Default maximum number of iterations of optimisation to perform (used if the stopping criterion is not achieved)
    maxiterations = 20

    # Process Taguchi code blocks in the input file; pass in ordered dictionary to hold parameters to optimise
    tmp = usernamespace.copy()
    tmp.update({'optparams': OrderedDict()})
    taguchinamespace = taguchi_code_blocks(inputfile, tmp)

    # Extract dictionaries and variables containing initialisation parameters
    optparams = taguchinamespace['optparams']
    fitness = taguchinamespace['fitness']
    if 'maxiterations' in taguchinamespace:
        maxiterations = taguchinamespace['maxiterations']

    # Store initial parameter ranges
    optparamsinit = list(optparams.items())

    # Dictionary to hold history of optimised values of parameters
    optparamshist = OrderedDict((key, list()) for key in optparams)

    # Import specified fitness function
    fitness_metric = getattr(import_module('user_libs.optimisation_taguchi.fitness_functions'), fitness['name'])

    # Select OA
    OA, N, cols, k, s, t = construct_OA(optparams)

    taguchistr = '\n--- Taguchi optimisation'
    print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
    print('Orthogonal array: {:g} experiments per iteration, {:g} parameters ({:g} will be used), {:g} levels, and strength {:g}'.format(N, cols, k, s, t))
    tmp = [(k, v) for k, v in optparams.items()]
    print('Parameters to optimise with ranges: {}'.format(str(tmp).strip('[]')))
    print('Output name(s) from model: {}'.format(fitness['args']['outputs']))
    print('Fitness function "{}" with stopping criterion {:g}'.format(fitness['name'], fitness['stop']))
    print('Maximum iterations: {:g}'.format(maxiterations))

    # Initialise arrays and lists to store parameters required throughout optimisation
    # Lower, central, and upper values for each parameter
    levels = np.zeros((s, k), dtype=floattype)
    # Optimal lower, central, or upper value for each parameter
    levelsopt = np.zeros(k, dtype=np.uint8)
    # Difference used to set values for levels
    levelsdiff = np.zeros(k, dtype=floattype)
    # History of fitness values from each confirmation experiment
    fitnessvalueshist = []

    iteration = 0
    while iteration < maxiterations:
        # Reset number of model runs to number of experiments
        numbermodelruns = N
        usernamespace['number_model_runs'] = numbermodelruns

        # Fitness values for each experiment
        fitnessvalues = []

        # Set parameter ranges and define experiments
        optparams, levels, levelsdiff = calculate_ranges_experiments(optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k, s, iteration)

        # Run model for each experiment
        if args.mpi:  # Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
            run_mpi_sim(args, numbermodelruns, inputfile, usernamespace, optparams)
        else:  # Standard behaviour - models run serially with each model parallelised with OpenMP
            run_std_sim(args, numbermodelruns, inputfile, usernamespace, optparams)

        # Calculate fitness value for each experiment
        for experiment in range(1, numbermodelruns + 1):
            outputfile = inputfileparts[0] + str(experiment) + '.out'
            fitnessvalues.append(fitness_metric(outputfile, fitness['args']))
            os.remove(outputfile)

        taguchistr = '\n--- Taguchi optimisation, iteration {}: {} initial experiments with fitness values {}.'.format(iteration + 1, numbermodelruns, fitnessvalues)
        print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))

        # Calculate optimal levels from fitness values by building a response table; update dictionary of parameters with optimal values
        optparams, levelsopt = calculate_optimal_levels(optparams, levels, levelsopt, fitnessvalues, OA, N, k)

        # Update dictionary with history of parameters with optimal values
        for key, value in optparams.items():
            optparamshist[key].append(value[0])

        # Run a confirmation experiment with optimal values
        numbermodelruns = 1
        usernamespace['number_model_runs'] = numbermodelruns
        run_std_sim(args, numbermodelruns, inputfile, usernamespace, optparams)

        # Calculate fitness value for confirmation experiment
        outputfile = inputfileparts[0] + '.out'
        fitnessvalueshist.append(fitness_metric(outputfile, fitness['args']))

        # Rename confirmation experiment output file so that it is retained for each iteration
        os.rename(outputfile, os.path.splitext(outputfile)[0] + '_final' + str(iteration + 1) + '.out')

        taguchistr = '\n--- Taguchi optimisation, iteration {} completed. History of optimal parameter values {} and of fitness values {}'.format(iteration + 1, dict(optparamshist), fitnessvalueshist)
        print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
        iteration += 1

        # Stop optimisation if stopping criterion has been reached
        if fitnessvalueshist[iteration - 1] > fitness['stop']:
            taguchistr = '\n--- Taguchi optimisation stopped as fitness criteria reached: {:g} > {:g}'.format(fitnessvalueshist[iteration - 1], fitness['stop'])
            print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
            break

        # Stop optimisation if successive fitness values are within a percentage threshold
        if iteration > 2:
            fitnessvaluesclose = (np.abs(fitnessvalueshist[iteration - 2] - fitnessvalueshist[iteration - 1]) / fitnessvalueshist[iteration - 1]) * 100
            fitnessvaluesthres = 0.1
            if fitnessvaluesclose < fitnessvaluesthres:
                taguchistr = '\n--- Taguchi optimisation stopped as successive fitness values within {}%'.format(fitnessvaluesthres)
                print('{} {}\n'.format(taguchistr, '-' * (get_terminal_width() - 1 - len(taguchistr))))
                break

    tsimend = perf_counter()

    # Save optimisation parameters history and fitness values history to file
    opthistfile = inputfileparts[0] + '_hist.pickle'
    with open(opthistfile, 'wb') as f:
        pickle.dump(optparamshist, f)
        pickle.dump(fitnessvalueshist, f)
        pickle.dump(optparamsinit, f)

    taguchistr = '\n=== Taguchi optimisation completed in [HH:MM:SS]: {} after {} iteration(s)'.format(datetime.timedelta(seconds=int(tsimend - tsimstart)), iteration)
    print('{} {}\n'.format(taguchistr, '=' * (get_terminal_width() - 1 - len(taguchistr))))
    print('History of optimal parameter values {} and of fitness values {}\n'.format(dict(optparamshist), fitnessvalueshist))
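Examples #1 and #3 pickle three objects into the same file, so they must be read back in the order they were dumped. A minimal sketch (the file name is hypothetical):

import pickle

# Read back in dump order: parameter history, fitness history, initial ranges.
with open('mymodel_hist.pickle', 'rb') as f:
    optparamshist = pickle.load(f)
    fitnessvalueshist = pickle.load(f)
    optparamsinit = pickle.load(f)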
Example #4
def run_opt_sim(args, numbermodelruns, inputfile, usernamespace):
    """Run a simulation using Taguchi's optmisation process.
        
    Args:
        args (Namespace): Command line arguments.
        numbermodelruns (int): Total number of model runs.
        inputfile (str): Name of the input file to open.
        usernamespace (dict): Namespace that can be accessed by user in any Python code blocks in input file.
    """

    if numbermodelruns > 1:
        raise CmdInputError(
            'When a Taguchi optimisation is being carried out the number of model runs argument is not required'
        )

    inputfileparts = os.path.splitext(inputfile)

    # Default maximum number of iterations of optimisation to perform (used if the stopping criterion is not achieved)
    maxiterations = 20

    # Process Taguchi code blocks in the input file; pass in ordered dictionary to hold parameters to optimise
    tmp = usernamespace.copy()
    tmp.update({'optparams': OrderedDict()})
    taguchinamespace = taguchi_code_blocks(inputfile, tmp)

    # Extract dictionaries and variables containing initialisation parameters
    optparams = taguchinamespace['optparams']
    fitness = taguchinamespace['fitness']
    if 'maxiterations' in taguchinamespace:
        maxiterations = taguchinamespace['maxiterations']

    # Store initial parameter ranges
    optparamsinit = list(optparams.items())

    # Dictionary to hold history of optimised values of parameters
    optparamshist = OrderedDict((key, list()) for key in optparams)

    # Import specified fitness function
    fitness_metric = getattr(
        importlib.import_module('user_libs.optimisation_taguchi_fitness'),
        fitness['name'])

    # Select OA
    OA, N, cols, k, s, t = construct_OA(optparams)
    print(
        '\n{}\n\nTaguchi optimisation: orthogonal array with {} experiments, {} parameters ({} used), {} levels, and strength {} will be used.'
        .format(68 * '*', N, cols, k, s, t))

    # Initialise arrays and lists to store parameters required throughout optimisation
    # Lower, central, and upper values for each parameter
    levels = np.zeros((s, k), dtype=floattype)
    # Optimal lower, central, or upper value for each parameter
    levelsopt = np.zeros(k, dtype=floattype)
    # Difference used to set values for levels
    levelsdiff = np.zeros(k, dtype=floattype)
    # History of fitness values from each confirmation experiment
    fitnessvalueshist = []

    iteration = 0
    while iteration < maxiterations:
        # Reset number of model runs to number of experiments
        numbermodelruns = N
        usernamespace['number_model_runs'] = numbermodelruns

        # Fitness values for each experiment
        fitnessvalues = []

        # Set parameter ranges and define experiments
        optparams, levels, levelsdiff = calculate_ranges_experiments(
            optparams, optparamsinit, levels, levelsopt, levelsdiff, OA, N, k,
            s, iteration)

        # Run model for each experiment
        if args.mpi:  # Mixed mode MPI/OpenMP - MPI task farm for models with each model parallelised with OpenMP
            run_mpi_sim(args, numbermodelruns, inputfile, usernamespace,
                        optparams)
        else:  # Standard behaviour - models run serially with each model parallelised with OpenMP
            run_std_sim(args, numbermodelruns, inputfile, usernamespace,
                        optparams)

        # Calculate fitness value for each experiment
        for experiment in range(1, numbermodelruns + 1):
            outputfile = inputfileparts[0] + str(experiment) + '.out'
            fitnessvalues.append(fitness_metric(outputfile, fitness['args']))
            os.remove(outputfile)

        print(
            '\nTaguchi optimisation, iteration {}: {} initial experiments with fitness values {}.'
            .format(iteration + 1, numbermodelruns, fitnessvalues))

        # Calculate optimal levels from fitness values by building a response table; update dictionary of parameters with optimal values
        optparams, levelsopt = calculate_optimal_levels(
            optparams, levels, levelsopt, fitnessvalues, OA, N, k)

        # Update dictionary with history of parameters with optimal values
        for key, value in optparams.items():
            optparamshist[key].append(value[0])

        # Run a confirmation experiment with optimal values
        numbermodelruns = 1
        usernamespace['number_model_runs'] = numbermodelruns
        run_std_sim(args, numbermodelruns, inputfile, usernamespace, optparams)

        # Calculate fitness value for confirmation experiment
        outputfile = inputfileparts[0] + '.out'
        fitnessvalueshist.append(fitness_metric(outputfile, fitness['args']))

        # Rename confirmation experiment output file so that it is retained for each iteration
        os.rename(
            outputfile,
            os.path.splitext(outputfile)[0] + '_final' + str(iteration + 1) +
            '.out')

        print(
            '\nTaguchi optimisation, iteration {} completed. History of optimal parameter values {} and of fitness values {}'
            .format(iteration + 1, dict(optparamshist), fitnessvalueshist))
        iteration += 1

        # Stop optimisation if stopping criterion has been reached
        if fitnessvalueshist[iteration - 1] > fitness['stop']:
            print('\nTaguchi optimisation stopped as fitness criteria reached')
            break

        # Stop optimisation if successive fitness values are within a percentage threshold
        if iteration > 2:
            fitnessvaluesclose = (np.abs(fitnessvalueshist[iteration - 2] -
                                         fitnessvalueshist[iteration - 1]) /
                                  fitnessvalueshist[iteration - 1]) * 100
            fitnessvaluesthres = 0.1
            if fitnessvaluesclose < fitnessvaluesthres:
                print(
                    '\nTaguchi optimisation stopped as successive fitness values within {}%'
                    .format(fitnessvaluesthres))
                break

    # Save optimisation parameters history and fitness values history to file
    opthistfile = inputfileparts[0] + '_hist'
    np.savez(opthistfile, dict(optparamshist), fitnessvalueshist)

    print(
        '\n{}\nTaguchi optimisation completed after {} iteration(s).\nHistory of optimal parameter values {} and of fitness values {}\n{}\n'
        .format(68 * '*', iteration, dict(optparamshist), fitnessvalueshist,
                68 * '*'))

    # Plot the history of fitness values and each optimised parameter values for the optimisation
    plot_optimisation_history(fitnessvalueshist, optparamshist, optparamsinit)
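All four variants extract the same objects from taguchinamespace: an optparams ordered dictionary mapping each parameter to its search range, a fitness dictionary with the metric name, stopping value, and arguments (including the model output names), and an optional maxiterations. The sketch below shows what the Taguchi code block in the input file is expected to assign; the parameter name, range, and values are hypothetical.

from collections import OrderedDict

# Hypothetical Taguchi block contents. run_opt_sim already seeds the namespace
# with an empty OrderedDict called 'optparams', so inside the input file only
# the assignments below are needed; the import makes this sketch self-contained.
optparams = OrderedDict()
optparams['resistance'] = [0.1, 1000]      # search range for one parameter (hypothetical)

fitness = {'name': 'max_abs_amplitude',    # function name looked up in the fitness module
           'stop': 50,                     # stop once the confirmation fitness exceeds this
           'args': {'outputs': ['Ez']}}    # output name(s) passed to the fitness function

maxiterations = 10                         # optional; the default is 20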