def grafica_trayectoria(t0, tf, q0, qf, n=100):
    """Plot the position, velocity and acceleration profiles of a trajectory.

    Samples the profile via ``trayectoria``, draws the three curves in a 1x3
    figure, marks the blend time ``tb`` and its mirrored counterpart with
    dashed cursors, and returns the sampled arrays.

    :param t0: start time
    :param tf: final time
    :param q0: initial position
    :param qf: final position
    :param n: number of samples
    :return: tuple ``(ts, qs, q̇s, q̈s)`` of sampled time/profile arrays
    """
    from matplotlib.pyplot import subplots, style
    style.use("ggplot")

    # Sample the trajectory profile (position, velocity, acceleration, blend time).
    ts, qs, q̇s, q̈s, tb = trayectoria(t0, tf, q0, qf, n)

    fig, axes = subplots(nrows=1, ncols=3, figsize=(17, 5))

    # One panel per profile.
    series = (qs, q̇s, q̈s)
    for ax, curve in zip(axes, series):
        ax.plot(ts, curve)

    # Per-curve extents used for cursors and axis limits; a flat curve gets a
    # unit span so the 10% margins stay non-degenerate.
    lows = [min(curve) for curve in series]
    highs = [max(curve) for curve in series]
    ranges = [1 if hi == lo else hi - lo for hi, lo in zip(highs, lows)]
    Δt = tf - t0

    for ax, lo, hi, rng in zip(axes, lows, highs, ranges):
        # Dashed cursors at the blend time and at its mirror near the end.
        cursor_y = [lo - 0.1 * rng, hi + 0.1 * rng]
        ax.plot([tb, tb], cursor_y, "--")
        mirror = tf - (tb - t0)
        ax.plot([mirror, mirror], cursor_y, "--")
        ax.set_xlim(t0 - 0.1 * Δt, tf + 0.1 * Δt)
        ax.set_ylim(lo - 0.1 * rng, hi + 0.1 * rng)

    return ts, qs, q̇s, q̈s
def main():
    """Plot each language's GitHub activity share over time, one marker per
    data point, annotated with the language name."""
    # `columns` and `data` are module-level globals holding the dataset.
    languages = DataFrame(columns=columns, data=data)
    print(languages)
    print(languages.groupby("typepos").mean())

    # Global plot styling.
    rcParams.update({"font.size": 14})
    style.use("dark_background")

    ax: Axes = None
    # Fill colors per type-position group; "both" shares the trailing fill
    # but gets the leading edge color so it remains visually distinct.
    colors = dict(leading=[0.4, 0.6, 1], trailing=[1, 0.7, 0])
    colors.update(both=colors["trailing"])
    edges = {**colors, **dict(both=colors["leading"])}
    for typepos, group in languages.groupby("typepos"):
        # All groups draw onto one shared Axes: pass the previous `ax` back in.
        ax = group.plot(
            x="year",
            y="share",
            ax=ax,
            color=colors[typepos],
            label=typepos,
            markeredgecolor=edges[typepos],
            markeredgewidth=3,
            markersize=10,
            style="o",
        )
        # Label each point with the language name, offset above the marker.
        for _, row, *_ in group.iterrows():
            ax.annotate(
                s=row["name"],
                xy=[row["year"], row["share"]],
                xytext=[0, 12],
                textcoords="offset pixels",
                horizontalalignment="center",
                # verticalalignment="center",
            )
    # Anchor the y-axis at zero so shares are visually comparable.
    ax.set_ylim([0, ax.get_ylim()[1]])
    ax.set_ylabel("2020Q1 GitHub Activity Share")
    # ax.figure.tight_layout()
    show()
def stock_price_prediction(filePath):
    """Fit an OLS model of closing price against row index and print a summary.

    Reads the CSV at ``filePath`` (must contain a ``Close`` column),
    regresses the closing prices on their positional index (with intercept),
    and prints the statsmodels fit summary.

    :param filePath: path to the CSV file to analyse
    """
    import pandas as pd
    import statsmodels.api as sm
    from matplotlib.pyplot import style

    # Set style for graphs.
    style.use('ggplot')

    # Get data.  Bug fix: the path was hard-coded to 'a.us.csv', making the
    # `filePath` parameter dead; use the caller-supplied path instead.
    data = pd.read_csv(filePath)
    dates = data.index.tolist()
    prices = data['Close'].tolist()

    # Add an intercept column and fit ordinary least squares.
    dates = sm.add_constant(dates)
    model = sm.OLS(prices, dates).fit()
    predictions = model.predict(dates)

    print('\n')
    print('\n')
    print(model.summary())
    print('\n')
    print('\n')
def plot_n(data, bt_returns):
    """Plot cumulative backtest return curves against an equal-weight benchmark.

    :param data: price data accepted by ``weekly_returns``
    :param bt_returns: per-period backtest returns; the index may be a
        MultiIndex — NOTE(review): the code assumes dates live in the *last*
        index level; confirm against the callers.
    """
    style.use('ggplot')
    bt_cumprod = portfolio_returns_to_prices(bt_returns)
    if isinstance(bt_returns.index, MultiIndex):
        date_index = bt_returns.index.levels[-1]
        # Legend columns sized from the number of plotted series
        # (row groups times the width of the first column level), ~25 per column.
        legend_ncol = int(
            (bt_returns.index.shape[0] / bt_returns.index.levshape[-1]) *
            bt_returns.columns.levshape[0] // 25) + 1
    else:
        date_index = bt_returns.index
        legend_ncol = int(bt_returns.columns.levshape[0] // 20) + 1
    # bt_cumprod = bt_rets.add(1).cumprod()
    # Order the legend by final cumulative value, best performer first.
    indice_ordenado = bt_cumprod.iloc[-1].sort_values(
        ascending=False).index.tolist()
    # Benchmark returns restricted to the backtest's date range.
    data_returns = weekly_returns(data).loc[date_index]
    # data_returns = weekly_returns(data)
    # All curves are rebased to start at 100; the benchmark (equal-weighted
    # mean of the data columns) is drawn as a thick black line.
    bt_cumprod[indice_ordenado].mul(100).plot(figsize=(20, 12))
    data_returns.mean(axis=1).add(1).cumprod().mul(100).plot(linestyle='-',
                                                             linewidth=3,
                                                             c='k')
    legend(indice_ordenado + ['benchmark'], ncol=legend_ncol, loc='best')
    return
def plot_pontos(regiao, media):
    """Scatter-plot the top-10 average sales per platform for one region.

    :param regiao: region name, interpolated into the title
    :param media: sequence of (platform, average) pairs
    """
    style.use("ggplot")
    # Idiom fix: iterate the points directly instead of indexing by position.
    for ponto in media:
        plt.scatter(ponto[0], ponto[1])
    plt.xlabel(u"Plataformas")
    plt.ylabel(u"Medias")
    plt.title(u"Top 10 média de vendas por plataforma.\nRegião: {}.".format(
        regiao))
    plt.show()
def missings_viz(self, df, visualizar=True, escolhido_tipo=None,
                 df_missings=False):
    '''
    Visualize missing values, plotting the chosen visualization type.
    : param df: pd.DataFrame to inspect
    : param visualizar: boolean deciding whether to plot
    : param escolhido_tipo: integer selecting which plot (prompted if None)
    : param df_missings: boolean; if True return a DataFrame of null stats
    : return: pd.DataFrame with column names and missing percentages
              (only when df_missings is True; otherwise None)
    '''
    if visualizar:
        # Classic theme so plots stay readable for dark-themed IDEs.
        from matplotlib.pyplot import style
        style.use('classic')
        # Restrict the plots to columns that actually contain missings.
        cols_miss = df.isnull().any()
        cols_miss = df.columns[cols_miss]
        if escolhido_tipo is None:  # idiom fix: compare with `is None`
            print('Tipo de visualizacao: ', '\n', 'total de missings - 1',
                  '\n', 'ordem de aparição - 2', '\n', 'correlação - 3', '\n',
                  'dendograma - 4')
            escolhido_tipo = int(input())
        print('Visualização missings')
        # Lazy imports: missingno is only loaded for the selected plot.
        if escolhido_tipo == 1:
            # Total missings per column.
            from missingno import bar
            bar(df[cols_miss])
        elif escolhido_tipo == 2:
            # Order of appearance (row-wise nullity matrix).
            from missingno import matrix
            matrix(df[cols_miss])
        elif escolhido_tipo == 3:
            # Nullity correlation between columns.
            from missingno import heatmap
            heatmap(df[cols_miss])
        elif escolhido_tipo == 4:
            # Dendrogram of nullity similarity.
            from missingno import dendrogram
            dendrogram(df[cols_miss])
    if df_missings:
        from funcoesProprias import dfExploracao
        print('Cálculo do percentual de missings num DataFrame')
        explora = dfExploracao(df)
        explora = explora.sort_values(['tipos', 'na_perct', 'quantUnicos'])
        return explora
def ask_graph():
    """Ask the user which column to plot against the final grade (G3).

    On an unknown column name, list the valid options and prompt again
    recursively.
    """
    column = input("\nWhich graph would you like to display?")
    try:
        style.use("ggplot")
        pyplot.scatter(student_data[column], student_data["G3"])
        pyplot.xlabel(column)
        pyplot.ylabel("Final Grade")
        pyplot.show()
    except KeyError:
        # Unknown column: show the catalogue and re-prompt.
        print("Oops! That's not an option. These are though!")
        options = ("age", "Medu", "Fedu", "traveltime", "studytime",
                   "failures", "famrelquality", "freetime", "goout",
                   "dayalcohol", "weekendalcohol", "health", "absences",
                   "G1", "G2", "G3")
        print(*options, sep=", ")
        ask_graph()
def plot_barras(dicio, publica):
    """Bar chart of global game sales per platform for a single publisher.

    :param dicio: mapping of platform -> sales total
    :param publica: publisher name, interpolated into the title
    """
    style.use("ggplot")
    plt.bar(dicio.keys(), dicio.values())
    plt.xlabel(u"Plataformas")
    plt.ylabel(u"Vendas")
    titulo = u"Vendas Globais de jogos por plataforma.\n Publicadora: {}."
    plt.title(titulo.format(publica))
    plt.show()
def linear_regressor_demo():
    """Train the stockit regressor, predict one day past the data, and plot
    the fitted curve against the real series."""
    style.use('ggplot')
    stockit.train()
    # Forecast the first day beyond the available data.
    point_in_question = data_len + 1
    point_prediction = stockit.predict(point_in_question)
    print(point_prediction)
    # Regression curve over the (sorted) historical index.
    fitted = stockit.reg.predict(np.sort(stockit.x_index, axis=0))
    plt.title(stock)
    plt.plot(stockit.x_index, fitted, label="reg predictions")
    plt.plot(stockit.x_index, stockit.y_index, label="real")
    plt.scatter([point_in_question], [point_prediction],
                label=f'stockit.predict[{point_in_question}]')
    plt.legend()
    plt.show()
def graph(self, df, stock):
    """Plot the adjusted close and the forecast columns with a legend.

    :param df: DataFrame containing "Adj. Close" and "Forecast" columns
    :param stock: unused here; kept for interface compatibility
    """
    # ggplot styling for the chart.
    style.use("ggplot")
    # Draw both series onto the same axes; column names become legend labels.
    for column in ("Adj. Close", "Forecast"):
        df[column].plot()
    plt.legend(loc=4)
    plt.title(user.__getname__())
    plt.xlabel("Date")
    plt.ylabel("Price")
    plt.show()
def plot_barras_2(ano, medias):
    """Bar chart of average sales per genre for one year.

    :param ano: year, interpolated into the title
    :param medias: sequence of (genre, average) pairs
    """
    style.use("ggplot")
    fig = plt.figure()
    ax1 = plt.subplot2grid((1, 1), (0, 0))
    # Idiom fix: iterate the pairs directly instead of indexing by position.
    for item in medias:
        ax1.bar(item[0], item[1])
    # Bug fix: rotate the tick labels *after* plotting.  The original rotated
    # them before any bars existed, so the rotation was applied to placeholder
    # labels and lost once the categorical ticks were created by ax1.bar.
    for label in ax1.xaxis.get_ticklabels():
        label.set_rotation(45)
    plt.xlabel(u"Gêneros")
    plt.ylabel(u"Média de Vendas")
    plt.title(u"Média de vendas por gênero.\n Ano: {}.".format(ano))
    plt.show()
def stockit_demo():
    """Train stockit on a 250-point window, predict the next day, overlay a
    25-point moving average, and save the chart to disk."""
    style.use('ggplot')
    stockit.train(index=250)
    # Forecast the first day beyond the available data.
    point_in_question = data_len + 1
    point_prediction = stockit.predict(point_in_question)
    print(point_prediction)
    # Regression curve over the (sorted) historical index.
    fitted = stockit.reg.predict(np.sort(stockit.x_index, axis=0))
    plt.title(stock)
    plt.plot(stockit.x_index, fitted, label="reg predictions")
    plt.scatter([point_in_question], [point_prediction],
                label=f'stockit regression of day: {point_in_question}')
    # Add the moving average to the same figure instead of showing it alone.
    stockit.moving_avg(index=25, show_plt=False)
    plt.savefig("stockit example.png", dpi=1200)
def compare(self, ff, stock):
    """Plot the last N days of actual vs. forecast prices, N chosen by the user.

    :param ff: DataFrame with "Adj. Close" and "Forecast" columns
    :param stock: unused here; kept for interface compatibility
    """
    print("Do you want to zoom in?")
    days = int(input("If so, how many days would you like to see on the "
                     "x-axis? (The forecast is 30 days) "))
    style.use("ggplot")
    # Plot only the trailing window of each series.
    for column in ("Adj. Close", "Forecast"):
        ff[column][-days:-1].plot()
    plt.legend(loc=4)
    plt.title(user.__getname__())
    plt.xlabel("Date")
    plt.ylabel("Price")
    plt.show()
def model_wrapper_test_plot(res):
    """
    Plot the wrapper test result against the analytical Lynden-Bell &
    Pringle solution and return the figure.

    :param res: result object carrying args, grid ``x``, surface densities
        ``sigma_g``, ``timesteps`` and temperature ``T``
    :return: the created matplotlib figure
    """
    from .const import Grav, m_p, mu, k_b, year, AU
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.pyplot import style
    style.use(['seaborn-dark', {'axes.grid': True, 'font.size': 10}])
    # read the results
    args = res.args  # noqa
    x = res.x  # noqa
    sig_0 = res.sigma_g[0]  # noqa
    sig_g = res.sigma_g[-1]  # noqa
    t = res.timesteps[-1]  # noqa
    temp = res.T  # noqa
    alpha = args.alpha  # noqa
    gamma = args.gamma  # noqa
    rc = args.rc  # noqa
    mdisk = args.mdisk  # noqa
    mstar = args.mstar  # noqa
    # calculate analytical solution at t=0 and at the final time, using the
    # viscosity evaluated at the innermost grid point
    cs1 = np.sqrt(k_b * temp[0] / mu / m_p)   # sound speed at x[0]
    om1 = np.sqrt(Grav * mstar / x[0]**3)     # Keplerian frequency at x[0]
    nu1 = alpha * cs1**2 / om1                # alpha viscosity at x[0]
    siga_0, _ = lbp_solution(x, gamma, nu1, mstar, mdisk, rc)
    siga_1, _ = lbp_solution(x, gamma, nu1, mstar, mdisk, rc, time=t)
    # compare results against analytical solution: left panel initial state,
    # right panel final state
    f, axs = plt.subplots(1, 2, figsize=(10, 4), sharex=True, sharey=True)
    axs[0].loglog(x / AU, siga_0, '-', label='analytical')
    axs[0].loglog(x / AU, sig_0, 'r--', label='initial')
    axs[0].set_title('t = 0 years')
    axs[0].legend()
    axs[1].loglog(x / AU, siga_1, '-', label='analytical')
    axs[1].loglog(x / AU, sig_g, 'r--', label='simulated')
    axs[1].set_title('t = {:3.2g} years'.format(t / year))
    axs[1].legend()
    axs[1].set_ylim(1e-5, 1e5)
    for ax in axs:
        ax.set_xlabel('r [AU]')
        ax.set_ylabel(r'$\Sigma_\mathrm{g}$ [g cm$^{-2}$]')
    return f
def _pair_correlation(files, bin_size, r_max):
    """Compute and display the pair-correlation curve for every file that
    matches the *files* glob pattern."""
    from glob import glob
    paths = glob(files)
    # Guard clause: nothing matched, nothing to do.
    if not paths:
        return
    from .io import load_locs
    from .postprocess import pair_correlation
    from matplotlib.pyplot import plot, style, show, xlabel, ylabel
    style.use('ggplot')
    for path in paths:
        print(f'Loading {path}...')
        locs, info = load_locs(path)
        print('Calculating pair-correlation...')
        bins_lower, pc = pair_correlation(locs, info, bin_size, r_max)
        plot(bins_lower, pc)
        xlabel('r (pixel)')
        ylabel('pair-correlation (pixel^-2)')
        show()
def plot_linha_2(lista_ano, anoI, anoF):
    """Line plot of games-per-year for each of the top genres.

    :param lista_ano: sequence of rows [genre, count_anoI, ..., count_anoF]
    :param anoI: first year (inclusive)
    :param anoF: last year (inclusive)
    """
    style.use("ggplot")
    # Idiom fix: build the year axis with range() instead of a manual
    # append loop.
    anos = list(range(anoI, anoF + 1))
    for registro in lista_ano:
        # registro[0] is the genre label; the rest are the yearly counts
        # (slice [1:] replaces the redundant [1:len(registro)]).
        plt.plot(anos, registro[1:], label="{}".format(registro[0]))
    plt.xlabel(u"Anos")
    plt.ylabel(u"Quantidade de Jogos")
    plt.title(
        u"Quantidade de jogos pelos {} maiores gêneros.\nEntre {} e {}.".
        format(len(lista_ano), anoI, anoF))
    plt.legend()
    plt.show()
def plot_linha(EU, NA):
    """Line plot comparing total sales by rank for the EU and NA regions.

    :param EU: sequence of (name, sales) pairs for Europe, in rank order
    :param NA: sequence of (name, sales) pairs for North America, in rank
        order; assumed to have the same length as ``EU``
    """
    style.use("ggplot")
    # Idiom fix: comprehensions instead of index-based append loops.
    vendas_eu = [item[1] for item in EU]
    vendas_na = [item[1] for item in NA]
    # Ranks start at 1.
    posicoes = list(range(1, len(EU) + 1))
    plt.plot(posicoes, vendas_eu, label=u"EU")
    plt.plot(posicoes, vendas_na, label=u"NA")
    plt.xlabel(u"Posição dos jogos mais vendidos em cada região.")
    plt.ylabel(u"Total de Vendas")
    plt.title(u"Top {} de vendas, EU e NA".format(len(EU)))
    plt.legend()
    plt.show()
def _pair_correlation(files, bin_size, r_max):
    """Plot the pair-correlation curve (bin-centered) with a descriptive
    title for every file matching the *files* glob pattern."""
    from glob import glob
    paths = glob(files)
    # Guard clause: nothing matched, nothing to do.
    if not paths:
        return
    from .io import load_locs
    from .postprocess import pair_correlation
    from matplotlib.pyplot import plot, style, show, xlabel, ylabel, title
    style.use("ggplot")
    half_bin = bin_size / 2
    for path in paths:
        print(f"Loading {path}...")
        locs, info = load_locs(path)
        print("Calculating pair-correlation...")
        bins_lower, pc = pair_correlation(locs, info, bin_size, r_max)
        # Center each bin on its midpoint rather than its lower edge.
        plot(bins_lower - half_bin, pc)
        xlabel("r (pixel)")
        ylabel("pair-correlation (pixel^-2)")
        title(f"Pair-correlation. Bin size: {bin_size}, R max: {r_max}")
        show()
def main():
    """Demonstrate basic pandas DataFrame indexing and ndarray round-tripping."""
    style.use("ggplot")
    # Small synthetic web-traffic table.
    stats = {
        "Day": [1, 2, 3, 4, 5, 6],
        "Visitors": [43, 53, 34, 45, 64, 34],
        "Bounce_Rate": [65, 72, 62, 64, 54, 66],
    }
    frame = pd.DataFrame(stats)
    # Index by day, mutating the frame in place rather than copying.
    frame.set_index("Day", inplace=True)
    # Show two columns as a raw ndarray...
    print(np.array(frame[["Bounce_Rate", "Visitors"]]))
    # ...and convert that ndarray back into a fresh DataFrame.
    roundtrip = pd.DataFrame(np.array(frame[["Bounce_Rate", "Visitors"]]))
    print(roundtrip)
def show_map(all_x, all_y, player_name, map_name):
    """Plot every score / hit-timing series for one map on a dark, maximized
    figure.

    :param all_x: mapping with "Left notes timing" / "Right notes timing" axes
    :param all_y: mapping of series name -> values
    :param player_name: shown in the title
    :param map_name: shown in the title
    """
    style.use("dark_background")
    palette = get_cmap("Set1")
    for color_idx, (series_name, series_vals) in enumerate(all_y.items()):
        # Left-hand series use the left note timings, everything else the right.
        timing_key = ("Left notes timing"
                      if "left" in series_name else "Right notes timing")
        # Hit-timing curves are drawn thinner than score curves.
        width = 1 if "Hit timing" in series_name else 2
        plot(
            all_x[timing_key],
            series_vals,
            marker="",
            color=palette(color_idx),
            linewidth=width,
            alpha=0.9,
            label=series_name,
        )
    legend(
        loc="upper center",
        bbox_to_anchor=(0.5, 1.15),
        ncol=5,
        fancybox=True,
        shadow=True,
    )
    title(f"|{player_name}| ({map_name})",
          loc="left",
          fontsize=14,
          fontweight=4,
          color="White")
    xlabel("Time (seconds)")
    ylabel("Score (points) & hit timing (millisecs)")
    grid()
    # Maximize the window before showing.
    mng = get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    show()
def plot_graph(xy_per_type):
    """For each map type, plot every player's score-over-date series on a
    dark, maximized figure.

    :param xy_per_type: mapping of map type -> (x_axis, {player: y_values})
    """
    # Other styles available: Solarize_Light2, _classic_test_patch, bmh,
    # classic, dark_background, fast, fivethirtyeight, ggplot, grayscale,
    # the seaborn family, and tableau-colorblind10.
    style.use("dark_background")
    palette = get_cmap("Set1")
    for type_maps, (x_axis, all_y) in xy_per_type.items():
        print(f"Graph for {type_maps}\n")
        # One colored line per player.
        for color_idx, player in enumerate(all_y):
            plot(
                x_axis,
                all_y[player],
                marker="",
                color=palette(color_idx),
                linewidth=2,
                alpha=0.9,
                label=player,
            )
        legend(
            loc="upper center",
            bbox_to_anchor=(0.5, 1.15),
            ncol=4,
            fancybox=True,
            shadow=True,
        )
        title(type_maps, loc="left", fontsize=24, fontweight=4, color="orange")
        xlabel("Date")
        ylabel("Score")
        grid()
        # Maximize the window before showing.
        mng = get_current_fig_manager()
        mng.resize(*mng.window.maxsize())
        show()
def set_style(styles):
    """
    Set the experiment specific plotting style

    Example:
        >>> import mplhep as hep
        >>> hep.set_style("ATLAS")
        >>> hep.set_style(mplhep.style.CMS)

    Parameters
    ----------
        styles (`str` or `mplhep.style` `dict`): The experiment style
    """
    # Normalize a single style into a one-element list.
    if not isinstance(styles, list):
        styles = [styles]
    # Resolve string aliases against this module's attributes; dict styles
    # pass through unchanged.
    resolved = []
    for entry in styles:
        if isinstance(entry, dict):
            resolved.append(entry)
        else:
            resolved.append(getattr(sys.modules[__name__], f"{entry}"))
    plt_style.use(resolved)
# Run the pynsqd test entry point with the command-line arguments and exit
# with its status code, using a non-interactive matplotlib backend.
import sys
import matplotlib
matplotlib.use('agg')  # headless backend; must be selected before pyplot use
from matplotlib.pyplot import style
style.use('classic')
import pynsqd

# Forward CLI args to the test runner and propagate its status.
status = pynsqd.test(*sys.argv[1:])
sys.exit(status)
# Fraunhofer-pattern analysis setup: plotting style and per-sample scan table.
from pathlib import Path
import numpy as np
from h5py import File
from matplotlib.pyplot import style, subplots
from scipy.constants import physical_constants as phys_c
from shabanipy.jj.fraunhofer.deterministic_reconstruction import (
    extract_current_distribution,
)
from shabanipy.jj.fraunhofer.utils import find_fraunhofer_center
from shabanipy.jj.utils import extract_switching_current
from shabanipy.plotting import jy_pink, plot, plot2d
from shabanipy.plotting.utils import stamp

# Register the custom "jy_pink" colormap before referencing it in the style.
jy_pink.register()
style.use(["presentation", "jy_pink"])

# Per-sample scan parameters: junction geometry and the threshold passed to
# the switching-current extraction.
# NOTE(review): this table appears truncated in this chunk (braces unclosed
# below) — the remainder is not visible here.
scans = {
    "WFS02_078": {
        "jj_width": 4e-6,
        "jj_length": 1e-6,
        "threshold": 50
    },
    "WFS02_073": {
        "jj_width": 1e-6,
        "jj_length": 1e-6,
        "threshold": 50
    },
    "WFS02_042": {
        "jj_width": 4e-6,
        "jj_length": 1e-6,
        "threshold": 50
# In[17]: from datetime import datetime import tkinter.messagebox as messagebox import tkinter as tk from tkinter import ttk import requests, json, io import pandas as pd import webbrowser import pylab import matplotlib.pyplot as plt from matplotlib.pyplot import style from matplotlib.figure import Figure from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg style.use('seaborn') class TS_dashboard(tk.Tk): def __init__(self, parent): tk.Tk.__init__(self, parent) tk.Tk.configure(self, background='white') # App bg color tk.Tk.geometry(self, "800x600") # Set window size tk.Tk.wm_title(self, 'Transportstyrelsen statistics') # Window title tk.Tk.iconbitmap(self, r'pictures/ts_logo_cut.ico') # Window icon self.parent = parent self.initialize_app() self.currentYear = datetime.now().year self.resizable(False, False) # Top dropdown
# Plot lcapy's trapezoid pulse trap(t - 0.75, 0.5) over [-2, 2] and save the
# figure as a PNG next to this script.
from matplotlib.pyplot import savefig, style
from lcapy import *

style.use('function.mplstyle')

trap(t - 0.75, 0.5).plot((-2, 2), title='trap(t - 0.75, 0.5)')

# Derive the output filename from this script's filename.
savefig(__file__.replace('.py', '.png'))
# GCMS sample loading: group chromatogram files by cleaned experiment title.
from collections import defaultdict, OrderedDict
from glob import glob
from matplotlib import pyplot as mpl
from matplotlib.pyplot import (figure, legend, xticks, text, scatter, bar,
                               savefig, style)
from os import path
from scipy.io import netcdf_file
from sklearn.decomposition import PCA
import GCMSUtils as gu
import GCMS_Plots as gp
import Utils as ut
import numpy as np
import pandas as pd

style.use('lowink')

# Normalization settings forwarded to the GCMS helper functions; the second
# variant only differs in the retention-time window used for scaling.
norm_kwargs = dict(zeroed=20, t_offset='auto', normed=(1010, 1032),
                   norm_method='max')
norm_kwargs2 = norm_kwargs.copy()
norm_kwargs2['normed'] = (1060, 1070)

if __name__ == "__main__":
    normers = {}
    allsamples = defaultdict(list)
    # Group every CDF chromatogram under a cleaned-up experiment title so
    # replicates of the same experiment share one key.
    for fname in glob('*/*.CDF'):
        sample = netcdf_file(fname)
        title = sample.experiment_title.decode().replace('pac', '').split(
            'r')[0].strip('_').lstrip('_-').replace('zaza-', 'zaza')
        allsamples[title].append(sample)
# Download and align closing prices for three market indices.
import math
import matplotlib.pyplot as plt
import pandas_datareader as web
import pandas as pd
import datetime as dt
from matplotlib.pyplot import style

style.use("ggplot")

# Full available history window.
start = dt.datetime(1970, 1, 1)
end = dt.datetime.now()

# Yahoo Finance tickers for the three indices.
USA = "^GSPC"
Canada = "^GSPTSE"
Japan = "^N225"


def MAIN():
    # Download closing prices for each index and give them readable names.
    data = web.get_data_yahoo(USA, start, end)[["Close"]]
    data = data.rename(columns={"Close": "S&P 500"})
    data_2 = web.get_data_yahoo(Canada, start, end)[["Close"]]
    data_2 = data_2.rename(columns={"Close": "TSX 60"})
    data_3 = web.get_data_yahoo(Japan, start, end)[["Close"]]
    data_3 = data_3.rename(columns={"Close": "Nikkei 225"})

    # Align the three series side by side, dropping dates missing from any.
    frames = [data, data_2, data_3]
    result = pd.concat(frames, axis=1)
    result.dropna(inplace=True)
    index = pd.DataFrame(index=result.index)
    # NOTE(review): snippet appears truncated — MAIN continues beyond this
    # chunk.
# Synthetic-data linear-regression demo.
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import style
import random

style.use('fivethirtyeight')

'''
xs=np.array([1,2,3,4,5,6],dtype=np.float64);
ys=np.array([5,4,6,5,6,7],dtype=np.float64);
'''


def create_dataset(hm, variance, step=2, correlation=False):
    # Build `hm` noisy points around a ramp: the base value drifts by
    # +/-`step` per point when correlation is 'pos'/'neg', with uniform
    # noise of +/-`variance` added to each point.
    val = 1
    ys = []
    for i in range(hm):
        y = val + random.randrange(-variance, variance)
        ys.append(y)
        if correlation and correlation == 'pos':
            val += step
        elif correlation and correlation == 'neg':
            val -= step
    xs = [i for i in range(len(ys))]
    return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)


def best_fit_slope(xs, ys):
    # Least-squares slope: m = (x̄·ȳ − mean(x·y)) / (x̄² − mean(x²)).
    # NOTE(review): snippet appears truncated here — no return statement is
    # visible in this chunk.
    m = (((mean(xs) * mean(ys)) - mean(xs * ys)) /
         ((mean(xs) * mean(xs)) - mean(xs**2)))
# PyQt4 front-end for the Smiths Micrologix data application.
from PyQt4.uic import loadUiType
from PyQt4 import QtGui, QtCore
import pyodbc
from datetime import datetime
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as \
    FigureCanvas
import smith_data as sd
import signal_reader as sr
from matplotlib.pyplot import style
import math
import os

# Launch the companion data-connector process before the GUI starts.
os.startfile('C:/smiths_micrologix_data/connector.py')

style.use('bmh')

# Load the Qt Designer UI definition for the main window.
Ui_main, Qmain = loadUiType('ui/main.ui')


class Main(Qmain, Ui_main):
    """Main application window backed by the 'ui/main.ui' layout."""

    def __init__(self):
        self.now = datetime.now()  # Current time
        # Database connection (trusted Windows auth).
        self.conn = pyodbc.connect('DRIVER={SQL Server};'
                                   'SERVER=ZIRSYSPRO;'
                                   'DATABASE=MAINTDATA;'
                                   'Trusted_Connection=yes')
        # Separate class created to manage the data aspect of the application.
        self.data = sd.DataManager()
        # NOTE(review): snippet appears truncated — the remainder of __init__
        # (including any superclass __init__ call) is not visible here.
# Stock-price forecasting feature preparation (GOOGL from Quandl).
import pandas as pd
import quandl, math, datetime
import numpy as np
# svm - Support vector machine
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib.pyplot import style  # importing graphics plotting and styling

# NOTE(review): `sklearn.cross_validation` was removed in modern scikit-learn
# (replaced by sklearn.model_selection) — this script requires an old sklearn.
style.use('ggplot')

df = quandl.get('Wiki/GOOGL')
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
# Derived features: high-to-close and open-to-close percentage moves.
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]

# we're predicting forecast_col
forecast_col = 'Adj. Close'
# Backfilling empty slots with an outlier sentinel value
df.fillna(-99999, inplace=True)

# Rounds up to nearest whole integer: forecast 1% of the dataset length ahead
forecast_out = int(math.ceil(.01 * len(df)))
# Label = the close price `forecast_out` rows in the future.
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)

# Features = x & labels = Y
X = np.array(df.drop(['label'], 1))
X = preprocessing.scale(X)
# exercise 8.2.6 from matplotlib.pyplot import figure, plot, title, show, bar, style, savefig, xlabel, ylabel import numpy as np import neurolab as nl from sklearn import model_selection from scipy import stats from projekt2 import X, pimaData np.random.seed(2) style.use('default') # Set plot theme X = X[:, [0, 2, 3, 4, 5, 6]] # extract attributes vi want to use y = np.array(pimaData[['glucose']]) # real prediction N, M = X.shape #C = 2 # Normalize data X = stats.zscore(X) y = stats.zscore(y) # Parameters for neural network classifier #n_hidden_units = 2 # number of hidden units n_train = 2 # number of networks trained in each k-fold learning_goal = 10 # stop criterion 1 (train mse to be reached) max_epochs = 80 # stop criterion 2 (max epochs in training) show_error_freq = 10 # frequency of training status updates # K-fold crossvalidation K = 5 # only five folds to speed up this example CV = model_selection.KFold(K, shuffle=True)
# Plotting utilities for a multi-objective optimization run; fitness values
# are logged to text files under fitness_function/.
import os
import matplotlib
matplotlib.use('Agg')  # headless backend for batch plotting
import matplotlib.pyplot as pyplot
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import style
# from validity import f3

style.use("seaborn-darkgrid")

# Ensure the log directory exists; the global log file collects fitness
# values across all generations.
if not os.path.exists("fitness_function"):
    os.makedirs("fitness_function")
write_file_objective1 = open('fitness_function/prostrate_MLL_fitness.txt', 'w')


class Plotter():
    """Records and plots population fronts for an optimization problem."""

    def __init__(self, problem, individual_list):
        # Output directory for plot images.
        self.directory = 'plots'
        self.problem = problem
        self.individual_list = individual_list

    def plot_population_best_front(self, population, generation_number):
        # Log the front every 5 generations to a per-generation file and to
        # the global fitness log.
        # NOTE(review): snippet appears truncated — the rest of this method
        # is not visible in this chunk.
        if generation_number % 5 == 0:
            test = "fitness_function/MLL_NDS_labels_" + str(
                generation_number) + ".txt"
            write_file_objective = open(test, 'w')
            write_file_objective.write("\n\nGeneration Number:- %s \n" %
                                       generation_number)
            write_file_objective1.write("\n\nGeneration Number:- %s \n" %
                                        generation_number)