Example #1
    def on_click_register_btn(self):

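        # Refuse duplicate registration; otherwise validate the credentials
        # and store an encrypted account record valid for 24 hours.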
        input_account = self.input_account.text()
        input_password = self.input_password.text()
        if os.path.exists(config.account_path):
            QMessageBox.warning(self, "!", "不要给老子重复注册!!", QMessageBox.Yes)
            return
        if self.check_account(is_login=False,
                              account=input_account,
                              password=input_password):
            register_time = int(time.time())
            user_data = {
                "account": input_account,
                "password": input_password,
                "start_time": register_time,
                "end_time": register_time + 24 * 3600
            }

            user_data = self.encrypt(str(user_data))
            util.write_data(config.account_path, data=user_data)
            # util.write_json_file(config.account_path, data=user_data)
            self.is_finish_login.emit(True)
            self.hide()
        else:
            QMessageBox.warning(self, "!", "账号或密码错误", QMessageBox.Yes)
Example #2
def next_thread(bot, update):
    global update_mode
    global update_progress
    global update_list
    global data
    logging.info('next')
    if not update_mode:
        bot.send_message(
                chat_id=update.message.chat_id,
                text="Not in update mode, use /update to start update."
        )
        return
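    # Send the next pending thread link; when the list is exhausted, record
    # the newest thread seen and leave update mode.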
    if update_progress < len(update_list):
        item = update_list[update_progress]
        bot.send_message(
                chat_id=update.message.chat_id,
                text='{0}/{1}:\n{2}\nhttp://bbs.pcbeta.com/forum.php?mod=viewthread&tid={3}'.format(
                        update_progress + 1, len(update_list),
                        item['title'], item['tid'])
        )
        update_progress = update_progress + 1
    else:
        bot.send_message(
                chat_id=update.message.chat_id,
                text="Update Mode - Off"
        )
        data['lastUpdateTime'] = update_list[-1]['createTime']
        data['lastUpdateThread'] = update_list[-1]['tid']
        update_mode = False
        update_progress = 0
        update_list = []
        util.write_data(data)
Example #3
def main():
	window_size = 100
	threshold = calc_threshold(exp_moving_average, window_size)

	print threshold

	filename = sys.argv[1]
	data_in = load_data(filename)

	# Uncomment for more realistic first values. First window_size/4 values
	# should not be taken into account in the output data and plots.
	# data_in[:0] = [sum(data_in[:(window_size/4)])/(window_size/4)]

	filtered_ma = average_diff(data_in, moving_average, window_size)
	filtered_ema = average_diff(data_in, exp_moving_average, window_size)

	plot([0] * len(data_in),
	     filtered_ma,
	     filtered_ema,
	     [threshold] * len(data_in),
	     [-threshold] * len(data_in),
	     )

	mean_ma  = mean_value_detector(filtered_ma,  threshold)
	mean_ema = mean_value_detector(filtered_ema, threshold)

	plot(mean_ema)
	plot(mean_ma)

	write_data(mean_ema, filename + ".out")
Example #4
def _deploy_files(image_id, dockerfile, app_data):
    dockerfile_dir = dir_path + '/data/' + image_id
    print dockerfile_dir
    if not os.path.exists(dockerfile_dir):
        os.makedirs(dockerfile_dir)
    util.deploy_package(dockerfile_dir, app_data)
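    # The Dockerfile text arrives with literal "\n" sequences; turn them back
    # into real newlines before writing the file.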
    dockerfile = dockerfile.replace(r'\n', '\n')
    util.write_data(dockerfile_dir + '/Dockerfile', dockerfile)
    return dockerfile_dir
Example #5
    def __init__(self, density):
        """Initialize the simulation."""

        #Initialize the container
        container = LJContainer(density)

        #Equilibrate the system (self.t must be initialized before first use)
        self.t = 0.0
        while self.t < EQUILIBRIATION_STEPS:
            sys.stdout.write("\rEquilibrating the container: {0:3.1f}%".format(
                self.t*100/EQUILIBRIATION_STEPS))
            sys.stdout.flush()

            #Do one 'tick' consisting of two integrations and a force update
            container.tick(rescale=True)

            #Increment time
            self.t += 1

        print("\n")
        self.t = 0.0

        #Start measuring
        while self.t < MEASUREMENT_STEPS:
            sys.stdout.write("\rCalculating averages: {0:3.1f}%".format(
                self.t*100/MEASUREMENT_STEPS))
            sys.stdout.flush()

            #Do one 'tick' consisting of two integrations and a force update
            container.tick(rescale=True)

            #Sample averages
            container.sample(self.t)

            #Increment time
            self.t += 1

        #Store sampling data
        util.write_data(container.data, density)

        #Generate a plot of the measured properties
        util.generate_report(density)

        #Print out the average value for the pressure
        pressure = util.calculate_average(container.data, "P")

        #Write calculated pressure to disk
        util.store_pressure(pressure, density)

        print("\nAverage pressure for density {0}: {1:6.4}".format(
            density, pressure))
Example #6
def predict_metabolties(samples,
                        eta,
                        tau,
                        mu,
                        gamma,
                        weight_observations,
                        compounds,
                        record_mets=True):
    if np.sum(samples) == 0:
        print("PUMA ERROR - Metabolite identification: samples are all zero")
        exit()

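    # c counts how many sampled pathways can generate each metabolite;
    # phi = 1 - (1 - mu)^c is the noisy-OR activation probability, and
    # m_one / m_zero score each metabolite against the weighted observations.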
    c = np.dot(samples, eta)
    phi = 1 - np.array(np.exp(np.log(1 - mu) * c))
    b = np.log(1 - gamma * phi)
    psi = np.dot(b, np.dot(tau, tau.transpose())) - b
    v = np.dot(weight_observations, tau.transpose())
    m_one = v * (phi * (1 - (1 - gamma) * np.exp(psi))) \
        + (1 - v) * (phi * (1 - gamma))
    m_zero = v * ((1 - phi) * (1 - np.exp(psi))) + (1 - v) * (1 - phi)

    m_one_mean = np.mean(m_one, axis=0)
    m_zero_mean = np.mean(m_zero, axis=0)
    mean_mets_activity = m_one_mean / (m_one_mean + m_zero_mean)
    print("mean_mets_activity_PUMA_detected:", list(mean_mets_activity))

    # write out m_one, m_zero, and mean_mets_activity files:
    if record_mets:
        outdata_dir = os.environ['PUMA_OUTPUT_DATA']
        metabolite_prediction_output = os.path.join(
            outdata_dir, 'metabolite_prediction_output.xlsx')
        mean_mets_activity = np.squeeze(mean_mets_activity).reshape(1, -1)
        write_data(mean_mets_activity,
                   metabolite_prediction_output,
                   sheetname="results",
                   header=compounds)

    n_active_metabolites = len([
        metabolite for metabolite in mean_mets_activity[0] if metabolite >= 0.5
    ])
    print("number_active_metabolites_PUMA_detected:", n_active_metabolites)
    active_mets_indices = np.nonzero(mean_mets_activity[0] >= 0.5)[0]
    active_mets_ID = [compounds[index] for index in active_mets_indices]
    print("active_metabolites_PUMA_detected:", active_mets_ID)
    return
Example #7
def pathway_prediction(landa, a_init, mu, gamma, eta, tau, observed_weight_vector, pathway_dict,
                       record_samples=True):
    number_of_pathways = np.size(eta, 0)
    number_of_metabolites = np.size(eta, 1)
    myModel = pm.Model()
    with myModel:

        landa_value = pm.Beta('landa_value', alpha=1, beta=1)
        # define prior
        a = pm.Bernoulli('a', p=landa_value, shape=number_of_pathways)  # 1 x p
        # define posterior:  p (w|a)
        l = pm.math.dot(a, eta)  # 1xf: number of pathways that can generate each metabolite f
        phi = 1 - tt.exp(tt.log(1 - mu) * l)  # 1xf: p(m_j = 1| a)
        psi = 1 - tt.exp(tt.dot(tt.log(1 - (gamma * phi)), tau))  # 1xk: p(w_k=1 | a)
        w = pm.Bernoulli('w', p=psi, observed=observed_weight_vector, shape=observed_weight_vector.shape)

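        # Mixed sampling scheme: Metropolis for the continuous Beta prior,
        # binary Gibbs for the pathway indicator vector a.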
        start_point = {'landa_value': landa, 'a': a_init.astype(np.int32)}
        step1 = pm.Metropolis([landa_value])
        step2 = pm.BinaryGibbsMetropolis([a])
        trace = pm.sample(draws=1000, step=[step1, step2], start=start_point, random_seed=42)

    # The Beta variable is traced on the log-odds scale; map it back to (0, 1)
    # with the logistic sigmoid (scipy.stats.logistic.cdf), not the density.
    landa_value_samples_logodds = trace.get_values(trace.varnames[0], burn=100)
    landa_value_samples = logistic.cdf(landa_value_samples_logodds)
    pathways_samples = trace.get_values(trace.varnames[1], burn=100)

    mean_pathways_activity = np.mean(pathways_samples, axis=0)
    if record_samples:
        outdata_dir = os.environ['PUMA_OUTPUT_DATA']
        pathway_prediction_output = os.path.join(outdata_dir, 'pathway_prediction_output.xlsx')
        mean_pathways_activity_in_samples = np.squeeze(mean_pathways_activity).reshape(1, -1)
        write_data(mean_pathways_activity_in_samples, pathway_prediction_output, sheetname="samples",
                   header=pathway_dict["pathway"])

    print("mean_pathways_activity_PUMA_detected:", list(mean_pathways_activity))
    n_active_pathways = len(
        [activity for activity in mean_pathways_activity if activity >= 0.5])
    print("number_active_pathways [PUMA detected]:", n_active_pathways)
    active_pathways_indices = np.nonzero(mean_pathways_activity >= 0.5)[0]
    active_pathways_ID = [pathway_dict["pathway"][index] for index in active_pathways_indices]
    print("active_pathways_PUMA_detected:", active_pathways_ID)
    not_active_pathways_indices = np.nonzero(mean_pathways_activity < 0.5)[0]
    not_active_pathways_ID = [pathway_dict["pathway"][index] for index in not_active_pathways_indices]
    print("not_active_pathways_PUMA_detected:", not_active_pathways_ID)
    return pathways_samples
Example #8
def get_gpi_index_dictionary(year):

    response = get_response(index_url_for_download)
    if not response:
        return None
    gpi_data = response.content
    data_file = write_data(file_path, gpi_data)

    df = read_pdf(file_path, pages='10,11')
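    # The printed GPI table lays countries out in three column groups, which
    # read_pdf extracts under different headers; normalize and stack them.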
    frame_part1 = df[['COUNTRY', 'SCORE']]
    frame_part2 = df[['COUNTRY SCORE.1', 'Unnamed: 10']]
    frame_part3 = df[['COUNTRY SCORE', 'Unnamed: 6']]

    frame_part2 = frame_part2.rename(columns={
        'COUNTRY SCORE.1': 'COUNTRY',
        'Unnamed: 10': 'SCORE'
    })
    frame_part3 = frame_part3.rename(columns={
        'COUNTRY SCORE': 'COUNTRY',
        'Unnamed: 6': 'SCORE'
    })

    df = pd.DataFrame(np.concatenate(
        [frame_part1.values, frame_part2.values, frame_part3.values]),
                      columns=frame_part1.columns)
    df = df.dropna()
    df = df[df['COUNTRY'] != 'COUNTRY']
    df['SCORE'] = df['SCORE'].astype(float).round(3)
    df = df.set_index('COUNTRY')
    iso_names = [
        'Czechia', 'Bolivia, Plurinational State of', 'Kyrgyzstan',
        "Côte d'Ivoire", 'Viet Nam', 'Gambia',
        'Macedonia, the former Yugoslav Republic of', 'Moldova, Republic of',
        'Bosnia and Herzegovina', 'Palestine, State of',
        'Venezuela, Bolivarian Republic of',
        "Korea, Democratic People's Republic of", 'Russian Federation',
        'Congo, Democratic Republic of the', 'Central African Republic',
        'Syrian Arab Republic', 'United States of America',
        'United Kingdom of Great Britain and Northern Ireland',
        'Taiwan, Province of China', "Lao People's Democratic Republic",
        'Tanzania, United Republic of', 'Korea, Republic of', 'Congo',
        'Iran, Islamic Republic of'
    ]

    countries_to_rename = [
        'Czech Republic', 'Bolivia', 'Kyrgyz Republic', "Cote d' Ivoire",
        'Vietnam', 'The Gambia', 'North Macedonia', 'Moldova',
        'Bosnia & Herzegovina', 'Palestine', 'Venezuela', 'North Korea',
        'Russia', 'Dem. Rep of the Congo', 'Central African Rep', 'Syria',
        'USA', 'United Kingdom', 'Taiwan', 'Laos', 'Tanzania', 'South Korea',
        'Rep of the Congo', 'Iran'
    ]

    gpi_map = df.T.to_dict('list')
    for country, info in gpi_map.items():
        info.insert(0, year)
    gpi_dictionary = rename_country_to_iso_name(countries_to_rename, iso_names,
                                                gpi_map)
    return gpi_dictionary
Example #9
def get_gpi_index_dictionary(year, pages=default_pages):
	iso_names = [
		'Czechia', 'Trinidad and Tobago',
		'Macedonia, the former Yugoslav Republic of',
		'Bosnia and Herzegovina', 'Bolivia, Plurinational State of',
		'Kyrgyzstan', "Côte d'Ivoire",
		'United Kingdom of Great Britain and Northern Ireland', 'Viet Nam',
		'Moldova, Republic of', 'Monaco', 'Eswatini', 'Gambia',
		'Palestine, State of', 'Venezuela, Bolivarian Republic of',
		"Korea, Democratic People's Republic of", 'Russian Federation',
		'Central African Republic', 'Congo, Democratic Republic of the',
		'Syrian Arab Republic', 'Taiwan, Province of China',
		'United Arab Emirates', "Lao People's Democratic Republic",
		'Korea, Republic of', 'Tanzania, United Republic of',
		'United States of America', 'Congo', 'Iran, Islamic Republic of'
	]

	countries_to_rename = [
		'Czech Republic', 'Trinidad & Tobago', 'Macedonia (FYR)',
		'Bosnia & Herzegovina', 'Bolivia', 'Kyrgyz Republic',
		"Cote d' Ivoire", 'United Kingdom', 'Vietnam', 'Moldova', 'Swaziland',
		'The Gambia', 'Palestine', 'Venezuela', 'North Korea', 'Russia',
		'Central African Rep', 'Dem. Rep Congo', 'Syria', 'Taiwan', 'UAE',
		'Laos', 'South Korea', 'Tanzania', 'USA', 'Rep of the Congo', 'Iran'
	]

	response=get_response(index_url)
	if not response:
		return
	data=response.content
	data_file=write_data(to_file, data)

	df = read_pdf(data_file, pages=pages)
	columns_to_find=['country', 'score']
	columns_to_save=[]
	ending_to_save=[]
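	# Discover every COUNTRY*/SCORE* column pair that read_pdf produced for
	# the PDF's side-by-side table segments.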
	for to_find in columns_to_find:
		for column in df.columns:
			if to_find in column.lower():
				columns_to_save.append(column)
				lower=column.lower()
				ending_to_save.append(lower[lower.find(to_find)+len(to_find):])
	ending_to_save=set(ending_to_save)

	binding=[('COUNTRY{}'.format(ending), 'SCORE{}'.format(ending)) for ending in ending_to_save]
	#binding=[('country', 'score'), ('country.1', 'score.1'), ('country.2', 'score.2')]
	gpi_dictionary={}
	frames=[]
	for country,value in binding:
		sub_df=df[[country, value]]
		sub_df.rename(index=str, columns={country: "country", value: "score"}, inplace=True)
		frames.append(sub_df)
	df = pd.concat(frames)
	df.dropna(subset=['score'], inplace=True)
	df = df[df.country != 'COUNTRY']
	df=df.set_index('country')
	gpi_map=df.T.to_dict('list')
	for country, info in gpi_map.items():
		info.insert(0, year)
	gpi_dictionary=rename_country_to_iso_name(countries_to_rename, iso_names, gpi_map)	
	return gpi_dictionary
Example #10
def insert_one(bot, update, args):
    global data
    logging.info('insert')
    if len(args) != 2:
        display_help(bot, update)
        return
    oem = args[0]
    tid = args[1]
    obj = crawler.getThreadObj(cookies, tid)
    if oem not in data['data']:
        bot.send_message(
                chat_id=update.message.chat_id,
                text="Incorrect OEM name."
        )
        return
    index = -1
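    # Insert the thread object so the per-OEM list stays sorted by tid.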
    if tid < data['data'][oem][0]['tid']:
        index = 0
        data['data'][oem].insert(index, obj)
    elif tid > data['data'][oem][-1]['tid']:
        index = len(data['data'][oem])
        data['data'][oem].append(obj)
    else:
        index = 1
        while True:
            if index == len(data['data'][oem]):
                break
            if tid > data['data'][oem][index - 1]['tid'] and tid < data['data'][oem][index]['tid']:
                data['data'][oem].insert(index, obj)
                break
            index = index + 1
    util.write_data(data)
    bot.send_message(
            chat_id=update.message.chat_id,
            text="Added to local data."
    )
Example #11
from nltk.corpus import wordnet as wn
from util import remove_punctuation, remove_stopwords, load_data, write_data
import unicodedata

def synset_word(word):
	synsets = wn.synsets(word)
	return set().union(*[s.lemma_names() for s in synsets])

def synset_review(review):
	review = unicodedata.normalize('NFKD', review).encode('ascii','ignore')
	review = remove_stopwords(remove_punctuation(review.lower()))
	words = review.split()
	return ' '.join([' '.join(synset_word(word)) for word in words])


if __name__ == '__main__':
	input_files = ['%s_electronics.dat'%source for source in ['amazon', 'ebay', 'twitter']]
	for input_filename in input_files:
		dataset = load_data(input_filename)
		for d in dataset:
			d['wordnet_expansion'] = synset_review(d['review'])
		output_filename = input_filename.split('.')[0] + '_expanded' + '.dat'
		write_data(dataset, output_filename)
Example #12
    def __save_protocol(self, protocol_data: str):
        log_name = "Robot-%s" % self.__uid
        log_path = "%s%s.log" % (self.__protocol_path, log_name)
        util.write_data(write_path=log_path,
                        data=protocol_data + "\n\n",
                        mode="a+")
Example #13
def main(argv):

    # Test we have the correct number of arguments
    if len(argv) < 2:
        sys.stderr.write("Usage: cfd.py <scalefactor> <iterations> \n")
        sys.exit(1)

    # Get the system parameters from the arguments
    scalefactor = int(argv[0])
    niter = int(argv[1])

    sys.stdout.write("\n2D CFD Simulation\n")
    sys.stdout.write("=================\n")
    sys.stdout.write("Scale factor = {0}\n".format(scalefactor))
    sys.stdout.write("Iterations   = {0}\n".format(niter))

    # Time the initialisation
    tstart = time.time()

    # Set the minimum size parameters
    mbase = 32
    nbase = 32
    bbase = 10
    hbase = 15
    wbase = 5

    # Set the dimensions of the array
    m = mbase * scalefactor
    n = nbase * scalefactor

    # Set the parameters for boundary conditions
    b = bbase * scalefactor
    h = hbase * scalefactor
    w = wbase * scalefactor

    # Define the psi array of dimension [m+2][n+2] and set it to zero
    psi = [[0 for col in range(n + 2)] for row in range(m + 2)]

    # Set the boundary conditions on bottom edge
    for i in range(b + 1, b + w):
        psi[i][0] = float(i - b)
    for i in range(b + w, m + 1):
        psi[i][0] = float(w)

    # Set the boundary conditions on right edge
    for j in range(1, h + 1):
        psi[m + 1][j] = float(w)
    for j in range(h + 1, h + w):
        psi[m + 1][j] = float(w - j + h)

    # Write the simulation details
    tend = time.time()
    sys.stdout.write("\nInitialisation took {0:.5f}s\n".format(tend - tstart))
    sys.stdout.write("\nGrid size = {0} x {1}\n".format(m, n))

    # Call the Jacobi iterative loop (and calculate timings)
    sys.stdout.write("\nStarting main Jacobi loop...\n")
    tstart = time.time()
    jacobi(niter, psi)
    tend = time.time()
    sys.stdout.write("\n...finished\n")
    sys.stdout.write("\nCalculation took {0:.5f}s\n\n".format(tend - tstart))

    # Write the output files
    util.write_data(m, n, scalefactor, psi, "velocity.dat", "colourmap.dat")

    # Finish nicely
    sys.exit(0)
Example #14
import numpy as np
import tensorflow as tf

import util
from modelVAE import VAE

from matplotlib import pyplot as plt

data, means, stds = util.load_augment_data(
    util.loadf(
        '../mp3/Kimiko Ishizaka - J.S. Bach- -Open- Goldberg Variations, BWV 988 (Piano) - 01 Aria.mp3'
    ), 1024)

vae = VAE(z_dim=256, net_size=2 * 256, chunk_samples=1024)
dirname = 'save-vae'
ckpt = tf.train.get_checkpoint_state(dirname)
vae.load_model(dirname)

x = np.zeros((2000, 1024))
vz = np.random.randn(1, 20)
z = np.random.randn(1, 20)
z, zs = vae.encode(data[500:501, :1024])
zh = []
sh = []
for n in range(2000):
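    # Ornstein-Uhlenbeck-style random walk in latent space: decay toward the
    # origin plus Gaussian noise, decoded into one spectrum frame per step.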
    z += 0.03 * (-0.5 * z + 3 * np.random.randn(*z.shape))
    zh.append(np.sqrt(np.sum(z**2)))
    mu, s = vae.generate(z)
    sh.append(np.sqrt(np.exp(s)))
    x[n, :] = (
        mu + 1.00 * np.sqrt(np.exp(s)) * np.random.randn(*mu.shape)).squeeze()

out = np.zeros((2 * 1024, 2000))
out[:1024, :] = (x * stds + means).T
out[1024:, :] = 1 - 2 * np.random.randint(2, size=(1024, 2000))
sample_trace = util.write_data(np.minimum(out.T, 1.1), fname="out-vae.wav")
Example #15
import numpy as np
import tensorflow as tf

import util
from modelVAE import VAE

from matplotlib import pyplot as plt

data , means, stds = util.load_augment_data(util.loadf('../mp3/Kimiko Ishizaka - J.S. Bach- -Open- Goldberg Variations, BWV 988 (Piano) - 01 Aria.mp3'),1024)

vae = VAE(z_dim=256,net_size=2*256,chunk_samples=1024)
dirname = 'save-vae'
ckpt = tf.train.get_checkpoint_state(dirname)
vae.load_model(dirname)

x = np.zeros((2000,1024))
vz = np.random.randn(1,20)
z = np.random.randn(1,20)
z,zs = vae.encode(data[500:501,:1024])
zh = []
sh = []
for n in range(2000):
    z += 0.03*(-0.5*z + 3*np.random.randn(*z.shape))
    zh.append(np.sqrt(np.sum(z**2)))
    mu,s = vae.generate(z)
    sh.append(np.sqrt(np.exp(s)))
    x[n,:] = (mu+1.00*np.sqrt(np.exp(s))*np.random.randn(*mu.shape)).squeeze()

out = np.zeros((2*1024,2000))
out[:1024,:] = (x*stds+means).T
out[1024:,:] = 1-2*np.random.randint(2,size=(1024,2000))
sample_trace = util.write_data(np.minimum(out.T,1.1), fname = "out-vae.wav")
Example #16
from nltk.corpus import wordnet as wn
from util import remove_punctuation, remove_stopwords, load_data, write_data
import unicodedata


def synset_word(word):
    synsets = wn.synsets(word)
    return set().union(*[s.lemma_names() for s in synsets])


def synset_review(review):
    review = unicodedata.normalize('NFKD', review).encode('ascii', 'ignore')
    review = remove_stopwords(remove_punctuation(review.lower()))
    words = review.split()
    return ' '.join([' '.join(synset_word(word)) for word in words])


if __name__ == '__main__':
    input_files = [
        '%s_electronics.dat' % source
        for source in ['amazon', 'ebay', 'twitter']
    ]
    for input_filename in input_files:
        dataset = load_data(input_filename)
        for d in dataset:
            d['wordnet_expansion'] = synset_review(d['review'])
        output_filename = input_filename.split('.')[0] + '_expanded' + '.dat'
        write_data(dataset, output_filename)
Example #17
import os
import cPickle

import numpy as np
import tensorflow as tf

import util
from model import Model

with open(os.path.join('save', 'config.pkl')) as f:
    saved_args = cPickle.load(f)

data, means, stds = util.load_augment_data(
    util.loadf(
        '../mp3/Kimiko Ishizaka - J.S. Bach- -Open- Goldberg Variations, BWV 988 (Piano) - 01 Aria.mp3'
    ), saved_args.chunk_samples)

model = Model(saved_args, True)
sess = tf.InteractiveSession()
saver = tf.train.Saver(tf.all_variables())

ckpt = tf.train.get_checkpoint_state('save')
print "loading model: ", ckpt.model_checkpoint_path

saver.restore(sess, ckpt.model_checkpoint_path)
n = np.random.randint(data.shape[0] - 100)
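# Seed the sampler with a random 100-frame window from the source audio.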
sample_data, mus, sigmas, pis = model.sample(sess,
                                             saved_args,
                                             start=data[n:n + 100, :])
sample_data[:, :saved_args.chunk_samples] = \
    sample_data[:, :saved_args.chunk_samples] * stds + means
data[:, :saved_args.chunk_samples] = \
    data[:, :saved_args.chunk_samples] * stds + means
sample_trace = util.write_data(np.minimum(sample_data, 1.1), fname="out.wav")
util.write_data(data[500:1700, :], fname="out_ref.wav")
Example #18
def get_fsi_index_dictionary(year):

    iso_names = [
        'Bolivia, Plurinational State of',
        'Cabo Verde',
        'Congo, Democratic Republic of the',
        'Congo',
        'Côte d\'Ivoire',
        #'Czechia',
        'Guinea-Bissau',
        'Iran, Islamic Republic of',
        'Israel',
        'Kyrgyzstan',
        "Lao People's Democratic Republic",
        'Macedonia, the former Yugoslav Republic of',
        'Micronesia, Federated States of',
        'Moldova, Republic of',
        "Korea, Democratic People's Republic of",
        'Russian Federation',
        'Slovakia',
        'Korea, Republic of',
        #'Eswatini',
        'Syrian Arab Republic',
        'Tanzania, United Republic of',
        'United Kingdom of Great Britain and Northern Ireland',
        'United States of America',
        'Venezuela, Bolivarian Republic of',
        'Viet Nam'
    ]
    # TODO: for years before 2019, add 'Czechia' to iso_names and
    # 'Czech Republic' to countries_to_rename ('Czechia' <- 'Czech Republic'),
    # and likewise 'Eswatini' <- 'Swaziland'.
    countries_to_rename = [
        'Bolivia',
        'Cape Verde',
        'Congo Democratic Republic',
        'Congo Republic',
        "Cote d'Ivoire",
        #'Czech Republic'
        'Guinea Bissau',
        'Iran',
        'Israel and West Bank',
        'Kyrgyz Republic',
        'Laos',
        'Macedonia',
        'Micronesia',
        'Moldova',
        'North Korea',
        'Russia',
        'Slovak Republic',
        'South Korea',
        #'Swaziland',
        'Syria',
        'Tanzania',
        'United Kingdom',
        'United States',
        'Venezuela',
        'Vietnam'
    ]

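    # Fetch this year's FSI spreadsheet, falling back to the previous-index
    # URL on a 404.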
    response = get_response(last_available_index_url.format(year, year),
                            ignore_404=True)
    if response is not None and response.status_code == 404:
        response = get_response(previous_index_url.format(year, year))
    if not response:
        return None
    fsi_data = response.content
    data_file = write_data(to_file.format(year), fsi_data)

    df = pd.read_excel(data_file)
    fsi_dictionary = dict()
    for index, row in df.iterrows():
        fsi_dictionary[row.Country] = [row.Year.year, round(row.Total, 2)]

    fsi_dictionary = rename_country_to_iso_name(countries_to_rename, iso_names,
                                                fsi_dictionary)
    return fsi_dictionary