# This is a script that you should run on Discovery,
# as part of a Slurm Array job, with 100 jobs.
import numpy as np
import os
from model import max_vehicle_power
big_parameter_list = np.loadtxt("parameter_values.txt")
with open("parameter_values.txt") as parameter_file:
    header = parameter_file.readline()
header = header.strip('#\n ')  # remove the '#', space, and newline from the start and end
column_names = header.split()

job_number = int(os.getenv('SLURM_ARRAY_TASK_ID', default='0'))
assert 0 <= job_number < 100, "Job number should run from 0 to 99"
for i in range(80):
    parameter_number = (80 * job_number) + i
    parameters = big_parameter_list[parameter_number]
    arguments_dictionary = {key: value for key, value in zip(column_names, parameters)}
    result = max_vehicle_power(**arguments_dictionary)
    print('{} {}'.format(parameter_number, result))
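
# A quick sanity check you could run locally (a sketch, not part of the Discovery
# workflow itself): with 100 array tasks handling 80 parameter sets each, the
# index arithmetic above covers row indices 0-7999 exactly once, so the script
# assumes parameter_values.txt has (at least) 8000 rows.
rows_covered = {80 * task + i for task in range(100) for i in range(80)}
assert rows_covered == set(range(100 * 80)), "tasks should cover every row exactly once"
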
# First, clear the results file so the array jobs can append to it:
with open("results.txt", 'w') as result_file:  # 'w' is write mode, and will clear the file.
    result_file.write('')

# Now to run this on Discovery, make a script file that looks like the following cell.
# We don't know what order the jobs will complete in, so we record the job number in the output file as well as the result.

# In[8]:

import numpy as np
import os
from model import max_vehicle_power
big_parameter_list = np.loadtxt("parameter_values.txt")
job_number = int(os.getenv('SLURM_ARRAY_TASK_ID', default='0'))
parameters = big_parameter_list[job_number]
parameters
result = max_vehicle_power(*parameters)
with open("results.txt",
          'a') as result_file:  # 'a' is append mode, and will add to the file.
    result_file.write('{} {}\n'.format(job_number,
                                       result))  # the '\n' is a new line

# Run it as an Array job to fill the `results.txt` file with results.
# This is how many jobs you will need:

# In[9]:

len(sample)
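
# A minimal sketch of the matching submission command (assuming `sample` is the
# array of parameter sets written to parameter_values.txt earlier in the notebook,
# and `run_job.sh` is a hypothetical name for your submission script): one Slurm
# array task per row, with task IDs 0 through len(sample) - 1.
print("sbatch --array=0-{} run_job.sh".format(len(sample) - 1))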

# Then come back here to load the results and continue the sensitivity analysis.
# Because the results file may not be in order, but each line starts with the job number, we need a little manipulation to get the `output` array into the order the analysis expects; one way to do this is sketched below.
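
# A minimal sketch of that manipulation (assuming every array task finished and
# wrote one "job_number result" line to results.txt): sort the rows by job
# number, then keep only the result column so `output` lines up with the rows
# of the original sample.
raw_results = np.loadtxt("results.txt")                  # columns: job_number, result
raw_results = raw_results[raw_results[:, 0].argsort()]   # sort rows by job number
output = raw_results[:, 1]                               # results in sample order
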
def plot_power(connector, battery_size, distance_driven, range_buffer,
               dispatch_time):
    power = max_vehicle_power(connector, battery_size, distance_driven,
                              range_buffer, dispatch_time)
    return print("The maximum power is {} kW".format(round(power, 2)))

def monte_carlo_large(data):
    # Map one row of sampled data onto max_vehicle_power's positional arguments;
    # note that the last three columns are passed out of order (indices 6, 4, 5).
    y = max_vehicle_power(data[0], data[1], data[2], data[3],
                          data[6], data[4], data[5])
    return y