def name():
    """The detected processor name.

    Example:
        >>> name()
        'Intel Core(TM) i7-7700 CPU'

    Returns:
        String: Processor name.
    """
    try:
        # Prefer /proc/cpuinfo when the kernel exposes it.
        if os.path.exists("/proc/cpuinfo"):
            matches = grep.file("/proc/cpuinfo", r"^model name\s*:\s*")
            logging.debug("/proc/cpuinfo model name: %s", str(matches))
            if matches:
                # Drop the "model name :" prefix (first three tokens).
                return " ".join(matches[0].strip().split()[3:])
        # Fall back to lscpu when available.
        if not shutil.which("lscpu"):
            return None
        listing = execute.output("lscpu")
        matches = grep.text(listing, "Model name:")
        logging.debug("lscpu model name: %s", str(matches))
        if not matches:
            return None
        model = " ".join(matches[0].strip().split()[2:])
        if model:
            return model
        # Some lscpu variants report the model under a "CPU:" field instead.
        matches = grep.text(listing, "CPU:")
        return " ".join(matches[0].strip().split()[1:])
    except IOError as err:
        logging.error(err)
    except ValueError as err:
        logging.error(err)
    except TypeError as err:
        logging.error(err)
def __image_built(self, name, env=None):
    """Check if the named image is built.

    Notes:
        * Requires Docker daemon (`dockerd`) to be running.

    Args:
        name (str): The name of the image.
        env (dict): The shell environment exports.

    Returns:
        Boolean: True if image found otherwise False.
    """
    if env is None:
        # Default to the current environment with the Docker dir prepended.
        env = os.environ.copy()
        env["PATH"] = "{}:{}".format(self.docker_dir, env["PATH"])
    logging.debug("Checking if Docker image is built.")
    listing = execute.output("docker images",
                             working_dir=self.docker_dir,
                             environment=env)
    return bool(grep.text(listing, name))
def flags():
    """The detected `-march=|-mcpu=` and `-mtune=` flags for GCC.

    Example:
        >>> flags()
        '-march=native -mtune=native'

    Returns:
        A string combining the march and mtune flags.
            march (str): The complete flag for `-march=` or `-mcpu=`.
            mtune (str): The complete flag for `-mtune=`.
    """
    try:
        # POWER/PowerPC targets use -mcpu= where x86 uses -march=.
        march_mcpu = "march"
        cpu_name = processor.name().lower()
        if "power" in cpu_name or "ppc" in cpu_name:
            march_mcpu = "mcpu"

        march_output = execute.output(
            "gcc -{}=native -Q --help=target".format(march_mcpu))
        march_lines = grep.text(march_output, "-{}=".format(march_mcpu))
        # Guard the grep result before indexing: an empty match previously
        # raised an uncaught IndexError on march_lines[0].
        march_flag = ""
        if march_lines:
            words = march_lines[0].rstrip().split()
            if len(words) > 1:
                march_flag = words[1].strip()
        # Fall back to "native" when GCC does not resolve a concrete target.
        if "native" in march_flag or not march_flag:
            march_flag = "native"

        mtune_output = execute.output(
            "gcc -{}={} -mtune=native -Q --help=target".format(
                march_mcpu, march_flag))
        mtune_lines = grep.text(mtune_output, "-mtune=")
        mtune_flag = ""
        if mtune_lines:
            words = mtune_lines[0].rstrip().split()
            if len(words) > 1:
                mtune_flag = words[1].strip()
        if "native" in mtune_flag or not mtune_flag:
            mtune_flag = "native"

        march = "-{}={}".format(march_mcpu, march_flag)
        mtune = "-mtune=" + mtune_flag
        return march + " " + mtune
    except IOError as err:
        logging.error(err)
    except ValueError as err:
        logging.error(err)
    except TypeError as err:
        logging.error(err)
def frequency():
    """The detected memory frequency.

    Example:
        >>> frequency()
        2666

    Returns:
        Integer: The memory frequency in MHz, or None if it cannot be
        detected.
    """
    dimm_freq = None
    freq = None
    try:
        if shutil.which("dmidecode"):
            output = None
            dmidecode_output = execute.output("dmidecode -t memory")
            outputs = grep.text(dmidecode_output, r"^\s*Speed:")
            # Use the first DIMM slot that reports a known speed.
            for dimm in outputs:
                if "Unknown" in dimm:
                    continue
                output = dimm
                break
            if output:
                dimm_freq = output.strip().split()[1]
        if not dimm_freq and shutil.which("lshw"):
            lshw_output = execute.output("lshw -short -C memory")
            dimms = grep.text(lshw_output, "DIMM")
            if dimms:
                dimm_freq = dimms[0].strip().split()[6]
        # Guard against dimm_freq being None (no tool or no DIMM info found);
        # the unguarded membership test previously raised TypeError, so the
        # "not found" path returned via a logged exception instead of cleanly.
        if dimm_freq and "." in dimm_freq:
            freq = int(float(dimm_freq))
        elif dimm_freq and dimm_freq.isdigit():
            freq = int(dimm_freq)
        return freq
    except IOError as err:
        logging.error(err)
    except ValueError as err:
        logging.error(err)
    except TypeError as err:
        logging.error(err)
def frequency():
    """The detected processor frequency.

    Example:
        >>> frequency()
        3200

    Returns:
        Integer: The processor frequency in MHz, or None if it cannot be
        detected.
    """
    try:
        mhz_freq = None
        freq = None
        # Detection sources in preference order: dmidecode, lscpu, cpuinfo.
        if shutil.which("dmidecode"):
            dmidecode_output = execute.output("dmidecode -t processor")
            dmidecode_output = grep.text(dmidecode_output, "Max Speed")
            if dmidecode_output:
                mhz_freq = dmidecode_output[0].strip().split()[2]
        elif shutil.which("lscpu"):
            lscpu_output = execute.output("lscpu")
            lscpu_output = grep.text(lscpu_output, "CPU max MHz:")
            if lscpu_output:
                mhz_freq = lscpu_output[0].strip().split()[3]
        elif os.path.isfile("/proc/cpuinfo"):
            cpuinfo_output = grep.file("/proc/cpuinfo", "cpu MHz")
            if cpuinfo_output:
                mhz_freq = cpuinfo_output[0].strip().split()[3]
        # Guard against mhz_freq being None (no source matched); the
        # unguarded membership test previously raised a logged TypeError.
        if mhz_freq and "." in mhz_freq:
            freq = int(float(mhz_freq))
        elif mhz_freq and mhz_freq.isdigit():
            freq = int(mhz_freq)
        return freq
    except IOError as err:
        logging.error(err)
    except ValueError as err:
        logging.error(err)
    except TypeError as err:
        logging.error(err)
def architecture():
    """The system architecture.

    Example:
        >>> architecture()
        (64, 'x86_64')

    Returns:
        A tuple of (bits, type).
            bits (int): The architecture bit version (32 or 64).
            type (str): The machine type.
    """
    try:
        # (?i) makes the label strip work on lscpu's "Architecture:" output;
        # the previous lowercase-only pattern never matched it.
        label_pattern = r"(?i)architecture:\s*"
        x86_pattern = r"x86_"       # was r"/x86_": stray slash never matched
        i386_pattern = r"i[3-6]86"
        machine_type = os.uname().machine  # pylint: disable=E1101

        # Initialize so the no-lscpu path cannot hit an unbound name.
        arch = ""
        if shutil.which("lscpu"):
            lscpu_output = execute.output("lscpu")
            found = grep.text(lscpu_output, "Architecture:")
            if found:
                arch = re.sub(label_pattern, "", found[0]).strip()
                arch = re.sub(x86_pattern, "", arch)
                arch = re.sub(i386_pattern, "32", arch)
        if not arch:
            arch = re.sub(x86_pattern, "", machine_type)
            arch = re.sub(i386_pattern, "32", arch)
        arch = arch.lower()
        if "arm" in arch:
            # Keep only the digits: "armv7l" -> "7" (int("7l") used to raise).
            arm_ver = re.sub(r"\D", "", arch)
            bits = 64 if arm_ver and int(arm_ver) >= 8 else 32
            return bits, machine_type
        if "64" in arch:
            arch = "64"
        return int(arch), machine_type
    except IOError as err:
        logging.error(err)
    except ValueError as err:
        logging.error(err)
    except TypeError as err:
        logging.error(err)
def run(self):
    """Run Intel MLC once and parse its idle-latency matrix.

    Loads the `msr` kernel module, runs `mlc_avx512 --latency_matrix`,
    saves the raw output to the results directory, and extracts the
    latency row for NUMA node 0.

    Returns:
        If success:
            A dict containing (unit, latencies).

            unit (str): Latency units ("ns").
            latencies (list): Idle latency (float) from NUMA node 0 to
                each NUMA node, taken from the matrix row starting with 0.
        If error:
            A dict containing (error).

            error (str): Error message.
    """
    bin_loc = self.mlc_dir + "/Linux/mlc_avx512"
    # modprobe msr: MLC reads model-specific registers.
    cmd = "modprobe msr; {} --latency_matrix".format(bin_loc)
    results = {"unit": "ns"}
    if not os.path.isfile(bin_loc):
        text = 'Cannot run MLC because "{}" could not be found.'.format(
            bin_loc)
        prettify.error_message(text)
        return {"error": text}
    os.makedirs(self.results_dir, exist_ok=True)
    self.commands.append("Run: " + cmd)
    output = execute.output(cmd, self.mlc_dir)
    file.write(self.results_dir + "/mlc_output.txt", output)
    # Matrix rows start with the source node number; grab node 0's row.
    found_lines = grep.text(output, r"^\s*0")
    if found_lines:
        node_latencies = found_lines[0].strip().split()
        node_latencies.pop(0)  # Remove leading '0' for first node
        for index, latency in enumerate(node_latencies):
            node_latencies[index] = float(latency)
        results["latencies"] = node_latencies
    logging.info("MLC results: %s", str(results))
    return results
def topology():
    """The processor topology.

    Examples:
        >>> topology()
        (4, 8, 1)

    Returns:
        A tuple of (cores, threads, sockets).
            cores (int): The number of physical processors.
            threads (int): The number of logical processors.
            sockets (int): The number of processor sockets.

    Raises:
        Exception: If sockets, cores per processor, or threads per core
            cannot be determined.
    """
    cores = None
    sockets = None
    threads_per_core = None
    threads = None
    cores_per_processor = None
    try:
        # Primary source: lscpu.
        if shutil.which("lscpu"):
            lscpu_output = execute.output("lscpu")
            sockets = grep.text(lscpu_output, "Socket")
            sockets = re.sub(r"Socket\(s\):\s*", "", sockets[0])
            sockets = int(sockets.strip())
            threads_per_core = grep.text(lscpu_output,
                                         r"Thread\(s\) per core:")
            threads_per_core = re.sub(r"Thread\(s\) per core:\s*", "",
                                      threads_per_core[0])
            threads_per_core = int(threads_per_core.strip())
            cores_per_processor = grep.text(lscpu_output,
                                            r"Core\(s\) per socket:")
            cores_per_processor = re.sub(r"Core\(s\) per socket:\s*", "",
                                         cores_per_processor[0])
            cores_per_processor = int(cores_per_processor.strip())
        # Secondary source: dmidecode processor records.
        if not sockets and shutil.which("dmidecode"):
            dmidecode_output = execute.output("dmidecode -t 4")
            sockets = len(grep.text(dmidecode_output, "Socket Designation"))
            total_threads = grep.text(dmidecode_output, r"Thread Count\:")
            total_threads = re.sub(r"Thread Count:", "", total_threads[0])
            total_threads = total_threads.strip().split()[0]
            total_threads = int(total_threads)
            cores_per_processor = grep.text(dmidecode_output,
                                            r"Core Count\:")
            cores_per_processor = re.sub(r"Core Count:\s*", "",
                                         cores_per_processor[0])
            cores_per_processor = cores_per_processor.strip().split()[0]
            cores_per_processor = int(cores_per_processor)
            # Integer division: true division returned a float here, so the
            # function returned floats despite the documented int contract.
            threads_per_core = total_threads // cores_per_processor
        # Last resort: sysfs + nproc/cpuinfo/getconf.
        if not sockets:
            thread_siblings = (file.read(
                "/sys/devices/system/cpu/cpu1/topology/thread_siblings_list").
                               strip().split(","))
            threads_per_core = 1
            if shutil.which("nproc"):
                total_threads = execute.output("nproc --all")
            elif os.path.isfile("/proc/cpuinfo"):
                total_threads = len(grep.file("/proc/cpuinfo", r"^processor"))
            else:
                total_threads = execute.output("getconf _NPROCESSORS_ONLN")
            total_threads = int(total_threads)
            # More than one sibling listed means SMT is enabled.
            if len(thread_siblings) > 1:
                threads_per_core = 2
            if total_threads:
                cores_per_processor = total_threads // threads_per_core
        if not sockets:
            raise Exception("The number of sockets was not found.")
        if not cores_per_processor:
            raise Exception("The number of cores per processor was not "
                            "found.")
        if not threads_per_core:
            raise Exception("The number of threads per core was not found.")
        cores = sockets * cores_per_processor
        threads = threads_per_core * cores
        return cores, threads, sockets
    except IOError as err:
        logging.error(err)
    except ValueError as err:
        logging.error(err)
    except TypeError as err:
        logging.error(err)
def cache():
    """Processor cache information.

    Example:
        >>> cache()
        (32768, 65536, 524288, 8388608)
        >>> # With no L3 cache
        >>> cache()
        (32768, 32768, 524288, None)

    Returns:
        A tuple of (level_one_instruction, level_one_data, level_two,
        level_three).
            level_one_instruction (int): L1 instruction cache in B.
            level_one_data (int): L1 data cache in B.
            level_two (int): L2 cache in B.
            level_three (int): L3 cache in B, or None if absent.
    """
    try:
        cache_loc = "/sys/devices/system/cpu/cpu0/cache"
        level_one_data_file = cache_loc + "/index0/size"
        level_one_instruction_file = cache_loc + "/index1/size"
        level_two_file = cache_loc + "/index2/size"
        level_three_file = cache_loc + "/index3/size"
        level_one_data = None
        level_one_instruction = None
        level_two = None
        level_three = None
        # Primary source: lscpu's "L1d/L1i/L2/L3 cache" lines.
        if shutil.which("lscpu"):
            lscpu_output = execute.output("lscpu")
            level_one_data_line = grep.text(lscpu_output, "L1d cache")
            if level_one_data_line:
                level_one_data = level_one_data_line[0].rstrip().split()[2]
            level_one_ins_line = grep.text(lscpu_output, "L1i cache")
            if level_one_ins_line:
                level_one_instruction = level_one_ins_line[0].rstrip()
                level_one_instruction = level_one_instruction.split()[2]
            level_two_line = grep.text(lscpu_output, "L2 cache")
            if level_two_line:
                level_two = level_two_line[0].rstrip().split()[2]
            level_three_line = grep.text(lscpu_output, "L3 cache")
            if level_three_line:
                level_three = level_three_line[0].rstrip().split()[2]
        # Fallback: read sizes straight from sysfs when lscpu gave no L1d.
        # (The redundant second isfile() check on the same path is removed.)
        if not level_one_data and os.path.isfile(level_one_data_file):
            level_one_data = file.read(level_one_data_file)
            if os.path.isfile(level_one_instruction_file):
                level_one_instruction = file.read(level_one_instruction_file)
            if os.path.isfile(level_two_file):
                level_two = file.read(level_two_file)
            if os.path.isfile(level_three_file):
                level_three = file.read(level_three_file)
        # Normalize "32K"-style strings to bytes (None passes through).
        level_one_data = __cache_size_convert(level_one_data)
        level_one_instruction = __cache_size_convert(level_one_instruction)
        level_two = __cache_size_convert(level_two)
        level_three = __cache_size_convert(level_three)
        return (level_one_instruction, level_one_data, level_two,
                level_three)
    except IOError as err:
        logging.error(err)
    except ValueError as err:
        logging.error(err)
    except TypeError as err:
        logging.error(err)
def run(self, threads):
    """Run YCSB with MySQL three times.

    Starts a local mysqld, runs YCSB workload A three times, parses
    throughput and 95th-percentile latencies from each run, then stops
    the server and aggregates statistics.

    Args:
        threads (int): The number of threads on the system.

    Returns:
        On success, a dict with keys (unit, run1..run3, average, median,
        variance, range); each runN holds throughput, read_latency, and
        update_latency. On failure, a dict containing (error).
    """
    shell_env = os.environ.copy()
    maven_dir = self.src_dir + "/maven"
    error = False
    results = {"unit": {"throughput": "ops/sec", "latency": "us"}}
    # YCSB's JDBC binding is built with Maven; required to run.
    if os.path.isdir(maven_dir):
        shell_env["M2_HOME"] = maven_dir
        shell_env["M2"] = maven_dir + "/bin"
    else:
        return {"error": "Maven not found."}
    mysql_dir = self.src_dir + "/mysql"
    mysql_data = mysql_dir + "/mysql-files"
    # The "ycsb" table must have been loaded by a prior setup step.
    if not os.path.exists(mysql_data + "/ycsb"):
        text = 'Unable to find "ycsb" table in MySQL.'
        prettify.error_message(text)
        return {"error": text}
    os.makedirs(self.results_dir, exist_ok=True)
    # Start MySQL service
    subprocess.Popen(
        "{0}/bin/mysqld_safe --user=root --basedir={0} --datadir={1} "
        "--plugin-dir={0}/lib/plugin --pid-file=/tmp/mysql.pid "
        "--log-error=ycsb.err &".format(mysql_dir, mysql_data),
        cwd=mysql_dir,
        shell=True,
        env=shell_env,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    time.sleep(20)  # give mysqld time to come up
    read_latency_results = []
    update_latency_results = []
    throughput_results = []
    run_cmd = ("./bin/ycsb run jdbc -s -P workloads/workloada -p "
               "db.driver=com.mysql.jdbc.Driver -p "
               "db.url=jdbc:mysql://localhost:3306/ycsb?useSSL=false -p "
               'db.user=root -p db.passwd="" -threads {} -p '
               "operationcount=1000000".format(threads))
    self.commands.append("Run: " + run_cmd)
    for count in range(1, 4):
        run_num = "run" + str(count)
        result_file = "{}/ycsb-sql_{}.txt".format(self.results_dir,
                                                  run_num)
        optimize.prerun()
        time.sleep(10)
        # Run YCSB
        output = execute.output(run_cmd, working_dir=self.ycsb_dir,
                                environment=shell_env)
        file.write(result_file, output)
        if "UPDATE-FAILED" in output or "READ-FAILED" in output:
            error = True
            break
        # YCSB emits CSV-ish "[SECTION], metric, value" lines; take the
        # last occurrence and parse the third field.
        throughput_line = grep.text(output,
                                    r"\[OVERALL\], Throughput\(ops/sec\),")
        if throughput_line:
            throughput = float(throughput_line[-1].split(",")[2].strip())
            throughput_results.append(throughput)
        readlat_line = grep.text(output,
                                 r"\[READ\], 95thPercentileLatency\(us\),")
        if readlat_line:
            readlat = float(readlat_line[-1].split(",")[2].strip())
            read_latency_results.append(readlat)
        updatelat_line = grep.text(
            output, r"\[UPDATE\], 95thPercentileLatency\(us\),")
        if updatelat_line:
            updatelat = float(updatelat_line[-1].split(",")[2].strip())
            update_latency_results.append(updatelat)
        if throughput_line and readlat_line and updatelat_line:
            results[run_num] = {
                "throughput": throughput,
                "read_latency": readlat,
                "update_latency": updatelat,
            }
    # Stop MySQL service
    if os.path.exists("/tmp/mysql.pid"):
        pid = file.read("/tmp/mysql.pid").strip()
        # kill is issued repeatedly; presumably to defeat mysqld_safe's
        # auto-restart — TODO confirm.
        execute.kill(pid)
        execute.kill(pid)
        execute.kill(pid)
    if error:
        return {"error": "YCSB failed to update and/or read database."}
    if "run1" in results:
        results["average"] = {}
        results["median"] = {}
        results["variance"] = {}
        results["range"] = {}
        results["average"]["throughput"] = statistics.mean(
            throughput_results)
        results["median"]["throughput"] = statistics.median(
            throughput_results)
        results["variance"]["throughput"] = statistics.variance(
            throughput_results)
        sorted_throughput = sorted(throughput_results)
        results["range"]["throughput"] = (sorted_throughput[-1] -
                                          sorted_throughput[0])
        results["average"]["read_latency"] = statistics.mean(
            read_latency_results)
        results["median"]["read_latency"] = statistics.median(
            read_latency_results)
        results["variance"]["read_latency"] = statistics.variance(
            read_latency_results)
        sorted_read_latency = sorted(read_latency_results)
        results["range"]["read_latency"] = (sorted_read_latency[-1] -
                                            sorted_read_latency[0])
        results["average"]["update_latency"] = statistics.mean(
            update_latency_results)
        results["median"]["update_latency"] = statistics.median(
            update_latency_results)
        results["variance"]["update_latency"] = statistics.variance(
            update_latency_results)
        sorted_update_latency = sorted(update_latency_results)
        results["range"]["update_latency"] = (sorted_update_latency[-1] -
                                              sorted_update_latency[0])
    logging.info("YCSB MySQL results: %s", str(results))
    return results
def run(self, threads):
    """Run YCSB with Cassandra three times.

    Starts a local Cassandra service, runs YCSB workload A three times,
    parses throughput and 95th-percentile latencies from each run, then
    stops the service and aggregates statistics.

    Args:
        threads (int): The number of threads on the system.

    Returns:
        On success, a dict with keys (unit, run1..run3, average, median,
        variance, range). On failure, a dict containing (error), or
        False when Maven is missing.
    """
    pid = None
    shell_env = os.environ.copy()
    maven_dir = self.src_dir + "/maven"
    error = False
    results = {"unit": {"throughput": "ops/sec", "latency": "us"}}
    if os.path.isdir(maven_dir):
        shell_env["M2_HOME"] = maven_dir
        shell_env["M2"] = maven_dir + "/bin"
    else:
        prettify.error_message("Maven could not be found.")
        # NOTE(review): every other error path returns {"error": ...};
        # False is kept for backward compatibility but is inconsistent
        # with the MySQL variant of this method.
        return False
    cassandra_dir = self.src_dir + "/cassandra"
    # The "ycsb" table must have been loaded by a prior setup step.
    if not os.path.exists(cassandra_dir + "/data/data/ycsb"):
        text = 'Unable to find "ycsb" table in Cassandra.'
        prettify.error_message(text)
        return {"error": text}
    read_latency_results = []
    update_latency_results = []
    throughput_results = []
    os.makedirs(self.results_dir, exist_ok=True)
    # Start Cassandra service
    subprocess.Popen(
        "./bin/cassandra -R -p /tmp/cassandra.pid &",
        shell=True,
        cwd=cassandra_dir,
        env=shell_env,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    time.sleep(20)  # give Cassandra time to come up
    if os.path.isfile("/tmp/cassandra.pid"):
        pid = file.read("/tmp/cassandra.pid").strip()
    # Bug fix: os.path.dirname("/proc/" + pid) always returned the truthy
    # string "/proc", so a dead Cassandra process was never detected.
    # os.path.isdir actually verifies that /proc/<pid> exists.
    if not pid or not os.path.isdir("/proc/" + pid):
        text = "Cassandra failed to start."
        prettify.error_message(text)
        return {"error": text}
    run_cmd = ("./bin/ycsb run cassandra-cql -s -P workloads/workloada "
               '-p hosts="localhost" -threads {} '
               "-p operationcount=10000000".format(threads))
    self.commands.append("Run: " + run_cmd)
    for count in range(1, 4):
        run_num = "run" + str(count)
        result_file = "{}/ycsb-nosql_{}.txt".format(self.results_dir,
                                                    run_num)
        optimize.prerun()
        time.sleep(10)
        output = execute.output(run_cmd, working_dir=self.ycsb_dir,
                                environment=shell_env)
        file.write(result_file, output)
        if "UPDATE-FAILED" in output or "READ-FAILED" in output:
            error = True
            break
        # Take the last "[SECTION], metric, value" line and parse the
        # third comma-separated field.
        throughput_line = grep.text(output,
                                    r"\[OVERALL\], Throughput\(ops/sec\),")
        if throughput_line:
            throughput = float(throughput_line[-1].split(",")[2].strip())
            throughput_results.append(throughput)
        readlat_line = grep.text(output,
                                 r"\[READ\], 95thPercentileLatency\(us\),")
        if readlat_line:
            readlat = float(readlat_line[-1].split(",")[2].strip())
            read_latency_results.append(readlat)
        updatelat_line = grep.text(
            output, r"\[UPDATE\], 95thPercentileLatency\(us\),")
        if updatelat_line:
            updatelat = float(updatelat_line[-1].split(",")[2].strip())
            update_latency_results.append(updatelat)
        if throughput_line and readlat_line and updatelat_line:
            results[run_num] = {
                "throughput": throughput,
                "read_latency": readlat,
                "update_latency": updatelat,
            }
    # Stop Cassandra service
    if pid:
        execute.kill(pid)
        execute.kill(pid)
        execute.kill(pid)
    if error:
        return {"error": "YCSB failed to update and/or read database."}
    if "run1" in results:
        results["average"] = {}
        results["median"] = {}
        results["variance"] = {}
        results["range"] = {}
        results["average"]["throughput"] = statistics.mean(
            throughput_results)
        results["median"]["throughput"] = statistics.median(
            throughput_results)
        results["variance"]["throughput"] = statistics.variance(
            throughput_results)
        sorted_throughput = sorted(throughput_results)
        results["range"]["throughput"] = (sorted_throughput[-1] -
                                          sorted_throughput[0])
        results["average"]["read_latency"] = statistics.mean(
            read_latency_results)
        results["median"]["read_latency"] = statistics.median(
            read_latency_results)
        results["variance"]["read_latency"] = statistics.variance(
            read_latency_results)
        sorted_read_latency = sorted(read_latency_results)
        results["range"]["read_latency"] = (sorted_read_latency[-1] -
                                            sorted_read_latency[0])
        results["average"]["update_latency"] = statistics.mean(
            update_latency_results)
        results["median"]["update_latency"] = statistics.median(
            update_latency_results)
        results["variance"]["update_latency"] = statistics.variance(
            update_latency_results)
        sorted_update_latency = sorted(update_latency_results)
        results["range"]["update_latency"] = (sorted_update_latency[-1] -
                                              sorted_update_latency[0])
    logging.info("YCSB Cassandra results: %s", str(results))
    return results
def run(self, threads):
    """Run GCC compiled STREAM three times.

    Args:
        threads (int): The total number of threads on the system.

    Returns:
        If success, a dict containing (unit, run1, run2, run3, average,
        median, variance, range).

            unit (str): Score units (MB/s).
            run1 (float): Triad score for the first run.
            run2 (float): Triad score for the second run.
            run3 (float): Triad score for the third run.
            average (float): Average of run1, run2, and run3.
            median (float): Median of run1, run2, and run3.
        Else, a dict containing (error).

            error (str): Error message.
    """
    stream_bin = self.stream_dir + "/stream"
    shell_env = os.environ.copy()
    shell_env["OMP_NUM_THREADS"] = str(threads)
    mpi_root = self.src_dir + "/openmpi/build"
    mpi_path = mpi_root + "/bin"
    mpi_lib = mpi_root + "/lib"
    shell_env["PATH"] += ":" + mpi_path
    if "LD_LIBRARY_PATH" in shell_env:
        # Bug fix: the ":" separator was missing, which concatenated
        # mpi_lib onto the last existing entry and corrupted the path.
        shell_env["LD_LIBRARY_PATH"] += ":" + mpi_lib
    else:
        shell_env["LD_LIBRARY_PATH"] = mpi_lib
    results = {"unit": "MB/s"}
    # Pin OMP threads to cores for stable bandwidth numbers.
    shell_env["OMP_PROC_BIND"] = "true"
    if not os.path.isfile(stream_bin):
        text = 'Cannot run STREAM because "{}" could not be found.'.format(
            stream_bin)
        prettify.error_message(text)
        return {"error": text}
    logging.info("Running STREAM with %d OMP threads.", threads)
    os.makedirs(self.results_dir, exist_ok=True)
    tmp_results = []
    cmd = "./stream"
    self.commands.append("Run: OMP_NUM_THREADS = " + str(threads))
    self.commands.append("Run: OMP_PROC_BIND = true")
    self.commands.append("Run: " + cmd)
    for count in range(1, 4):
        run_num = "run" + str(count)
        result_file = "{}/stream_{}.txt".format(self.results_dir, run_num)
        optimize.prerun()
        time.sleep(10)
        output = execute.output(cmd, working_dir=self.stream_dir,
                                environment=shell_env)
        file.write(result_file, output)
        result = grep.text(output, "Triad")
        result = result[0].split()[1]  # 2nd word
        result = float(result)
        results[run_num] = result
        tmp_results.append(result)
    results["average"] = statistics.mean(tmp_results)
    results["median"] = statistics.median(tmp_results)
    results["variance"] = statistics.variance(tmp_results)
    sorted_results = sorted(tmp_results)
    results["range"] = sorted_results[-1] - sorted_results[0]
    logging.info("STREAM results: %s", str(results))
    return results
def run(self, mpi_threads, threads, arch=None):
    """Run High-Performance Linpack once and record the best score.

    Args:
        mpi_threads (int): The number of MPI threads used by LINPACK.
            This number is usually the number of physical cores on the
            system.
        threads (int): The total number of logical threads on the
            system.
        arch (str, optional): The architecture type of the system.
            Defaults to "x86_64".

    Returns:
        If success, a dict containing (unit, mathlib, score).

            unit (str): Score units (GFLOPS).
            mathlib (str): The math library used for the build.
            score (float): The best GFLOPS result across the parsed
                output lines.
        Else, a dict containing (error).

            error (str): Error message.
    """
    if arch is None:
        arch = "x86_64"
    shell_env = os.environ.copy()
    openmpi_dir = "{}/openmpi/build/bin".format(self.src_dir)
    bin_dir = "{}/bin/{}".format(self.hpl_dir, arch)
    bin_loc = bin_dir + "/xhpl"
    results = {"unit": "GFLOPS", "mathlib": self.mathlib}
    tmp_results = []
    if not os.path.isfile(bin_loc):
        text = 'Could not find HPL binaries at "{}".'.format(bin_loc)
        prettify.error_message(text)
        logging.error(text)
        return {"error": text}
    if not os.path.isdir(openmpi_dir):
        text = 'Could not find OpenMPI directory at "{}".'.format(
            openmpi_dir)
        prettify.error_message(text)
        logging.error(text)
        return {"error": text}
    grid = self.__grid(mpi_threads)
    nb_size = self.__nb_size(threads)
    mpi_cmd = ("{}/mpirun -n {} --allow-run-as-root "
               "--mca mpi_paffinity_alone 1".format(openmpi_dir,
                                                    mpi_threads))
    # Without SMT there is nothing to pin affinity against.
    if threads == mpi_threads:
        mpi_cmd = "{}/mpirun -n {} --allow-run-as-root".format(
            openmpi_dir, mpi_threads)
    logging.info('Running LINPACK using "%s" arch.', arch)
    os.makedirs(self.results_dir, exist_ok=True)
    # Bug fix: shutil.copyfile() raises when the destination is a
    # directory; shutil.copy() accepts one and keeps the file name.
    shutil.copy(self.hpl_dir + "/Make." + arch, self.results_dir)
    shutil.copy(self.hpl_dir + "/bin/{}/HPL.dat".format(arch),
                self.results_dir)
    cmd = mpi_cmd + " ./xhpl"
    self.commands.append("Run: " + cmd)
    optimize.prerun()
    time.sleep(10)
    output = execute.output(cmd, working_dir=bin_dir,
                            environment=shell_env)
    file.write(self.results_dir + "/linpack_output.txt", output)
    result = grep.text(
        output, r"\s+{}\s+{}\s+{}\s+".format(nb_size, grid.P, grid.Q))
    for line in result:
        # 7th word
        tmp = float(line.split()[6])
        tmp_results.append(tmp)
    if tmp_results:
        sorted_results = sorted(tmp_results)
        results["score"] = sorted_results[-1]
    logging.info("LINPACK results: %s", str(results))
    return results