Example #1
def build_slicc(host):
    log_message("Building slicc")
    # build slicc
    os.chdir("slicc")
    output = tools.run_command("make clean")
    output = tools.run_command("make -j 4", max_lines=50)
    os.chdir("..")
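The examples on this page call a `run_command` helper (often `tools.run_command`) whose definition is not shown. Purely as a rough sketch of its assumed shape, not the actual implementation: a helper that takes a shell command string, captures the output, and (as one plausible reading of `max_lines`) truncates it might look like this:

import subprocess

def run_command(command, max_lines=None):
    # Hypothetical stand-in for tools.run_command: run a shell command,
    # capture stdout and stderr together, and return the combined text.
    proc = subprocess.run(command, shell=True, stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT, text=True)
    output = proc.stdout
    if max_lines is not None:
        # assumed semantics: keep only the first max_lines lines of output
        output = "\n".join(output.splitlines()[:max_lines])
    return output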
Example #2
def build_opal_smt(smt_test, protocol):
    name = smt_test["name"]
    protocol_name = protocol["name"]
    log_message("Building SMT test: %s" % (name))
    # build the opal tester/module
    os.chdir("opal")

    # before making any modifications, backup all files that will be modified
    for (filename_exp, re_to_replace,
         replacement) in smt_test.get("modification", []):
        for filename in glob.glob(filename_exp):
            if os.access(filename, os.W_OK) != 1:
                output = tools.run_command("bk edit %s" % filename)
            input_file = open(filename, "r")
            file_contents = input_file.read()
            # backup the file
            backup_file = open(filename + ".bak", "w")
            backup_file.write(file_contents)
            backup_file.close()

    # perform any replacements on the code
    for (filename_exp, re_to_replace,
         replacement) in smt_test.get("modification", []):
        for filename in glob.glob(filename_exp):
            input_file = open(filename, "r")
            file_contents = input_file.read()
            # modify the file
            (new_file_contents,
             number_of_subs) = re.subn(re_to_replace, replacement,
                                       file_contents)
            if number_of_subs == 0:
                raise RegressionError(
                    "Modification specification error: no replacement performed for '%s' in '%s'"
                    % (re_to_replace, filename), "")
            output_file = open(filename, "w")
            output_file.write(new_file_contents)
            output_file.close()

    # build Opal with changed configfile
    command = "make -j 4 DESTINATION=%s" % protocol_name
    output = tools.run_command(command, max_lines=50)

    # restore any modification
    for (filename_exp, re_to_replace,
         replacement) in smt_test.get("modification", []):
        for filename in glob.glob(filename_exp):
            # read the backup file
            backup_file = open(filename + ".bak", "r")
            if not backup_file:
                raise RegressionError(
                    "Modification specification error: Where is the backup file for file %s?"
                    % filename)
            file_contents = backup_file.read()
            backup_file.close()
            # write the original file
            input_file = open(filename, "w")
            input_file.write(file_contents)
            input_file.close()

    os.chdir("..")
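The backup / regex-patch / restore sequence above reappears in `build_ruby` (Example #10 below). Purely as an illustrative sketch, not part of the original scripts, the same three steps could be wrapped once in a context manager:

import glob
import re
import shutil
from contextlib import contextmanager

@contextmanager
def patched_files(modifications):
    # Hypothetical helper mirroring the backup/patch/restore steps above.
    # modifications is a list of (glob_pattern, regex, replacement) triples.
    patched = []
    try:
        for pattern, regex, replacement in modifications:
            for filename in glob.glob(pattern):
                shutil.copyfile(filename, filename + ".bak")
                with open(filename) as f:
                    contents = f.read()
                new_contents, count = re.subn(regex, replacement, contents)
                if count == 0:
                    raise RuntimeError("no replacement performed for %r in %r"
                                       % (regex, filename))
                with open(filename, "w") as f:
                    f.write(new_contents)
                patched.append(filename)
        yield
    finally:
        # restore the originals even if the build step fails
        for filename in patched:
            shutil.copyfile(filename + ".bak", filename)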
Example #3
def opal_clean(protocol, remove_module=0):
    os.chdir("opal")
    tools.run_command("make clean")
    if remove_module:
        output = tools.run_command("make removemodule DESTINATION=%s" %
                                   protocol["name"])
    os.chdir("..")
Example #5
def clone_multifacet():

    # get the mfacet_code repository
    if not os.path.exists("BitKeeper"):
        log_message("Performing BitKeeper Clone of mfacet_code")
        output = tools.run_command("bk clone /p/multifacet/projects/xact_memory/multifacet_release .")

    # Update the code base with the bk root directory
    if 0 and os.environ.has_key("CHECKOUT") and os.environ["CHECKOUT"] == "true":
        output = tools.run_command("bk pull")

    for dir in core_directories:
        # ensure all core directories are there
        if not os.path.exists(dir):
            raise RegressionError("Error find all core code directories", output)

    # get the mfacet_simics_sup repository and export simics_source repository
    if not os.path.exists("simics/BitKeeper"):
        log_message("Performing BitKeeper Clone of mfacet_simics and BitKeeper Export of simics_source")
        output = tools.run_command("bk clone /p/multifacet/bkroot/mfacet_simics_sup simics")
        output = tools.run_command("bk export -r+ -w /p/multifacet/bkroot/simics_source simics")
    
    # Change dirs to the root of the multifacet code tree
    os.chdir("simics")

    # Update the simics base with the bk root directory
    if 0 and os.environ.has_key("CHECKOUT") and os.environ["CHECKOUT"] == "true":
        output = tools.run_command("bk pull")

    # Move back up to the top level
    os.chdir("..")
Example #7
def embedding(original_graph,deleted_edges_file,rank = 30, is_dense_matrix = False,using_GPU = True, using_svd = False, strategy = "ln"):
	from hierarchical_matrix import  build_hierarchical_matrix
	matrix,ranking_difference_file = build_hierarchical_matrix(original_graph,deleted_edges_file,is_dense_matrix = is_dense_matrix, strategy = strategy)
	from tools import dir_tail_name
	dir_name,tail = dir_tail_name(original_graph)
	if not using_GPU:
		if is_dense_matrix:
			print("matrix shape: %s" % str(matrix.shape))
			if using_svd:
				W, s, V = np.linalg.svd(matrix, full_matrices=False)
				S = np.diag(s)
				H = np.dot(S,V)
				print("(SVD) W matrix shape: %s" % str(W.shape))
				print("(SVD) H matrix shape: %s" % str(H.shape))
				return W,H
			
			# from sklearn.decomposition import NMF
			# model = NMF(n_components= rank, init='random', random_state=0)
			# W = model.fit_transform(matrix)
			# H = model.components_
			
			from matrix_factorization import run_lsnmf,run_nmf
			W,H = run_nmf(matrix,rank = rank)
			print("(NMF) W matrix shape: %s" % str(W.shape))
			print("(NMF) H matrix shape: %s" % str(H.shape))
			# print np.matmul(W,H)
			return W,H
		else:
			if using_svd:
				import scipy.sparse.linalg  # a bare "import scipy" does not load the sparse.linalg submodule
				W, s, V = scipy.sparse.linalg.svds(matrix,k = rank,)
				S = np.diag(s)
				H = np.dot(S,V)
				print("(SVDs) W matrix shape: %s" % str(W.shape))
				print("(SVDs) H matrix shape: %s" % str(H.shape))
				return W,H
			saved_matrix_file_name = os.path.join(dir_name,tail.split(".")[0]+"_HM.pkl")
			saved_WH_file_name = os.path.join(dir_name,tail.split(".")[0]+"_HM_WH.pkl")

			print("saved matrix file name: %s " % saved_matrix_file_name)
			with open(saved_matrix_file_name,"wb") as f:
				pickle.dump(matrix,f)
			command = "python libpmf-1.41/python/pmf_main.py --matrix " + saved_matrix_file_name + " --model " + saved_WH_file_name + " --rank " + str(rank)
			run_command(command)

			with open(saved_WH_file_name,"rb") as f:
				model = pickle.load(f)
				return model['W'],model['H'].T
	else:
		print("Preparing file for cumf_ccd...")
		from prepare_cumf_data import generate_cumf_input_files
		generate_cumf_input_files(ranking_difference_file)
		run_command("./cumf/cumf_ccd/ccdp_gpu -T 1 -t 100 -l 0.01 -k  " + str(rank) + " " + dir_name + " " + dir_name + "/test.ratings")
		from load_cumf_ccd_matrices import load_cumf_WH 
		W,H = load_cumf_WH(dir_name)
		return W,H
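For reference, the dense SVD branch above factors the input as W = U and H = diag(s)·Vᵀ, so the product W·H reconstructs the matrix exactly (the truncated `svds` branch only approximates it). A minimal self-contained check of that identity:

import numpy as np

M = np.random.rand(6, 4)
W, s, Vt = np.linalg.svd(M, full_matrices=False)
H = np.diag(s) @ Vt            # same construction as in the example
assert np.allclose(W @ H, M)   # W·H reproduces the original matrix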
Example #8
def timer_abort():
    global g_lockfile_path
    global g_timer
    log_error("Error: Watchdog timer expired");
    # unlock the directory
    if (g_locked == 1):
        tools.run_command("rm -f %s" % g_lockfile_path)
    send_message()
    import signal
    os.kill(os.getpid(), signal.SIGKILL)
Example #10
def build_ruby(protocol):
    name = protocol["name"]
    log_message("Building: %s" % (name))
    # build ruby tester/module
    os.chdir("ruby")
#    tools.run_command("make clean_slicc")

    # before making any modifications, backup all files that will be modified
    for (filename_exp, re_to_replace, replacement) in protocol.get("modification", []):
        for filename in glob.glob(filename_exp):
            if os.access(filename, os.W_OK) != 1:
                output = tools.run_command("bk edit %s"%filename)
            input_file = open(filename, "r")
            file_contents = input_file.read()
            # backup the file
            backup_file = open(filename+".bak", "w")
            backup_file.write(file_contents)
            backup_file.close()
    
    # perform any replacements on the code
    for (filename_exp, re_to_replace, replacement) in protocol.get("modification", []):
        for filename in glob.glob(filename_exp):
            input_file = open(filename, "r")
            file_contents = input_file.read()
            # modify the file
            (new_file_contents, number_of_subs) = re.subn(re_to_replace, replacement, file_contents)
            if number_of_subs == 0:
                raise RegressionError("Modification specification error: no replacement performed for '%s' in '%s'" % (re_to_replace, filename), "")
            output_file = open(filename, "w")
            output_file.write(new_file_contents)
            output_file.close()
        
    # build
    if protocol.has_key("no_html"):
        command = "make -j 4 PROTOCOL=%s DESTINATION=%s NO_HTML=yes" % (name, name)
    else:
        command = "make -j 4 PROTOCOL=%s DESTINATION=%s" % (name, name)
    output = tools.run_command(command, max_lines=50)
    
    # restore any modification
    for (filename_exp, re_to_replace, replacement) in protocol.get("modification", []):
        for filename in glob.glob(filename_exp):
            # read the backup file
            backup_file = open(filename+".bak", "r")
            if not backup_file:
                raise RegressionError("Modification specification error: Where is the backup file for file %s?"%filename)
            file_contents = backup_file.read()
            backup_file.close()
            # write the original file
            input_file = open(filename, "w")
            input_file.write(file_contents)
            input_file.close()
    
    os.chdir("..")
Example #12
def checkout_multifacet():
    log_message("Performing CVS checkout")
    # checkout files
    for dir in cvs_directories:
        output = tools.run_command("cvs checkout " + dir)

    # Change dirs to the root of the CVS tree
    os.chdir("multifacet")

    # expand the symlinks
    output = tools.run_command("scripts/cvsmapfs -cw < symlinks.list")
    if output != "":  # any output means an error occurred
        print output
        raise RegressionError("Error expanding symlinks", output)
Example #13
def build_opal(protocol):
    os.chdir("opal")
    log_message("Building opal")
    tools.run_command("make clean")
    # opal module must be built before building stand-alone executables
    if protocol != "":
        output = tools.run_command("make -j 4 DESTINATION=%s module" %
                                   protocol["name"], max_lines=25)
    else:
        raise RegressionError("Opal build failed: ", "build without DESTINATION being set")
    # build the stand alone tester
    output = tools.run_command("make tester", max_lines=25)
    output = tools.run_command("make usd", max_lines=25)        
    os.chdir("..")
Example #14
def plan_cobra(agent_pos, jobs, grid, config) -> ([], []):
    agent_job = []
    cobra_filename_base = str(uuid.uuid1())
    cobra_bin = os.getenv("COBRA_BIN")
    if not cobra_bin:  # if env not set, assuming bin in path
        cobra_bin = "cobra"

    job_endpoints_i, agent_points_i = write_map_file(agent_pos, jobs, grid,
                                                     cobra_filename_base)
    write_task_file(job_endpoints_i, agent_points_i, cobra_filename_base)

    time.sleep(1)

    pwd = os.getcwd()
    cmd = " ".join([
        cobra_bin, cobra_filename_base + MAP_EXT,
        cobra_filename_base + TASK_EXT
    ])
    res = tools.run_command(cmd)

    time.sleep(.2)

    try:
        if res != 0:
            logging.warn("Error when calling cobra: " + cmd + "\nin: " + pwd)
            return [], []
        paths = read_path_file(cobra_filename_base + PATH_EXT, grid)
        agent_job, paths = allocation_from_paths(paths, agent_pos, jobs)
        paths = make_paths_comparable(paths, agent_job, agent_pos, jobs)
        return agent_job, paths
    finally:
        clean_up(cobra_filename_base)
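Note that `run_command` is used here with a different convention than in the build scripts above: its return value is treated as a process exit status rather than captured output (the `hg` and `build_all` examples further down rely on the same convention, passing argument lists and an optional working directory). A hypothetical helper with that shape:

import subprocess

def run_command(cmd, cwd=None):
    # Hypothetical exit-status variant: accept a shell string or an
    # argument list and return the process return code.
    if isinstance(cmd, str):
        return subprocess.call(cmd, shell=True, cwd=cwd)
    return subprocess.call(cmd, cwd=cwd)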
Example #15
    def test_put_one_file1_in_dir(self):
        """
        Test that one file is put in the dir correctly
        """
        with TemporaryDirectory() as tmpdir, NamedTemporaryFile() as file1:
            output_dir = os.path.join(tmpdir, "output")
            input_json = {
                "output_directory_name": output_dir,
                "files": [
                    {
                        "class": "File",
                        "path": file1.name
                    },
                ]
            }
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as input_json_file_data:
                json.dump(input_json, input_json_file_data)
            command = ["cwl-runner", *CWL_ARGS, cwl_file, input_json_file]
            returncode, proc_stdout, proc_stderr = run_command(command)
            output_json = json.loads(proc_stdout)

            # test that command ran successfully
            self.assertEqual(returncode, 0)
            # make sure the dir exists
            self.assertTrue(os.path.exists(output_json['directory']['path']))
            self.assertTrue(os.path.isdir(output_json['directory']['path']))
            self.assertEqual(output_dir, output_json['directory']['path'])
            # make sure both files were output to the dir
            self.assertEqual(len(os.listdir(output_json['directory']['path'])),
                             1)
            self.assertTrue(
                os.path.basename(file1.name) in os.listdir(
                    output_json['directory']['path']))
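The CWL test cases on this page assume yet another `run_command` variant, one that takes an argument list and returns a (returncode, stdout, stderr) triple. A minimal sketch of that assumed helper:

import subprocess

def run_command(args):
    # Hypothetical triple-returning variant assumed by the CWL tests:
    # run an argument list and return (returncode, stdout, stderr) as text.
    proc = subprocess.run(args, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE, text=True)
    return proc.returncode, proc.stdout, proc.stderr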
Example #17
def run_tester(host, protocol, length=10, processors=16, purify=0):
    os.chdir("ruby")
    name = protocol["name"]
    if protocol.has_key("specific_processor_count"):
        processors = protocol.get("specific_processor_count", 1)
    procs_per_chip = protocol.get("procs_per_chip", 1)
    if procs_per_chip > processors:
        procs_per_chip = 1
    l2_cache_banks = protocol.get("specific_cache_count", processors)
    
    # the '-r random' parameter makes the tester pick a new seed each night
    command = "%s/generated/%s/bin/tester.exec -r %s -l %d -p %d -a %d -e %d " % (host, name, g_random_seed, length, processors, procs_per_chip, l2_cache_banks)
    if purify == 0:
        log_message("Running tester:   %s for %d with %d processors and %d procs_per_chip" % (name, length, processors, procs_per_chip))
    else:
        log_message("Running valgrind: %s for %d with %d processors and %d procs_per_chip" % (name, length, processors, procs_per_chip))
        command = "/s/valgrind-2.2.0/bin/valgrind --tool=memcheck -v --leak-check=yes " + command

    output = tools.run_command(command)
    result = check_valgrind(output, purify)
    if result != 1:
        raise RegressionError("Tester error: 'Success' message not displayed", output)


    # log random seed and ruby_cycles
    lines = string.split(output, "\n")
    for i in lines:
        if re.search("g_RANDOM_SEED", i):
            tokens = i.split()
            log_message("  Random seed: %d"%int(tokens[1]))
        if re.search("Ruby_cycle", i):
            log_message("  %s" % i)
    
    os.chdir("..")
Example #18
    def test_generate_cases_sequenced(self):
        """
        # cases_sequenced.txt

        generate_cbioPortal_files.py \
        cases_sequenced \
        --cancer-study-id "$(PROJ_ID)" \
        --data-clinical-file "$(DATA_CLINICAL_FILE)" \
        --output "$(CBIO_CASES_SEQUENCED_FILE)"
        """
        data_clinical_file = os.path.join(DATA_SETS['Proj_08390_G']['INPUTS_DIR'], "Proj_08390_G_sample_data_clinical.txt")
        input_json = {
            "subcommand": "cases_sequenced",
            "output_filename": "cases_sequenced.txt",
            "data_clinical_file": {
                "path": data_clinical_file,
                "class": "File"
                },
            "cancer_study_id": "Proj_08390_G"
        }

        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'cases_sequenced.txt'),
                    'basename': 'cases_sequenced.txt',
                    'class': 'File',
                    'checksum': 'sha1$ef9f5aef03c2527bf576470168660557ca1c7cc9',
                    'size': 641,
                    'path': os.path.join(output_dir,'cases_sequenced.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #19
    def checkout(self):
        # We don't need to check out the working copy
        if self.rev == 'WORK':
            return

        path = self.checkout_dir
        if not os.path.exists(path):
            run_command(['hg', 'clone', '-r', self.rev, self.repo, path])
        else:
            logging.info('Checkout "%s" already exists' % path)
            run_command(['hg', 'pull', self.repo], cwd=path)

        retcode = run_command(['hg', 'update', '-r', self.rev], cwd=path)
        if not retcode == 0:
            # Unknown revision
            logging.error('Repo at %s has no revision %s.' % (path, self.rev))
            sys.exit(1)
Example #20
    def test_fusion_filter1(self):
        """
        """
        fusion_file = os.path.join(DATA_SETS['Proj_08390_G']['MAF_DIR'], "Sample1.Sample2.svs.pass.vep.portal.txt")

        input_json = {
            "fusions_file": {
                  "class": "File",
                  "path": fusion_file
                },
            "output_filename": "data_fusions.txt",
            "known_fusions_file": {
                "class": "File",
                "path": KNOWN_FUSIONS_FILE
            }
        }

        with TemporaryDirectory() as tmpdir:

            input_json_file = os.path.join(tmpdir, "input.json")

            with open(input_json_file, "w") as input_json_file_data:
                json.dump(input_json, input_json_file_data)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
            "cwl-runner",
            *CWL_ARGS,
            "--outdir", output_dir,
            "--tmpdir-prefix", tmp_dir,
            "--cachedir", cache_dir,
            cwl_file, input_json_file
            ]

            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                "output_file": {
                    'location': 'file://' + os.path.join(output_dir, "data_fusions.txt"),
                    'basename': "data_fusions.txt",
                    'class': 'File',
                    'checksum': 'sha1$c16f763b248813fcdde76f7486f1ddc4e9856038',
                    'size': 99,
                    'path': os.path.join(output_dir, "data_fusions.txt")
                    }
                }
        self.assertDictEqual(output_json, expected_output)
Example #21
    def test_generate_data_clinical_patient(self):
        """
        data_clinical_patient.txt

        generate_cbioPortal_files.py \
        patient \
        --data-clinical-file "$(DATA_CLINICAL_FILE)" \
        --output "$(CBIO_CLINCIAL_PATIENT_DATA_FILE)"
        """
        data_clinical_file = os.path.join(DATA_SETS['Proj_08390_G']['INPUTS_DIR'], "Proj_08390_G_sample_data_clinical.txt")

        input_json = {
        "subcommand": "patient",
        "data_clinical_file": {
            "path": data_clinical_file,
            "class": "File"
            },
        "output_filename": "data_clinical_patient.txt"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'data_clinical_patient.txt'),
                    'basename': 'data_clinical_patient.txt',
                    'class': 'File',
                    'checksum': 'sha1$9417dcabddd6ab2cbe98167bccd9b9e4fa182562',
                    'size': 643,
                    'path': os.path.join(output_dir,'data_clinical_patient.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #22
    def test_put_two_files_in_dir(self):
        """
        Test that two files are put in the dir correctly
        """
        with TemporaryDirectory() as tmpdir, NamedTemporaryFile(
        ) as file1, NamedTemporaryFile() as file2:
            # set path to the dir which this CWL should to output to
            output_dir = os.path.join(tmpdir, "output")

            # create input data
            input_json = {
                "output_directory_name":
                output_dir,
                "files": [{
                    "class": "File",
                    "path": file1.name
                }, {
                    "class": "File",
                    "path": file2.name
                }]
            }

            # write input data
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as input_json_file_data:
                json.dump(input_json, input_json_file_data)

            # command args to run CWL
            command = ["cwl-runner", *CWL_ARGS, cwl_file, input_json_file]

            # run the command
            returncode, proc_stdout, proc_stderr = run_command(command)

            # test that command ran successfully
            self.assertEqual(returncode, 0)

            # parse the stdout
            output_json = json.loads(proc_stdout)

            # make sure the output is a dir
            self.assertTrue("directory" in output_json)
            # make sure there's only one element output
            self.assertEqual(len(output_json), 1)
            # make sure the dir exists
            self.assertTrue(os.path.exists(output_json['directory']['path']))
            self.assertTrue(os.path.isdir(output_json['directory']['path']))
            self.assertEqual(output_dir, output_json['directory']['path'])
            # make sure both files were output to the dir
            self.assertEqual(len(os.listdir(output_json['directory']['path'])),
                             2)
            self.assertTrue(
                os.path.basename(file1.name) in os.listdir(
                    output_json['directory']['path']))
            self.assertTrue(
                os.path.basename(file2.name) in os.listdir(
                    output_json['directory']['path']))
Example #23
def run_opal_simics(checkpoint, workload_name, transactions, protocol={"name" : "None"},
               processors=16, bandwidth=6400,
               condor_process = "1", condor_cluster = "1",
               opal_param_file = "",
               use_ruby=0,
               expected_ruby_cycles=0,
               tolerance=.05
               ):
    name = protocol["name"]
    log_message("Running simics: checkpoint=%s, processors=%s, transactions=%d, protocol: %s" % (checkpoint, processors, transactions, name))
    start_time = time.time()

    # create results directory
    if not os.path.exists("condor/results"):
        os.mkdir("condor/results")
    if not os.path.exists("condor/results/"+workload_name):
        os.mkdir("condor/results/"+workload_name)

    # use the default opal configuration file, unless otherwise specified
    if opal_param_file != "":
        os.environ["OPAL_PARAMFILE"] = "%s" % opal_param_file
    if use_ruby != 0:
        os.environ["USE_RUBY"] = "1"

    os.environ["SIMICS_EXTRA_LIB"] = "./modules"
    os.environ["VTECH_LICENSE_FILE"] = "/p/multifacet/projects/simics/licenses/license.dat"
    #os.environ["SIMICS_HOME"] = "."
    os.environ["WORKLOAD"] = "%s" % workload_name
    os.environ["CHECKPOINT"] = "%s" % checkpoint
    os.environ["MODULE"] = "%s" % name
    os.environ["PROCESSORS"] = "%d" % processors
    os.environ["TRANSACTIONS"] = "%d" % transactions
    os.environ["BANDWIDTH"] = "%d" % bandwidth
    os.environ["CONDORCLUSTER"] = "%s" % condor_cluster
    os.environ["CONDORPROCESS"] = "%s" % condor_process
    
    print os.getcwd()
    if (name == "None"):
        os.chdir("simics/home/template/")
    else:
        os.chdir("simics/home/%s/" % name)
        
    output = tools.run_command("../../x86-linux/bin/simics-sparc-u2 -echo -verbose -no-log -no-win -x ../../../condor/gen-scripts/go.simics", "quit 666\n")
    os.chdir("../../..")

    end_time = time.time()
    minutes = (end_time - start_time)/60.0
    if minutes > 1:
        log_message("  runtime: %f minutes" % minutes)

    if (name != "None"):
        opal_log_filename = "condor/results/%s-%sp-%s-%s-%s-%s.opal" % (workload_name, processors, name, bandwidth, condor_cluster, condor_process)
        if (not os.path.exists(opal_log_filename)):
            raise RegressionError(("Opal log file not present: %s" %
                                   opal_log_filename), output)
Example #24
    def test_run_facets_wrapper(self):
        """
        """
        input_maf = os.path.join(DATA_SETS['Proj_08390_G']['MAF_DIR'], "Sample1.Sample2.muts.maf")
        input_rds = os.path.join(DATA_SETS['Proj_08390_G']['FACETS_SUITE_DIR'], "Sample1_hisens.rds")
        input_json = {
            "maf_file": {
                "path": input_maf,
                "class": "File"
            },
            "facets_rds": {
                "path": input_rds,
                "class": "File"
            },
            "output_filename": "Sample1_hisens.ccf.maf"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'Sample1_hisens.ccf.maf'),
                    'basename': 'Sample1_hisens.ccf.maf',
                    'class': 'File',
                    'checksum': 'sha1$7e478a8a44d27735f26e368989c672ed6ef5d52a',
                    'size': 19217199,
                    'path': os.path.join(output_dir, 'Sample1_hisens.ccf.maf')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #25
    def test_generate_meta_fusion(self):
        """
        # meta_fusions.txt

        generate_cbioPortal_files.py \
        meta_fusion \
        --cancer-study-id "$(PROJ_ID)" \
        --fusion-data-filename "$(CBIO_FUSION_DATA_FILENAME)" \
        --output "$(CBIO_META_FUSIONS_FILE)"
        """
        input_json = {
        "subcommand": "meta_fusion",
        "output_filename": "meta_fusions.txt",
        "cancer_study_id": "cancer_study",
        "fusion_data_filename": "data_fusions.txt"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'meta_fusions.txt'),
                    'basename': 'meta_fusions.txt',
                    'class': 'File',
                    'checksum': 'sha1$5e71daac57615260e685b9f7184a86ddf0e3a6d4',
                    'size': 227,
                    'path': os.path.join(output_dir,'meta_fusions.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #26
    def test_generate_meta_mutations_extended(self):
        """
        # meta_mutations_extended.txt

        generate_cbioPortal_files.py \
        meta_mutations \
        --cancer-study-id "$(PROJ_ID)" \
        --mutations-data-filename "$(CBIO_MUTATION_DATA_FILENAME)" \
        --output "$(CBIO_META_MUTATIONS_FILE)"
        """
        input_json = {
        "subcommand": "meta_mutations",
        "output_filename": "meta_mutations_extended.txt",
        "cancer_study_id": "cancer_study",
        "mutations_data_filename": "data_mutations_extended.txt"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'meta_mutations_extended.txt'),
                    'basename': 'meta_mutations_extended.txt',
                    'class': 'File',
                    'checksum': 'sha1$d6681566b68ec2eba1c16369f6838ed52986b044',
                    'size': 253,
                    'path': os.path.join(output_dir,'meta_mutations_extended.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #27
    def test_generate_meta_segments(self):
        """
        # <project_id>_meta_cna_hg19_seg.txt

        generate_cbioPortal_files.py \
        meta_segments \
        --cancer-study-id "$(PROJ_ID)" \
        --output "$(CBIO_META_CNA_SEGMENTS_FILE)" \
        --segmented-data-file "$(CBIO_SEGMENT_DATA_FILENAME)"
        """
        input_json = {
        "subcommand": "meta_segments",
        "output_filename": "Proj_08390_G_meta_cna_hg19_seg.txt",
        "cancer_study_id": "cancer_study",
        "segmented_data_filename": "Proj_08390_G_data_cna_hg19.seg"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'Proj_08390_G_meta_cna_hg19_seg.txt'),
                    'basename': 'Proj_08390_G_meta_cna_hg19_seg.txt',
                    'class': 'File',
                    'checksum': 'sha1$72f05c56f8304f1e12f1d922ccfb89a3c8559660',
                    'size': 200,
                    'path': os.path.join(output_dir,'Proj_08390_G_meta_cna_hg19_seg.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #28
    def test_generate_meta_CNA(self):
        """
        # meta_CNA.txt
            generate_cbioPortal_files.py \
            meta_cna \
            --cancer-study-id "$(PROJ_ID)" \
            --cna-data-filename "$(CBIO_CNA_DATA_FILENAME)" \
            --output "$(CBIO_META_CNA_FILE)"
        """
        input_json = {
        "subcommand": "meta_cna",
        "output_filename": "meta_CNA.txt",
        "cancer_study_id": "cancer_study",
        "cna_data_filename": "data_CNA.txt"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'meta_CNA.txt'),
                    'basename': 'meta_CNA.txt',
                    'class': 'File',
                    'checksum': 'sha1$a0c50ba21af32710c6895201ec2ec74809f43fec',
                    'size': 270,
                    'path': os.path.join(output_dir,'meta_CNA.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #29
    def test_meta_clinical_patient(self):
        """
        # meta_clinical_patient.txt
        generate_cbioPortal_files.py \
        meta_patient \
        --cancer-study-id "$(PROJ_ID)" \
        --patient-data-filename "$(CBIO_CLINCIAL_PATIENT_DATA_FILENAME)" \
        --output "$(CBIO_CLINCAL_PATIENT_META_FILE)"
        """
        input_json = {
        "subcommand": "meta_patient",
        "output_filename": "meta_clinical_patient.txt",
        "cancer_study_id": "cancer_study",
        "patient_data_filename": "data_clinical_patient.txt"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'meta_clinical_patient.txt'),
                    'basename': 'meta_clinical_patient.txt',
                    'class': 'File',
                    'checksum': 'sha1$cae62ab4638ff2ff39b71a43b5bd996f8eea16ea',
                    'size': 142,
                    'path': os.path.join(output_dir,'meta_clinical_patient.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #31
def main_opal_regression():
    checkout_multifacet()
    global host
    host = string.strip(tools.run_command("scripts/calc_host.sh"))
    log_message("Host type: %s" % host)
    opal_clean()
    build_opal("")
    run_opal_tester()
    for protocol in opal_protocols:
        build_opal( protocol )
        for workload in workloads.regress_list:
            run_opal_simics(workload[0], workload[1], workload[2], protocol=protocol)
    purify_opal()
Example #32
    def compile(self):
        """
        We issue the build_all command unconditionally and let "make" take care
        of checking if something has to be recompiled.
        """
        try:
            retcode = run_command(['./build_all'], cwd=self.src_dir)
        except OSError:
            logging.error('Changeset %s does not have the build_all script. '
                          'Revision cannot be used by the scripts.' % self.rev)
            sys.exit(1)
        if not retcode == 0:
            logging.error('Build script failed in: %s' % self.src_dir)
            sys.exit(1)
Example #33
def build_opal(protocol):
    os.chdir("opal")
    log_message("Building opal")
    if protocol != "":
        output = tools.run_command("make -j 4 DESTINATION=%s module" %
                                   protocol["name"], max_lines=25)
    else:
        output = tools.run_command("make -j 4 module", max_lines=25)
        
    # build the stand alone tester
    output = tools.run_command("make readipage", max_lines=25)
    output = tools.run_command("make makeipage", max_lines=25)
    output = tools.run_command("make tester", max_lines=25)
    output = tools.run_command("make regtest", max_lines=25)
    output = tools.run_command("make memscan", max_lines=25)
    output = tools.run_command("make usd", max_lines=25)
    os.chdir("..")
Example #34
    def test_generate_meta_sample(self):
        """
        meta_clinical_sample.txt
        """
        input_json = {
        "subcommand": "meta_sample",
        "cancer_study_id": "cancer_study",
        "sample_data_filename": "data_clinical_sample.txt",
        "output_filename": "meta_clinical_sample.txt"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, "meta_clinical_sample.txt"),
                    'basename': "meta_clinical_sample.txt",
                    'class': 'File',
                    'checksum': 'sha1$14021d16e19aa53440f953aece0e66e41d09c7f5',
                    'size': 140,
                    'path': os.path.join(output_dir, "meta_clinical_sample.txt")
                    }
                }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #35
def run_opal_tester():
    os.chdir("opal")    
    # clean up all possible output files before beginning
    tools.run_command("/bin/rm -f gzip-trace imap-gzip-trace-0 imap-gzip-trace-6213 output")
    for test in opal_tests:
        # set up the test
        for setup in test[0]:
            tools.run_command(setup)
        # run the test
        for cmd in test[1]:
            tools.run_command(cmd % host)
        # check the results
        for check in test[2]:
            output = tools.run_command(check, max_lines=50)
            if output != "":
                log_message("    check fails: %s" % check)
                log_message("    testing: %s" % test[1])
                raise RegressionError("Opal tester failed: ", output)
        # clean up after the test
        for cleanup in test[3]:
            tools.run_command(cleanup)
    os.chdir("..")
Example #38
def run_opal_tester():
    os.chdir("opal")    
    # clean up all possible output files before beginning
    os.system("/bin/rm gzip-tr imap-gzip-tr-0 imap-gzip-tr-6213 output")
    for index, test in enumerate(opal_tests):  # index is used in the failure message below
        # set up the test
        for setup in test[0]:
            tools.run_command( setup )
        # run the test
        for cmd in test[1]:
            tools.run_command( cmd % host )
        # check the results
        for check in test[2]:
            output = tools.run_command( check )
            if output != "":
                print "check fails: test ", index, "failed!!!!"
                print output
        # clean up after the test
        for cleanup in test[3]:
            tools.run_command( cleanup )
    os.chdir("..")
Example #39
def main(args):

    if args.chromosomes == "all":
        chromosomes = list(range(1, 23)) + ['X']
    else:
        chromosomes = args.chromosomes.split(",")

    for chromosome in chromosomes:
        print("Starting chr" + str(chromosome) + " ... ")
        outdir = "{0}/chr{1}/".format(args.outdir, chromosome)
        command = "mkdir -p " + outdir
        out = subprocess.getoutput(command)

        ## Download observed matrix with KR normalization
        command = args.juicebox + " dump observed KR {0} {1} {1} BP {3} {2}/chr{1}.KRobserved".format(
            args.hic_file, chromosome, outdir, args.resolution)
        print(command)
        out = subprocess.getoutput(command)
        if not args.skip_gzip:
            run_command("gzip {0}/chr{1}.KRobserved".format(
                outdir, chromosome))

        ## Download KR norm file
        command = args.juicebox + " dump norm KR {0} {1} BP {3} {2}/chr{1}.KRnorm".format(
            args.hic_file, chromosome, outdir, args.resolution)
        out = subprocess.getoutput(command)
        print(command)
        if not args.skip_gzip:
            run_command("gzip {0}/chr{1}.KRnorm".format(outdir, chromosome))

        if args.include_raw:
            ## Download raw observed matrix
            command = args.juicebox + " dump observed NONE {0} {1} {1} BP {3} {2}/chr{1}.RAWobserved".format(
                args.hic_file, chromosome, outdir, args.resolution)
            print(command)
            out = subprocess.getoutput(command)
            if not args.skip_gzip:
                run_command("gzip {0}/chr{1}.RAWobserved".format(
                    outdir, chromosome))
Example #40
build_ruby = make_timed_function(build_ruby)
build_opal = make_timed_function(build_opal)
build_opal_smt = make_timed_function(build_opal_smt)
run_tester = make_timed_function(run_tester)
run_simics = make_timed_function(run_simics)

########### Main ###########

try:

    # lock the directory
    if os.path.exists(g_lockfile_path):
        raise RegressionError("Another regression tester is running?", "")
    else:
        g_locked = 1
        tools.run_command("touch %s" % g_lockfile_path)
    
    # watchdog timer
    from threading import Timer
    g_timer = Timer(60*60*g_timer_hours, timer_abort)
    g_timer.start()
    
    log_message("Regression tester started at %s" % time.asctime())
    
    # setup the default search path for tools module
    tools.set_default_search_path(os.environ["PATH"])
    
    import socket
    log_message("Running on host %s" % socket.getfqdn())

Example #41
#!/s/std/bin/python

import os, sys, re
sys.path.append("%s/../condor/gen-scripts" % os.path.dirname(os.path.abspath(sys.argv[0])))
import tools

simics_path = "%s/../simics/"%os.path.dirname(os.path.abspath(sys.argv[0]))
#craff_path = "%s/x86-linux/bin/craff"%simics_path
craff_path = "%s/v9-sol8-64/bin/craff"%simics_path

for i in sys.argv[1:]:
  output = tools.run_command("%s -n %s" % (craff_path, i), echo = 0, throw_exception = 0)
  for line in output.split("\n"):
    if re.search("Compression: 0", line):
      print "Craffing %s ..." % i,
      sys.stdout.flush()
      tools.run_command("%s -o craff.out %s" % (craff_path, i), echo=0)
      tools.run_command("mv craff.out %s" % i, echo=0)
      print "done."

Example #42
import os, sys
import matplotlib
matplotlib.use('Agg')

path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../lib'))
if not path in sys.path:
    sys.path.insert(1, path)

import tools
import data_processing

if not os.path.exists('unprocessed_data'):
    os.makedirs('unprocessed_data')

tools.run_command('wget -P unprocessed_data http://vajra.cs.nyu.edu/iclr04/full-images.zip')

if not os.path.exists('unprocessed_data/full-images/the-departed-00207091_05003.jpg'):
    tools.run_command('unzip unprocessed_data/full-images.zip -d unprocessed_data')

    
data  = data_processing.DatasetProcessFLIC('SHOULDER')
Example #43
    def run(self, *args):
        if self.options.dry_run or self.options.verbose:
            self.info('Run command: %s' % quote_arg_str(args), unwrapped=True)
        if self.options.dry_run:
            return
        run_command(*args)
Example #44
    def start_workload_generators(self, workload_args, performance_args):

        try:
            command = ["sudo", "rm", "/root/exp_*"]
            out, err, p = tools.run_command(command)
            tools.log(
                type="INFO2",
                code="rm_root_exp*",
                file_name="experiment.py",
                function_name="start_workload_generators",
                message="%s @@ %s" % (" ".join(command), out),
                exception=err)
        except Exception as err:
            pass

        number_of_servers_running = 0

        for server in self.servers:
            remote_machine = RemoteMachine(server_ip=server["ip"])

            self.remote_machine_list.append(remote_machine)

            remote_machine.start(workload_args=workload_args, performance_args=performance_args)

            number_of_servers_running = number_of_servers_running + 1

            print ("number of servers currently running: " + str(number_of_servers_running))

        last_bad_vol_count = 0

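        # poll until every workload generator thread finishes; while waiting,
        # restart the OpenStack services whenever the bad-volume error count
        # grows past the configured threshold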
        while True:

            current_bad_vol_count = communication.get_error_log_count(experiment_id=self.experiment["id"])

            if "start-new" in _args_commands and current_bad_vol_count - last_bad_vol_count >= communication.Communication.get_config(
                    "restart_controller_compute_bad_volume_count_threshold"):

                tools.log(
                    type="WARNING",
                    code="regular_service_restart",
                    file_name="experiment.py",
                    function_name="start_workload_generators",
                    message="restarting compute and controller services to make sure everything will be fine, every 7 minutes")

                try:
                    tools.run_command2(
                        'sudo python /root/cinder/cinder/MLScheduler/experiment.py execute-controller --command "service keystone restart; service nova-api restart; service nova-cert restart; service nova-consoleauth restart; service nova-scheduler restart; service nova-conductor restart; service nova-novncproxy restart; service neutron-server restart; service memcached restart" > /root/exp_controller_restart_err_count.out',
                        get_out=True
                    )

                    tools.run_command2(
                        'python /root/cinder/cinder/MLScheduler/experiment.py execute-compute --command "service nova-compute restart; service neutron-linuxbridge-cleanup restart; service neutron-linuxbridge-agent restart" > /root/exp_computes_restart_err_count.out',
                        get_out=True
                    )

                    tools.run_command2("python ~/cinder/cinder/MLScheduler/experiment.py del-err", get_out=True)

                    # remember the error count observed at this restart
                    last_bad_vol_count = current_bad_vol_count
                except Exception as err:
                    tools.log(
                        type="ERROR",
                        code="error_count_limit_failed",
                        file_name="experiment.py",
                        function_name="start_workload_generators",
                        message="failed restarting services. will wait for 10 seconds",
                        exception=err)

            # done once every remote machine's worker has finished
            is_all_remote_machines_done = True

            for remote_machine in self.remote_machine_list:
                is_all_remote_machines_done = is_all_remote_machines_done and not remote_machine.is_alive()

            if is_all_remote_machines_done:
                print("\n%%%%%%%%%%%%%%All experiments done%%%%%%%%%%%%%%")
                break

            time.sleep(12)

        if self.debug_server_ip is None or self.debug_server_ip.strip() == '':
            tools.run_command2(
                'python /root/cinder/cinder/MLScheduler/experiment.py execute-compute --command "service nova-compute restart; service neutron-linuxbridge-cleanup restart; service neutron-linuxbridge-agent restart" > exp_computes_restart_simulation_done.out')

            tools.run_command2(
                'python ~/cinder/cinder/MLScheduler/experiment.py execute --command "sudo reboot" > exp_reboot_hosts_simulation_done.out')
Example #45
0
def run_simics(checkpoint, workload_name,
               transactions,
               protocol={"name": "test"},
               processors=16,
               smt_threads=1,
               phy_signature="Perfect_",
               virtual_signature="Perfect_",
               summary_signature="Perfect_",
               xact_max_depth="1",
               microbenchmark=0,
               mbench_arg_prefix="",
               mbench_arg_string="",
               bench_arg=0,
               condor_process=os.getpid(), condor_cluster=os.getppid(),
               expected_ruby_cycles=0, tolerance=.05,
               check_opal=0, check_ruby=0,
               ):
    # use SPARC V8 directory
    name = protocol["name"]
    output = tools.run_command("scripts/prepare_simics_home.sh simics/home/%s"%name)
    if "specific_processor_count" in protocol:
        processors = protocol["specific_processor_count"]
    procs_per_chip = protocol.get("procs_per_chip", 1)
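    # default to one L2 bank per physical core (processors/smt_threads)
    # unless the protocol pins a specific cache count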
    l2_cache_banks = protocol.get("specific_cache_count", (processors/smt_threads))
    eager_cd = protocol.get("eager_cd", 1)
    eager_vm = protocol.get("eager_vm", 1)
    magic_waiting = protocol.get("magic_waiting", 0)
    no_backoff = protocol.get("no_backoff", 0)
    
    bandwidth = protocol.get("bandwidth", 6400)
    if procs_per_chip > processors:
        procs_per_chip = 1
        
    log_message("Running simics: checkpoint=%s, processors=%s, smt_threads=%s, procs_per_chip=%d transactions=%d, protocol: %s" % (checkpoint, processors, smt_threads, procs_per_chip, transactions, name))

    # prepare environment variables for running simics
    env_dict = workloads.prepare_env_dictionary(simics = 0)
    workloads.set_var(env_dict, "RESULTS_DIR", "../../../results")
    workloads.set_var(env_dict, "WORKLOAD", workload_name)
    workloads.set_var(env_dict, "CHECKPOINT", checkpoint)
    workloads.set_var(env_dict, "CHECKPOINT_DIR", " ")
    workloads.set_var(env_dict, "PROTOCOL", name)
    workloads.set_var(env_dict, "PROCESSORS", processors)
    workloads.set_var(env_dict, "CHIPS", 1)
    workloads.set_var(env_dict, "SMT_THREADS", smt_threads)
    workloads.set_var(env_dict, "PROCS_PER_CHIP", procs_per_chip)
    workloads.set_var(env_dict, "NUM_L2_BANKS", l2_cache_banks)
    workloads.set_var(env_dict, "TRANSACTIONS", transactions)
    workloads.set_var(env_dict, "BANDWIDTH", bandwidth)
    if(g_random_seed != "random"):
      workloads.set_var(env_dict, "RANDOM_SEED", g_random_seed)
    workloads.set_var(env_dict, "CONDORCLUSTER", condor_cluster)
    workloads.set_var(env_dict, "CONDORPROCESS", condor_process)

    # Transactional Memory variables
    workloads.set_var(env_dict, "MICROBENCH_DIR", "microbenchmarks/transactional")
    workloads.set_var(env_dict, "BENCHMARK", workload_name)
    workloads.set_var(env_dict, "MAX_DEPTH", xact_max_depth)
    workloads.set_var(env_dict, 'LOCK_TYPE', "TM")
    workloads.set_var(env_dict, 'READ_WRITE_FILTER', phy_signature)        
    workloads.set_var(env_dict, 'VIRTUAL_READ_WRITE_FILTER', virtual_signature)
    workloads.set_var(env_dict, 'SUMMARY_READ_WRITE_FILTER', summary_signature)
    workloads.set_var(env_dict, "XACT_EAGER_CD", eager_cd)
    if(eager_vm == 0):
        workloads.set_var(env_dict, "XACT_LAZY_VM", 1)
    else:
        workloads.set_var(env_dict, "XACT_LAZY_VM", 0)
    workloads.set_var(env_dict, "ENABLE_MAGIC_WAITING", magic_waiting)
    workloads.set_var(env_dict, "XACT_NO_BACKOFF", no_backoff)
    
    # set per-microbenchmark specific variables
    #if ((workload_name == "compensation") or (workload_name == "commit-action")
    #      or (workload_name == "isolation-test") or (workload_name == "logging-test")
    #      or (workload_name == "partial-rollback")):
    #    workloads.set_var(env_dict, "MICROBENCH_DIR", "microbenchmarks/transactional/test")
    workloads.set_var(env_dict, 'MBENCH_ARG_PREFIX', mbench_arg_prefix)        
    workloads.set_var(env_dict, 'MBENCH_ARG_STRING', mbench_arg_string)
        
    workloads.update_system_env(env_dict)

    # create results directory
    output = tools.run_command("/bin/rm -rf results")
    
    mbench_arg_prefix = workloads.get_var(env_dict, 'MBENCH_ARG_PREFIX')
    os.mkdir("results")
    if(microbenchmark == 1):
        print "CREATING DIRECTORY results/%s-%s" % (workload_name,mbench_arg_prefix)
        os.mkdir("results/%s-%s" %(workload_name,mbench_arg_prefix))
    else:
        os.mkdir("results/"+workload_name)
    print "WORKLOAD NAME %s" % workload_name

    #'''
    os.chdir("simics/home/%s/" % name)

    # run the microbenchmark script if needed
    if( microbenchmark == 1):
        output = tools.run_command("./simics -echo -verbose -no-log -no-win -x ../../../gen-scripts/microbench.simics", "quit 666\n", verbose=1, max_lines=0)
    else:
        output = tools.run_command("./simics -echo -verbose -no-log -no-win -x ../../../gen-scripts/go.simics", "quit 666\n", verbose=1)
    #tools.run_command("./simics -echo -verbose -no-log -x ../../../gen-scripts/go.simics", "quit 666\n")
    os.chdir("../../..")
    
    # dump simics output
    if(microbenchmark == 0):
        simics_output_filename = "results/%s.output" % workloads.get_output_file_name_prefix(env_dict)
    else:
        simics_output_filename = "results/%s.output" % workloads.get_microbench_output_file_name_prefix(env_dict,0)
    simics_output = open(simics_output_filename, "w")
    simics_output.write(output)
    simics_output.close()
    
    if check_ruby == 1 and name != "template":
        if( microbenchmark == 0):
            ruby_stats_filename = "results/%s.stats" % workloads.get_output_file_name_prefix(env_dict)
            error_output_filename = "condor/results/%s.error" % (workloads.get_output_file_name_prefix(env_dict, 0))
        else:
            ruby_stats_filename = "results/%s.stats" % workloads.get_microbench_output_file_name_prefix(env_dict,0)
            error_output_filename = "condor/results/%s.error" % (workloads.get_microbench_output_file_name_prefix(env_dict, 0))
        if (not os.path.exists(ruby_stats_filename)):
            raise RegressionError("Ruby stats output file not present: %s" % ruby_stats_filename, output)

        # Check for error file, indicating a SIMICS_ASSERT() failure
        if(os.path.exists(error_output_filename)):
            print "SIMICS ASSERT error!"
            raise RegressionError("SIMICS_ASSERT error file found: %s" % error_output_filename, output)

        # get random seed
        simics_output = open(simics_output_filename, "r")
        for line in simics_output.readlines():
            if re.search("g_RANDOM_SEED", line):
                tokens = line.split()
                log_message("  Random seed: %d"%int(tokens[4][:-1]))
        # get ruby cycle
        ruby_stats = open(ruby_stats_filename, "r")
        ruby_cycles = 0
        for line in ruby_stats.readlines():
            line_elements = line.split()
            if len(line_elements) > 1 and line_elements[0] == "Ruby_cycles:":
                ruby_cycles = int(line_elements[1])
        if (ruby_cycles == 0):
            raise RegressionError("Ruby_cycles not found from the output file: %s" % ruby_stats_filename, output)
        else:
            log_message("  Ruby_cycles: %d"%ruby_cycles)
        if (expected_ruby_cycles != 0):
            percent_diff = 1.0*ruby_cycles/expected_ruby_cycles
            if percent_diff < (1.0-tolerance) or percent_diff > (1.0 + tolerance):
                log_message("  Checking ruby_cycles - ratio is %f: OUT OF RANGE" % percent_diff)
                log_error("ERROR: Ruby_cycles not within tolerances.  expected %d, actual %d" % (expected_ruby_cycles, ruby_cycles))
            else:
                log_message("  Checking ruby_cycles - ratio is %f: OK" % percent_diff)

    if check_opal == 1:
        opal_log_filename = "results/%s.opal" % workloads.get_output_file_name_prefix(env_dict)
        if (not os.path.exists(opal_log_filename)):
            raise RegressionError(("Opal log file not present: %s" %
                                   opal_log_filename), output)
        # check opal correct rate!
        else:
            opal_log = open(opal_log_filename)
            # sentinel defaults: fail the check below unless the log overrides them
            processor_total_instructions = 1001 # > 1000
            processor_correct_rate = 98 # < 99
            for line in opal_log.readlines():
                tokens = line.split()
                # remember the correct rate
                if(len(tokens) == 5 and tokens[1] == "Percent" and tokens[2] == "correct"):
                    processor_correct_rate = float(tokens[4])
                # remember the processor's commit instruction number
                if(len(tokens) == 6 and tokens[1] == "Total" and tokens[2] == "number" and tokens[3] == "of" and tokens[4] == "instructions"):
                    processor_total_instructions = int(tokens[5])
                    # check the correct rate here since the total instruction
                    # number comes last during the scan of the output file
                    if(processor_correct_rate < 99 and processor_total_instructions > 1000):
                        raise RegressionError(("Opal correct rate too low (%f%% of %d instructions)!" % (processor_correct_rate, processor_total_instructions)), output)
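Example #46
0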
    def test_generate_data_clinical_sample_with_facets(self):
        """
        Test that the data clinical sample file is generated correctly when multiple Facets files are provided
        """
        data_clinical_file = os.path.join(DATA_SETS['Proj_08390_G']['INPUTS_DIR'], 'Proj_08390_G_sample_data_clinical.2.txt')
        facets_txt_file1 = os.path.join(DATA_SETS['Proj_08390_G']['FACETS_SUITE_DIR'], 'Sample46.txt')
        facets_txt_file2 = os.path.join(DATA_SETS['Proj_08390_G']['FACETS_SUITE_DIR'], 'Sample44.txt')
        input_json = {
        "subcommand": "sample",
        "data_clinical_file": {
            "path": data_clinical_file,
            "class": "File"
            },
        "output_filename": "data_clinical_sample.txt",
        "project_pi": "jonesd",
        "request_pi": "franklind",
        "facets_txt_files": [
            {
                "path": facets_txt_file1,
                "class": "File"
            },
            {
                "path": facets_txt_file2,
                "class": "File"
            }
        ]
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)
            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'data_clinical_sample.txt'),
                    'basename': 'data_clinical_sample.txt',
                    'class': 'File',
                    'checksum': 'sha1$1a1aee93048facdfb3b25598e3b560a9b6b2856a',
                    'size': 1289,
                    'path': os.path.join(output_dir, 'data_clinical_sample.txt')
                }
            }
            self.assertDictEqual(output_json, expected_output)

            with open(os.path.join(output_dir, 'data_clinical_sample.txt')) as fin:
                lines = [ line.strip().split('\t') for line in fin ]
            expected_lines = [
            ['#SAMPLE_ID', 'IGO_ID', 'PATIENT_ID', 'COLLAB_ID', 'SAMPLE_TYPE', 'SAMPLE_CLASS', 'GENE_PANEL', 'ONCOTREE_CODE', 'SPECIMEN_PRESERVATION_TYPE', 'TISSUE_SITE', 'REQUEST_ID', 'PROJECT_ID', 'PIPELINE', 'PIPELINE_VERSION', 'PROJECT_PI', 'REQUEST_PI', 'genome_doubled', 'ASCN_PURITY', 'ASCN_PLOIDY', 'ASCN_VERSION', 'ASCN_WGD'],
            ['#SAMPLE_ID', 'IGO_ID', 'PATIENT_ID', 'COLLAB_ID', 'SAMPLE_TYPE', 'SAMPLE_CLASS', 'GENE_PANEL', 'ONCOTREE_CODE', 'SPECIMEN_PRESERVATION_TYPE', 'TISSUE_SITE', 'REQUEST_ID', 'PROJECT_ID', 'PIPELINE', 'PIPELINE_VERSION', 'PROJECT_PI', 'REQUEST_PI', 'genome_doubled', 'ASCN_PURITY', 'ASCN_PLOIDY', 'ASCN_VERSION', 'ASCN_WGD'],
            ['#STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'STRING', 'NUMBER', 'NUMBER', 'STRING', 'STRING'],
            ['#1', '1', '1', '0', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '0', '1', '1', '0', '1'],
            ['SAMPLE_ID', 'IGO_ID', 'PATIENT_ID', 'COLLAB_ID', 'SAMPLE_TYPE', 'SAMPLE_CLASS', 'GENE_PANEL', 'ONCOTREE_CODE', 'SPECIMEN_PRESERVATION_TYPE', 'TISSUE_SITE', 'REQUEST_ID', 'PROJECT_ID', 'PIPELINE', 'PIPELINE_VERSION', 'PROJECT_PI', 'REQUEST_PI', 'genome_doubled', 'ASCN_PURITY', 'ASCN_PLOIDY', 'ASCN_VERSION', 'ASCN_WGD'],
            ['Sample46', '08390_G_95', 'p_C_00001', 'COLLAB-01-T', 'Primary', 'Biopsy', 'IMPACT468+08390_Hg19', 'MEL', 'FFPE', '', '08390_G', '08390', 'roslin', '2.5.7', 'jonesd', 'franklind', 'FALSE', '0.36', '2.6', '0.5.14', 'no WGD'],
            ['Sample44', '08390_G_93', 'p_C_00002', 'COLLAB-01-T', 'Primary', 'Biopsy', 'IMPACT468+08390_Hg19', 'MEL', 'FFPE', '', '08390_G', '08390', 'roslin', '2.5.7', 'jonesd', 'franklind', 'FALSE', '0.51', '1.6', '0.5.14', 'no WGD']
            ]
            self.assertEqual(lines, expected_lines)
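The `run_command` helper these unittest-style snippets unpack as `(returncode, proc_stdout, proc_stderr)` is not shown in the listing. A minimal sketch under the assumption that it simply shells out and returns the exit code with decoded output streams:

import subprocess

def run_command(args):
    # Hypothetical sketch: run an argument list, capture both streams as text,
    # and hand back (returncode, stdout, stderr) as the tests expect.
    process = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    proc_stdout, proc_stderr = process.communicate()
    return process.returncode, proc_stdout, proc_stderr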
Example #47
0
                    script_file.write("#   1 change the INTERACTIVE define below\n")
                    script_file.write("#   2 change the ../../gen-scripts/go.simics, to comment out the line of quit\n")
                    script_file.write("\n\n")
                    script_file.write("export HOST=`hostname`\n")
                    workloads.set_var(env_dict, 'CONDORCLUSTER', "`echo $HOST | tr -- '-_.' ' ' | tr '[a-zA-Z]' '[0-90-90-90-90-90-9]' | awk '{ print $1; }'`")
                    workloads.set_var(env_dict, 'CONDORPROCESS', "$$")
                    ## set to -- None -- if you do not want to manually setup runs
                    workloads.set_var(env_dict, 'INTERACTIVE', None)

                    script_file.write(workloads.get_shell_setenv_string(env_dict))
                    script_file.write("\n\n")
                    script_file.write("cd %s/simics/home/%s\n\n" % (cvsroot, protocol))
                    script_file.write("./simics -echo -verbose -no-log -no-win -x ../../../condor/gen-scripts/go.simics\n")
                    script_file.write("\n")
                    script_file.close()
                    tools.run_command("chmod +x %s/%s" % (cvsroot_results_scripts, script_filename), echo = 0)
    
    condor_file.close()

####################################################################################
###                   Custom Workloads
####################################################################################

for (workload_name, checkpoint_dir, checkpoint_prefix, transactions, dump_interval, warmup_file) in config.custom_workload_list:
    if transactions > 0:
        workload_name = "%s_%d" % (workload_name, transactions)

    print workload_name
    
    directory = "%s/%s" % (cvsroot_results, workload_name)
    if not os.path.exists(directory):
Example #48
0
def ruby_clean_slicc(protocol):
    os.chdir("ruby")
    tools.run_command("make clean_slicc")
    os.chdir("..")
    output = tools.run_command("/bin/rm -rf simics/home/%s" % (protocol["name"]))
Example #49
0
def opal_clean():
    os.chdir("opal")
    output = tools.run_command("make clean")
    os.chdir("..")
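Example #50
0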
    def test_generate_meta_study(self):
        """
        # meta_study.txt
            generate_cbioPortal_files.py \
            study \
            --cancer-study-id "$(PROJ_ID)" \
            --name "$(PROJ_NAME)" \
            --short-name "$(PROJ_SHORT_NAME)" \
            --type-of-cancer "$(CANCER_TYPE)" \
            --description "$(PROJ_DESC)" \
            --output "$(CBIO_META_STUDY_FILE)" \
            $(EXTRA_GROUPS_STR)
        """
        input_json = {
        "subcommand": "study",
        "output_filename": "meta_study.txt",
        "cancer_study_id": "cancer_study",
        "name": "cancer_study",
        "short_name": "cancer_study",
        "type_of_cancer": "MEL",
        "description": "description",
        "extra_groups": "FOO1"
        }
        with TemporaryDirectory() as tmpdir:
            input_json_file = os.path.join(tmpdir, "input.json")
            with open(input_json_file, "w") as json_out:
                json.dump(input_json, json_out)

            output_dir = os.path.join(tmpdir, "output")
            tmp_dir = os.path.join(tmpdir, "tmp")
            cache_dir = os.path.join(tmpdir, "cache")

            command = [
                "cwl-runner",
                *CWL_ARGS,
                "--outdir", output_dir,
                "--tmpdir-prefix", tmp_dir,
                "--cachedir", cache_dir,
                cwl_file, input_json_file
                ]
            returncode, proc_stdout, proc_stderr = run_command(command)

            if returncode != 0:
                print(proc_stderr)

            self.assertEqual(returncode, 0)

            output_json = json.loads(proc_stdout)

            expected_output = {
                'output_file': {
                    'location': 'file://' + os.path.join(output_dir, 'meta_study.txt'),
                    'basename': 'meta_study.txt',
                    'class': 'File',
                    'checksum': 'sha1$9625b915f0eba999305026833fa8b32b6ebebaa0',
                    'size': 161,
                    'path': os.path.join(output_dir,'meta_study.txt')
                }
            }
            self.maxDiff = None
            self.assertDictEqual(output_json, expected_output)
Example #51
0
import os, sys
import matplotlib
matplotlib.use('Agg')

path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../lib'))
if path not in sys.path:
    sys.path.insert(1, path)

import tools
import data_processing

if not os.path.exists('unprocessed_data'):
    os.makedirs('unprocessed_data')

tools.run_command('wget --no-check-certificate -P unprocessed_data https://www.dropbox.com/s/lp9ks8zs4w1jhas/full-images.zip')

if not os.path.exists('unprocessed_data/full-images/the-departed-00207091_05003.jpg'):
    tools.run_command('unzip unprocessed_data/full-images.zip -d unprocessed_data')

    
data = data_processing.DatasetProcessFLIC('SHOULDER')