Example #1
def fillAspect(aspectj_setup_dir, monitor_setup_dir, gen_monitor_setup_dir):
    """
    Fills the user-defined parts of a generated .aj file.

    The file should be called unittestMonitorAspect.aj.

    Args:
        aspectj_setup_dir (str): The destination directory for the
            filled .aj file.

        monitor_setup_dir (str): Input directory that contains two files:
            * import.txt: should contain the code that replaces the
              "// add your own imports." comment in the .aj file.
            * ajcode.txt: should contain the code that replaces the
              "// Implement your code here." comment in the .aj file.

        gen_monitor_setup_dir (str): The input directory, which must
            contain a single unittestMonitorAspect.aj file.
    """
    tools.progress("Fill in AspectJ template")
    aspect_file_name = "unittestMonitorAspect.aj"
    aspect_file_path = os.path.join(gen_monitor_setup_dir, aspect_file_name)
    aspectContent = tools.readFile(aspect_file_path)
    aspectContent = aspectContent.replace(
        "// add your own imports.",
        tools.readFile(os.path.join(monitor_setup_dir, "import.txt")))
    aspectContent = aspectContent.replace(
        "// Implement your code here.",
        tools.readFile(os.path.join(monitor_setup_dir, "ajcode.txt")))
    tools.writeFile(os.path.join(aspectj_setup_dir, aspect_file_name),
                    aspectContent)
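
All of these examples lean on a small helper module named tools that is not shown in this listing. A minimal sketch of the helpers, inferred only from the call sites here (the real module may differ):

import subprocess

def progress(*args):
    # Print a short status line; the examples call this both as
    # progress(message) and as progress(current, total).
    print("[*]", *args)

def readFile(path):
    with open(path, "r") as f:
        return f.read()

def writeFile(path, content):
    with open(path, "w") as f:
        f.write(content)

def runNoError(command):
    # subprocess.run(check=True) raises CalledProcessError, a subclass of
    # subprocess.SubprocessError, when the command exits with a non-zero code.
    subprocess.run(command, check=True)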
Example #2
def translate(pcompiler_dir, p_spec_dir, gen_monitor_dir, aspectj_dir):
    """
    Translates a P spec to RVM code.

    Args:
        pcompiler_dir (str): The root directory for the P compiler
            repository.

        p_spec_dir (str): The directory containing the P spec.
            It should contain exactly one .p file.

        gen_monitor_dir (str): The directory in which to place
            the RVM files.

        aspectj_dir (str): The directory that receives the generated
            AspectJ files; any .aj files next to the P spec are
            copied here as well.

    Raises:
        SubprocessError if the P compiler returned an error code.

        Exception if the p_spec_dir contained no .p file or more
            than one .p file.
    """
    tools.progress("Run the PCompiler...")
    p_spec_paths = glob.glob(os.path.join(p_spec_dir, "*.p"))
    if len(p_spec_paths) != 1:
        raise Exception("Expected a single p spec")
    p_spec_path = p_spec_paths[0]
    runPc(pcompiler_dir, [
        p_spec_path, "-g:RVM",
        "-o:%s" % gen_monitor_dir,
        "-a:%s" % aspectj_dir
    ])
    for f in glob.glob(os.path.join(p_spec_dir, "*.aj")):
        shutil.copy(f, aspectj_dir)
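
runPc is not defined in this listing; presumably it locates the P compiler binary inside its repository checkout and forwards the given arguments. A sketch under that assumption (the binary name and location are hypothetical):

import os

def runPc(pcompiler_dir, arguments):
    # Hypothetical path; the real location depends on the build layout
    # of the P compiler repository.
    compiler = os.path.join(pcompiler_dir, "pc")
    tools.runNoError([compiler] + arguments)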
Example #3
def runMonitor(rvmonitor_bin, gen_monitor_dir, java_dir):
    """
    Translates a .rvm file to Java.

    Args:
        rvmonitor_bin (str): The directory containing the rv-monitor tool.
        
        gen_monitor_dir (str): The directory containing the .rvm file.
            It must contain exactly one .rvm file and the .aj file
            associated with it.

            This directory is treated as a temporary directory (i.e. the
            Java files are generated here before being copied to the
            java_dir).
        
        java_dir (str): The directory in which to place the generated
            Java files.

    Raises:
        SubprocessError if the rv-monitor tool returned an error.

        Exception if the input directory contains no .rvm file or more
            than one.
    """
    tools.progress("Run RVMonitor")
    monitor_binary = os.path.join(rvmonitor_bin, "rv-monitor")
    rvm_file_paths = glob.glob(os.path.join(gen_monitor_dir, "*.rvm"))
    if len(rvm_file_paths) != 1:
        raise Exception("Expected a single rvm spec")
    rvm_file_path = rvm_file_paths[0]
    tools.runNoError([monitor_binary, "--controlAPI", "-merge", rvm_file_path])
    for f in glob.glob(os.path.join(gen_monitor_dir, "*.java")):
        shutil.copy(f, java_dir)
Example #4
def translate(pcompiler_dir, p_spec_dir, gen_monitor_dir):
    """Translates a P spec to RVM code (variant without AspectJ output)."""
    tools.progress("Run the PCompiler...")
    p_spec_paths = glob.glob(os.path.join(p_spec_dir, "*.p"))
    if len(p_spec_paths) != 1:
        raise Exception("Expected a single p spec")
    p_spec_path = p_spec_paths[0]
    runPc(pcompiler_dir, [p_spec_path, "-g:RVM", "-o:%s" % gen_monitor_dir])
Example #5
def runGetConsistency():
    tools.progress("Running the getconsistency tests...")
    tools.runNoError(["mvn", "clean", "test", "-Dtest=db.GetConsistentTest"])
    tools.runNoError(
        ["mvn", "clean", "test", "-Dtest=db.GetInconsistent1Test"])
    tools.runNoError(
        ["mvn", "clean", "test", "-Dtest=db.GetInconsistent2Test"])
    tools.runNoError(["mvn", "clean"])
Example #6
def initGitModules():
    """
    Runs "git submodule update --init --recursive".

    Raises:
        SubprocessError if the git command returns an error code.
    """
    tools.progress("Updating the RV-Monitor submodules...")
    tools.runNoError(["git", "submodule", "update", "--init", "--recursive"])
Example #7
def buildRvMonitor():
    """
    Builds and installs the RV-Monitor tool in the local Maven repository.

    Raises:
        SubprocessError if the mvn command returns an error code.
    """
    tools.progress("Building RV-Monitor...")
    tools.runNoError(["mvn", "-B", "clean", "install", "-DskipTests"])
def runMonitor(rvmonitor_bin, gen_monitor_dir, java_dir):
    """Translates a .rvm file to Java (variant without --controlAPI)."""
    tools.progress("Run RVMonitor")
    monitor_binary = os.path.join(rvmonitor_bin, "rv-monitor")
    rvm_file_paths = glob.glob(os.path.join(gen_monitor_dir, "*.rvm"))
    if len(rvm_file_paths) != 1:
        raise Exception("Expected a single rvm spec")
    rvm_file_path = rvm_file_paths[0]
    tools.runNoError([monitor_binary, "-merge", rvm_file_path])
    for f in glob.glob(os.path.join(gen_monitor_dir, "*.java")):
        shutil.copy(f, java_dir)
Example #9
def runTwoPhaseCommit():
    tools.progress("Running the twophasecommit tests...")
    tools.runNoError(
        ["mvn", "clean", "test", "-Dtest=twophasecommit.PrepareSuccessTest"])
    tools.runNoError(
        ["mvn", "clean", "test", "-Dtest=twophasecommit.PrepareFailureTest"])
    tools.runNoError([
        "mvn", "clean", "test", "-Dtest=twophasecommit.BuggyPrepareFailureTest"
    ])
    tools.runNoError(["mvn", "clean"])
def fillAspect(aspectj_dir, gen_monitor_dir):
    """Fills the user-defined parts of a generated .aj file and copies
    the result to aspectj_dir."""
    tools.progress("Fill in AspectJ template")
    aspect_file_paths = glob.glob(os.path.join(gen_monitor_dir, "*.aj"))
    if len(aspect_file_paths) != 1:
        raise Exception("Expected a single aspectJ template")
    aspect_file_path = aspect_file_paths[0]
    aspectContent = readFile(aspect_file_path)
    aspectContent = aspectContent.replace("// add your own imports.",
                                          readFile("import.txt"))
    aspectContent = aspectContent.replace("// Implement your code here.",
                                          readFile("ajcode.txt"))
    writeFile(aspect_file_path, aspectContent)
    for f in glob.glob(os.path.join(gen_monitor_dir, "*.aj")):
        shutil.copy(f, aspectj_dir)
Example #11
def runTests(test_name):
    """
    Uses Maven to run an RVM unit-test.

    Args:
        test_name (str): The name of the test, used for user-friendly
            messages.

    Raises:
        SubprocessError if mvn returns an error code.
    """
    tools.progress("Running the %s test(s)..." % test_name)
    tools.runNoError(["mvn", "-B", "clean", "test"])
    tools.runNoError(["mvn", "-B", "clean"])
Example #12
    def initialize(self):
        f = open(self.ret_file, 'r')
        data_q = json.load(f)
        abstracts_needed = set()
        for i in range(len(data_q["questions"])):
            abstracts_needed = abstracts_needed | set(data_q["questions"][i]["retrieved"])
        f.close()

        print "Collecting Abstracts.."
        f = open(self.corpus_file, 'r')
        corpus = ijson.items(f, 'articles.item')
        for article in corpus:
            pmid = article["pmid"]
            if pmid in abstracts_needed:
                self.corpus_index[pmid] = article["title"] + ' ' + article["abstractText"]
                abstracts_needed.remove(pmid)
                if not abstracts_needed:
                    break
        f.close()


        print(len(self.corpus_index))
        q_array_q = []
        q_array_d = []
        q_array_max = []
        print "Reranking.."
        n_questions = len(data_q["questions"])
        for i in range(n_questions):
            #print i
            progress(i+1, n_questions, 'questions')
            q_id = data_q["questions"][i]["id"]
            q_body = data_q["questions"][i]["body"]
            q_retrieved = data_q["questions"][i]["retrieved"]

            retr_array_q, retr_array_d, retr_array_max = self.rerank(q_body, q_retrieved)

            q_array_q.append(Question(q_body, q_id, [x[0] for x in retr_array_q], [x[1] for x in retr_array_q]))
            q_array_d.append(Question(q_body, q_id, [x[0] for x in retr_array_d], [x[1] for x in retr_array_d]))
            q_array_max.append(Question(q_body, q_id, [x[0] for x in retr_array_max], [x[1] for x in retr_array_max]))

        with open('.'.join(self.ret_file.split('.')[:-1]) + '_RWMD_Q.json', 'w+') as outfile:
            outfile.write(json.dumps({"questions":[ob.__dict__ for ob in q_array_q]}, indent=2))

        with open('.'.join(self.ret_file.split('.')[:-1]) + '_RWMD_D.json', 'w+') as outfile:
            outfile.write(json.dumps({"questions":[ob.__dict__ for ob in q_array_d]}, indent=2))

        with open('.'.join(self.ret_file.split('.')[:-1]) + '_RWMD_MAX.json', 'w+') as outfile:
            outfile.write(json.dumps({"questions":[ob.__dict__ for ob in q_array_max]}, indent=2))
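
The bare progress(i + 1, n_questions, 'questions') call above suggests a simple console progress indicator taking the current index, the total, and a label. A minimal sketch of such a helper (an assumption, not the original implementation):

import sys

def progress(current, total, label=''):
    # Render one updating console line, e.g. [#####.....] 50/100 questions
    width = 20
    filled = int(width * current / total)
    bar = '#' * filled + '.' * (width - filled)
    sys.stdout.write('\r[%s] %d/%d %s' % (bar, current, total, label))
    sys.stdout.flush()
    if current == total:
        sys.stdout.write('\n')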
Example #13
    def main(self):

        chunks = split_list(range(len(self.idmap)), self.n_chunks)
        q_centroids, question_details = self.load_questions_matrix()
        q_arrays = []

        c = 0  # Number of current chunk
        for chunk in chunks:
            start_row = chunk[0]
            n_rows = len(chunk)
            corpus_centroids = read_npy_chunk(centroids_file, start_row, n_rows)
            q_arrays.append(self.get_chunk_nns(corpus_centroids, q_centroids, question_details, chunk))

            c += 1
            progress(c, len(chunks), 'chunks')
        self.reorder(q_arrays)
        return
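
read_npy_chunk is assumed to read a contiguous block of rows from a .npy file without loading the whole array. A sketch using numpy's memory mapping (the original may instead parse the .npy header and seek manually):

import numpy as np

def read_npy_chunk(filename, start_row, num_rows):
    # Memory-map the file so only the requested rows are materialized,
    # then copy them into a regular in-memory array.
    arr = np.load(filename, mmap_mode='r')
    return np.array(arr[start_row:start_row + num_rows])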
Example #14
def runTests(parallelism, test_names, temporary_directory, command_creator):
    """Runs a test suite.

  Args:
    parallelism (int): maximum number of tests to run in parallel.
    test_names (list of string): the names of all tests in the suite.
    temporary_directory (string): path of a directory for temporary files.
    command_creator (func(string, string) : list of string): function that
        receives a temporary directory and a test name and produces the
        command which runs that test.

  Returns:
    0 for success, 1 for failure.
  """
    running = []
    failed = []
    test_count = len(test_names)
    while running or test_names:
        time.sleep(0.1)  # 0.1 sec

        new_running = []
        for test in running:
            if test.isRunning():
                new_running.append(test)
            elif test.failed():
                test.closeAndPrintFailure()
                failed.append(test)
            else:
                print("%s finished" % test.name())
                test.close()

        running = new_running

        while test_names and len(running) < parallelism:
            print("Starting: %s" % test_names[0])
            running.append(
                Test(test_names[0], temporary_directory, command_creator))
            test_names = test_names[1:]

    if failed:
        tools.progress("Finished running, %d out of %d tests failed." %
                       (len(failed), test_count))
        return 1
    else:
        tools.progress("Finished running, all tests passed.")
        return 0
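
The Test objects polled by runTests expose isRunning, failed, close, closeAndPrintFailure and name, but their class is not part of this listing. A simplified sketch of such a wrapper around subprocess (the internals are assumptions; Example #20 shows the project's own closeAndPrintFailure, which also keeps the captured stderr):

import os
import subprocess

class Test(object):
    def __init__(self, name, temporary_directory, command_creator):
        self.__name = name
        self.__log_path = os.path.join(temporary_directory, name + ".log")
        self.__log = open(self.__log_path, "w")
        # command_creator(temp_dir, test_name) yields the command to run.
        self.__process = subprocess.Popen(
            command_creator(temporary_directory, name),
            stdout=self.__log, stderr=subprocess.STDOUT)

    def name(self):
        return self.__name

    def isRunning(self):
        return self.__process.poll() is None

    def failed(self):
        return self.__process.returncode != 0

    def close(self):
        self.__log.close()

    def closeAndPrintFailure(self):
        self.close()
        print("Test %s failed; see %s" % (self.__name, self.__log_path))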
Example #15
def translate(script_dir, root_dir, gen_monitor_setup_dir):
    """
    Translates a `spec.p` file to RVM code.

    Args:
        script_dir (str): Input directory containing the `spec.p` file.

        root_dir (str): The root directory for the P compiler
            repository.

        gen_monitor_setup_dir (str): Output directory that will contain
            the generated files.

    Raises:
        SubprocessError if the P compiler returned an error code.
    """
    tools.progress("Run the PCompiler...")
    runPc(root_dir, [
        os.path.join(script_dir, "spec.p"), "-g:RVM",
        "-o:%s" % gen_monitor_setup_dir
    ])
Example #16
def main(argv):
  if len(argv) == 0:
    parallelism = 3 * tools.findAvailableCpus() // 2
  elif len(argv) == 1:
    parallelism = int(argv[0])
  else:
    usageError()

  temp_dir = tempfile.mkdtemp()
  tools.progress("Temporary directory: %s" % temp_dir)

  script_dir = os.path.dirname(os.path.abspath(__file__))

  exit_code = paralleltests.runTests(
      parallelism,
      getTests(),
      temp_dir,
      lambda temp_dir, test_name: buildCommand(script_dir, temp_dir, test_name))
  shutil.rmtree(temp_dir)
  sys.exit(exit_code)
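
tools.findAvailableCpus is assumed to return the number of CPUs usable by the current process. A sketch of such a helper (an assumption, not the project's code):

import os

def findAvailableCpus():
    # Prefer the scheduler affinity mask where available (Linux);
    # fall back to the total CPU count.
    try:
        return len(os.sched_getaffinity(0))
    except AttributeError:
        return os.cpu_count() or 1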
Example #18
def createRvm(rvmonitor_bin, gen_monitor_setup_dir, java_dir):
    """
    Compiles an .rvm file to Java.

    Args:
        rvmonitor_bin (str): Directory containing the rv-monitor binary.
        
        gen_monitor_setup_dir (str): Input directory containing a file
            called "unittest.rvm". The file will be changed during the
            call.
        
        java_dir (str): Destination directory for the generated Java files.

    Raises:
        SubprocessError if the rv-monitor tool returned an error code.
    """
    tools.progress("Run RVMonitor")
    monitor_binary = os.path.join(rvmonitor_bin, "rv-monitor")
    rvm_file = os.path.join(gen_monitor_setup_dir, "unittest.rvm")
    addRvmExceptions(rvm_file)
    tools.runNoError([monitor_binary, "-merge", "-d", java_dir, rvm_file])
Example #19
def fillAspect(aspectj_dir, gen_monitor_dir):
    """
    Fills the user-defined parts of a generated .aj file.

    The current directory should contain two files:
        * import.txt: should contain the code that replaces the
          "// add your own imports." comment in the .aj file.
        * ajcode.txt: should contain the code that replaces the
          "// Implement your code here." comment in the .aj file.
    
    The fillAspect function replaces the comments mentioned above in
    the .aj file, and copies the result to the destination directory.

    Args:
        aspectj_dir (str): The destination directory for the
            filled .aj file.

        gen_monitor_dir (str): The input directory, which must contain
            a single .aj file.

    Raises:
        Exception if the input directory does not contain exactly
            one .aj file.
    """
    tools.progress("Fill in AspectJ template")
    aspect_file_paths = glob.glob(os.path.join(gen_monitor_dir, "*.aj"))
    if len(aspect_file_paths) != 1:
        raise Exception("Expected a single aspectJ template")
    aspect_file_path = aspect_file_paths[0]
    aspectContent = tools.readFile(aspect_file_path)
    aspectContent = aspectContent.replace("// add your own imports.",
                                          tools.readFile("import.txt"))
    aspectContent = aspectContent.replace("// Implement your code here.",
                                          tools.readFile("ajcode.txt"))
    aspect_file_name = os.path.basename(aspect_file_path)
    tools.writeFile(os.path.join(aspectj_dir, aspect_file_name), aspectContent)
Example #20
    def closeAndPrintFailure(self):
        assert not self.isRunning()
        self.close()
        tools.progress("Test %s failed!" % self.__name)
        print('')
        tools.progress("Stdout:")
        print(''.join(self.__stdout))
        print('')
        tools.progress("Stderr:")
        print(''.join(self.__stderr))
        print('')
        print('')
Example #21
def train_epoch(_epoch, dataloader, model, loss_function):
    # switch to train mode -> enable regularization layers, such as Dropout
    model.train()
    running_loss = 0.0

    for i_batch, sample_batched in enumerate(dataloader, 1):

        # get the inputs (batch)
        inputs, topics, labels, lengths, topic_lengths, weights, indices = sample_batched

        # sort batch (for handling inputs of variable length)
        lengths, (inputs, labels, topics) = sort_batch(lengths, (inputs, labels, topics))

        # convert to CUDA Variables
        if torch.cuda.is_available():
            inputs = Variable(inputs.cuda())
            topics = Variable(topics.cuda())
            labels = Variable(labels.cuda())
            lengths = Variable(lengths.cuda())
            topic_lengths = Variable(topic_lengths.cuda())
        else:
            inputs = Variable(inputs)
            topics = Variable(topics)
            labels = Variable(labels)
            lengths = Variable(lengths)
            topic_lengths = Variable(topic_lengths)

        # 1 - zero the gradients
        optimizer.zero_grad()

        # 2 - forward pass: compute predicted y by passing x to the model
        outputs = model(inputs, topics, lengths, topic_lengths)

        # 3 - compute loss
        loss = loss_function(outputs, labels)

        # 4 - backward pass: compute gradient wrt model parameters
        loss.backward()

        # `clip_grad_norm` helps prevent the exploding gradient problem in
        # RNNs / LSTMs; it must run after backward() and before step().
        torch.nn.utils.clip_grad_norm(model.parameters(), clip)

        # 5 - update weights (the optimizer applies the gradients; a second,
        # manual parameter update here would apply every step twice)
        optimizer.step()

        running_loss += loss.data[0]

        # print statistics
        progress(loss=loss.data[0],
                 epoch=_epoch,
                 batch=i_batch,
                 batch_size=BATCH_SIZE,
                 dataset_size=len(loader_train.sampler.indices))

    return running_loss / i_batch
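
sort_batch above is assumed to sort the batch by descending sequence length, which pack_padded_sequence required in the PyTorch versions this code targets. A sketch under that assumption:

import torch

def sort_batch(lengths, others):
    # Sort lengths in descending order and apply the same permutation to
    # every tensor in `others` so the rows stay aligned across tensors.
    sorted_lengths, sort_idx = torch.sort(lengths, dim=0, descending=True)
    return sorted_lengths, tuple(t[sort_idx] for t in others)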
Example #22
def runTwoPhaseCommit():
    tools.progress("Running the twophasecommit tests...")
    tools.runNoError(
        ["mvn", "clean", "test", "-Dtest=twophasecommit.TwoPhaseCommitTest"])
    tools.runNoError(["mvn", "clean"])
Example #23
    def fit_network(self,
                    e_b,
                    e_n,
                    distance_toremove_edge,
                    l,
                    a,
                    d,
                    passes=1,
                    plot_evolution=False):
        # logging variables
        accumulated_local_error = []
        global_error = []
        network_order = []
        network_size = []
        total_units = []
        self.units_created = 0
        # 0. start with two units a and b at random position w_a and w_b
        w_a = [np.random.uniform(-2, 2) for _ in range(np.shape(self.data)[1])]
        w_b = [np.random.uniform(-2, 2) for _ in range(np.shape(self.data)[1])]

        self.network = nx.Graph()
        self.network.add_node(self.units_created, vector=w_a, error=0)
        self.units_created += 1
        self.network.add_node(self.units_created, vector=w_b, error=0)
        self.units_created += 1

        # 1. iterate through the data
        sequence = 0
        for p in range(passes):
            tools.progress(p, passes)
            np.random.shuffle(self.data)
            steps = 0
            for observation in self.data:
                # 2. find the nearest unit s_1 and the second nearest unit s_2
                nearest_units = self.find_nearest_units(observation)
                s_1 = nearest_units[0]
                s_2 = nearest_units[1]
                # 3. increment the age of all edges emanating from s_1
                for u, v in self.network.edges(nbunch=[s_1]):
                    attributes = self.network[u][v]
                    self.network.add_edge(u, v, age=attributes['age'] + 1)
                # 4. add the squared distance between the observation and the nearest unit in input space
                self.network.node[s_1]['error'] += spatial.distance.euclidean(
                    observation, self.network.node[s_1]['vector'])**2
                # 5. move s_1 and its direct topological neighbors towards the observation by the fractions
                #    e_b and e_n, respectively, of the total distance
                update_w_s_1 = e_b * (np.subtract(
                    observation, self.network.node[s_1]['vector']))
                self.network.node[s_1]['vector'] = np.add(
                    self.network.node[s_1]['vector'], update_w_s_1)
                update_w_s_n = e_n * (np.subtract(
                    observation, self.network.node[s_1]['vector']))
                for neighbor in self.network.neighbors(s_1):
                    self.network.node[neighbor]['vector'] = np.add(
                        self.network.node[neighbor]['vector'], update_w_s_n)
                # 6. if s_1 and s_2 are connected by an edge, set the age of this edge to zero
                #    if such an edge doesn't exist, create it
                self.network.add_edge(s_1, s_2, age=0)
                # 7. remove edges with an age larger than the maximum
                #    (passed here as distance_toremove_edge); if this results
                #    in units having no emanating edges, remove them as well
                self.prune_connections(distance_toremove_edge)
                # 8. if the number of steps so far is an integer multiple of parameter l, insert a new unit
                steps += 1
                if steps % l == 0:
                    if plot_evolution and len(self.network.nodes) > 3:
                        self.plot_network2("pass" + str(sequence))
                    sequence += 1
                    # 8.a determine the unit q with the maximum accumulated error
                    q = 0
                    error_max = 0
                    for u in self.network.nodes():
                        if self.network.node[u]['error'] > error_max:
                            error_max = self.network.node[u]['error']
                            q = u
                    # 8.b insert a new unit r halfway between q and its neighbor f with the largest error variable
                    f = -1
                    largest_error = -1
                    for u in self.network.neighbors(q):
                        if self.network.node[u]['error'] > largest_error:
                            largest_error = self.network.node[u]['error']
                            f = u
                    w_r = 0.5 * (np.add(self.network.node[q]['vector'],
                                        self.network.node[f]['vector']))
                    r = self.units_created
                    self.units_created += 1
                    # 8.c insert edges connecting the new unit r with q and f
                    #     remove the original edge between q and f
                    self.network.add_node(r, vector=w_r, error=0)
                    self.network.add_edge(r, q, age=0)
                    self.network.add_edge(r, f, age=0)
                    self.network.remove_edge(q, f)
                    # 8.d decrease the error variables of q and f by multiplying them with a
                    #     initialize the error variable of r with the new value of the error variable of q
                    self.network.node[q]['error'] *= a
                    self.network.node[f]['error'] *= a
                    self.network.node[r]['error'] = self.network.node[q][
                        'error']
                # 9. decrease all error variables by multiplying them with a constant d
                error = 0
                for u in self.network.nodes:
                    error += self.network.node[u]['error']
                accumulated_local_error.append(error)
                network_order.append(self.network.order())
                network_size.append(self.network.size())
                total_units.append(self.units_created)
                for u in self.network.nodes():
                    self.network.node[u]['error'] *= d
                    if self.network.degree(u) == 0:
                        print(u)
            global_error.append(self.compute_global_error())
        plt.clf()
        plt.title('Accumulated local error')
        plt.xlabel('iterations')
        plt.plot(range(len(accumulated_local_error)), accumulated_local_error)
        plt.savefig('visualization/accumulated_local_error.png')
        plt.clf()
        plt.title('Global error')
        plt.xlabel('passes')
        plt.plot(range(len(global_error)), global_error)
        plt.savefig('visualization/global_error.png')
        plt.clf()
        plt.title('Neural network properties')
        plt.plot(range(len(network_order)),
                 network_order,
                 label='Network order')
        plt.plot(range(len(network_size)), network_size, label='Network size')
        plt.legend()
        plt.savefig('visualization/network_properties.png')
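
Hypothetical usage of fit_network, with parameter values commonly used for growing neural gas (the surrounding class and its constructor are not shown in this listing and are assumptions):

gng = GrowingNeuralGas(data)  # assumed constructor taking the dataset
gng.fit_network(e_b=0.2,      # learning rate of the winning unit
                e_n=0.006,    # learning rate of its topological neighbors
                distance_toremove_edge=50,  # maximum edge age before pruning
                l=100,        # insert a new unit every l steps
                a=0.5,        # error decay applied on unit insertion
                d=0.995,      # global error decay per observation
                passes=5)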
Example #24
def runGetConsistency():
    tools.progress("Running the getconsistency tests...")
    tools.runNoError(["mvn", "clean", "test", "-Dtest=TwoPCTest"])
    tools.runNoError(["mvn", "clean", "test", "-Dtest=TwoPCPhase1FailureTest"])
    tools.runNoError(["mvn", "clean"])
Example #25
def main():
    """Main"""
    print("============ Prep ===========")

    print("Reading Directories")

    a = pathlib.Path(set_a)
    b = pathlib.Path(set_b)
    i = pathlib.Path(images)

    a_infos = list(a.glob(f"*.txt"))
    b_infos = list(b.glob(f"*.txt"))
    all_imgs = list(i.glob(f"*.png"))

    print("Finding Intersection...")

    a_stems = [path.stem for path in a_infos] if not ignore_visit else [
        path.stem.split("_")[0] for path in a_infos
    ]
    b_stems = [path.stem for path in b_infos] if not ignore_visit else [
        path.stem.split("_")[0] for path in b_infos
    ]
    im_stems = [path.stem for path in all_imgs] if not ignore_visit else [
        path.stem.split("_")[0] for path in all_imgs
    ]

    if set_eq:
        e = pathlib.Path(set_eq)
        e_infos = list(e.glob(f"*.txt"))
        e_stems = [path.stem for path in e_infos] if not ignore_visit else [
            path.stem.split("_")[0] for path in e_infos
        ]
        intersection = (((set(a_stems) & set(b_stems)) & set(im_stems))
                        & set(e_stems))
    else:
        intersection = (set(a_stems) & set(b_stems)) & set(im_stems)

    a_infos = [
        inf for inf in a_infos
        if (inf.stem if not ignore_visit else inf.stem.split("_")[0]
            ) in intersection
    ]
    b_infos = [
        inf for inf in b_infos
        if (inf.stem if not ignore_visit else inf.stem.split("_")[0]
            ) in intersection
    ]
    all_imgs = [
        im for im in all_imgs
        if (im.stem if not ignore_visit else im.stem.split("_")[0]
            ) in intersection
    ]

    print("Building Scan Objects...")

    pairs = []

    for a, b, im in zip(a_infos, b_infos, all_imgs):
        a_scan = Scan.from_files(im, a)
        b_scan = Scan.from_files(im, b)
        pairs.append((a_scan, b_scan))

    print(f"{len(pairs)} Scans will be considered.")

    if euclidean:

        print("====== Euclidean Distance ======")

        print("Finding Distances Between Scans...")

        e_dists = []

        for idx, pair in enumerate(pairs):
            progress(idx, len(pairs))
            dist = pair[0].euclidean_distance(pair[1])
            e_dists.append(dist)

        print("Statistics...")

        e_dists = np.array(list(zip(e_dists, pairs)), dtype=object)
        srt = e_dists[e_dists[:, 0].argsort()]

        print(f"Top {top} Farthest:")

        for idx, p in enumerate(srt[-top:][::-1]):
            dist = p[0]
            a = p[1][0]
            b = p[1][1]
            print(
                f"- {dist: 0.2f} : {a.patient} ({a.info_path} <> {b.info_path})"
            )

        print(f"Average: {np.average(srt[:, 0])}")
        print(f"Min: {np.min(srt[:, 0])}")
        print(f"Max: {np.max(srt[:, 0])}")
        print(f"Median: {np.median(srt[:, 0])}")

    if dice:
        print("======== Sørensen–Dice =========")

        print("Finding Distances Between Scans...")

        if mp:
            with Pool(cpu_count() - 1) as pool:
                results = pool.starmap(_mp_work, pairs)
            d_dists = [r[0] for r in results]
            joint_threshold_count = [r[1] for r in results]
        else:
            d_dists = []
            joint_threshold_count = []
            for idx, pair in enumerate(pairs):
                progress(idx, len(pairs))
                dist, tpr_count = pair[0].dice_similarity(
                    pair[1], count_over=tpr_threshold)
                d_dists.append(dist)
                joint_threshold_count.append(tpr_count)

        print("Statistics...")

        d_dists = np.array(list(zip(d_dists, pairs)), dtype=object)
        srt = d_dists[d_dists[:, 0].argsort()]

        joint_threshold_sum = np.sum(np.array(joint_threshold_count))
        joint_tpr = joint_threshold_sum / (len(pairs) * 12)  # 12 joints per hand
        print(f"Joint TPR: {joint_tpr}")

        hand_threshold_sum = np.count_nonzero(
            np.array(joint_threshold_count) == 12)  # All joints over threshold
        hand_tpr = hand_threshold_sum / len(pairs)
        print(f"Hand TPR: {hand_tpr}")

        print()

        print(f"Top {top} Dissimilar:")

        for idx, p in enumerate(srt[:top]):
            dist = p[0]
            a = p[1][0]
            b = p[1][1]
            print(
                f"- {dist: 0.2f} : {a.patient} ({a.info_path} <> {b.info_path})"
            )

        print(f"Top {top} Similar:")

        for idx, p in enumerate(srt[-top:]):
            dist = p[0]
            a = p[1][0]
            b = p[1][1]
            print(
                f"- {dist: 0.2f} : {a.patient} ({a.info_path} <> {b.info_path})"
            )

        print(f"Average: {np.average(srt[:, 0])}")
        print(f"Min: {np.min(srt[:, 0])}")
        print(f"Max: {np.max(srt[:, 0])}")
        print(f"Median: {np.median(srt[:, 0])}")

    if meta:
        print("====== Meta Statistics =========")
        e_list = list(e_dists[:, 0])
        d_list = list(d_dists[:, 0])

        print(
            f"Coef. Corr. Dice & Euclidean: {np.corrcoef(e_list, d_list)[0][1]}"
        )

    if joint_stats:
        print("====== Joint < TPR Threshold Statistics ======")

        joint_pairs = []
        for pair in pairs:
            size = pair[0].image.size
            for a, b in zip(pair[0].joints, pair[1].joints):
                joint_pairs.append((a, b, size))

        if mp:
            with Pool(cpu_count() - 1) as pool:
                results = pool.starmap(_joint_mp_work, joint_pairs)
            d_j_dists = [r[0] for r in results]
            d_j_labels = [r[1] for r in results]
        else:
            d_j_dists = []
            d_j_labels = []
            for idx, pair in enumerate(joint_pairs):
                progress(idx, len(joint_pairs))
                dist, name = pair[0].dice_similarity(pair[1],
                                                     pair[2],
                                                     return_name=True)
                d_j_dists.append(dist)
                d_j_labels.append(name)

        miss_counts = {}
        for dist, label in zip(d_j_dists, d_j_labels):
            if dist < tpr_threshold:
                if label not in miss_counts.keys():
                    miss_counts[label] = 1
                else:
                    miss_counts[label] += 1
        print("Miss Counts Below TPR Threshold: ")
        print(str(miss_counts))

    if save:
        print("Saving arrays...")
        with open('euclidean.npy', 'wb') as f:
            np.save(f, e_dists)
        with open('dice.npy', 'wb') as f:
            np.save(f, d_dists)
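
Scan.dice_similarity is not shown; in general the Sørensen–Dice coefficient of two binary masks is 2 * |A ∩ B| / (|A| + |B|). A generic sketch of that formula, not the Scan implementation:

import numpy as np

def dice_coefficient(mask_a, mask_b):
    # Sørensen–Dice coefficient of two boolean masks.
    a = np.asarray(mask_a, dtype=bool)
    b = np.asarray(mask_b, dtype=bool)
    denom = a.sum() + b.sum()
    if denom == 0:
        return 1.0  # both masks empty: treat as identical
    return 2.0 * np.logical_and(a, b).sum() / denom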
Example #26
def runBtm():
    tools.progress("Running the btm tests...")
    tools.runNoError(["mvn", "clean", "test", "-Dtest=TwoPCTest"])
    tools.runNoError(["mvn", "clean", "test", "-Dtest=TwoPCPhase1FailureTest"])
    tools.runNoError(["mvn", "clean"])
Example #27
def setupTests(test_dir, root_dir, rvmonitor_bin_dir, framework_dir, setup_dir,
               test_name):
    """
    Full setup for an RVM unit-test.

    Args:
        test_dir (str): Input directory containing the test.
            It should contain a P spec and one or more Java test
            files using that spec.

        root_dir (str): The root directory for the P compiler
            repository.

        rvmonitor_bin_dir (str): Directory containing the rv-monitor
            binary.

        framework_dir (str): Directory containing the test framework files.

        setup_dir (str): Output directory that will contain the test
            setup, ready for testing with Maven.

        test_name (str): The name of the test, used for user-friendly
            messages.

    Raises:
        SubprocessError if one of the tools used (the P compiler and
            rv-monitor) returns an error code.
    """
    tools.progress("Setup for test %s..." % test_name)
    if not os.path.exists(setup_dir):
        os.makedirs(setup_dir)

    shutil.copy(os.path.join(framework_dir, "pom.xml"), setup_dir)

    src_setup_dir = os.path.join(setup_dir, "src")
    if os.path.exists(src_setup_dir):
        shutil.rmtree(src_setup_dir)
    shutil.copytree(os.path.join(framework_dir, "src"), src_setup_dir)

    test_setup_dir = os.path.join(src_setup_dir, "test", "java", "unittest")
    if not os.path.exists(test_setup_dir):
        os.makedirs(test_setup_dir)
    for f in glob.glob(os.path.join(test_dir, "*.java")):
        shutil.copy(f, test_setup_dir)

    shutil.copy(os.path.join(test_dir, "spec.p"), setup_dir)

    monitor_setup_dir = os.path.join(setup_dir, "monitor")
    if not os.path.exists(monitor_setup_dir):
        os.makedirs(monitor_setup_dir)
    for f in glob.glob(os.path.join(framework_dir, "monitor", "*.txt")):
        shutil.copy(f, monitor_setup_dir)

    gen_monitor_setup_dir = os.path.join(monitor_setup_dir, "generated")
    if not os.path.exists(gen_monitor_setup_dir):
        os.makedirs(gen_monitor_setup_dir)
    translate(test_dir, root_dir, gen_monitor_setup_dir)

    mop_setup_dir = os.path.join(src_setup_dir, "main", "java", "mop")
    aspectj_setup_dir = os.path.join(mop_setup_dir, "aspectJ")
    if not os.path.exists(aspectj_setup_dir):
        os.makedirs(aspectj_setup_dir)
    fillAspect(aspectj_setup_dir, monitor_setup_dir, gen_monitor_setup_dir)

    createRvm(rvmonitor_bin_dir, gen_monitor_setup_dir, mop_setup_dir)

    for f in glob.glob(os.path.join(gen_monitor_setup_dir, "*.java")):
        shutil.copy(f, mop_setup_dir)
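
A hypothetical invocation wiring the steps above together (all paths are illustrative, not the repository's real layout):

setupTests(test_dir="tests/twophasecommit",
           root_dir="/path/to/P",
           rvmonitor_bin_dir="/path/to/rv-monitor/bin",
           framework_dir="tests/framework",
           setup_dir="/tmp/rvm-test-setup",
           test_name="twophasecommit")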