Example #1
    def testMemoizedInferenceResults(self):
        inferrer = inference.Inferrer(test_util.savedmodel_path(),
                                      memoize_inference_results=True)
        activations = inferrer._get_activations_for_batch(('ADE',))
        memoized_activations = inferrer._get_activations_for_batch(('ADE',))

        self.assertIs(activations, memoized_activations)
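For contrast, a minimal sketch of the non-memoized case (this test is an assumption, not part of the original suite: it presumes the default path recomputes and returns a fresh array on every call):

    def testNonMemoizedInferenceResults(self):
        # Hypothetical contrast: without memoization (assumed default),
        # repeated calls recompute, so the results should match in value
        # but be distinct objects.
        inferrer = inference.Inferrer(test_util.savedmodel_path(),
                                      memoize_inference_results=False)
        activations = inferrer._get_activations_for_batch(('ADE',))
        recomputed = inferrer._get_activations_for_batch(('ADE',))
        self.assertIsNot(activations, recomputed)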
Example #2
    def testStringInput(self):
        inferrer = inference.Inferrer(test_util.savedmodel_path())
        # Passing a bare string instead of a list should raise.
        with self.assertRaisesRegex(
                ValueError, '`list_of_seqs` should be convertible to a '
                'numpy vector of strings. Got *'):
            inferrer.get_activations('QP')
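The error above is avoided by wrapping the sequence in a list; a minimal sketch of the correct call (same fixtures as the test, and assuming one activation row per input, as Example #3 checks):

    # Passing a list of sequences is the supported calling convention.
    inferrer = inference.Inferrer(test_util.savedmodel_path())
    activations = inferrer.get_activations(['QP'])
    assert activations.shape[0] == 1  # one activation row per input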
Example #3
    def testBatchedInference(self):
        inferrer = inference.Inferrer(self.saved_model_path, batch_size=5)

        input_seq = 'AP'
        for total_size in range(15):
            # Covers empty, partial, and multi-batch inputs for batch_size=5.
            full_list = [input_seq] * total_size
            activations = inferrer.get_activations(full_list)
            # One activation row is expected per input sequence.
            self.assertLen(full_list, activations.shape[0])
Example #4
    def testSortUnsortInference(self):
        inferrer = inference.Inferrer(test_util.savedmodel_path(),
                                      batch_size=1)

        input_seqs = ['AP', 'APP', 'AP']
        # Internally, sorting by length moves the long sequence to the end.
        activations = inferrer.get_activations(input_seqs)
        # Make sure its activations are restored to the middle position.
        self.assertAllClose(activations[0], activations[2])
        self.assertNotAllClose(activations[0], activations[1])
Example #5
import gc
import json
import os

import jsonpickle as pickle  # aliased so pickle.encode/decode use jsonpickle
import jsonpickle.ext.numpy as jsonpickle_numpy

import infer  # project-local module providing the Inferrer class


def run_fitting(folder):

    samples = []
    tendencies = [5]  # other habitual tendencies: 1, 3, 10, 30, 50, 100
    for tendency in tendencies:
        for trans in [99]:
            for prob in [75]:
                for train in [100]:
                    print(tendency, trans)

                    run_name = (f"h{tendency}_t{trans}_p{prob}"
                                f"_train{train}.json")
                    fname = os.path.join(folder, run_name)

                    # Register numpy handlers so jsonpickle can decode arrays.
                    jsonpickle_numpy.register_handlers()

                    with open(fname, 'r') as infile:
                        data = json.load(infile)

                    worlds_old = pickle.decode(data)

                    # Hold out the first 50 trials and everything past the
                    # training cutoff for testing.
                    test_trials = list(range(0, 50)) + list(range(train, 150))

                    inferrer = infer.Inferrer(worlds_old[:20],
                                              0.01,
                                              1.,
                                              test_trials=test_trials)
                    # print(1./inferrer.sample_space)
                    # print(inferrer.likelihood.mean(axis=0))
                    # plt.figure()
                    # plt.plot(inferrer.likelihood.mean(axis=0), '.')
                    # plt.show()

                    inferrer.run_single_inference(ndraws=15000,
                                                  nburn=5000,
                                                  cores=4)
                    samples.append(inferrer.samples)

                    fname = os.path.join(folder,
                                         run_name[:-5] + "_samples.json")

                    pickled = pickle.encode(
                        [samples[-1], inferrer.sample_space])
                    with open(fname, 'w') as outfile:
                        json.dump(pickled, outfile)

                    # Drop the reference to the (potentially large) encoded
                    # string before forcing garbage collection.
                    pickled = 0

                    gc.collect()
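A minimal driver for run_fitting might look like this (the folder name is hypothetical; it must contain the h*_t*_p*_train*.json files read above):

    if __name__ == "__main__":
        # Hypothetical results directory containing files such as
        # h5_t99_p75_train100.json from an earlier simulation run.
        run_fitting("results")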
Example #6
    def testGetVariable(self):
        inferrer = inference.Inferrer(self.saved_model_path)
        output = inferrer.get_variable('conv1d/bias:0')
        self.assertNotEmpty(output)
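As a follow-on, a hedged sketch of inspecting the fetched variable (this assumes get_variable returns a numpy array, consistent with the emptiness check above):

    # 'conv1d/bias:0' is the variable name used in the test above;
    # a convolution bias is expected to be a 1-D vector.
    bias = inferrer.get_variable('conv1d/bias:0')
    assert bias.ndim == 1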
Example #7
    def testStringInput(self):
        inferrer = inference.Inferrer(self.saved_model_path)
        # Passing a bare string instead of a list should raise.
        with self.assertRaisesRegex(ValueError, 'must be a list of strings'):
            inferrer.get_activations('QP')
Example #8
def load_models(model_cache_path, num_ensemble_elements):
    """Loads models from the cache path into lists of inferrers.

    Args:
      model_cache_path: path that contains downloaded SavedModels and
        associated metadata. Same path that was used when installing the
        models via install_models.
      num_ensemble_elements: number of ensemble elements of each type to load.

    Returns:
      (list_of_pfam_inferrers, list_of_ec_inferrers, list_of_go_inferrers)

    Raises:
      ValueError: if the models were not found. The exception message
        explains that install_models.py needs to be rerun.
    """
    try:
        pfam_inferrer_paths = _get_inferrer_paths(
            utils.OSS_PFAM_ZIPPED_MODELS_URLS, model_cache_path)
        ec_inferrer_paths = _get_inferrer_paths(
            utils.OSS_EC_ZIPPED_MODELS_URLS, model_cache_path)
        go_inferrer_paths = _get_inferrer_paths(
            utils.OSS_GO_ZIPPED_MODELS_URLS, model_cache_path)

        to_return = []
        inferrer_list_paths_for_all_models = [
            pfam_inferrer_paths, ec_inferrer_paths, go_inferrer_paths
        ]
        pbar = tqdm.tqdm(desc='Loading models',
                         position=0,
                         total=len(inferrer_list_paths_for_all_models) *
                         num_ensemble_elements,
                         leave=True,
                         dynamic_ncols=True)
        for inferrer_list_paths in inferrer_list_paths_for_all_models:
            inner_itr = inferrer_list_paths[:num_ensemble_elements]
            inferrer_list = []
            for p in inner_itr:
                inferrer_list.append(inference.Inferrer(p, use_tqdm=True))
                pbar.update()
            to_return.append(inferrer_list)

        pfam_inferrers, ec_inferrers, go_inferrers = to_return

        return pfam_inferrers, ec_inferrers, go_inferrers

    except tf.errors.NotFoundError as exc:
        err_msg = 'Unable to find cached models in {}.'.format(
            model_cache_path)
        if num_ensemble_elements > 1:
            err_msg += (
                ' Make sure you have installed the entire ensemble of models by '
                'running\n    install_models.py --install_ensemble '
                '--model_cache_path={}'.format(model_cache_path))
        else:
            err_msg += (
                ' Make sure you have installed the models by running\n    '
                'install_models.py --model_cache_path={}'.format(
                    model_cache_path))
        err_msg += '\nThen try rerunning this script.'

        # Chain the original exception so the root cause is preserved.
        raise ValueError(err_msg) from exc
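A minimal usage sketch for load_models (the cache path and ensemble size are hypothetical; install_models.py must have been run beforehand):

    # Hypothetical: load one ensemble element per model type from a cache.
    pfam, ec, go = load_models(model_cache_path='/tmp/cached_models',
                               num_ensemble_elements=1)
    print(len(pfam), len(ec), len(go))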