Example #1
def pycloud_unpickle(file_name):
    # type: (Text) -> Any
    """Unpickle an object from file using cloudpickle."""
    from future.utils import PY2
    import cloudpickle

    with io.open(file_name, 'rb') as f:  # pragma: no cover
        if PY2:
            return cloudpickle.load(f)
        else:
            return cloudpickle.load(f, encoding="latin-1")
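A matching serializer is not shown above; as a minimal sketch (function name and type comment assumed, mirroring pycloud_unpickle), the write side could look like this:

def pycloud_pickle(file_name, obj):
    # type: (Text, Any) -> None
    """Pickle an object to a file using cloudpickle (sketch only)."""
    import io
    import cloudpickle

    with io.open(file_name, 'wb') as f:
        cloudpickle.dump(obj, f)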
Example #2
    def load(cls, model_dir, ngram_featurizer):
        # type: (Text, Text) -> NGramFeaturizer
        import cloudpickle

        if model_dir and ngram_featurizer:
            classifier_file = os.path.join(model_dir, ngram_featurizer)
            with io.open(classifier_file, 'rb') as f:  # pragma: no cover
                if PY3:
                    return cloudpickle.load(f, encoding="latin-1")
                else:
                    return cloudpickle.load(f)
        else:
            return NGramFeaturizer()
Example #3
    def load(cls, model_dir=None, model_metadata=None, cached_component=None, **kwargs):
        # type: (Text, Metadata, Optional[Component], **Any) -> SklearnIntentClassifier
        import cloudpickle

        if model_dir and model_metadata.get("intent_classifier_sklearn"):
            classifier_file = os.path.join(model_dir, model_metadata.get("intent_classifier_sklearn"))
            with io.open(classifier_file, 'rb') as f:  # pragma: no cover
                if PY3:
                    return cloudpickle.load(f, encoding="latin-1")
                else:
                    return cloudpickle.load(f)
        else:
            return SklearnIntentClassifier()
Example #4
    def load(cls, model_dir, intent_classifier_sklearn):
        # type: (Text, Text) -> SklearnIntentClassifier
        import cloudpickle

        if model_dir and intent_classifier_sklearn:
            classifier_file = os.path.join(model_dir, intent_classifier_sklearn)
            with io.open(classifier_file, 'rb') as f:   # pragma: no cover
                if PY3:
                    return cloudpickle.load(f, encoding="latin-1")
                else:
                    return cloudpickle.load(f)
        else:
            return SklearnIntentClassifier()
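Examples #2-#4 (and #5 below) only show the read side; the corresponding persist step in this style of component is a straight cloudpickle.dump of the classifier object, roughly as in this sketch (method body and names assumed, keyed to what load() reads back):

    def persist(self, model_dir):
        # type: (Text) -> Dict[Text, Any]
        # Sketch only: dump the component so load() above can restore it.
        import cloudpickle

        classifier_file = os.path.join(model_dir, "intent_classifier_sklearn.pkl")
        with io.open(classifier_file, 'wb') as f:
            cloudpickle.dump(self, f)
        return {"intent_classifier_sklearn": "intent_classifier_sklearn.pkl"}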
Example #5
    def load(cls, model_dir=None, model_metadata=None, cached_component=None, **kwargs):
        # type: (Text, Metadata, Optional[Component], **Any) -> NGramFeaturizer
        import cloudpickle

        if model_dir and model_metadata.get("ngram_featurizer"):
            classifier_file = os.path.join(model_dir, model_metadata.get("ngram_featurizer"))
            with io.open(classifier_file, 'rb') as f:   # pragma: no cover
                if PY3:
                    return cloudpickle.load(f, encoding="latin-1")
                else:
                    return cloudpickle.load(f)
        else:
            return NGramFeaturizer()
Example #6
def postprocess(input, output):
    vocab = pickle.load(open('vocab.pkl', 'rb'))
    entity_vocab = pickle.load(open('entity_vocab.pkl', 'rb'))
    entity_inv = {v:k for k,v in entity_vocab.items()}
    vocab_inv = {v:k for k,v in vocab.items()}

    # this has the shape (timesteps, batches, data), so swap axes to (batches, timesteps, data)
    character_probs = numpy.swapaxes(output, 0, 1)
    # now extract the guessed entities
    predictions = numpy.argmax(output, axis=2)

    # find contiguous entity characters across timesteps
    non_entity_label = entity_vocab.get('O')
    entities = []
    for i, query in enumerate(predictions):
        previous_label = non_entity_label
        entity_string = ""
        used_indices = set()
        for j, label in enumerate(query):
            # find entity start point (expand to space character) and extract the continuous entity
            if label != non_entity_label and label != previous_label and j not in used_indices:
                entity_start = j
                while vocab_inv.get(
                        numpy.argmax(input[i, entity_start])) not in string.whitespace and entity_start >= 0:
                    entity_start -= 1
                # move start point forward one to get out of whitespace or back to 0 index
                entity_start += 1
                # now from the start point, extract continuous until whitespace or punctuation
                entity_idx = entity_start
                while entity_idx < len(query) and \
                        (
                            query[entity_idx] == label or
                            entity_idx == entity_start or
                            (
                                entity_idx > entity_start and
                                vocab_inv.get(numpy.argmax(input[i, entity_idx])) not in string.whitespace + string.punctuation and
                                vocab_inv.get(numpy.argmax(input[i, entity_idx - 1])) not in string.whitespace + string.punctuation
                            )
                        ):
                    entity_string += vocab_inv.get(numpy.argmax(input[i, entity_idx]))
                    used_indices.add(entity_idx)
                    entity_idx += 1
                # get rid of trailing matched punctuation
                if entity_string[-1] in string.punctuation:
                    entity_string = entity_string[:-1]
                # add the entity stripped of whitespace in beginning and end, and reset the string
                entities.append((entity_string.strip(), entity_inv.get(label)))
                entity_string = ""

            previous_label = label
    return entities
Example #7
    def _calc_Tx(self, name, x, lambdify=True):
        """ Uses Sympy to transform x from the reference frame of a joint
        or link to the origin (world) coordinates.

        name string: name of the joint or link, or end-effector
        x list: the [x,y,z] position of interest in "name"'s reference frame
        lambdify boolean: if True returns a function to calculate
                          the transform. If False returns the Sympy
                          matrix
        """

        # check to see if we have our transformation saved in file
        if (os.path.isfile('%s/%s.T' % (self.config_folder, name))):
            Tx = cloudpickle.load(open('%s/%s.T' %
                                       (self.config_folder, name), 'rb'))
        else:
            T = self._calc_T(name=name)
            # transform x into world coordinates
            Tx = T * sp.Matrix(self.x + [1])

            # save to file
            cloudpickle.dump(Tx, open('%s/%s.T' %
                                      (self.config_folder, name), 'wb'))

        if lambdify is False:
            return Tx
        return sp.lambdify(self.q + self.x, Tx)
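Example #7 and the similar _calc_* methods below (#8, #12, #26) share a compute-once, cache-to-disk idiom around cloudpickle; a condensed sketch of just that idiom, with hypothetical names:

def cached_expression(path, build_expr):
    # Sketch of the pattern used by the _calc_* methods: reuse a pickled
    # SymPy expression if it already exists on disk, otherwise build it
    # once and cache it for the next run.
    import os
    import cloudpickle

    if os.path.isfile(path):
        with open(path, 'rb') as f:
            return cloudpickle.load(f)
    expr = build_expr()
    with open(path, 'wb') as f:
        cloudpickle.dump(expr, f)
    return expr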
Example #8
    def _calc_T_inv(self, name, x, lambdify=True):
        """ Return the inverse transform matrix, which converts from
        world coordinates into the robot's end-effector reference frame

        name string: name of the joint or link, or end-effector
        x list: the [x,y,z] position of interest in "name"'s reference frame
        lambdify boolean: if True returns a function to calculate
                          the transform. If False returns the Sympy
                          matrix
        """

        # check to see if we have our transformation saved in file
        if (os.path.isfile('%s/%s.T_inv' % (self.config_folder,
                                                name))):
            T_inv = cloudpickle.load(open('%s/%s.T_inv' %
                                          (self.config_folder, name), 'rb'))
        else:
            T = self._calc_T(name=name)
            rotation_inv = T[:3, :3].T
            translation_inv = -rotation_inv * T[:3, 3]
            T_inv = rotation_inv.row_join(translation_inv).col_join(
                sp.Matrix([[0, 0, 0, 1]]))

            # save to file
            cloudpickle.dump(T_inv, open('%s/%s.T_inv' %
                                         (self.config_folder, name), 'wb'))

        if lambdify is False:
            return T_inv
        return sp.lambdify(self.q + self.x, T_inv)
Example #9
def create_model(init_config_file=None, vocab={}, label_vocab={}):
    # load from a configuration file, or define the model configuration
    if init_config_file is not None:
        with open(init_config_file, 'rb') as f:
            init_config = pickle.load(f)
    else:
        init_config = {
            'input_size': len(vocab),
            'hidden_size': 128,
            'output_size': len(label_vocab),
            'hidden_activation': 'tanh',
            'inner_hidden_activation': 'sigmoid',
            'activation': 'softmax',
            'weights_init': 'uniform',
            'weights_interval': 'montreal',
            'r_weights_init': 'orthogonal',
            'clip_recurrent_grads': 5.,
            'noise': 'dropout',
            'noise_level': 0.5,
            'direction': 'bidirectional',
            'cost_function': 'nll',
            'cost_args': {'one_hot': True}
        }

    # instantiate the model!
    lstm = LSTM(**init_config)

    return lstm
Example #10
File: rnn.py Project: Pinafore/qb
    def load(cls, directory: str):
        with open(os.path.join(directory, 'rnn.pkl'), 'rb') as f:
            params = cloudpickle.load(f)

        guesser = RnnGuesser(params['config_num'])
        guesser.page_field = params['page_field']
        guesser.qanta_id_field = params['qanta_id_field']

        guesser.text_field = params['text_field']

        guesser.n_classes = params['n_classes']
        guesser.gradient_clip = params['gradient_clip']
        guesser.n_hidden_units = params['n_hidden_units']
        guesser.n_hidden_layers = params['n_hidden_layers']
        guesser.nn_dropout = params['nn_dropout']
        guesser.use_wiki = params['use_wiki']
        guesser.n_wiki_sentences = params['n_wiki_sentences']
        guesser.wiki_title_replace_token = params['wiki_title_replace_token']
        guesser.lowercase = params['lowercase']
        guesser.random_seed = params['random_seed']
        guesser.model = RnnModel(
            guesser.n_classes,
            text_field=guesser.text_field,
            init_embeddings=False, emb_dim=300,
            n_hidden_layers=guesser.n_hidden_layers,
            n_hidden_units=guesser.n_hidden_units
        )
        guesser.model.load_state_dict(torch.load(
            os.path.join(directory, 'rnn.pt'), map_location=lambda storage, loc: storage
        ))
        guesser.model.eval()
        if CUDA:
            guesser.model = guesser.model.cuda()
        return guesser
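The load above implies a save counterpart in which the hyperparameters and fields are cloudpickled while the weights go through torch.save; a rough sketch (not taken from the project, attribute names assumed to mirror the keys read in load) could be:

    def save(self, directory: str):
        # Sketch only; attribute names assumed from the params read in load().
        params = {
            'config_num': self.config_num,
            'page_field': self.page_field,
            'qanta_id_field': self.qanta_id_field,
            'text_field': self.text_field,
            'n_classes': self.n_classes,
            'gradient_clip': self.gradient_clip,
            'n_hidden_units': self.n_hidden_units,
            'n_hidden_layers': self.n_hidden_layers,
            'nn_dropout': self.nn_dropout,
            'use_wiki': self.use_wiki,
            'n_wiki_sentences': self.n_wiki_sentences,
            'wiki_title_replace_token': self.wiki_title_replace_token,
            'lowercase': self.lowercase,
            'random_seed': self.random_seed,
        }
        with open(os.path.join(directory, 'rnn.pkl'), 'wb') as f:
            cloudpickle.dump(params, f)
        torch.save(self.model.state_dict(), os.path.join(directory, 'rnn.pt'))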
Example #11
 def test_load_namespace(self):
     obj = 1, 2, 3, 4
     bio = BytesIO()
     cloudpickle.dump(obj, bio)
     bio.seek(0)
     returned_obj = cloudpickle.load(bio)
     self.assertEqual(obj, returned_obj)
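The BytesIO round trip above also works for objects the standard library pickle rejects, such as lambdas and closures, which is the usual reason for reaching for cloudpickle; a small hypothetical companion test:

 def test_load_closure_sketch(self):
     # Sketch only: closures are where cloudpickle differs from plain pickle.
     def make_adder(n):
         return lambda x: x + n
     bio = BytesIO()
     cloudpickle.dump(make_adder(10), bio)
     bio.seek(0)
     add_ten = cloudpickle.load(bio)
     self.assertEqual(add_ten(5), 15)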
Example #12
    def _calc_Mq_g(self, lambdify=True):
        """ Uses Sympy to generate the force of gravity in
        joint space for the ur5

        lambdify boolean: if True returns a function to calculate
                          the gravity term. If False returns the Sympy
                          matrix
        """

        # check to see if we have our gravity term saved in file
        if os.path.isfile('%s/Mq_g' % self.config_folder):
            Mq_g = cloudpickle.load(open('%s/Mq_g' % self.config_folder,
                                         'rb'))
        else:
            # get the Jacobians for each link's COM
            J = [self._calc_J('link%s' % ii, x=[0, 0, 0], lambdify=False)
                 for ii in range(self.num_links)]

            # transform each inertia matrix into joint space and
            # sum together the effects of arm segments' inertia on each motor
            Mq_g = sp.zeros(self.num_joints, 1)
            for ii in range(self.num_joints):
                Mq_g += J[ii].T * self._M[ii] * self.gravity
            Mq_g = sp.Matrix(Mq_g)

            # save to file
            cloudpickle.dump(Mq_g, open('%s/Mq_g' % self.config_folder,
                                        'wb'))

        if lambdify is False:
            return Mq_g
        return sp.lambdify(self.q + self.x, Mq_g)
Example #13
	def read(self, filepath):
		with open(filepath, 'rb') as fo:
			match = cloudpickle.load(fo)
			self.for_worker = match.for_worker
			self.gamename = match.gamename
			self.map_file = match.map_file
			self.uuid = match.uuid
			self.bots = match.bots
Example #14
 def get_state(self):
     if self.latest_only:
         try:
             with open(self.get_snapshot_path(0), "rb") as f:
                 return cloudpickle.load(f)
         except EOFError:
             pass
     else:
         snapshot_files = os.listdir(self.snapshots_folder)
         snapshot_files = sorted(
             snapshot_files, key=lambda x: int(x.split(".")[0]))[::-1]
         for file in snapshot_files:
             file_path = os.path.join(self.snapshots_folder, file)
             try:
                 with open(file_path, "rb") as f:
                     return cloudpickle.load(f)
             except EOFError:
                 pass
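get_state above only scans snapshot files written elsewhere; a minimal sketch of the writing side (method name and snapshot layout assumed, reusing get_snapshot_path from the snippet):

 def save_state(self, state, index=0):
     # Sketch: write one snapshot so get_state() above can read it back.
     with open(self.get_snapshot_path(index), "wb") as f:
         cloudpickle.dump(state, f)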
Example #15
def test_cloudpickle_to_file(EN):
    f = tempfile.NamedTemporaryFile(delete=False)
    p = cloudpickle.CloudPickler(f)
    p.dump(EN)
    f.close()
    loaded_en = cloudpickle.load(open(f.name, 'rb'))
    os.unlink(f.name)
    doc = loaded_en(unicode('test parse'))
    assert len(doc) == 2
Example #16
def classifier(test_data):
    """
    :param test_data: data that needs prediction list or single parameter
    :return: predicted class rotten or fresh
    """
    # Load the pickle data
    model = pickle.load(open('movie_model.pkl','rb'))
    vectorizer = pickle.load(open('vectorizer.pkl','rb'))

    # Check for the type
    if type(test_data) != list:
        test_data = list(test_data)
    # Transform the test data
    transformed = vectorizer.transform(test_data).toarray()
    # Predict the class
    predicted = model.predict(transformed).tolist()

    return predicted
Example #17
 def load(self, PATH):
     """
     load trained model from given path
     :param PATH:
     :return:
     """
     try:
         with open(PATH, 'rb') as f:
             self.model = cloudpickle.load(f)
     except IOError:
         return False
Example #18
 def wrapper(args):
     kwargs = {k: v for k, v in zip(search.keys(), args)}
     result = fun(**kwargs)
     try:
         with open('/tmp/results.pkl', 'rb') as f: data = pickle.load(f)
     except (IOError, EOFError):
         data = {"kwargs": [], "results": []}
     with open('/tmp/results.pkl', 'wb') as f:
         data["kwargs"].append(kwargs)
         data["results"].append(result)
         pickle.dump(data, f)
     return result
Example #19
 def _load_cache_from_file(self, file_name):
     try:
         with open(file_name, 'rb') as io:
             deterministic_cache = pickle.load(io)
         self._deterministic_cache = deterministic_cache.cache
         self._cache_valid_for_turns = deterministic_cache.turns
         self._logger.debug(
             'Loaded cache with %d entries' % len(self._deterministic_cache))
         return True
     except IOError:
         self._logger.debug('Cache file not found. Starting with empty cache')
         return False
Example #20
    def load(self, models_dir):
        try:
            del self.model
            tf.keras.backend.clear_session()
            self.model = tf.keras.models.load_model(os.path.join(models_dir, "tf_intent_model.hd5"),compile=True)
            self.graph = tf.get_default_graph()
            print("Tf model loaded")
            with open(os.path.join(models_dir, "labels.pkl"), 'rb') as f:
                self.label_encoder = cloudpickle.load(f)
                print("Labels model loaded")

        except IOError:
            return False
Example #21
	def read(self, filepath):
		try:
			with open(filepath, 'rb') as fo:
				match = cloudpickle.load(fo)
				self.for_worker = match.for_worker
				self.gamename = match.gamename
				self.map_file = match.map_file
				self.uuid = match.uuid
				self.bots = match.bots
				return True
		except FileNotFoundError as e:
			# FIXME logging
			return False
Example #22
    def load(path):
        with open(path, "rb") as f:
            model_data, act_params = cloudpickle.load(f)
        act = deepq.build_act(**act_params)
        sess = tf.Session()
        sess.__enter__()
        with tempfile.TemporaryDirectory() as td:
            arc_path = os.path.join(td, "packed.zip")
            with open(arc_path, "wb") as f:
                f.write(model_data)

            zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
            load_state(os.path.join(td, "model"))

        return ActWrapper(act, act_params)
Example #23
def string_to_data(query):
    vocab = pickle.load(open('vocab.pkl', 'rb'))
    # process the raw input data string
    data = []
    # get the integer encodings
    for data_char in query:
        data.append(vocab.get(data_char, 0))

    # convert the integers to one-hot arrays
    data = numpy_one_hot(numpy.asarray(data), n_classes=numpy.amax(vocab.values()) + 1)

    # make 3D for model input
    seq, dim = data.shape
    data = numpy.reshape(data, (1, seq, dim))

    return data
Example #24
File: elmo.py Project: Pinafore/qb
    def load(cls, directory: str):
        with open(os.path.join(directory, 'elmo.pkl'), 'rb') as f:
            params = cloudpickle.load(f)

        guesser = ElmoGuesser(params['config_num'])
        guesser.class_to_i = params['class_to_i']
        guesser.i_to_class = params['i_to_class']
        guesser.random_seed = params['random_seed']
        guesser.dropout = params['dropout']
        guesser.model = ElmoModel(len(guesser.i_to_class))
        guesser.model.load_state_dict(torch.load(
            os.path.join(directory, 'elmo.pt'), map_location=lambda storage, loc: storage
        ))
        guesser.model.eval()
        if CUDA:
            guesser.model = guesser.model.cuda()
        return guesser
Example #25
def get_data(name, force=False, read=True):
    """ remember that the stuff is here
        d._rightmove_data__request_object = _GetDataFromUrl()
        d._rightmove_data__url

        And that weblinks go into df.url. That is what we need to recurse into.
    """
    url = urls[name]
    filename = get_hash_pickle_name(name, url)
    if os.path.exists(filename) and not force:
        print("found {}".format(filename))
    else:
        rightmove_object = rightmove_data(url)
        pickle.dump(rightmove_object, open(filename, 'wb'))
    if read:
        print("reading {}".format(filename))
        return pickle.load(open(filename, 'rb'))
Example #26
    def _calc_J(self, name, x, lambdify=True):
        """ Uses Sympy to generate the Jacobian for a joint or link

        name string: name of the joint or link, or end-effector
        lambdify boolean: if True returns a function to calculate
                          the Jacobian. If False returns the Sympy
                          matrix
        """

        # check to see if we have our Jacobian saved in file
        if os.path.isfile('%s/%s.J' % (self.config_folder, name)):
            J = cloudpickle.load(open('%s/%s.J' %
                                 (self.config_folder, name), 'rb'))
        else:
            Tx = self._calc_Tx(name, x=x, lambdify=False)
            J = []
            # calculate derivative of (x,y,z) wrt to each joint
            for ii in range(self.num_joints):
                J.append([])
                J[ii].append(Tx[0].diff(self.q[ii]))  # dx/dq[ii]
                J[ii].append(Tx[1].diff(self.q[ii]))  # dy/dq[ii]
                J[ii].append(Tx[2].diff(self.q[ii]))  # dz/dq[ii]

            end_point = name.strip('link').strip('joint')
            if end_point != 'EE':
                end_point = min(int(end_point) + 1, self.num_joints)
                # add on the orientation information up to the last joint
                for ii in range(end_point):
                    J[ii] = J[ii] + self.J_orientation[ii]
                # fill in the rest of the joints orientation info with 0
                for ii in range(end_point, self.num_joints):
                    J[ii] = J[ii] + [0, 0, 0]

            # save to file
            cloudpickle.dump(J, open('%s/%s.J' %
                                     (self.config_folder, name), 'wb'))

        J = sp.Matrix(J).T  # correct the orientation of J
        if lambdify is False:
            return J
        return sp.lambdify(self.q + self.x, J)
Example #27
 def load(self, PATH):
     try:
         with open(PATH, 'rb') as f:
             self.model = cloudpickle.load(f)
     except IOError:
         return False
Example #28
def main():

  args = argparser.parse_args()
  n_pts = int(args.points)

  print 'Loading data'
  with open(os.path.abspath(args.in_file), 'rb') as f:
    data = cloudpickle.load(f)

  print 'subtracting mean'
  time_arr = data['time_arr'] - np.mean(data['time_arr'])
  gyr_arr = data['gyr_arr']   - np.tile( np.mean(data['gyr_arr'],  axis=1).reshape((3,1)), (1,data['gyr_arr'].shape[1]) )
  acc_arr = data['acc_arr']   - np.tile( np.mean(data['acc_arr'],  axis=1).reshape((3,1)), (1,data['acc_arr'].shape[1]) )
  
  # M: number of axes
  # N: number of epochs
  if acc_arr.shape != gyr_arr.shape:
    raise Exception('different sizes')
  M, N = gyr_arr.shape

  # automate this?
  print 'Computing mean dt'
  t0 = np.mean(np.diff(time_arr))
  fs = np.float64(1.0)/t0

  n = np.power(2, np.arange(np.floor(np.log2(N/2.))))
  end_log_inc = np.log10(n[-1])
  m = shared_from_array( np.unique(np.ceil(np.logspace(0, end_log_inc, n_pts))).astype(np.int64) )
  T = m*t0

  if (T < 0).any():
    print 'T < 0'
    set_trace()

  # setup input/output shared memory arrays
  theta_gyr = shared_from_array( np.cumsum(gyr_arr, axis=1) )
  theta_acc = shared_from_array( np.cumsum(acc_arr, axis=1) )
  sigma2_gyr = shared_from_array( np.zeros((M, len(m))) )
  sigma2_acc = shared_from_array( np.zeros((M, len(m))) )

  # shared memory/serialization workaround: define calculation functions here so
  # that the shared memory arrays are in scope

  def adev_at_tau(i):
    """worker function for parallelization. first part of the Allan deviation 
    equation.
    There is potentially a way to do the Allan deviation calculation without any
    for loop whatsoever, but I haven't figured it out yet. It would require 2D
    array indexing in NumPy.
    """
    k = range(N - 2*m[i])
    sigma2_gyr[:,i] = np.sum( np.power( theta_gyr[:,k+2*m[i]] - 2*theta_gyr[:,k+m[i]] + theta_gyr[:,k] , 2 ), axis=1)
    sigma2_acc[:,i] = np.sum( np.power( theta_acc[:,k+2*m[i]] - 2*theta_acc[:,k+m[i]] + theta_acc[:,k] , 2 ), axis=1)


  def adev_at_tau_wrapper(idxs):
    if idxs[0] == 0:
      for i in trange(len(idxs)):
        adev_at_tau(idxs[i])
    else:
      for i in idxs:
        adev_at_tau(i)


  print 'creating procs'
  idx_chunks = chunk(range(len(m)), int(args.cores))
  procs = [multiprocessing.Process(target=adev_at_tau_wrapper, args=(ichnk,)) for ichnk in idx_chunks]
  print '# chunks: ', len(procs)
  for proc in procs:
    proc.start()
  for proc in procs:
    proc.join()

  div = np.tile(2*np.multiply(np.power(T,2), N-2*m), (M,1))
  sigma2_gyr = np.divide(sigma2_gyr, div)
  sigma2_acc = np.divide(sigma2_acc, div)
  sigma_gyr = np.sqrt(sigma2_gyr)
  sigma_acc = np.sqrt(sigma2_acc)

  data_dir, in_name = os.path.split(os.path.abspath(args.in_file))
  set_name, ext = in_name.split(os.extsep)
  out_file_name = os.path.join(data_dir, set_name+'_adev'+os.extsep+ext)
  print 'saving to: ', out_file_name
  with open(out_file_name, 'wb') as f:
    cloudpickle.dump(
      {
        'T': T,
        'sigma2_gyr': sigma2_gyr,
        'sigma2_acc': sigma2_acc,
        'sigma_gyr': sigma_gyr,
        'sigma_acc': sigma_acc,
      },
      f, -1
    )
Example #29
def load_pickle(filepath):
    filestream = open(filepath, "rb")

    obj = cloudpickle.load(filestream)
    return obj
Example #30
def load_custom(file_path: str):
    with open(file_path, "rb") as file:
        return cloudpickle.load(file)
Example #31
def arl_load(name: str):
    """Load an object from a pre-existing pickle
    """
    with open(name, 'rb') as f:
        return cloudpickle.load(f)
Example #32
fig, axs = plt.subplots(nrows=3,
                        ncols=3,
                        sharex=True,
                        sharey=True,
                        figsize=(9, 9))
ax = axs.reshape(-1)
normalize = 0
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)

for loop in range(0, 9):
    try:
        bin_centres, median, per_50, per_16, per_84, per_25, per_75 = np.loadtxt(
            './binned_data/SM_DTM_z' + str(loop) + '.txt',
            unpack=True,
            comments='#')
        ax[loop] = cloudpickle.load(
            open('./pkl_hists/SM_DTM_z' + str(loop) + '.pkl', 'rb'))
    except IOError:
        bin_centres, median, per_50, per_16, per_84, per_25, per_75, normalize = bin_data(
            'SM', 'DTM', ax, normalize, loop, 'SM_DTM', nbins=30)

    ax[loop].set_xlim([8., 11.98])
    ax[loop].set_ylim([-3.98, 2])

    plot_params(ax[loop], loop, 'SM', 'DTM')

    plot_observations(ax[loop], loop, "DTM_SM")
    #ax[loop].errorbar(x_bins,y_median,yerr=(y_mederr),color='k',label='L-Galaxies Median',linewidth=2)

    ax[loop].plot(bin_centres,
                  per_50,
                  c='k',
Example #33
def load_checkpoint(filename='checkpoint.pkl'):
    with open(filename, 'rb') as fi:
        return cloudpickle.load(fi)
Example #34
def cloudpickle_load(filename, **kwargs):
    with open(filename, 'rb') as f:
        return cloudpickle.load(f, **kwargs)
Example #35
def main(args):
    use_cuda = torch.cuda.is_available() and True
    device = torch.device("cuda" if use_cuda else "cpu")
    """ Slice module. """

    image = sitk.ReadImage(args.image_path)
    if args.mask_path is not None:
        mask = sitk.ReadImage(args.mask_path)
    else:
        mask = None
    """ Dummy image """
    label = sitk.Image(image.GetSize(), sitk.sitkInt8)
    label.SetOrigin(image.GetOrigin())
    label.SetDirection(image.GetDirection())
    label.SetSpacing(image.GetSpacing())
    """ Get the patch size from string."""
    matchobj = re.match("([0-9]+)-([0-9]+)-([0-9]+)", args.image_patch_size)
    if matchobj is None:
        print("[ERROR] Invalid patch size : {}.".fotmat(args.image_patch_size))
        sys.exit()

    image_patch_size = [int(s) for s in matchobj.groups()]
    """ Get the patch size from string."""
    matchobj = re.match("([0-9]+)-([0-9]+)-([0-9]+)", args.label_patch_size)
    if matchobj is None:
        print("[ERROR] Invalid patch size : {}.".fotmat(args.label_patch_size))
        sys.exit()

    label_patch_size = [int(s) for s in matchobj.groups()]
    """ Get the slide size from string."""
    if args.slide is not None:
        matchobj = re.match("([0-9]+)-([0-9]+)-([0-9]+)", args.slide)
        if matchobj is None:
            print("[ERROR] Invalid patch size : {}.".fotmat(args.slide))
            sys.exit()

        slide = [int(s) for s in matchobj.groups()]
    else:
        slide = None

    extractor = extor(image=image,
                      label=label,
                      mask=mask,
                      image_patch_size=image_patch_size,
                      label_patch_size=label_patch_size,
                      slide=slide,
                      phase="segmentation")

    extractor.execute()
    image_array_list, mask_array_list = extractor.output("Array")
    """ Load model. """

    with open(args.modelweightfile, 'rb') as f:
        model = cloudpickle.load(f)
        model = torch.nn.DataParallel(model, device_ids=args.gpuid)

    model.eval()
    """ Segmentation module. """

    segmented_array_list = []
    for image_array, mask_array in tqdm(zip(image_array_list, mask_array_list),
                                        desc="Segmenting images...",
                                        ncols=60):
        if args.mask_path is not None and (mask_array == 0).all():
            segmented_array_list.append(mask_array)
            continue

        image_array = image_array.transpose(2, 0, 1)
        image_array = torch.from_numpy(image_array[np.newaxis, np.newaxis,
                                                   ...]).to(device,
                                                            dtype=torch.float)

        segmented_array = model(image_array)
        segmented_array = segmented_array.to("cpu").detach().numpy().astype(
            np.float)
        segmented_array = np.squeeze(segmented_array)
        segmented_array = np.argmax(segmented_array, axis=0).astype(np.uint8)
        segmented_array = segmented_array.transpose(1, 2, 0)

        segmented_array_list.append(segmented_array)
    """ Restore module. """
    segmented = extractor.restore(segmented_array_list)

    createParentPath(args.save_path)
    print("Saving image to {}".format(args.save_path))
    sitk.WriteImage(segmented, args.save_path, True)
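The model weight file opened above is read back as one cloudpickled network object; the corresponding save step is not shown, but a minimal sketch (file name reused from args.modelweightfile, everything else assumed) would be:

def save_model(model, modelweightfile):
    import cloudpickle

    # Serialize the entire torch model object so the inference script can
    # simply cloudpickle.load() it, as in the snippet above.
    with open(modelweightfile, 'wb') as f:
        cloudpickle.dump(model, f)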
Example #36
def load_remote(folder: str):
    file_path = os.path.join(folder, DefaultRemoteFilename)
    with open(file_path, "rb") as file:
        return cloudpickle.load(file)
Example #37
    def __call__(self, state, reward):
        if not self.rendered:
            self.kinematic = cloudpickle.load(
                open('./model/swing_kinematic.dll', 'rb'))
            self.fig, self.ax = plt.subplots(ncols=1, figsize=(6, 6))
            self.ax.set_xlim([-0.3, 0.3])
            self.ax.set_ylim([-0.3, 0.15])
            self.ax.set_xlabel('X')
            self.ax.set_ylabel('Y')
            # self.ax[0].set_xlim([-0.3, 0.3])
            # self.ax[0].set_ylim([-0.3, 0.3])
            # self.ax[0].set_xlabel('X')
            # self.ax[0].set_ylabel('Y')
            # self.ax[1].set_xlim([-2, 2])
            # self.ax[1].set_ylim([-15, 15])
            # self.ax[1].set_xlabel('$q_1$')
            # self.ax[1].set_ylabel('$\dot{q_1}$')

            self.line1, = self.ax.plot([], [],
                                       lw=2,
                                       color='k',
                                       linestyle='-',
                                       marker='o',
                                       ms=10)
            self.line2, = self.ax.plot([], [],
                                       lw=4,
                                       color='b',
                                       linestyle='-',
                                       marker='o',
                                       ms=6)

            # self.line1, = self.ax[0].plot([], [], lw=1, color='k', linestyle='-', marker='o', ms=5)
            # self.line2, = self.ax[0].plot([], [], lw=2, color='b', linestyle='-', marker='o', ms=3)
            # self.tau,   = self.ax[1].plot([], [], color='b', marker='o', ms=3)
            # self.traj = None
            # self.time_text = self.ax.text(-0.2, 0.23, '')
            # self.reward_text = self.ax.text(-0.2, 0.2, '')

            # self.time_text = self.ax[0].text(-0.2, 0.23, '')
            # self.reward_text = self.ax[0].text(-0.2, 0.2, '')
            self.fig.canvas.draw()
            self.rendered = True
            if not self.savefig:
                plt.show(block=False)
        else:
            p1, p2, p3, p21, p31 = kinematic(next_state, myParam)
            # if self.traj is None:
            #     self.traj = np.expand_dims(next_state[[0, 3]], axis=0)
            # else:
            #     self.traj = np.vstack([self.traj, next_state[[0, 3]]])
            self.line1.set_data([0, p1[0]], [0, p1[1]])
            self.line2.set_data([p21[0], p2[0], p3[0], p31[0]],
                                [p21[1], p2[1], p3[1], p31[1]])
            # self.tau.set_data(self.traj[:, 0], self.traj[:, 1])
            # self.time_text.set_text('t = %.2f'%(self.step/100.0))
            # self.reward_text.set_text('r = %.2f'%reward)

            # update canvas
            self.ax.draw_artist(self.ax.patch)
            #self.ax.draw_artist(self.ax[1].patch)
            self.ax.draw_artist(self.line1)
            self.ax.draw_artist(self.line2)
            # self.ax.draw_artist(self.tau)
            # self.ax.draw_artist(self.time_text)
            # self.ax.draw_artist(self.reward_text)

            # self.ax[0].draw_artist(self.ax[0].patch)
            # self.ax[1].draw_artist(self.ax[1].patch)
            # self.ax[0].draw_artist(self.line1)
            # self.ax[0].draw_artist(self.line2)
            # self.ax[1].draw_artist(self.tau)
            # self.ax[0].draw_artist(self.time_text)
            # self.ax[0].draw_artist(self.reward_text)
            self.fig.canvas.flush_events()
            self.fig.canvas.update()
        plt.axis('off')
        if self.savefig:
            self.fig.savefig(self.dir_name + '/anim/{}.png'.format(self.step))
        self.step += 1
        return self.fig
Example #38
def cli(onto_fp, dcell_fp, costanzo_2010_fp, cpkl_output_fmt, tsv_output):

    onto_raw_preds_fp = onto_fp
    df = pd.read_csv(onto_raw_preds_fp, sep='\t')
    df.columns = ['A', 'B', 'onto_pred', 'true', 'p']
    df = df[~df['true'].isna()].reset_index(drop=True)
    onto_df = df.sort_values(by=['A', 'B'])
    print(onto_df.head())
    print('Ontotype prediction data shape:', onto_df.shape)

    raw_dcell_preds = dcell_fp
    dcell_df = pd.read_csv(raw_dcell_preds, sep='\t', header=None)
    dcell_df.columns = ['A', 'B', 'dcell_pred']
    dcell_df = dcell_df.sort_values(by=['A', 'B'])
    print(dcell_df.head())
    print('DCell prediction data shape:', dcell_df.shape)
    onto_df = sort_cols(onto_df)
    dcell_df = sort_cols(dcell_df)

    keys = list(zip(onto_df['A'], onto_df['B']))
    keys = [a + ':' + b for (a, b) in keys]
    print('Ontotype preds have duplicate preds for gene pairs:',
          not len(keys) == len(set(keys)))

    keys = list(zip(dcell_df['A'], dcell_df['B']))
    keys = [a + ':' + b for (a, b) in keys]
    print('DCell preds have duplicate preds for gene pairs:',
          not len(keys) == len(set(keys)))

    print('Dropping duplicates in DCell preds')
    dcell_df = dcell_df.drop_duplicates(['A', 'B'])

    keys = list(zip(dcell_df['A'], dcell_df['B']))
    keys = [a + ':' + b for (a, b) in keys]
    print('DCell preds have duplicate preds for gene pairs:',
          not len(keys) == len(set(keys)))

    print(
        'Performing inner join to merge DCell and Ontotype predictions... (validating that join is 1-1)'
    )
    merged_df = onto_df.merge(dcell_df,
                              how='inner',
                              left_on=['A', 'B'],
                              right_on=['A', 'B'],
                              validate='1:1')
    print(merged_df.head())
    print('** Ontotype: **')
    report(merged_df, 'onto_pred')

    print('** DCell: **')
    report(merged_df, 'dcell_pred')

    merged_df.to_csv(tsv_output, sep='\t', index=False)

    value_cols = ['onto_pred', 'dcell_pred', 'true', 'p']

    #TODO: we need to make this data match the shape of the data with our own processed Gs.
    # That is, we need to figure whether a pair is
    # A) query - array
    # b) array - query (so we swap)
    # c) or both array and query, then we add two values to the matrix.

    # check for duplicates
    # keys = list(zip(merged_df['A'], merged_df['B']))
    # keys = [frozenset((a,b)) for (a,b) in keys]
    # assert(len(keys) == len(set(keys)))
    # print('Merged scores are indexed by unique keys:', len(keys) == len(set(keys)))

    with open(costanzo_2010_fp, 'rb') as f:
        costanzo10_data = cpkl.load(f)

    costanzo10_rows = set(costanzo10_data['rows'])
    costanzo10_cols = set(costanzo10_data['cols'])
    print(len(costanzo10_rows), len(costanzo10_cols))

    needs_duplicates = df['A'].isin(costanzo10_rows) & df['A'].isin(costanzo10_cols) & \
                       df['B'].isin(costanzo10_rows) & df['B'].isin(costanzo10_cols)
    print(sum(needs_duplicates))

    # Incorrectly oriented (i.e. A is not in rows, or B is not in cols)
    needs_flipping = (~df['A'].isin(costanzo10_rows)) | (
        ~df['B'].isin(costanzo10_cols))
    print(sum(needs_flipping))

    # Correctly oriented and does not need duplicating
    # keep =  df['A'].isin(costanzo10_rows) & (~df['A'].isin(costanzo10_cols)) & \
    #         df['B'].isin(costanzo10_cols) & (~df['B'].isin(costanzo10_rows))

    keep = ~(needs_flipping | needs_duplicates)
    print(sum(keep))

    dup_df = merged_df[needs_duplicates]
    dup_df.rename(columns={'A': 'B', 'B': 'A'}, inplace=True)

    flip_df = merged_df[needs_flipping]
    flip_df.rename(columns={'A': 'B', 'B': 'A'}, inplace=True)

    new_df = merged_df[needs_duplicates]
    new_df = new_df.append(dup_df)
    new_df = new_df.append(flip_df)
    new_df = new_df.append(merged_df[keep])
    print(new_df.head())
    print('row genes', len(set(new_df['A'])))
    print('col genes', len(set(new_df['B'])))

    print(len(new_df))

    new_df = new_df.pivot(index='A', columns='B')

    for value_name in value_cols:
        gi_mat = new_df[value_name]
        print('\t-Extracting {} values'.format(value_name))
        print('\t\t- GIs shape:', gi_mat.shape)

        gi_mat = gi_mat.apply(pd.to_numeric)

        print('\t\t- GIs shape:', gi_mat.shape)
        print('\t\t- Sparsity:', sparsity(gi_mat.values))
        print('\t\t- Interactions measured :',
              np.sum(~np.isnan(gi_mat.values)))
        print('\t\t- Interactions missing:', np.sum(np.isnan(gi_mat.values)))

        gi_data = GIData(values=gi_mat.values.astype(float),
                         rows=gi_mat.index.values.astype(str),
                         cols=gi_mat.columns.values.astype(str),
                         check_symmetric=False)
        fp = cpkl_output_fmt.format(value_name)

        gi_data.save(fp)
        print('\t\t- Saved values to:', fp)
Example #39
def load(filename):
    '''Load a coffea file from disk
    '''
    with lz4.frame.open(filename) as fin:
        output = cloudpickle.load(fin)
    return output
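The matching writer is the mirror image: cloudpickle into an lz4-framed file, as Example #48 below does when it dumps a processor instance; a hedged sketch of such a helper:

def save(output, filename):
    '''Sketch of the matching writer: cloudpickle into an lz4-framed file
    (mirrors load() above and the dump at the end of Example #48 below).
    '''
    with lz4.frame.open(filename, mode='wb') as fout:
        cloudpickle.dump(output, fout)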
Example #40
def main():
    args = parse_args()
    setup_logging(args.logfile)

    log = get_logger()

    assert (0 <= args.hidden_fraction <= 1)

    np.random.seed(args.random_seed)
    tf.set_random_seed(args.random_seed)
    log.info('*' * 100)
    log.info('[Starting MC experiment]')
    log_dict(log.info, vars(args))
    log.info('[Loading target GIs]')
    with open(args.target_gis, 'rb') as f:
        tgt_gis = cpkl.load(f)

    log.info('[Loading source GIs]')
    with open(args.source_gis, 'rb') as f:
        src_gis = cpkl.load(f)

    log.info('[Loading sim scores]')
    with open(args.sim_scores, 'rb') as f:
        sim_scores_data = cpkl.load(f)
    sim_scores = sim_scores_data['values']
    sim_scores = sim_scores / np.max(sim_scores)  # Normalize

    log.info('[Loading target PPI]')
    L_tgt = get_laplacian(tgt_gis['rows'], args.target_ppi)
    log.info('[Loading source PPI]')
    L_src = get_laplacian(src_gis['rows'], args.source_ppi)

    hp_param_space = kxsmf_param_space(args)

    results, models, training_curves, trials = \
        run_kxsmf_experiment(tgt_gis=tgt_gis,
                            src_gis=src_gis,
                            L_tgt=L_tgt,
                            L_src=L_src,
                            space=hp_param_space,
                            sim_scores=sim_scores,
                            val_hf=args.val_hidden_fraction,
                            test_hf=args.hidden_fraction,
                            n_repeats=args.n_repeats,
                            hp_iters=args.n_hyperopt_iters,
                            hp_seed=args.random_seed)
    # Save results and other information
    log_results(results['summary'])
    with open(args.results_output, 'w') as f:
        json.dump(results, f, indent=2)

    with open(args.training_curve_output, 'wb') as f:
        cpkl.dump(training_curves, f)

    # TODO: save models; the models cannot be pickled at the moment
    # We will need to implement a from dict and a to dict method
    with open(args.models_output, 'wb') as f:
        cpkl.dump(trials, f)

    with open(args.trials_output, 'wb') as f:
        cpkl.dump(trials, f)
Example #41
    encoded_file = base64.b64encode(buf.read()).decode('ascii')
    return (encoded_file)


# load data
engine = create_engine('sqlite:///../data/DisasterResponse.db')
df = pd.read_sql_table('Main', engine)

# load model
#random_state=1234
#np.random_state=random_state
#estimator=ExtraTreeClassifier(random_state=random_state,class_weight='balanced')
#model = build_model(estimator)

with open("../models/classifier.pkl", 'rb') as f:
    model = cp.load(f)
nlp = spacy.load("en_vectors_web_lg")
seq_lens = np.unique(df.message.apply(lambda x: len(nlp(x))).values,
                     return_counts=True)
counts_df = pd.DataFrame(np.array(seq_lens).T)


# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():

    ax = counts_df.plot.hist(x=1,
                             bins=50,
                             legend=False,
                             title="Sequence Length Distribution",
Example #42
#!/usr/bin/python
import numpy as np
import cloudpickle as cp

sample = np.random.zipf(2, 10)
print(sample)
for i in range(10):
    print(sample[i])
msg = b'127.0.0.1:anna:1000'
msg = cp.load(msg)
print(msg)
Example #43
    def load_bytes(fname):
        import cloudpickle

        with open(fname, "rb") as f:
            obj = cloudpickle.load(f)
        return obj(1, 2, 3)
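load_bytes above expects the file to hold a picklable callable of three arguments; a sketch of how such a file could be produced (the concrete function is just an illustration):

    def dump_bytes(fname):
        import cloudpickle

        # Any callable works; cloudpickle also handles lambdas and closures
        # that plain pickle cannot serialize.
        fn = lambda a, b, c: a + b + c
        with open(fname, "wb") as f:
            cloudpickle.dump(fn, f)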
Example #44
 def __init__(self, model_path: str):
     with open(model_path, "rb") as rf:
         self.model = pickle.load(rf)
Example #45
def main():
    args = parse_args()
    setup_logging(args.logfile)

    log = get_logger()

    assert (0 <= args.hidden_fraction <= 1)

    np.random.seed(args.random_seed)
    tf.set_random_seed(args.random_seed)
    log.info('*' * 100)
    log.info('[Starting MC experiment]')
    log_dict(log.info, vars(args))
    log.info('[Loading input data]')

    with open(args.input_file, 'rb') as f:
        obj = cpkl.load(f)

    # Set up experiments
    fit_params = None
    if args.mc_alg == 'PMF':
        param_space = pmf_param_space(args)
        run_experiment = run_pmf
    elif args.mc_alg == 'PMF_b':
        param_space = pmfb_param_space(args)
        run_experiment = run_pmfb
    elif args.mc_alg in ['KPMF', 'NGMC', 'KPMF_b']:
        # Experiments that need PPI network
        if args.ppi is not None:
            ppi = nx.read_edgelist(args.ppi)

        if args.mc_alg == 'KPMF':
            L = get_ppi_data(obj['rows'], ppi, mode='laplacian')
            param_space = kpmf_param_space(args)
            run_experiment = run_kpmf
            fit_params = dict(L=L)
        elif args.mc_alg == 'KPMF_b':
            L = get_ppi_data(obj['rows'], ppi, mode='laplacian')
            param_space = kpmfb_param_space(args)
            run_experiment = run_kpmfb
            fit_params = dict(L=L)
        elif args.mc_alg == 'NGMC':
            fit_params = dict(P=None)
            P = get_ppi_data(obj['rows'], ppi, mode='normalized_adjacency')
            fit_params['P'] = P
            param_space = ngmc_param_space(args)
            run_experiment = run_ngmc
        else:
            raise (NotImplementedError(
                '{} option is invalid or not implemented'.format(args.mc_alg)))

    else:
        raise (NotImplementedError(
            '{} option is invalid or not implemented'.format(args.mc_alg)))

    # Run experimental protocol
    results, models, training_curves, trials = \
        run_experiment(obj,
                        param_space = param_space,
                        fit_params = fit_params,
                        val_hidden_fraction=args.val_hidden_fraction,
                        hidden_fraction=args.hidden_fraction,
                        n_repeats=args.n_repeats,
                        hyperopt_iters=args.n_hyperopt_iters,
                        seed=args.random_seed,
                        logistic=args.logistic)

    # Save results and other information
    log_results(results['summary'])
    with open(args.results_output, 'w') as f:
        json.dump(results, f, indent=2)

    with open(args.training_curve_output, 'wb') as f:
        cpkl.dump(training_curves, f)

    # TODO: save models; the models cannot be pickled at the moment
    # We will need to implement a from dict and a to dict method
    with open(args.models_output, 'wb') as f:
        cpkl.dump(trials, f)

    with open(args.trials_output, 'wb') as f:
        cpkl.dump(trials, f)
Example #46
    def _load_from_folder(model_folder: str,
                          install_requirements: bool = False):

        code_temp_dir = os.path.join(model_folder, UnifiedModel._CODE_BASE_DIR)
        data_temp_dir = os.path.join(model_folder, UnifiedModel._DATA_BASE_DIR)
        model_pkl_path = os.path.join(data_temp_dir,
                                      UnifiedModel._PICKLE_FILENAME)

        if not os.path.isfile(model_pkl_path):
            log.error("Model pickle file does not exist at path: " +
                      str(model_pkl_path))
            return None

        # TODO remove modules from _requirements...?
        if install_requirements:
            log.info("Installing model requirements")
            # Execute setup.sh script if available
            setup_script = os.path.join(model_folder,
                                        UnifiedModel._SETUP_SCRIPT_FILENAME)
            if os.path.isfile(setup_script):
                os.chmod(setup_script, 0o777)
                if subprocess.check_call([setup_script]) > 0:
                    log.warning("Failed to execute setup script")
            else:
                log.debug("Setup script does not exist.")

            # Install dependencies from requirements.txt if available
            requirements_file = os.path.join(
                model_folder, UnifiedModel._REQUIREMENTS_FILENAME)
            if os.path.isfile(requirements_file):
                if subprocess.check_call([
                        sys.executable, '-m', 'pip', 'install', '-r',
                        requirements_file
                ]) > 0:
                    log.warning("Failed to install requirements")
            else:
                log.debug("requirements file does not exist.")

        # extract code modules into temp folder

        if os.path.isdir(code_temp_dir):
            # append core modules to python path
            sys.path.append(code_temp_dir)
        else:
            log.debug("Code directory does not exist.")

        # add all data to stored files
        stored_files = {}

        if os.path.isdir(data_temp_dir):
            for dir_, _, files in os.walk(data_temp_dir):
                for file_name in files:
                    rel_dir = os.path.relpath(dir_, data_temp_dir)
                    if rel_dir != '.':
                        file_name = os.path.join(rel_dir, file_name)

                    if file_name != UnifiedModel._PICKLE_FILENAME:
                        # don't save the unified_model pickle file as stored file
                        stored_files[file_name] = os.path.join(dir_, file_name)
        else:
            log.debug("Data directory does not exist.")

        try:
            # ,  encoding='latin1', fix_imports=True
            model = pickle.load(open(model_pkl_path, "rb"))
            # provide information if requirements should be installed to loaded model
            model._install_requirements = install_requirements
        except Exception as e:
            log.error("Failed to unpickle model: " + str(e), e)
            if not install_requirements:
                log.info("Try to set install_requirements flag to true.")
            return None

        if not hasattr(model, 'predict'):
            log.warning("Model is not valid: predict method is missing!")

        model._stored_files = stored_files

        model._after_load()

        return model
Example #47
def _safe_deserialize(filename, mode="rb"):
    with open(filename, mode) as pfile:
        return cloudpickle.load(pfile)
Example #48
            else:
                hout['sumw'][dataset] += np.sum(df['scale1fb'])
        return hout

    def postprocess(self, accumulator):
        # set everything to 1/fb scale
        lumi = 1000  # [1/pb]

        scale = {}
        for dataset, dataset_sumw in accumulator['sumw'].items():
            scale[dataset] = lumi * self._corrections['xsections'][
                dataset] / dataset_sumw.value

        for h in accumulator.values():
            if isinstance(h, hist.Hist):
                h.scale(scale, axis="dataset")

        return accumulator


if __name__ == '__main__':
    with lz4f.open("corrections.cpkl.lz4", mode="rb") as fin:
        corrections = cloudpickle.load(fin)

    processor_instance = BoostedHbbProcessor(corrections=corrections)

    with lz4f.open('boostedHbbProcessor.cpkl.lz4',
                   mode='wb',
                   compression_level=5) as fout:
        cloudpickle.dump(processor_instance, fout)
Example #49
def load_object(path):
    import cloudpickle
    with gzip.open(path, mode='rb') as file:
        return cloudpickle.load(file)
Example #50
 def load(byte_array):
     buf = io.BytesIO(byte_array.tobytes())
     return cloudpickle.load(buf)
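When the payload is already a byte string rather than a buffer, cloudpickle.dumps and cloudpickle.loads cover the same round trip without the BytesIO wrapper; a short sketch:

 def roundtrip(obj):
     # Sketch: bytes-level equivalent of the buffer-based load above.
     payload = cloudpickle.dumps(obj)   # bytes
     return cloudpickle.loads(payload)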
Example #51
def read(name, task):
    base_dir = '{}/{}'.format(DATA_BASE, name)
    os.makedirs(base_dir, exist_ok=True)
    with open(os.path.join(base_dir, task), 'rb') as f:
        return pickle.load(f)
Example #52
	def get_run_command(self, filepath):
		""" Load the submission object and get the command to run
		the compiled bot """
		with open (filepath, 'rb') as fo:
			subm = cloudpickle.load(fo)
			return subm.get_command(config.worker_compiled + subm.sub_id)
Example #53
 def __init__(self, artifacts):
     with open(artifacts[key], 'rb') as f:  # should not KeyError
         if cloudpickle.load(f) != val:
             raise ValueError  # should not ValueError
Example #54
from model_utils import *

from PIL import Image
import cloudpickle
import tensorflow_datasets
import tensorflow as tf
import cv2
import numpy as np

with open("weights/int2str.pkl", "rb") as f:
    int2str = cloudpickle.load(f)


def load_model():
    resnet50_backbone = get_backbone()
    loss_fn = RetinaNetLoss(80)
    model = RetinaNet(80, resnet50_backbone)
    optimizer = tf.optimizers.SGD(learning_rate=learning_rate_fn, momentum=0.9)
    model.compile(loss=loss_fn, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint("weights")
    model.load_weights(latest_checkpoint)
    image = tf.keras.Input(shape=[None, None, 3], name="image")
    predictions = model(image, training=False)
    detections = DecodePredictions(confidence_threshold=0.5)(image,
                                                             predictions)
    inference_model = tf.keras.Model(inputs=image, outputs=detections)
    return inference_model


def predict(image_b, inference_model):
    image_t = tf.io.decode_image(image_b)
Example #55
def main():
    import neptune

    parser = argparse.ArgumentParser(argument_default=None)
    parser.add_argument('--config', action='append', help='Gin config files.')
    parser.add_argument('--debug', action='store_true', default=False)
    cmd_args, unknown = parser.parse_known_args()
    debug = cmd_args.debug
    spec_path = cmd_args.config[0]

    if not debug:
        try:
            with open(spec_path, 'rb') as f:
                import cloudpickle
                specification = cloudpickle.load(f)
        except pickle.UnpicklingError:
            with open(spec_path) as f:
                vars_ = {'script': os.path.basename(spec_path)}
                exec(f.read(), vars_)  # pylint: disable=exec-used
                specification = vars_['experiments_list'][0].to_dict()
                print(
                    'NOTE: Only the first experiment from the list will be run!'
                )
        parameters = specification['parameters']
    else:
        print("debug run")
        parameters = dict(env_id="toy_mr", env_size=None)

    class MockArgs(object):
        def add(self, key, value):
            setattr(self, key, value)

    args = MockArgs()

    args.add('env', parameters["env_id"])  # 'chain_env' 'toy_mr'
    args.add('env_size', parameters["env_size"])
    args.add('seed', 0)
    args.add('max_episode_steps', 300)

    args.add('num_timesteps', int(1e12))
    args.add('num_env', 32)
    args.add('use_news', 0)
    args.add('gamma', 0.99)
    args.add('gamma_ext', 0.999)
    args.add('lam', 0.95)
    args.add('update_ob_stats_every_step', 0)
    args.add('update_ob_stats_independently_per_gpu', 0)
    args.add('update_ob_stats_from_random_agent', 1)
    args.add('proportion_of_exp_used_for_predictor_update', 1.)
    args.add('tag', '')
    args.add(
        'policy',
        'cnn',
    )
    args.add('int_coeff', 1.)
    args.add('ext_coeff', 2.)
    args.add('dynamics_bonus', 0)

    if not debug:
        # TODO read more from specification
        print("running with neptune")
        neptune.init(
            project_qualified_name="pmtest/planning-with-learned-models")
        neptune.create_experiment(
            name=specification['name'],
            tags=specification['tags'],
            params=specification['parameters'],
            upload_stdout=False,
            upload_stderr=False,
        )
        neptune.send_metric("test", 777)
        baselines_format_strs = ['log', 'csv']
    else:
        print("running without neptune")
        baselines_format_strs = ['stdout', 'log', 'csv']

    logger.configure(dir="out", format_strs=baselines_format_strs)

    seed = 10000 * args.seed  # + MPI.COMM_WORLD.Get_rank()
    set_global_seeds(seed)

    hps = dict(frame_stack=4,
               nminibatches=4,
               nepochs=4,
               lr=0.0001,
               max_grad_norm=0.0,
               env_size=args.env_size,
               use_news=args.use_news,
               gamma=args.gamma,
               gamma_ext=args.gamma_ext,
               max_episode_steps=args.max_episode_steps,
               lam=args.lam,
               update_ob_stats_every_step=args.update_ob_stats_every_step,
               update_ob_stats_independently_per_gpu=args.
               update_ob_stats_independently_per_gpu,
               update_ob_stats_from_random_agent=args.
               update_ob_stats_from_random_agent,
               proportion_of_exp_used_for_predictor_update=args.
               proportion_of_exp_used_for_predictor_update,
               policy=args.policy,
               int_coeff=args.int_coeff,
               ext_coeff=args.ext_coeff,
               dynamics_bonus=args.dynamics_bonus)

    tf_util.make_session(make_default=True)
    train(env_id=args.env,
          num_env=args.num_env,
          seed=seed,
          num_timesteps=args.num_timesteps,
          hps=hps,
          use_neptune=(not debug))
Example #56
solvermap = {'optunity': 'Optunity',
             'tpe': 'Hyperopt',
             'smac': 'SMAC',
             'bayesopt': 'BayesOpt',
             'random': 'random'}

all_results = {k: [] for k in datasets}
performance = {}
random90 = []

for repetition in range(start_index, stop_index + 1):
    random90.append({})
    for dataset in datasets:
        all_results[dataset].append({})
        with open('results-repeated/%s-all.pkl-%d' % (dataset, repetition), 'r') as f:
            results = pickle.load(f)
            performance = {}
            for solver in solvers:
                if solver == 'optunity':
                    perf = max(results[solver]['results'][:budget])
                else:
                    perf = -min(results[solver]['results'][:budget])
                all_results[dataset][-1][solver] = perf

                if solver == 'random':
                    srtd = sorted(results[solver]['results'][:budget], reverse=True)
                    random90[-1][dataset] = -srtd[int(0.75 * len(srtd))]


ranks = {solver: {data: [] for data in datasets} for solver in solvers}
bests = {dataset: [] for dataset in datasets}
Example #57
    a, b, c = y_test.shape
    y_test = np.reshape(y_test, [a, b, c, 1])

    # Get the mean image files
    mean_img_x = []
    mean_img_y = []

    if (NORMALIZED == True):
        MEANS_FILE = MODEL_NAME + '_data_means.p'
        MEANS_PATH = MODEL_ARCHIVE + MEANS_FILE
        try:
            ff = open(MEANS_PATH, 'rb')
        except:
            print(
                'ResBrowser: failed to open the file ' + MEANS_PATH + '.' +
                ' Attempting to run without the image means.', file=sys.stderr)
            NORMALIZED = False
        else:
            mean_img_x, mean_img_y = pickle.load(ff)
            ff.close()

    rB = res_browser(loaded_model,
                     x_test,
                     y_test,
                     normalized=NORMALIZED,
                     mean_img_x=mean_img_x,
                     mean_img_y=mean_img_y)
    rB.initialize()
    plt.show()