Code example #1
0
def merge_data(data_dicts: List[Dict[str, Tensor]]):
    """Merge the outputs of ``make_transitions`` from several remote \
    :class:`Environment` instances.

    For every key present in the first dictionary, the tensors stored under
    that key are concatenated across all dictionaries.
    """
    return {
        key: judo.concatenate([chunk[key] for chunk in data_dicts])
        for key in data_dicts[0]
    }
Code example #2
0
File: env.py  Project: Guillemdb/fragile
 def _merge_data(data_dicts: List[Dict[str, Tensor]]):
     """Concatenate per-key tensors gathered from several workers.

     Raises:
         ValueError: with the per-chunk shapes of the offending key when
             conversion or concatenation fails for any reason.
     """
     merged = {}
     for key in data_dicts[0].keys():
         try:
             # Conversion stays inside the try so its failures are also
             # reported as a ValueError listing the chunk shapes.
             converted = [judo.to_backend(chunk[key]) for chunk in data_dicts]
             merged[key] = judo.concatenate(converted)
         except Exception:
             shapes = str([chunk[key].shape for chunk in data_dicts])
             raise ValueError(shapes)
     return merged
Code example #3
0
File: env.py  Project: Guillemdb/fragile
 def distribute(self, name, **kwargs):
     """Run method ``name`` on every wrapped environment and merge the results."""
     chunks = self._split_inputs_in_chunks(**kwargs)
     pending = [
         env.execute(name=name, blocking=self._blocking, **chunk)
         for env, chunk in zip(self._envs, chunks)
     ]
     # Non-blocking calls return a handle that must be invoked to obtain
     # the actual value.
     gathered = [item if self._blocking else item() for item in pending]
     if isinstance(gathered[0], dict):
         return self._merge_data(gathered)
     # Otherwise assume each result is a batch of tensors.
     batches = [judo.to_backend(item) for item in gathered]
     return judo.concatenate(batches)
Code example #4
0
File: env.py  Project: Guillemdb/fragile
    def distribute(self, name, **kwargs):
        """Execute the target function in all the different workers."""
        chunks = self._split_inputs_in_chunks(**kwargs)
        from fragile.distributed.ray import ray

        futures = [
            env.execute.remote(name=name, **chunk)
            for env, chunk in zip(self.envs, chunks)
        ]
        gathered = ray.get(futures)
        if isinstance(gathered[0], dict):
            return self._merge_data(gathered)
        # Otherwise assume each result is a batch of tensors.
        batches = [judo.to_backend(item) for item in gathered]
        return judo.concatenate(batches)
Code example #5
0
File: memory.py  Project: Guillemdb/judo
 def append(self, **kwargs):
     """Append a batch of named values to the memory buffer.

     Every keyword argument is converted to the active backend, reshaped so
     scalars and flat vectors become column vectors, and concatenated onto
     the data already stored under the same attribute name.

     Raises:
         KeyError: if a keyword name is not registered in ``self.names``.
     """
     for name, val in kwargs.items():
         if name not in self.names:
             raise KeyError("%s not in self.names: %s" % (name, self.names))
         # Scalar vectors are transformed to columns
         val = judo.to_backend(val)
         if len(val.shape) == 0:
             val = judo.unsqueeze(val)
         if len(val.shape) == 1:
             val = val.reshape(-1, 1)
         try:
             current = getattr(self, name)
             processed = val if current is None else judo.concatenate([current, val])
             if len(processed) > self.max_size:
                 # NOTE(review): slicing keeps the OLDEST entries when the
                 # buffer overflows — confirm this is intended rather than
                 # keeping the newest samples.
                 processed = processed[:self.max_size]
         except Exception:
             # Report debugging info without masking the original error:
             # the stored attribute may be None, which has no ``.shape``
             # (the old code crashed with AttributeError here), and the
             # class logger is preferred over a bare print.
             current = getattr(self, name)
             stored_shape = None if current is None else current.shape
             self._log.error(
                 "append failed for %s: value shape %s, stored shape %s"
                 % (name, val.shape, stored_shape)
             )
             raise
         setattr(self, name, processed)
     self._log.info("Memory now contains %s samples" % len(self))
Code example #6
0
File: states.py  Project: Guillemdb/judo
 def merge_one_name(states_list, name):
     """Collect attribute ``name`` from every state and concatenate the values.

     Non-tensor attributes are not stacked: the value found in the first
     state is returned as-is. Tensor values are first promoted to a common
     batch layout so they can be concatenated.
     """
     pieces = []
     for state in states_list:
         data = state[name]
         # Attributes that are not tensors are returned without stacking.
         if not judo.is_tensor(data):
             return data
         batch_len = len(state)
         rank = len(data.shape)
         if rank == 0 and batch_len == 1:
             # Scalar value: wrap it into a one dimensional array first.
             piece = tensor([data]).flatten()
         elif rank == 1 and batch_len == 1:
             if data.shape[0] == 1:
                 # Already a one element array holding a scalar attribute.
                 piece = data
             else:
                 # Single row of a matrix attribute: add the batch dimension.
                 piece = tensor([data])
         elif rank == 1 and batch_len > 1:
             # One dimensional batch of scalars: shape is already correct.
             piece = data
         elif rank > 1 and batch_len >= 1:
             # Matrix attribute with the batch dimension already in place.
             piece = data
         else:
             raise ValueError(
                 "Could not infer data concatenation for attribute %s  with shape %s"
                 % (name, data.shape)
             )
         pieces.append(piece)
     return judo.concatenate(pieces)
Code example #7
0
File: env.py  Project: Guillemdb/fragile
 def _merge_data(data_dicts: List[Dict[str, Tensor]]):
     """Fuse a list of per-key tensor dictionaries into a single dictionary,
     concatenating the tensors stored under each key."""
     return {
         key: judo.concatenate([chunk[key] for chunk in data_dicts])
         for key in data_dicts[0]
     }