Code example #1
File: common.py Project: bhuWenDongchao/pytorch
    def assertNotEqual(self, x, y, prec=None, message=''):
        if prec is None:
            prec = self.precision

        x, y = self.unwrapVariables(x, y)

        if torch.is_tensor(x) and torch.is_tensor(y):
            if x.size() != y.size():
                super(TestCase, self).assertNotEqual(x.size(), y.size())
            self.assertGreater(x.numel(), 0)
            y = y.type_as(x)
            y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()
            nan_mask = x != x
            if torch.equal(nan_mask, y != y):
                diff = x - y
                if diff.is_signed():
                    diff = diff.abs()
                diff[nan_mask] = 0
                max_err = diff.max()
                self.assertGreaterEqual(max_err, prec, message)
        elif type(x) == str and type(y) == str:
            super(TestCase, self).assertNotEqual(x, y)
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertNotEqual(x, y)
        else:
            try:
                self.assertGreaterEqual(abs(x - y), prec, message)
                return
            except (TypeError, AssertionError):
                pass
            super(TestCase, self).assertNotEqual(x, y, message)
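The tensor branch above relies on the fact that NaN never compares equal to itself. A minimal standalone sketch of that masking trick, with illustrative values (not taken from the project):

    import torch

    x = torch.tensor([1.0, float('nan'), 3.0])
    y = torch.tensor([1.0, float('nan'), 2.5])
    nan_mask = x != x            # True only where x is NaN
    diff = x - y
    diff[nan_mask] = 0           # ignore positions that are NaN in both tensors
    max_err = diff.abs().max()   # tensor(0.5000)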
Code example #2
File: classerrormeter.py Project: elanmart/tnt
    def add(self, output, target):
        if torch.is_tensor(output):
            output = output.cpu().squeeze().numpy()
        if torch.is_tensor(target):
            target = target.cpu().squeeze().numpy()
        elif isinstance(target, numbers.Number):
            target = np.asarray([target])
        if np.ndim(output) == 1:
            output = output[np.newaxis]
        else:
            assert np.ndim(output) == 2, \
                    'wrong output size (1D or 2D expected)'
            assert np.ndim(target) == 1, \
                    'target and output do not match'
        assert target.shape[0] == output.shape[0], \
            'target and output do not match'
        topk = self.topk
        maxk = int(topk[-1])  # seems like Python3 wants int and not np.int64
        no = output.shape[0]

        pred = torch.from_numpy(output).topk(maxk, 1, True, True)[1].numpy()
        correct = pred == target[:, np.newaxis].repeat(pred.shape[1], 1)

        for k in topk:
            self.sum[k] += no - correct[:, 0:k].sum()
        self.n += no
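A minimal sketch of the top-k step used above, with illustrative scores (not part of the source file):

    import torch

    output = torch.tensor([[0.1, 0.7, 0.2],
                           [0.6, 0.1, 0.3]])
    # indices of the 2 largest scores per row, largest first
    pred = output.topk(2, 1, True, True)[1]   # tensor([[1, 2], [0, 2]])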
Code example #3
File: monte_carlo.py Project: saschwan/botorch
    def __init__(
        self,
        model: Model,
        best_f: Union[float, Tensor],
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        tau: float = 1e-3,
    ) -> None:
        r"""q-Probability of Improvement.

        Args:
            model: A fitted model.
            best_f: The best (feasible) function value observed so far (assumed
                noiseless).
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
            objective: The MCAcquisitionObjective under which the samples are
                evaluated. Defaults to `IdentityMCObjective()`.
            tau: The temperature parameter used in the sigmoid approximation
                of the step function. Smaller values yield more accurate
                approximations of the function, but result in gradients
                estimates with higher variance.
        """
        super().__init__(model=model, sampler=sampler, objective=objective)
        if not torch.is_tensor(best_f):
            best_f = torch.tensor(float(best_f))
        self.register_buffer("best_f", best_f)
        if not torch.is_tensor(tau):
            tau = torch.tensor(float(tau))
        self.register_buffer("tau", tau)
Code example #4
File: common.py Project: Jsmilemsj/pytorch
    def assertEqual(self, x, y, prec=None, message='', allow_inf=False):
        if isinstance(prec, str) and message == '':
            message = prec
            prec = None
        if prec is None:
            prec = self.precision

        x, y = self.unwrapVariables(x, y)

        if isinstance(x, torch.Tensor) and isinstance(y, Number):
            self.assertEqual(x.item(), y, prec, message, allow_inf)
        elif isinstance(y, torch.Tensor) and isinstance(x, Number):
            self.assertEqual(x, y.item(), prec, message, allow_inf)
        elif torch.is_tensor(x) and torch.is_tensor(y):
            def assertTensorsEqual(a, b):
                super(TestCase, self).assertEqual(a.size(), b.size(), message)
                if a.numel() > 0:
                    b = b.type_as(a)
                    b = b.cuda(device=a.get_device()) if a.is_cuda else b.cpu()
                    # check that NaNs are in the same locations
                    nan_mask = a != a
                    self.assertTrue(torch.equal(nan_mask, b != b), message)
                    diff = a - b
                    diff[nan_mask] = 0
                    # TODO: implement abs on CharTensor
                    if diff.is_signed() and 'CharTensor' not in diff.type():
                        diff = diff.abs()
                    max_err = diff.max()
                    self.assertLessEqual(max_err, prec, message)
            super(TestCase, self).assertEqual(x.is_sparse, y.is_sparse, message)
            if x.is_sparse:
                x = self.safeCoalesce(x)
                y = self.safeCoalesce(y)
                assertTensorsEqual(x._indices(), y._indices())
                assertTensorsEqual(x._values(), y._values())
            else:
                assertTensorsEqual(x, y)
        elif isinstance(x, string_classes) and isinstance(y, string_classes):
            super(TestCase, self).assertEqual(x, y, message)
        elif type(x) == set and type(y) == set:
            super(TestCase, self).assertEqual(x, y, message)
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertEqual(len(x), len(y), message)
            for x_, y_ in zip(x, y):
                self.assertEqual(x_, y_, prec, message)
        elif isinstance(x, bool) and isinstance(y, bool):
            super(TestCase, self).assertEqual(x, y, message)
        elif isinstance(x, Number) and isinstance(y, Number):
            if abs(x) == float('inf') or abs(y) == float('inf'):
                if allow_inf:
                    super(TestCase, self).assertEqual(x, y, message)
                else:
                    self.fail("Expected finite numeric values - x={}, y={}".format(x, y))
                return
            super(TestCase, self).assertLessEqual(abs(x - y), prec, message)
        else:
            super(TestCase, self).assertEqual(x, y, message)
Code example #5
File: utils.py Project: Northrend/pytorch
def recursiveCopy(t1, t2):
    if isinstance(t2, list):
        t1 = t1 if isinstance(t1, list) else [t1]
        for i, _ in enumerate(t2):
            t1[i], t2[i] = recursiveCopy(t1[i], t2[i])
    elif torch.is_tensor(t2):
        t1 = t1 if torch.is_tensor(t1) else t2.new()
        t1.resize_as_(t2).copy_(t2)
    else:
        raise RuntimeError("expecting nested tensors or tables. Got " +
                           type(t1).__name__ + " and " + type(t2).__name__ + " instead")
    return t1, t2
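A minimal usage sketch with illustrative tensors (assumes the recursiveCopy defined above is in scope):

    import torch

    t1 = torch.empty(0)
    t2 = torch.randn(3)
    t1, t2 = recursiveCopy(t1, t2)   # t1 is resized to t2's shape and filled with its values
    assert torch.equal(t1, t2)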
Code example #6
File: utils.py Project: Northrend/pytorch
def recursiveAdd(t1, val=1, t2=None):
    if t2 is None:
        t2 = val
        val = 1
    if isinstance(t2, list):
        t1 = t1 if isinstance(t1, list) else [t1]
        for i, _ in enumerate(t2):
            t1[i], t2[i] = recursiveAdd(t1[i], val, t2[i])
    elif torch.is_tensor(t1) and torch.is_tensor(t2):
        t1.add_(val, t2)
    else:
        raise RuntimeError("expecting nested tensors or tables. Got " +
                           type(t1).__name__ + " and " + type(t2).__name__ + " instead")
    return t1, t2
Code example #7
File: utils.py Project: Northrend/pytorch
def recursiveResizeAs(t1, t2):
    if isinstance(t2, list):
        t1 = t1 if isinstance(t1, list) else [t1]
        if len(t1) < len(t2):
            t1 += [None] * (len(t2) - len(t1))
        for i, _ in enumerate(t2):
            t1[i], t2[i] = recursiveResizeAs(t1[i], t2[i])
        t1 = t1[:len(t2)]
    elif torch.is_tensor(t2):
        t1 = t1 if torch.is_tensor(t1) else t2.new()
        t1.resize_as_(t2)
    else:
        raise RuntimeError("Expecting nested tensors or tables. Got " +
                           type(t1).__name__ + " and " + type(t2).__name__ + "instead")
    return t1, t2
Code example #8
File: torchloader.py Project: zgsxwsdxg/mxbox
def default_collate(batch):
    "Puts each data field into a tensor with outer dimension batch size"
    if torch.is_tensor(batch[0]):
        out = None
        if _use_shared_memory:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum([x.numel() for x in batch])
            storage = batch[0].storage()._new_shared(numel)
            out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif type(batch[0]).__module__ == 'numpy':
        elem = batch[0]
        if type(elem).__name__ == 'ndarray':
            return torch.stack([torch.from_numpy(b) for b in batch], 0)
        if elem.shape == ():  # scalars
            py_type = float if elem.dtype.name.startswith('float') else int
            return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
    elif isinstance(batch[0], int):
        return torch.LongTensor(batch)
    elif isinstance(batch[0], float):
        return torch.DoubleTensor(batch)
    elif isinstance(batch[0], string_classes):
        return batch
    elif isinstance(batch[0], collections.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
    elif isinstance(batch[0], collections.Sequence):
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]

    raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
                     .format(type(batch[0]))))
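For the tensor branch, the collation amounts to a torch.stack over the batch. A minimal sketch with illustrative data (independent of the shared-memory path):

    import torch

    batch = [torch.tensor([1.0, 2.0]),
             torch.tensor([3.0, 4.0]),
             torch.tensor([5.0, 6.0])]
    collated = torch.stack(batch, 0)   # shape (3, 2), one row per sample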
Code example #9
File: test_caffe2.py Project: gtgalone/pytorch
 def convert_cuda(self, model, input):
     cuda_model = model.cuda()
     # input might be nested - we want to move everything to GPU
     cuda_input = function._nested_map(
         lambda o: isinstance(o, Variable) or torch.is_tensor(o),
         lambda o: o.cuda())(input)
     return cuda_model, cuda_input
Code example #10
File: read_lua_file.py Project: Northrend/pytorch
 def updateOutput_patch(*args):
     input = args[0]
     while not torch.is_tensor(input):
         input = input[0]
     obj._backend = type2backend[type(input)]
     obj.updateOutput = updateOutput_orig
     return obj.updateOutput(*args)
Code example #11
File: utils.py Project: saschwan/botorch
def _expand_bounds(
    bounds: Optional[Union[float, Tensor]], X: Tensor
) -> Optional[Tensor]:
    r"""Expands a tensor representing bounds.

    Expand the dimension of bounds if necessary such that the last dimension of
    bounds is the same as the last dimension of `X`.

    Args:
        bounds: a bound (either upper or lower) of each column (last dimension)
            of `X`. If this is a single float, then all columns have the same bound.
        X: `... x d` tensor

    Returns:
        A tensor of bounds expanded to be compatible with the size of `X` if
        bounds is not None, and None if bounds is None
    """
    if bounds is not None:
        if not torch.is_tensor(bounds):
            bounds = torch.tensor(bounds)
        if len(bounds.shape) == 0:
            ebounds = bounds.expand(1, X.shape[-1])
        elif len(bounds.shape) == 1:
            ebounds = bounds.view(1, -1)
        else:
            ebounds = bounds
        if ebounds.shape[1] != X.shape[-1]:
            raise RuntimeError(
                "Bounds must either be a single value or the same dimension as X"
            )
        return ebounds.to(dtype=X.dtype, device=X.device)
    else:
        return None
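A minimal usage sketch showing the three accepted inputs, with illustrative shapes (assumes the _expand_bounds defined above is in scope):

    import torch

    X = torch.rand(5, 3)
    _expand_bounds(0.0, X).shape                           # torch.Size([1, 3]), scalar broadcast
    _expand_bounds(torch.tensor([0., 1., 2.]), X).shape    # torch.Size([1, 3])
    _expand_bounds(None, X)                                # None passes through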
Code example #12
File: common_nn.py Project: Northrend/pytorch
 def _unpack(self, value):
     if isinstance(value, Variable):
         return value.data
     elif torch.is_tensor(value):
         return value
     else:
         return type(value)(self._unpack(v) for v in value)
Code example #13
File: common_nn.py Project: Northrend/pytorch
 def map_tensor_sizes(sizes):
     if isinstance(sizes, list):
         return [map_tensor_sizes(s) for s in sizes]
     elif torch.is_tensor(sizes):
         return sizes.double()
     else:
         return torch.randn(*sizes)
Code example #14
File: common_nn.py Project: athiwatp/pytorch
 def _unpack_input(self, input):
     if isinstance(input, Variable):
         return input.data
     elif torch.is_tensor(input):
         return input
     else:
         return type(input)(self._unpack_input(i) for i in input)
Code example #15
File: distributed.py Project: Henley13/imagenet-fast
    def __init__(self, module):
        super(DistributedDataParallel, self).__init__()
        self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False

        self.module = module
        param_list = [param for param in self.module.state_dict().values() if torch.is_tensor(param)]
        if dist._backend == dist.dist_backend.NCCL:
            for param in param_list:
                assert param.is_cuda, "NCCL backend only supports model parameters to be on GPU."
                
        #broadcast parameters
        flat_dist_call(param_list, dist.broadcast, (0,) )

        #all reduce gradient hook
        def allreduce_params():
            if(self.needs_reduction):
                self.needs_reduction = False
            else:
                return
            grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]
            flat_dist_call(grads, dist.all_reduce)
            
        for param in list(self.module.parameters()):
            def allreduce_hook(*unused):
                torch.autograd.Variable._execution_engine.queue_callback(allreduce_params)
            if param.requires_grad:
                param.register_hook(allreduce_hook)
Code example #16
File: distribution.py Project: Jsmilemsj/pytorch
    def _validate_log_prob_arg(self, value):
        """
        Argument validation for `log_prob` methods. The rightmost dimensions
        of a value to be scored via `log_prob` must agree with the distribution's
        batch and event shapes.

        Args:
            value (Tensor or Variable): the tensor whose log probability is to be
                computed by the `log_prob` method.
        Raises:
            ValueError: when the rightmost dimensions of `value` do not match the
                distribution's batch and event shapes.
        """
        if not (torch.is_tensor(value) or isinstance(value, Variable)):
            raise ValueError('The value argument to log_prob must be a Tensor or Variable instance.')

        event_dim_start = len(value.size()) - len(self._event_shape)
        if value.size()[event_dim_start:] != self._event_shape:
            raise ValueError('The right-most size of value must match event_shape: {} vs {}.'.
                             format(value.size(), self._event_shape))

        actual_shape = value.size()
        expected_shape = self._batch_shape + self._event_shape
        for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
            if i != 1 and j != 1 and i != j:
                raise ValueError('Value is not broadcastable with batch_shape+event_shape: {} vs {}.'.
                                 format(actual_shape, expected_shape))
Code example #17
File: train.py Project: athiwatp/DrQA
def eval_accuracies(pred_s, target_s, pred_e, target_e):
    """An unofficial evalutation helper.
    Compute exact start/end/complete match accuracies for a batch.
    """
    # Convert 1D tensors to lists of lists (compatibility)
    if torch.is_tensor(target_s):
        target_s = [[e] for e in target_s]
        target_e = [[e] for e in target_e]

    # Compute accuracies from targets
    batch_size = len(pred_s)
    start = utils.AverageMeter()
    end = utils.AverageMeter()
    em = utils.AverageMeter()
    for i in range(batch_size):
        # Start matches
        if pred_s[i] in target_s[i]:
            start.update(1)
        else:
            start.update(0)

        # End matches
        if pred_e[i] in target_e[i]:
            end.update(1)
        else:
            end.update(0)

        # Both start and end match
        if any([1 for _s, _e in zip(target_s[i], target_e[i])
                if _s == pred_s[i] and _e == pred_e[i]]):
            em.update(1)
        else:
            em.update(0)
    return start.avg * 100, end.avg * 100, em.avg * 100
Code example #18
File: utils.py Project: Northrend/pytorch
def recursiveType(param, type, tensorCache={}):
    from .Criterion import Criterion
    from .Module import Module
    if isinstance(param, list):
        for i, p in enumerate(param):
            param[i] = recursiveType(p, type, tensorCache)
    elif isinstance(param, Module) or isinstance(param, Criterion):
        param.type(type, tensorCache)
    elif torch.is_tensor(param):
        if torch.typename(param) != type:
            key = param._cdata
            if key in tensorCache:
                newparam = tensorCache[key]
            else:
                newparam = torch.Tensor().type(type)
                storageType = type.replace('Tensor', 'Storage')
                param_storage = param.storage()
                if param_storage:
                    storage_key = param_storage._cdata
                    if storage_key not in tensorCache:
                        tensorCache[storage_key] = torch._import_dotted_name(
                            storageType)(param_storage.size()).copy_(param_storage)
                    newparam.set_(
                        tensorCache[storage_key],
                        param.storage_offset(),
                        param.size(),
                        param.stride()
                    )
                tensorCache[key] = newparam
            param = newparam
    return param
Code example #19
File: utils.py Project: codealphago/3DKeypoints-DA
def collate_fn_cat(batch):
  "Puts each data field into a tensor with outer dimension batch size"
  if torch.is_tensor(batch[0]):
    out = None
    return torch.cat(batch, 0, out=out)
  elif type(batch[0]).__module__ == 'numpy':
    elem = batch[0]
    if type(elem).__name__ == 'ndarray':
      return torch.cat([torch.from_numpy(b) for b in batch], 0)
    if elem.shape == ():  # scalars
      py_type = float if elem.dtype.name.startswith('float') else int
      return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
  elif isinstance(batch[0], int):
    return torch.LongTensor(batch)
  elif isinstance(batch[0], float):
    return torch.DoubleTensor(batch)
  elif isinstance(batch[0], string_classes):
    return batch
  elif isinstance(batch[0], collections.Mapping):
    return {key: collate_fn_cat([d[key] for d in batch]) for key in batch[0]}
  elif isinstance(batch[0], collections.Sequence):
    transposed = zip(*batch)
    return [collate_fn_cat(samples) for samples in transposed]

  raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
                     .format(type(batch[0]))))
Code example #20
File: common.py Project: Northrend/pytorch
    def safeCoalesce(self, t):
        tc = t.coalesce()

        value_map = {}
        for idx, val in zip(t._indices().t(), t._values()):
            idx_tup = tuple(idx)
            if idx_tup in value_map:
                value_map[idx_tup] += val
            else:
                value_map[idx_tup] = val.clone() if torch.is_tensor(val) else val

        new_indices = sorted(list(value_map.keys()))
        new_values = [value_map[idx] for idx in new_indices]
        if t._values().ndimension() < 2:
            new_values = t._values().new(new_values)
        else:
            new_values = torch.stack(new_values)

        new_indices = t._indices().new(new_indices).t()
        tg = t.new(new_indices, new_values, t.size())

        self.assertEqual(tc._indices(), tg._indices())
        self.assertEqual(tc._values(), tg._values())

        return tg
Code example #21
File: trace_struct.py Project: lewisKit/pyro
def _warn_if_nan(name, value):
    if torch.is_tensor(value):
        value = value.item()
    if torch_isnan(value):
        warnings.warn("Encountered NAN log_prob_sum at site '{}'".format(name))
    if torch_isinf(value) and value > 0:
        warnings.warn("Encountered +inf log_prob_sum at site '{}'".format(name))
Code example #22
File: util.py Project: lewisKit/pyro
def torch_backward(x):
    """
    Like ``x.backward()`` for a :class:`~torch.Tensor`, but also accepts
    numbers (a no-op if given a number).
    """
    if torch.is_tensor(x):
        x.backward()
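A minimal usage sketch with an illustrative scalar graph (assumes the torch_backward defined above is in scope):

    import torch

    x = torch.tensor(2.0, requires_grad=True)
    y = x * x
    torch_backward(y)        # calls y.backward(), so x.grad becomes 4.0
    torch_backward(3.14)     # a plain number: no-op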
Code example #23
File: __init__.py Project: athiwatp/pytorch
def _make_grads(outputs, grads, user_create_graph):
    if user_create_graph is not None:
        create_graph = user_create_graph
    else:
        create_graph = any(isinstance(grad, Variable) and not grad.volatile
                           for grad in grads)

    new_grads = []
    for out, grad in zip(outputs, grads):
        if isinstance(grad, Variable):
            new_grads.append(grad)
        elif torch.is_tensor(grad):
            new_grads.append(Variable(grad, volatile=not create_graph))
        elif grad is None:
            if out.requires_grad:
                if out.numel() != 1:
                    raise RuntimeError("grad can be implicitly created only for scalar outputs")
                data = out.data
                new_grads.append(
                    Variable(data.new().resize_as_(data).fill_(1), volatile=not create_graph))
            else:
                new_grads.append(None)
        else:
            raise TypeError("gradients can be either Tensors, Variables or None, but got " +
                            type(grad).__name__)
    return tuple(new_grads), create_graph
Code example #24
File: transforms.py Project: zbxzc35/pytorch_CAM
    def __call__(self, pic):
        """
        Args:
            pic (Tensor or numpy.ndarray): Image to be converted to PIL.Image.

        Returns:
            PIL.Image: Image converted to PIL.Image.

        """
        npimg = pic
        mode = None
        if isinstance(pic, torch.FloatTensor):
            pic = pic.mul(255).byte()
        if torch.is_tensor(pic):
            npimg = np.transpose(pic.numpy(), (1, 2, 0))
        assert isinstance(npimg, np.ndarray), 'pic should be Tensor or ndarray'
        if npimg.shape[2] == 1:
            npimg = npimg[:, :, 0]

            if npimg.dtype == np.uint8:
                mode = 'L'
            if npimg.dtype == np.int16:
                mode = 'I;16'
            if npimg.dtype == np.int32:
                mode = 'I'
            elif npimg.dtype == np.float32:
                mode = 'F'
        else:
            if npimg.dtype == np.uint8:
                mode = 'RGB'
        assert mode is not None, '{} is not supported'.format(npimg.dtype)
        return Image.fromarray(npimg, mode=mode)
Code example #25
File: tensordataset.py Project: elanmart/tnt
 def __len__(self):
     if isinstance(self.data, dict):
         return len(list(self.data.values())[0])
     elif isinstance(self.data, list):
         return len(self.data[0])
     elif torch.is_tensor(self.data) or isinstance(self.data, np.ndarray):
         return len(self.data)
Code example #26
    def load_model(self):
        if len(glob.glob(os.path.join(args.save_dir, args.corpus) + '-selector-*.pth')) == 0:
            return

        if args.load_iter is None:
            f_list = glob.glob(os.path.join(args.save_dir, args.corpus) + '-selector-*.pth')
            iter_list = [int(i.split('-')[-1].split('.')[0]) for i in f_list]
            start_iter = sorted(iter_list)[-1]
        else:
            start_iter = args.load_iter

        name = args.corpus + '-selector-{}.pth'.format(start_iter)
        model_file_path = os.path.join(args.save_dir, name)
        print("loading model", model_file_path)

        if opt.device == torch.device('cuda'):
            state = torch.load(model_file_path)
        else:
            state = torch.load(model_file_path, map_location=opt.device)

        self._epoch = state['epoch']
        self._iter = state['iter']
        self.running_avg_loss = state['current_loss']
        self.min_loss = state['min_loss']

        self.model.sentence_selector.load_state_dict(state['selector_state_dict'])

        if not args.is_coverage:
            self.optimizer.load_state_dict(state['optimizer'])
            if opt.device == torch.device('cuda'):
                for state in list(self.optimizer.state.values()):
                    for k, v in list(state.items()):
                        if torch.is_tensor(v):
                            state[k] = v.cuda()
Code example #27
File: __init__.py Project: AsuradaYuci/video_reid
def to_torch(ndarray):
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    elif not torch.is_tensor(ndarray):
        raise ValueError("Cannot convert {} to torch tensor"
                         .format(type(ndarray)))
    return ndarray
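A minimal usage sketch with an illustrative array (assumes the to_torch defined above is in scope):

    import numpy as np
    import torch

    arr = np.zeros((2, 2), dtype=np.float32)
    t = to_torch(arr)    # numpy array -> torch.Tensor via torch.from_numpy
    t = to_torch(t)      # already a tensor: returned unchanged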
Code example #28
File: traceenum_elbo.py Project: lewisKit/pyro
def _compute_dice_elbo(model_trace, guide_trace):
    # y depends on x iff ordering[x] <= ordering[y]
    # TODO refine this coarse dependency ordering.
    ordering = {name: frozenset(f for f in site["cond_indep_stack"] if f.vectorized)
                for trace in (model_trace, guide_trace)
                for name, site in trace.nodes.items()
                if site["type"] == "sample"}

    costs = {}
    for name, site in model_trace.nodes.items():
        if site["type"] == "sample":
            _dict_iadd(costs, ordering[name], site["log_prob"])
    for name, site in guide_trace.nodes.items():
        if site["type"] == "sample":
            _dict_iadd(costs, ordering[name], -site["log_prob"])

    dice = Dice(guide_trace, ordering)
    elbo = 0.0
    for ordinal, cost in costs.items():
        dice_prob = dice.in_context(cost.shape, ordinal)
        mask = dice_prob > 0
        if torch.is_tensor(mask) and not mask.all():
            dice_prob = dice_prob[mask]
            cost = cost[mask]
        # TODO use score_parts.entropy_term to "stick the landing"
        elbo = elbo + (dice_prob * cost).sum()
    return elbo
Code example #29
File: __init__.py Project: AsuradaYuci/video_reid
def to_numpy(tensor):
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    elif type(tensor).__module__ != 'numpy':
        raise ValueError("Cannot convert {} to numpy array"
                         .format(type(tensor)))
    return tensor
Code example #30
    def add(self, output, target):
        """
        Args:
            output (Tensor): NxK tensor that for each of the N examples
                indicates the probability of the example belonging to each of
                the K classes, according to the model. The probabilities should
                sum to one over all classes
            target (Tensor): binary NxK tensor that encodes which of the K
                classes are associated with the N-th input
                    (eg: a row [0, 1, 0, 1] indicates that the example is
                         associated with classes 2 and 4)
            weight (optional, Tensor): Nx1 tensor representing the weight for
                each example (each weight > 0)
        """
        if not torch.is_tensor(output):
            output = torch.from_numpy(output)
        if not torch.is_tensor(target):
            target = torch.from_numpy(target)

        if output.dim() == 1:
            output = output.view(-1, 1)
        else:
            assert output.dim() == 2, \
                'wrong output size (should be 1D or 2D with one column \
                per class)'
        if target.dim() == 1:
            target = target.view(-1, 1)
        else:
            assert target.dim() == 2, \
                'wrong target size (should be 1D or 2D with one column \
                per class)'
        if self.scores.numel() > 0:
            assert target.size(1) == self.targets.size(1), \
                'dimensions for output should match previously added examples.'

        # make sure storage is of sufficient size
        if self.scores.storage().size() < self.scores.numel() + output.numel():
            new_size = math.ceil(self.scores.storage().size() * 1.5)
            self.scores.storage().resize_(int(new_size + output.numel()))
            self.targets.storage().resize_(int(new_size + output.numel()))

        # store scores and targets
        offset = self.scores.size(0) if self.scores.dim() > 0 else 0
        self.scores.resize_(offset + output.size(0), output.size(1))
        self.targets.resize_(offset + target.size(0), target.size(1))
        self.scores.narrow(0, offset, output.size(0)).copy_(output)
        self.targets.narrow(0, offset, target.size(0)).copy_(target)
Code example #31
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()

        sample = {'x': self.x[idx], 'y': self.y[idx]}
        return sample
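A minimal sketch of why the tensor check matters (hypothetical indices; DataLoader samplers can hand a Dataset a tensor of indices):

    import torch

    idx = torch.tensor([0, 2, 5])
    if torch.is_tensor(idx):
        idx = idx.tolist()    # [0, 2, 5], safe to use for fancy indexing or dict keys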
Code example #32
File: beam.py Project: longhuang318/dpdp
    def update(self, mask, current, cost, parent=None, score=None, compute_unique_device=None, remaining_capacity=None, last_action=None, potential_info=None, batch_ids=0):
        # We will group all the entries in the beam by the mask (for each batch_id)

        device = mask[0].device
        mask_cols = [col for col_group in mask for col in col_group]  # Flatten all columns in the mask
        if torch.is_tensor(batch_ids) and len(batch_ids) > 0 and (batch_ids != batch_ids[0]).any():
            # Prepend batch id to mask_cols so we get unique masks per graph
            mask_cols.insert(0, batch_ids)
        group_idx, mask_idx_per_group = unique_inverse(mask_cols, return_index=True, device=compute_unique_device)
        # Get back all results to the current device
        mask = [msk.to(device) for msk in mask]
        group_idx = group_idx.to(device)
        mask_idx_per_group = mask_idx_per_group.to(device)
        mask_idx = None  # We don't need a mask_idx unless we sort by the mask 'lazily'
        current_csr = None  # We don't have a current csr representation unless we sort by current
        current_counts = None
        if self.sort_by is not None:
            if self.sort_by == 'current':
                assert self.batch_size == 1, "Sorting by current not compatible with batch ids"
                current, argsort = torch.sort(current)
                # If we sort by current, since there are only very few current we also store a compact 'csr' representation
                current_csr = coo_to_csr(current, minlength=self.num_nodes)
                current_counts = current_csr[1:] - current_csr[:-1]
            elif self.sort_by == 'group_idx':
                group_idx, argsort = torch.sort(group_idx)
            else:
                assert False, "Unknown sort by"

            parent = torch.gather(parent, 0, argsort) if parent is not None else None
            current = current if self.sort_by == 'current' else torch.gather(current, 0, argsort)
            group_idx = group_idx if self.sort_by == 'group_idx' else torch.gather(group_idx, 0, argsort)
            cost = torch.gather(cost, 0, argsort)
            if score is not None:
                score = torch.gather(score, 0, argsort)
            if remaining_capacity is not None:
                remaining_capacity = torch.gather(remaining_capacity, 0, argsort)
            if last_action is not None:
                last_action = torch.gather(last_action, 0, argsort)
            if potential_info is not None:
                potential_info = tuple(p_info.index_select(0, argsort) for p_info in potential_info)
            if self.batch_size > 1:
                assert self.sort_by == 'group_idx'
                # When sorting by group_idx should already be sorted by batch_id
                batch_ids = torch.gather(batch_ids, 0, argsort)
            # If mask_idx is None, then it means the mask is aligned with the sorting
            if self.gather_mask == 'eager':
                mask = [col_group.index_select(-1, argsort) for col_group in mask]
            elif self.gather_mask == 'lazy':
                # Lazy, will get the mask in order when needed
                # We want all entries of the same group to point to the same mask_idx so we can 'abuse' mask_idx as
                # group idx as well
                mask_idx = torch.gather(mask_idx_per_group, 0, group_idx)
            else:
                assert False, "Unknown gather mask option"

        self.mask = mask
        self.mask_idx = mask_idx
        self.group_idx = group_idx
        self.current = current
        self.current_csr = current_csr
        self.current_counts = current_counts
        self.cost = cost
        self.score = score
        self.remaining_capacity = remaining_capacity
        self.last_action = last_action if last_action is not None else current  # For TSP, last action is current
        self.potential_info = potential_info
        self.parent = parent
        self.batch_ids = batch_ids

        self.size = cost.size(0)
Code example #33
File: test_serialization.py Project: mojotech/arrow
def assert_equal(obj1, obj2):
    try:
        import torch
        if torch.is_tensor(obj1) and torch.is_tensor(obj2):
            assert torch.equal(obj1, obj2)
            return
    except ImportError:
        pass
    module_numpy = (type(obj1).__module__ == np.__name__ or
                    type(obj2).__module__ == np.__name__)
    if module_numpy:
        empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ()) or
                       (hasattr(obj2, "shape") and obj2.shape == ()))
        if empty_shape:
            # This is a special case because currently np.testing.assert_equal
            # fails because we do not properly handle different numerical
            # types.
            assert obj1 == obj2, ("Objects {} and {} are "
                                  "different.".format(obj1, obj2))
        else:
            np.testing.assert_equal(obj1, obj2)
    elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
        special_keys = ["_pytype_"]
        assert (set(list(obj1.__dict__.keys()) + special_keys) ==
                set(list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
                                                                  "and {} are "
                                                                  "different."
                                                                  .format(
                                                                      obj1,
                                                                      obj2))
        try:
            # Workaround to make comparison of OrderedDicts work on Python 2.7
            if obj1 == obj2:
                return
        except Exception:
            pass
        if obj1.__dict__ == {}:
            print("WARNING: Empty dict in ", obj1)
        for key in obj1.__dict__.keys():
            if key not in special_keys:
                assert_equal(obj1.__dict__[key], obj2.__dict__[key])
    elif type(obj1) is dict or type(obj2) is dict:
        assert_equal(obj1.keys(), obj2.keys())
        for key in obj1.keys():
            assert_equal(obj1[key], obj2[key])
    elif type(obj1) is list or type(obj2) is list:
        assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
                                        "different lengths."
                                        .format(obj1, obj2))
        for i in range(len(obj1)):
            assert_equal(obj1[i], obj2[i])
    elif type(obj1) is tuple or type(obj2) is tuple:
        assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
                                        "different lengths."
                                        .format(obj1, obj2))
        for i in range(len(obj1)):
            assert_equal(obj1[i], obj2[i])
    elif (pa.lib.is_named_tuple(type(obj1)) or
          pa.lib.is_named_tuple(type(obj2))):
        assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
                                        "with different lengths."
                                        .format(obj1, obj2))
        for i in range(len(obj1)):
            assert_equal(obj1[i], obj2[i])
    else:
        assert obj1 == obj2, ("Objects {} and {} are different."
                              .format(obj1, obj2))
Code example #34
def optimizer_to_device(op, device):
    for state in op.state.values():
        for k, v in state.items():
            if torch.is_tensor(v):
                state[k] = v.to(device)
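A minimal usage sketch (hypothetical model and optimizer) for moving optimizer state, e.g. after loading a checkpoint:

    import torch

    model = torch.nn.Linear(4, 2)
    op = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    # one step so op.state actually holds momentum buffers
    model(torch.randn(1, 4)).sum().backward()
    op.step()
    optimizer_to_device(op, torch.device('cpu'))   # moves every tensor in the state dicts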
Code example #35
def _is_tensor_image(img):
    return torch.is_tensor(img) and img.ndimension() == 3
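A minimal usage sketch with illustrative inputs:

    import torch

    _is_tensor_image(torch.rand(3, 32, 32))   # True: CxHxW tensor
    _is_tensor_image(torch.rand(32, 32))      # False: missing the channel dimension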
Code example #36
File: train_DNN_torch.py Project: zhenglz/deepunion
def debug_memory():
    import collections, gc, torch
    tensors = collections.Counter((str(o.device), o.dtype, tuple(o.shape))
                                  for o in gc.get_objects()
                                  if torch.is_tensor(o))
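As listed, the counter is built but never surfaced; a minimal sketch of how it could be printed (the print loop is an assumption, not part of the source):

    import collections, gc, torch

    tensors = collections.Counter((str(o.device), o.dtype, tuple(o.shape))
                                  for o in gc.get_objects()
                                  if torch.is_tensor(o))
    for key, count in sorted(tensors.items(), key=str):
        print(key, count)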
Code example #37
File: plot_utils.py Project: zengwang430521/expose
    def __call__(self,
                 vertices: Tensor,
                 faces: Union[Tensor, Array],
                 focal_length: Union[Tensor, Array],
                 camera_translation: Union[Tensor, Array],
                 camera_center: Union[Tensor, Array],
                 bg_imgs: Array,
                 render_bg: bool = True,
                 deg: float = 0,
                 return_with_alpha: bool = False,
                 body_color: List[float] = None,
                 **kwargs):
        '''
            Parameters
            ----------
            vertices: BxVx3, torch.Tensor
                The torch Tensor that contains the current vertices to be drawn
            faces: Fx3, np.array
                The faces of the meshes to be drawn. Right now only support a
                batch of meshes with the same topology
            focal_length: B, torch.Tensor
                The focal length used by the perspective camera
            camera_translation: Bx3, torch.Tensor
                The translation of the camera estimated by the network
            camera_center: Bx2, torch.Tensor
                The center of the camera in pixels
            bg_imgs: np.ndarray
                Optional background images used for overlays
            render_bg: bool, optional
                Render on top of the background image
            deg: float, optional
                Degrees to rotate the mesh around itself. Used to render the
                same mesh from multiple viewpoints. Defaults to 0 degrees
            return_with_alpha: bool, optional
                Whether to return the rendered image with an alpha channel.
                Default value is False.
            body_color: list, optional
                The color used to render the image.
        '''
        if torch.is_tensor(vertices):
            vertices = vertices.detach().cpu().numpy()
        if torch.is_tensor(faces):
            faces = faces.detach().cpu().numpy()
        if torch.is_tensor(focal_length):
            focal_length = focal_length.detach().cpu().numpy()
        if torch.is_tensor(camera_translation):
            camera_translation = camera_translation.detach().cpu().numpy()
        if torch.is_tensor(camera_center):
            camera_center = camera_center.detach().cpu().numpy()
        batch_size = vertices.shape[0]

        output_imgs = []
        for bidx in range(batch_size):
            if body_color is None:
                body_color = COLORS['N']

            _, H, W = bg_imgs[bidx].shape
            # Update the renderer's viewport
            self.renderer.viewport_height = H
            self.renderer.viewport_width = W

            self.update_camera(
                focal_length=focal_length[bidx],
                translation=camera_translation[bidx],
                center=camera_center[bidx],
            )
            self.update_mesh(vertices[bidx],
                             faces,
                             body_color=body_color,
                             deg=deg)

            flags = (pyrender.RenderFlags.RGBA
                     | pyrender.RenderFlags.SKIP_CULL_FACES)
            color, depth = self.renderer.render(self.scene, flags=flags)
            color = np.transpose(color, [2, 0, 1]).astype(np.float32) / 255.0
            color = np.clip(color, 0, 1)

            if render_bg:
                if return_with_alpha:
                    valid_mask = (color[3] > 0)[np.newaxis]

                    if bg_imgs[bidx].shape[0] < 4:
                        curr_bg_img = np.concatenate([
                            bg_imgs[bidx],
                            np.ones_like(bg_imgs[bidx, [0], :, :])
                        ],
                                                     axis=0)
                    else:
                        curr_bg_img = bg_imgs[bidx]

                    output_img = (color * valid_mask +
                                  (1 - valid_mask) * curr_bg_img)
                    output_imgs.append(np.clip(output_img, 0, 1))
                else:
                    valid_mask = (color[3] > 0)[np.newaxis]

                    output_img = (color[:-1] * valid_mask +
                                  (1 - valid_mask) * bg_imgs[bidx])
                    output_imgs.append(np.clip(output_img, 0, 1))
            else:
                if return_with_alpha:
                    output_imgs.append(color)
                else:
                    output_imgs.append(color[:-1])
        return np.stack(output_imgs, axis=0)
Code example #38
File: epic.py Project: balbasty/nitorch
def run_epic(echoes,
             reverse_echoes=None,
             voxshift=None,
             extrapolate=True,
             lam=1,
             sigma=1,
             max_iter=(10, 32),
             tol=1e-5,
             verbose=False):
    """Run EPIC on pre-loaded tensors.

    Parameters
    ----------
    echoes : (N, *spatial) tensor
        Echoes acquired with bipolar readout. Readout direction should be last.
    reverse_echoes : (N, *spatial) tensor
        Echoes acquired with reverse bipolar readout. If None, they are synthesized.
    voxshift : (*spatial) tensor
        Voxel shift map used to deform towards even (0, 2, ...) echoes.
        Its inverse is used to deform towards odd (1, 3, ...) echoes.
    extrapolate : bool
        Extrapolate first/last echo when reverse_echoes is None.
        Otherwise, only use interpolated echoes.
    lam : [list of] float
        Regularization factor (per echo)
    sigma : float
        Noise standard deviation
    max_iter : [pair of] int
        Maximum number of RLS and CG iterations
    tol : float
        Tolerance for early stopping
    verbose : int
        Verbosity level

    Returns
    -------
    echoes : (N, *spatial) tensor
        Undistorted + denoised echoes

    """
    if reverse_echoes is False:
        return run_epic_noreverse(echoes, voxshift, lam, sigma, max_iter, tol,
                                  verbose)

    ne = len(echoes)  # number of echoes
    nv = echoes.shape[1:].numel()  # number of voxels
    nd = echoes.dim() - 1  # number of dimensions

    # synthesize echoes
    synth = not torch.is_tensor(reverse_echoes)
    if synth:
        neg = synthesize_neg(echoes[0::2])
        pos = synthesize_pos(echoes[1::2])
        reverse_echoes = torch.stack([x for y in zip(pos, neg) for x in y])
        del pos, neg
    else:
        extrapolate = True

    # initialize denoised echoes
    fit = (echoes + reverse_echoes).div_(2)
    if not extrapolate:
        fit[0] = echoes[0]
        fit[-1] = echoes[-1]
    fwd_fit = torch.zeros_like(fit)
    bwd_fit = torch.zeros_like(fit)

    # prepare voxel shift maps
    if voxshift is not None:
        ivoxshift = add_identity_1d(-voxshift)
        voxshift = add_identity_1d(voxshift)
    else:
        ivoxshift = None

    # prepare parameters
    max_iter, sub_iter = py.make_list(max_iter, 2)
    tol, sub_tol = py.make_list(tol, 2)
    lam = [l / ne for l in py.make_list(lam, ne)]
    isigma2 = 1 / (sigma * sigma)

    # compute hessian once and for all
    if voxshift is not None:
        one = torch.ones_like(voxshift)[None]
        if extrapolate:
            h = push1d(pull1d(one, voxshift), voxshift)
            h += push1d(pull1d(one, ivoxshift), ivoxshift)
            weight_ = lambda x: x.mul_(0.5)
            halfweight_ = lambda x: x.mul_(math.sqrt(0.5))
        else:
            h = torch.zeros_like(fit)
            h[:-1] += push1d(pull1d(one, voxshift), voxshift)
            h[1:] += push1d(pull1d(one, ivoxshift), ivoxshift)
            weight_ = lambda x: x[1:-1].mul_(0.5)
            halfweight_ = lambda x: x[1:-1].mul_(math.sqrt(0.5))
        del one
        weight_(h)
    else:
        h = fit.new_ones([ne] + [1] * nd)
        if extrapolate:
            h *= 2
            weight_ = lambda x: x.mul_(0.5)
            halfweight_ = lambda x: x.mul_(math.sqrt(0.5))
        else:
            h[1:-1] *= 2
            weight_ = lambda x: x[1:-1].mul_(0.5)
            halfweight_ = lambda x: x[1:-1].mul_(math.sqrt(0.5))
    weight_(h)
    h *= isigma2

    loss = float('inf')
    for n_iter in range(max_iter):

        # update weights
        w, jtv = membrane_weights(fit, factor=lam, return_sum=True)

        # gradient of likelihood (forward)
        pull_forward(fit, voxshift, ivoxshift, out=fwd_fit)
        fwd_fit.sub_(echoes)
        halfweight_(fwd_fit)
        ll = ssq(fwd_fit)
        halfweight_(fwd_fit)
        push_forward(fwd_fit, voxshift, ivoxshift, out=fwd_fit)

        # gradient of likelihood (reversed)
        pull_backward(fit, voxshift, ivoxshift, extrapolate, out=bwd_fit)
        if extrapolate:
            bwd_fit.sub_(reverse_echoes)
        else:
            bwd_fit[1:-1].sub_(reverse_echoes[1:-1])
        halfweight_(bwd_fit)
        ll += ssq(bwd_fit)
        halfweight_(bwd_fit)
        push_backward(bwd_fit, voxshift, ivoxshift, extrapolate, out=bwd_fit)

        g = fwd_fit.add_(bwd_fit).mul_(isigma2)
        ll *= 0.5 * isigma2

        # gradient of prior
        g += regulariser(fit, membrane=1, factor=lam, weights=w)

        # solve
        fit -= solve_field(h,
                           g,
                           w,
                           membrane=1,
                           factor=lam,
                           max_iter=sub_iter,
                           tolerance=sub_tol)

        # track objective
        ll, jtv = ll.item() / (ne * nv), jtv.item() / (ne * nv)
        loss, loss_prev = ll + jtv, loss
        if n_iter:
            gain = (loss_prev - loss) / max((loss_max - loss), 1e-8)
        else:
            gain = float('inf')
            loss_max = loss
        if verbose:
            end = '\n' if verbose > 1 else '\r'
            print(
                f'{n_iter+1:02d} | {ll:12.6g} + {jtv:12.6g} = {loss:12.6g} '
                f'| gain = {gain:12.6g}',
                end=end)
        if gain < tol:
            break

    if verbose == 1:
        print('')

    return fit
Code example #39
File: epic.py Project: balbasty/nitorch
def epic(echoes,
         reverse_echoes=True,
         fieldmap=None,
         extrapolate=False,
         bandwidth=1,
         polarity=1,
         readout=-1,
         slicewise=False,
         lam=1e2,
         max_iter=(10, 32),
         tol=1e-5,
         verbose=False,
         device=None):
    """Edge-Preserving B0 inhomogeneity correction (EPIC)

    References
    ----------
    .. "A new distortion correction approach for multi-contrast MRI",
        Divya Varadarajan, et al. ISMRM (2020)

    Parameters
    ----------
    echoes : list[file_like] or (N, *spatial) tensor,
        Echoes acquired with a bipolar readout.
    reverse_echoes : bool or list[file_like] or (N, *spatial) tensor
        Echoes acquired with reverse bipolar readout. If True: synthesized.
    fieldmap : file_like or (*spatial) tensor, Fieldmap or voxel shift map
    extrapolate : bool, Extrapolate first/last echo when reverse_echoes is None
    bandwidth : float, Bandwidth of the input echoes, in Hz/pixel
    polarity : +1 or -1, Readout polarity of the first echo
    readout : int, Index of the readout dimension
    slicewise : bool or int, Run the algorithm slicewise. If int, chunk size.
    lam : [list of] float, Regularization factor (per echo)
    max_iter : [pair of] int, Maximum number of RLS and CG iterations
    tol : float, Tolerance for early stopping
    verbose : int, Verbosity level
    device : {'cpu', 'cuda'} or torch.device

    Returns
    -------
    echoes : (N, *spatial) tensor
        Undistorted + denoised echoes

    """
    device = torch.device('cuda' if device == 'gpu' else device)
    backend = dict(dtype=torch.float32, device=device)

    echoes = map_files(echoes)
    reverse_echoes = map_files(reverse_echoes)
    fieldmap = map_files(fieldmap, nobatch=True)
    ndim = len(echoes.shape) - 1

    # estimate noise variance + scale regularization
    noise, tissue = 1, []
    for echo in echoes:
        noise1, tissue1 = estimate_noise(load(echo, **backend))
        noise *= noise1['sd']
        tissue.append(tissue1['mean'])
    noise = noise**(1 / len(echoes))
    lam = py.make_list(lam, len(echoes))
    lam = [l / mu for l, mu in zip(lam, tissue)]

    # ensure readout dimension is last
    readout = readout - ndim if readout > 0 else readout
    echoes = movedim(echoes, readout, -1)
    fieldmap = movedim(fieldmap, readout, -1)
    reverse_echoes = movedim(reverse_echoes, readout, -1)

    if slicewise:
        # --- loop over slices -----------------------------------------

        # ensure slice direction is second to last
        dz = -2 if echoes.shape[-2] < echoes.shape[-3] else -3
        echoes = movedim(echoes, dz, -2)
        fieldmap = movedim(fieldmap, dz, -2)
        reverse_echoes = movedim(reverse_echoes, dz, -2)
        nz = echoes.shape[-2]

        # allocate output
        if torch.is_tensor(echoes):
            out_backend = dict(dtype=echoes.dtype, device=echoes.device)
        else:
            out_backend = dict(dtype=torch.float32, device='cpu')
        fit = torch.zeros(echoes.shape, **out_backend)

        # prepare runner
        slicewise = int(slicewise)
        run_slicewise = RunSlicewise(slicewise, echoes, reverse_echoes,
                                     fieldmap, bandwidth, polarity, lam, noise,
                                     extrapolate, max_iter, tol, backend,
                                     out_backend, verbose)

        # parallel process
        with multiprocessing.Pool(torch.get_num_threads()) as pool:
            slices = pool.imap_unordered(run_slicewise,
                                         range(0, nz, slicewise))
            for chunk in slices:
                chunk, fitchunk = chunk
                fit[..., chunk, :] = fitchunk

        # unpermute slice
        fit = movedim(fit, -2, dz)

    else:
        # --- process full volume --------------------------------------

        # load data
        echoes = load(echoes, **backend)
        fieldmap = load(fieldmap, **backend)
        reverse_echoes = load(reverse_echoes, **backend)

        # rescale fieldmap
        if fieldmap is not None:
            fieldmap = fieldmap / bandwidth
            if polarity < 0:
                fieldmap = -fieldmap

        # run EPIC
        fit = run_epic(echoes,
                       reverse_echoes,
                       fieldmap,
                       extrapolate=extrapolate,
                       lam=lam,
                       sigma=noise,
                       max_iter=max_iter,
                       tol=tol,
                       verbose=verbose)

    # unpermute readout
    fit = movedim(fit, -1, readout)

    return fit
Code example #40
File: train.py Project: topshik/Sentence-VAE
def main(args):
    ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())

    splits = ['train', 'valid'] + (['test'] if args.test else [])

    datasets = OrderedDict()
    for split in splits:
        datasets[split] = PTB(data_dir=args.data_dir,
                              split=split,
                              create_data=args.create_data,
                              max_sequence_length=args.max_sequence_length,
                              min_occ=args.min_occ)

    model = SentenceVAE(vocab_size=datasets['train'].vocab_size,
                        sos_idx=datasets['train'].sos_idx,
                        eos_idx=datasets['train'].eos_idx,
                        pad_idx=datasets['train'].pad_idx,
                        unk_idx=datasets['train'].unk_idx,
                        max_sequence_length=args.max_sequence_length,
                        embedding_size=args.embedding_size,
                        rnn_type=args.rnn_type,
                        hidden_size=args.hidden_size,
                        word_dropout=args.word_dropout,
                        embedding_dropout=args.embedding_dropout,
                        latent_size=args.latent_size,
                        num_layers=args.num_layers,
                        bidirectional=args.bidirectional)

    if torch.cuda.is_available():
        model = model.cuda()

    print(model)

    if args.tensorboard_logging:
        writer = SummaryWriter(
            os.path.join(args.logdir, expierment_name(args, ts)))
        writer.add_text("model", str(model))
        writer.add_text("args", str(args))
        writer.add_text("ts", ts)

    save_model_path = os.path.join(args.save_model_path, ts)
    os.makedirs(save_model_path)

    def kl_anneal_function(anneal_function, step, k, x0):
        if anneal_function == 'logistic':
            return float(1 / (1 + np.exp(-k * (step - x0))))
        elif anneal_function == 'linear':
            return min(1, step / x0)

    NLL = torch.nn.NLLLoss(ignore_index=datasets['train'].pad_idx,
                           reduction='sum')

    def loss_fn(logp, target, length, mean, logv, anneal_function, step, k,
                x0):

        # cut-off unnecessary padding from target, and flatten
        target = target[:, :torch.max(length).item()].contiguous().view(-1)
        logp = logp.view(-1, logp.size(2))

        # Negative Log Likelihood
        NLL_loss = NLL(logp, target)

        # KL Divergence
        KL_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
        KL_weight = kl_anneal_function(anneal_function, step, k, x0)

        return NLL_loss, KL_loss, KL_weight

    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    tensor = torch.cuda.FloatTensor if torch.cuda.is_available(
    ) else torch.Tensor
    step = 0
    for epoch in range(args.epochs):

        for split in splits:

            data_loader = DataLoader(dataset=datasets[split],
                                     batch_size=args.batch_size,
                                     shuffle=split == 'train',
                                     num_workers=cpu_count(),
                                     pin_memory=torch.cuda.is_available())

            tracker = defaultdict(tensor)

            # Enable/Disable Dropout
            if split == 'train':
                model.train()
            else:
                model.eval()

            for iteration, batch in enumerate(data_loader):

                batch_size = batch['input'].size(0)

                for k, v in batch.items():
                    if torch.is_tensor(v):
                        batch[k] = to_var(v)

                # Forward pass
                logp, mean, logv, z = model(batch['input'], batch['length'])

                # loss calculation
                NLL_loss, KL_loss, KL_weight = loss_fn(logp, batch['target'],
                                                       batch['length'], mean,
                                                       logv,
                                                       args.anneal_function,
                                                       step, args.k, args.x0)

                loss = (NLL_loss + KL_weight * KL_loss) / batch_size

                # backward + optimization
                if split == 'train':
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    step += 1

                # bookkeeping
                tracker['ELBO'] = torch.cat(
                    (tracker['ELBO'], loss.data.view(1, -1)), dim=0)

                if args.tensorboard_logging:
                    writer.add_scalar("%s/ELBO" % split.upper(), loss.item(),
                                      epoch * len(data_loader) + iteration)
                    writer.add_scalar("%s/NLL Loss" % split.upper(),
                                      NLL_loss.item() / batch_size,
                                      epoch * len(data_loader) + iteration)
                    writer.add_scalar("%s/KL Loss" % split.upper(),
                                      KL_loss.item() / batch_size,
                                      epoch * len(data_loader) + iteration)
                    writer.add_scalar("%s/KL Weight" % split.upper(),
                                      KL_weight,
                                      epoch * len(data_loader) + iteration)

                if iteration % args.print_every == 0 or iteration + 1 == len(
                        data_loader):
                    print(
                        "%s Batch %04d/%i, Loss %9.4f, NLL-Loss %9.4f, KL-Loss %9.4f, KL-Weight %6.3f"
                        % (split.upper(), iteration, len(data_loader) - 1,
                           loss.item(), NLL_loss.item() / batch_size,
                           KL_loss.item() / batch_size, KL_weight))

                if split == 'valid':
                    if 'target_sents' not in tracker:
                        tracker['target_sents'] = list()
                    tracker['target_sents'] += idx2word(
                        batch['target'].data,
                        i2w=datasets['train'].get_i2w(),
                        pad_idx=datasets['train'].pad_idx)
                    tracker['z'] = torch.cat((tracker['z'], z.data), dim=0)

            print("%s Epoch %02d/%i, Mean ELBO %9.4f" %
                  (split.upper(), epoch, args.epochs, tracker['ELBO'].mean()))

            if args.tensorboard_logging:
                writer.add_scalar("%s-Epoch/ELBO" % split.upper(),
                                  torch.mean(tracker['ELBO']), epoch)

            # save a dump of all sentences and the encoded latent space
            if split == 'valid':
                dump = {
                    'target_sents': tracker['target_sents'],
                    'z': tracker['z'].tolist()
                }
                if not os.path.exists(os.path.join('dumps', ts)):
                    os.makedirs('dumps/' + ts)
                with open(
                        os.path.join('dumps/' + ts +
                                     '/valid_E%i.json' % epoch),
                        'w') as dump_file:
                    json.dump(dump, dump_file)

            # save checkpoint
            if split == 'train':
                checkpoint_path = os.path.join(save_model_path,
                                               "E%i.pytorch" % epoch)
                torch.save(model.state_dict(), checkpoint_path)
                print("Model saved at %s" % checkpoint_path)
Code example #41
        load_model_path = save_path + '/model_checkpoint_' + str(args.continue_from) + '.pth'
        package = torch.load(load_model_path, map_location=lambda storage, loc: storage)
        model = DeepSpeech.load_model_package(package)
        labels = DeepSpeech.get_labels(model)
        parameters = model.parameters()
        if args.optimizer == 'SGD':
            optimizer = torch.optim.SGD(parameters, lr=args.lr, momentum=args.momentum, nesterov=True)
        elif args.optimizer == 'Adam':
            optimizer = torch.optim.Adam(parameters, lr=args.lr)
        optimizer.load_state_dict(package['optim_dict'])
        # Temporary fix for pytorch #2830 & #1442 while pull request #3658 is not incorporated in a release
        # TODO : remove when a new release of pytorch include pull request #3658
        if args.cuda:
            for state in optimizer.state.values():
                for k, v in state.items():
                    if torch.is_tensor(v):
                        state[k] = v.cuda()
        start_epoch = int(package.get('epoch', 1)) - 1  # Epoch indexing starts at 0 for training
        start_iter = package.get('iteration', None)
        if start_iter is None:
            start_epoch += 1  # We saved model after epoch finished, start at the next epoch.
            start_iter = 0
        else:
            start_iter += 1
        avg_loss = int(package.get('avg_loss', 0))
        loss_results, per_results = package['loss_results'], package['per_results']
        #best_per = package['best_per']
    else:
        raise ValueError('should give an integer for continue_from')

    if args.cuda:
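
The loop above (moving every tensor held in optimizer.state back to the GPU after load_state_dict) is a general workaround pattern; a minimal sketch with a toy model in place of DeepSpeech:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

# run one step so the optimizer actually holds state (momentum buffers)
model(torch.randn(8, 4)).sum().backward()
optimizer.step()

# simulate a checkpoint round-trip
saved = optimizer.state_dict()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
optimizer.load_state_dict(saved)

if torch.cuda.is_available():
    model.cuda()
    # loaded state tensors live on the CPU; move them to the GPU as well
    for param_state in optimizer.state.values():
        for k, v in param_state.items():
            if torch.is_tensor(v):
                param_state[k] = v.cuda()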
Code example #42
def test_log_works_in_train_callback(tmpdir):
    """
    Tests that log can be called within callback
    """

    os.environ['PL_DEV_DEBUG'] = '1'

    class TestCallback(callbacks.Callback):

        # helpers
        count = 1
        choices = [False, True]
        # used to compute expected values
        callback_funcs_called = collections.defaultdict(list)
        funcs_called_count = collections.defaultdict(int)
        funcs_attr = {}

        def make_logging(self, pl_module: pl.LightningModule, func_name, func_idx,
                         on_steps=[], on_epochs=[], prob_bars=[]):
            self.funcs_called_count[func_name] += 1
            for idx, (on_step, on_epoch, prog_bar) in enumerate(list(itertools.product(*[on_steps, on_epochs, prob_bars]))):
                # run logging
                custom_func_name = f"{func_idx}_{idx}_{func_name}"
                pl_module.log(custom_func_name, self.count * func_idx, on_step=on_step,
                              on_epoch=on_epoch, prog_bar=prog_bar)

                # catch information for verification

                # on_train_start runs outside the epoch loop, so it won't hit the epoch check below; capture its values here
                if func_name == "on_train_start":
                    self.callback_funcs_called[func_name].append([self.count * func_idx])

                # Save only values from the second epoch, so we can compute their mean or latest value.
                if pl_module.trainer.current_epoch == 1:
                    self.callback_funcs_called[func_name].append([self.count * func_idx])

                forked = on_step and on_epoch

                self.funcs_attr[custom_func_name] = {
                    "on_step": on_step,
                    "on_epoch": on_epoch,
                    "prog_bar": prog_bar,
                    "forked": forked,
                    "func_name": func_name}

                if on_step and on_epoch:
                    self.funcs_attr[f"{custom_func_name}_step"] = {
                        "on_step": True,
                        "on_epoch": False,
                        "prog_bar": prog_bar,
                        "forked": False,
                        "func_name": func_name}

                    self.funcs_attr[f"{custom_func_name}_epoch"] = {
                        "on_step": False,
                        "on_epoch": True,
                        "prog_bar": prog_bar,
                        "forked": False,
                        "func_name": func_name}

        def on_train_start(self, trainer, pl_module):
            self.make_logging(pl_module, 'on_train_start', 1, on_steps=self.choices,
                              on_epochs=self.choices, prob_bars=self.choices)

        def on_epoch_start(self, trainer, pl_module):
            self.make_logging(pl_module, 'on_epoch_start', 2, on_steps=self.choices,
                              on_epochs=self.choices, prob_bars=self.choices)

        def on_train_epoch_start(self, trainer, pl_module):
            self.make_logging(pl_module, 'on_train_epoch_start', 3, on_steps=self.choices,
                              on_epochs=self.choices, prob_bars=self.choices)

        def on_batch_start(self, trainer, pl_module):
            self.make_logging(pl_module, 'on_batch_start', 4, on_steps=self.choices,
                              on_epochs=self.choices, prob_bars=self.choices)

        def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
            self.make_logging(pl_module, 'on_train_batch_start', 5, on_steps=self.choices,
                              on_epochs=self.choices, prob_bars=self.choices)

        def on_batch_end(self, trainer, pl_module):
            self.make_logging(pl_module, 'on_batch_end', 6, on_steps=self.choices,
                              on_epochs=self.choices, prob_bars=self.choices)

        def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
            self.make_logging(pl_module, 'on_train_batch_end', 7, on_steps=self.choices,
                              on_epochs=self.choices, prob_bars=self.choices)
            # used to make sure aggregation works correctly:
            # we should obtain func([value * c for c in range(1, max_epochs * limit_train_batches)])
            # with func = np.mean if on_epoch else func = np.max
            self.count += 1

        def on_epoch_end(self, trainer, pl_module):
            self.make_logging(pl_module, 'on_epoch_end', 8, on_steps=[False],
                              on_epochs=self.choices, prob_bars=self.choices)

        def on_train_epoch_end(self, trainer, pl_module, outputs):
            self.make_logging(pl_module, 'on_train_epoch_end', 9, on_steps=[False],
                              on_epochs=self.choices, prob_bars=self.choices)

    class TestModel(BoringModel):

        manual_loss = []

        def training_step(self, batch, batch_idx):
            output = self.layer(batch)
            loss = self.loss(batch, output)
            self.manual_loss.append(loss)
            self.log('train_loss', loss)
            return {"loss": loss}

    max_epochs = 2
    limit_train_batches = 2
    model = TestModel()
    test_callback = TestCallback()

    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=limit_train_batches,
        limit_val_batches=0,
        limit_test_batches=0,
        val_check_interval=0.,
        num_sanity_val_steps=0,
        max_epochs=max_epochs,
        callbacks=[test_callback]
    )
    trainer.fit(model)

    assert test_callback.funcs_called_count["on_train_start"] == 1
    assert test_callback.funcs_called_count["on_epoch_start"] == 2
    assert test_callback.funcs_called_count["on_train_epoch_start"] == 2
    assert test_callback.funcs_called_count["on_batch_start"] == 4
    assert test_callback.funcs_called_count["on_train_batch_start"] == 4
    assert test_callback.funcs_called_count["on_batch_end"] == 4
    assert test_callback.funcs_called_count["on_train_batch_end"] == 4
    assert test_callback.funcs_called_count["on_epoch_end"] == 2
    assert test_callback.funcs_called_count["on_train_epoch_end"] == 2

    # Make sure the func_name exists within callback_metrics. If not, some logging calls were missed
    callback_metrics_keys = [*trainer.callback_metrics.keys()]
    for func_name in test_callback.callback_funcs_called.keys():
        is_in = False
        for callback_metrics_key in callback_metrics_keys:
            if func_name in callback_metrics_key:
                is_in = True
        assert is_in, (func_name, callback_metrics_keys)

    # function used to describe expected return logic
    def get_expected_output(func_attr, original_values):
        if func_attr["on_epoch"] and not func_attr["on_step"]:
            # Apply mean on values
            expected_output = np.mean(original_values)
        else:
            # Keep the latest value (logged values increase monotonically, so the max is the latest)
            expected_output = np.max(original_values)
        return expected_output

    # Make sure the func_name output equals the average from all logged values when on_epoch true
    # pop extra keys
    trainer.callback_metrics.pop("debug_epoch")
    assert trainer.logged_metrics["train_loss"] == model.manual_loss[-1]
    assert trainer.callback_metrics["train_loss"] == model.manual_loss[-1]
    trainer.callback_metrics.pop("train_loss")

    for func_name, output_value in trainer.callback_metrics.items():
        if torch.is_tensor(output_value):
            output_value = output_value.item()
        # get creation attr
        func_attr = test_callback.funcs_attr[func_name]

        # retrieve the original logged values
        original_values = test_callback.callback_funcs_called[func_attr["func_name"]]

        # compute expected output and compare to actual one
        expected_output = get_expected_output(func_attr, original_values)
        assert float(output_value) == float(expected_output)

    for func_name, func_attr in test_callback.funcs_attr.items():
        if func_attr["prog_bar"] and (func_attr["on_step"] or func_attr["on_epoch"]) and not func_attr["forked"]:
            assert func_name in trainer.logger_connector.progress_bar_metrics
        else:
            assert func_name not in trainer.logger_connector.progress_bar_metrics
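
Each make_logging call above iterates over every combination of on_step, on_epoch and prog_bar, so every hook logs 8 metrics per invocation; a quick sketch of that enumeration (and of which combinations get forked into _step/_epoch variants):

import itertools

choices = [False, True]
for idx, (on_step, on_epoch, prog_bar) in enumerate(itertools.product(choices, choices, choices)):
    forked = on_step and on_epoch
    print(idx, on_step, on_epoch, prog_bar, 'forked' if forked else '')
# 8 lines are printed; the two forked combinations additionally produce
# "<name>_step" and "<name>_epoch" entries in the logged metrics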
Code example #43
def fit_se_gn(cov, sqdist, max_iter=10000, tol=1e-8, verbose=False):
    """Fit the amplitude and length-scale of a squared-exponential kernel

    This function minimises the Frobenius norm of the difference
    between the experimental and fitted covariance matrices
    (i.e., it is a least-squares between the elements of the matrices).

    It performs a non-linear least-squares fit that uses the robust
    Hessian from Balbastre et al. (2021).

    Parameters
    ----------
    cov : (*batch, vox, vox)
        Empirical covariance matrix
    sqdist : tuple[int] or (vox, vox) tensor
        If a tensor -> it is the pre-computed squared distance map
        If a tuple -> it is the shape and we build the distance map
    max_iter : int, default=10000
    tol : float, default=1e-8
    verbose : bool or {0, 1, 2}, default=False

    Returns
    -------
    sig : (*batch,) tensor
        Amplitude of the kernel
    lam : (*batch,) tensor
        Length-scale of the kernel

    References
    ----------
    ..[1] "Model-based multi-parameter mapping"
          Yael Balbastre, Mikael Brudfors, Michaela Azzarito,
          Christian Lambert, Martina F. Callaghan and John Ashburner
          Preprint, 2021

    """
    cov = torch.as_tensor(cov).clone()
    backend = utils.backend(cov)
    if not torch.is_tensor(sqdist):
        shape = sqdist
        sqdist = dist_map(shape, **backend)
    else:
        sqdist = sqdist.to(**backend).clone()

    sqdist = sqdist.flatten()

    # exponential fit
    a = cov.diagonal(0, -1, -2).abs().mean(-1).log()
    cov = cov.reshape([-1, py.prod(sqdist.shape)])
    b = torch.ones_like(a).div_(-2)
    ll0 = None
    ll1 = None
    for it in range(max_iter):

        # compute objective
        e = sqdist.mul(b[:, None]).add_(a[:, None]).exp_()
        ll = (e - cov).square().sum() * 0.5
        if ll0 is None:
            ll0 = ll
            gain = constants.inf
        else:
            gain = (ll1 - ll) / ll0
        ll1 = ll
        if verbose:
            end = '\n' if verbose > 1 else '\r'
            print(
                f'{it+1:3d} | ll = {ll:12.6g} | gain = {gain:12.6g} '
                f'| a = {a.mean():12.6g} | b = {b.mean():12.6g}',
                end=end)
            if it == 0:
                print('')
        if abs(gain) < tol:
            break

        # compute gradient
        r = (e - cov).abs_().mul_(sqdist + 1)
        ed = e * sqdist
        ha = (e.square() + e * r).sum(-1)
        hb = (ed.square() + ed * r).sum(-1)
        hab = (e * ed).sum(-1)
        h = torch.stack([ha, hb, hab], -1)
        del ha, hb, hab, ed, r
        ga = e * (e - cov)
        gb = (sqdist * ga).sum(-1)
        ga = ga.sum(-1)
        g = torch.stack([ga, gb], -1)
        del ga, gb

        # update
        h[..., :2] += 1e-3
        delta = linalg.sym_solve(h, g)
        del g, h
        a -= delta[..., 0]
        b -= delta[..., 1]
        del delta
    if verbose == 1:
        print('')

    lam = b.reciprocal_().mul_(-0.5).sqrt_()
    sig = a.div_(2).exp_()
    return sig, lam
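
The returned sig and lam parameterise a squared-exponential kernel. Reading the final lines above, exp(a) corresponds to sig**2 and b to -1 / (2 * lam**2), so the fitted covariance can be reconstructed with a small helper like this (toy values, for illustration only):

import torch

def se_kernel(sqdist, sig, lam):
    # squared-exponential kernel: sig**2 * exp(-d**2 / (2 * lam**2))
    return sig ** 2 * torch.exp(-sqdist / (2 * lam ** 2))

sig, lam = torch.tensor(2.0), torch.tensor(3.0)   # hypothetical fitted values
sqdist = torch.arange(25.0).reshape(5, 5)          # toy squared-distance map
print(se_kernel(sqdist, sig, lam))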
Code example #44
def type_as(a, b):
    if torch.is_tensor(a) and torch.is_tensor(b):
        return a.to(b)
    else:
        return a
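
A quick usage sketch of the helper above (assuming type_as is in scope): tensors are converted to the dtype and device of the reference tensor, while anything else passes through unchanged:

import torch

a = torch.ones(3, dtype=torch.float32)
b = torch.zeros(3, dtype=torch.float64)
print(type_as(a, b).dtype)          # torch.float64
print(type_as('not a tensor', b))   # returned unchanged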
Code example #45
def pipeline(  # noqa: C901
    *,
    # 1. Dataset
    dataset: Union[None, str, Dataset, Type[Dataset]] = None,
    dataset_kwargs: Optional[Mapping[str, Any]] = None,
    training: Hint[CoreTriplesFactory] = None,
    testing: Hint[CoreTriplesFactory] = None,
    validation: Hint[CoreTriplesFactory] = None,
    evaluation_entity_whitelist: Optional[Collection[str]] = None,
    evaluation_relation_whitelist: Optional[Collection[str]] = None,
    # 2. Model
    model: Union[None, str, Model, Type[Model]] = None,
    model_kwargs: Optional[Mapping[str, Any]] = None,
    interaction: Union[None, str, Interaction, Type[Interaction]] = None,
    interaction_kwargs: Optional[Mapping[str, Any]] = None,
    dimensions: Union[None, int, Mapping[str, int]] = None,
    # 3. Loss
    loss: HintType[Loss] = None,
    loss_kwargs: Optional[Mapping[str, Any]] = None,
    # 4. Regularizer
    regularizer: HintType[Regularizer] = None,
    regularizer_kwargs: Optional[Mapping[str, Any]] = None,
    # 5. Optimizer
    optimizer: HintType[Optimizer] = None,
    optimizer_kwargs: Optional[Mapping[str, Any]] = None,
    clear_optimizer: bool = True,
    # 6. Training Loop
    training_loop: HintType[TrainingLoop] = None,
    training_loop_kwargs: Optional[Mapping[str, Any]] = None,
    negative_sampler: HintType[NegativeSampler] = None,
    negative_sampler_kwargs: Optional[Mapping[str, Any]] = None,
    # 7. Training (ronaldo style)
    training_kwargs: Optional[Mapping[str, Any]] = None,
    stopper: HintType[Stopper] = None,
    stopper_kwargs: Optional[Mapping[str, Any]] = None,
    # 8. Evaluation
    evaluator: HintType[Evaluator] = None,
    evaluator_kwargs: Optional[Mapping[str, Any]] = None,
    evaluation_kwargs: Optional[Mapping[str, Any]] = None,
    # 9. Tracking
    result_tracker: HintType[ResultTracker] = None,
    result_tracker_kwargs: Optional[Mapping[str, Any]] = None,
    # Misc
    metadata: Optional[Dict[str, Any]] = None,
    device: Hint[torch.device] = None,
    random_seed: Optional[int] = None,
    use_testing_data: bool = True,
    evaluation_fallback: bool = False,
    filter_validation_when_testing: bool = True,
) -> PipelineResult:
    """Train and evaluate a model.

    :param dataset:
        The name of the dataset (a key from :data:`pykeen.datasets.datasets`) or the :class:`pykeen.datasets.Dataset`
        instance. Alternatively, the training triples factory (``training``), testing triples factory (``testing``),
        and validation triples factory (``validation``; optional) can be specified.
    :param dataset_kwargs:
        The keyword arguments passed to the dataset upon instantiation
    :param training:
        A triples factory with training instances or path to the training file if a dataset was not specified
    :param testing:
        A triples factory with test instances or path to the test file if a dataset was not specified
    :param validation:
        A triples factory with validation instances or path to the validation file if a dataset was not specified
    :param evaluation_entity_whitelist:
        Optional restriction of evaluation to triples containing *only* these entities. Useful if the downstream task
        is only interested in certain entities, but the relational patterns with other entities improve the entity
        embedding quality.
    :param evaluation_relation_whitelist:
        Optional restriction of evaluation to triples containing *only* these relations. Useful if the downstream task
        is only interested in certain relations, but the relational patterns with other relations improve the entity
        embedding quality.

    :param model:
        The name of the model, subclass of :class:`pykeen.models.Model`, or an instance of
        :class:`pykeen.models.Model`. Can be given as None if the ``interaction`` keyword is used.
    :param model_kwargs:
        Keyword arguments to pass to the model class on instantiation
    :param interaction: The name of the interaction class, a subclass of :class:`pykeen.nn.modules.Interaction`,
        or an instance of :class:`pykeen.nn.modules.Interaction`. Can not be given when there is also a model.
    :param interaction_kwargs:
        Keyword arguments to pass during instantiation of the interaction class. Only use with ``interaction``.
    :param dimensions:
        Dimensions to assign to the embeddings of the interaction. Only use with ``interaction``.

    :param loss:
        The name of the loss or the loss class.
    :param loss_kwargs:
        Keyword arguments to pass to the loss on instantiation

    :param regularizer:
        The name of the regularizer or the regularizer class.
    :param regularizer_kwargs:
        Keyword arguments to pass to the regularizer on instantiation

    :param optimizer:
        The name of the optimizer or the optimizer class. Defaults to :class:`torch.optim.Adagrad`.
    :param optimizer_kwargs:
        Keyword arguments to pass to the optimizer on instantiation
    :param clear_optimizer:
        Whether to delete the optimizer instance after training. As the optimizer might have additional memory
        consumption due to e.g. moments in Adam, this is the default option. If you want to continue training, you
        should set it to False, as the optimizer's internal parameters will be lost otherwise.

    :param training_loop:
        The name of the training loop's training approach (``'slcwa'`` or ``'lcwa'``) or the training loop class.
        Defaults to :class:`pykeen.training.SLCWATrainingLoop`.
    :param training_loop_kwargs:
        Keyword arguments to pass to the training loop on instantiation
    :param negative_sampler:
        The name of the negative sampler (``'basic'`` or ``'bernoulli'``) or the negative sampler class.
        Only allowed when training with sLCWA.
        Defaults to :class:`pykeen.sampling.BasicNegativeSampler`.
    :param negative_sampler_kwargs:
        Keyword arguments to pass to the negative sampler class on instantiation

    :param training_kwargs:
        Keyword arguments to pass to the training loop's train function on call
    :param stopper:
        What kind of stopping to use. Defaults to no stopping; can be set to 'early'.
    :param stopper_kwargs:
        Keyword arguments to pass to the stopper upon instantiation.

    :param evaluator:
        The name of the evaluator or an evaluator class. Defaults to :class:`pykeen.evaluation.RankBasedEvaluator`.
    :param evaluator_kwargs:
        Keyword arguments to pass to the evaluator on instantiation
    :param evaluation_kwargs:
        Keyword arguments to pass to the evaluator's evaluate function on call

    :param result_tracker:
        The ResultsTracker class or name
    :param result_tracker_kwargs:
        The keyword arguments passed to the results tracker on instantiation

    :param metadata:
        A JSON dictionary to store with the experiment
    :param use_testing_data:
        If true, use the testing triples. Otherwise, use the validation triples. Defaults to true - use testing triples.
    :param device: The device or device name to run on. If none is given, the device will be looked up with
        :func:`pykeen.utils.resolve_device`.
    :param random_seed: The random seed to use. If none is specified, one will be assigned before any code
        is run for reproducibility purposes. In the returned :class:`PipelineResult` instance, it can be accessed
        through :data:`PipelineResult.random_seed`.
    :param evaluation_fallback:
        If true, in cases where the evaluation failed using the GPU it will fall back to using a smaller batch size or
        in the last instance evaluate on the CPU, if even the smallest possible batch size is too big for the GPU.
    :param filter_validation_when_testing:
        If true, during the evaluating of the test dataset, validation triples are added to the set of known positive
        triples, which are filtered out when performing filtered evaluation following the approach described by
        [bordes2013]_. This should be explicitly set to false only in the scenario that you are training a single
        model using the pipeline and evaluating with the testing set, but never using the validation set for
        optimization at all. This is a very atypical scenario, so it is left as true by default to promote
        comparability to previous publications.

    :returns: A pipeline result package.

    :raises ValueError:
        If a negative sampler is specified with LCWA
    :raises TypeError:
        If an invalid argument type is given for ``evaluation_kwargs["additional_filter_triples"]``
    """
    if training_kwargs is None:
        training_kwargs = {}
    training_kwargs = dict(training_kwargs)

    # To allow resuming training from a checkpoint when using a pipeline, the pipeline needs to obtain the
    # used random_seed to ensure reproducible results
    checkpoint_name = training_kwargs.get('checkpoint_name')
    if checkpoint_name is not None:
        checkpoint_directory = pathlib.Path(
            training_kwargs.get('checkpoint_directory', PYKEEN_CHECKPOINTS))
        checkpoint_directory.mkdir(parents=True, exist_ok=True)
        checkpoint_path = checkpoint_directory / checkpoint_name
        if checkpoint_path.is_file():
            checkpoint_dict = torch.load(checkpoint_path)
            _random_seed = checkpoint_dict['random_seed']
            logger.info('loaded random seed %s from checkpoint.', _random_seed)
            # We have to set clear optimizer to False since training should be continued
            clear_optimizer = False
        else:
            logger.info(
                f"=> no training loop checkpoint file found at '{checkpoint_path}'. Creating a new file."
            )
            if random_seed is None:
                _random_seed = random_non_negative_int()
                logger.warning(
                    f'No random seed is specified. Setting to {_random_seed}.')
            else:
                _random_seed = random_seed
    elif random_seed is None:
        _random_seed = random_non_negative_int()
        logger.warning(
            f'No random seed is specified. Setting to {_random_seed}.')
    else:
        _random_seed = random_seed  # random seed given successfully
    set_random_seed(_random_seed)

    _result_tracker = tracker_resolver.make(result_tracker,
                                            result_tracker_kwargs)

    if not metadata:
        metadata = {}
    title = metadata.get('title')

    # Start tracking
    _result_tracker.start_run(run_name=title)

    _device: torch.device = resolve_device(device)

    dataset_instance: Dataset = get_dataset(
        dataset=dataset,
        dataset_kwargs=dataset_kwargs,
        training=training,
        testing=testing,
        validation=validation,
    )
    if dataset is not None:
        _result_tracker.log_params(
            dict(dataset=dataset_instance.get_normalized_name()))
    else:  # means that dataset was defined by triples factories
        _result_tracker.log_params(
            dict(
                dataset=USER_DEFINED_CODE,
                training=training
                if isinstance(training, str) else USER_DEFINED_CODE,
                testing=testing
                if isinstance(training, str) else USER_DEFINED_CODE,
                validation=validation
                if isinstance(training, str) else USER_DEFINED_CODE,
            ))

    training, testing, validation = dataset_instance.training, dataset_instance.testing, dataset_instance.validation
    # evaluation restriction to a subset of entities/relations
    if any(f is not None for f in (evaluation_entity_whitelist,
                                   evaluation_relation_whitelist)):
        testing = testing.new_with_restriction(
            entities=evaluation_entity_whitelist,
            relations=evaluation_relation_whitelist,
        )
        if validation is not None:
            validation = validation.new_with_restriction(
                entities=evaluation_entity_whitelist,
                relations=evaluation_relation_whitelist,
            )

    model_instance: Model
    if model is not None and interaction is not None:
        raise ValueError('can not pass both a model and interaction')
    elif model is None and interaction is None:
        raise ValueError('must pass one of model or interaction')
    elif interaction is not None:
        if dimensions is None:
            raise ValueError('missing dimensions')
        model = make_model_cls(
            interaction=interaction,
            dimensions=dimensions,
            interaction_kwargs=interaction_kwargs,
        )

    if isinstance(model, Model):
        model_instance = model
        # TODO should training be reset?
        # TODO should kwargs for loss and regularizer be checked and raised for?
    else:
        model_instance = _build_model_helper(
            model=model,
            model_kwargs=model_kwargs,
            loss=loss,
            loss_kwargs=loss_kwargs,
            regularizer=regularizer,
            regularizer_kwargs=regularizer_kwargs,
            _device=_device,
            _random_seed=_random_seed,
            training_triples_factory=training,
        )

    # Log model parameters
    _result_tracker.log_params(
        params=dict(cls=model_instance.__class__.__name__,
                    kwargs=model_kwargs),
        prefix='model',
    )

    optimizer_instance = optimizer_resolver.make(
        optimizer,
        optimizer_kwargs,
        params=model_instance.get_grad_params(),
    )
    _result_tracker.log_params(
        params=dict(cls=optimizer_instance.__class__.__name__,
                    kwargs=optimizer_kwargs),
        prefix='optimizer',
    )

    training_loop_cls = training_loop_resolver.lookup(training_loop)
    if training_loop_kwargs is None:
        training_loop_kwargs = {}

    if negative_sampler is None:
        negative_sampler_cls = None
        training_loop_instance = training_loop_cls(
            model=model_instance,
            triples_factory=training,
            optimizer=optimizer_instance,
            **training_loop_kwargs,
        )
    elif not issubclass(training_loop_cls, SLCWATrainingLoop):
        raise ValueError('Can not specify negative sampler with LCWA')
    else:
        negative_sampler_cls = negative_sampler_resolver.lookup(
            negative_sampler)
        _result_tracker.log_params(
            params=dict(cls=negative_sampler_cls.__name__,
                        kwargs=negative_sampler_kwargs),
            prefix='negative_sampler',
        )
        training_loop_instance = SLCWATrainingLoop(
            model=model_instance,
            triples_factory=training,
            optimizer=optimizer_instance,
            negative_sampler=negative_sampler_cls,
            negative_sampler_kwargs=negative_sampler_kwargs,
            **training_loop_kwargs,
        )
    _result_tracker.log_params(
        params=dict(cls=training_loop_instance.__class__.__name__),
        prefix='training_loop',
    )

    if evaluator_kwargs is None:
        evaluator_kwargs = {}
    evaluator_kwargs = dict(evaluator_kwargs)
    evaluator_instance: Evaluator = evaluator_resolver.make(
        evaluator, evaluator_kwargs)

    if evaluation_kwargs is None:
        evaluation_kwargs = {}
    evaluation_kwargs = dict(evaluation_kwargs)

    # Stopping
    if 'stopper' in training_kwargs and stopper is not None:
        raise ValueError('Specified stopper in training_kwargs and as stopper')
    if 'stopper' in training_kwargs:
        stopper = training_kwargs.pop('stopper')
    if stopper_kwargs is None:
        stopper_kwargs = {}
    stopper_kwargs = dict(stopper_kwargs)

    # Load the evaluation batch size for the stopper, if it has been set
    _evaluation_batch_size = evaluation_kwargs.get('batch_size')
    if _evaluation_batch_size is not None:
        stopper_kwargs.setdefault('evaluation_batch_size',
                                  _evaluation_batch_size)

    stopper_instance: Stopper = stopper_resolver.make(
        stopper,
        model=model_instance,
        evaluator=evaluator_instance,
        training_triples_factory=training,
        evaluation_triples_factory=validation,
        result_tracker=_result_tracker,
        **stopper_kwargs,
    )

    training_kwargs.setdefault('num_epochs', 5)
    training_kwargs.setdefault('batch_size', 256)
    _result_tracker.log_params(params=training_kwargs, prefix='training')

    # Add logging for debugging
    logging.debug("Run Pipeline based on following config:")
    if dataset is not None:
        logging.debug(f"dataset: {dataset}")
        logging.debug(f"dataset_kwargs: {dataset_kwargs}")
    else:
        logging.debug('training: %s', training)
        logging.debug('testing: %s', testing)
        if validation:
            logging.debug('validation: %s', validation)
    logging.debug(f"model: {model_instance}")
    logging.debug(f"model_kwargs: {model_kwargs}")
    logging.debug(f"loss: {model_instance.loss}")
    logging.debug(f"loss_kwargs: {loss_kwargs}")
    logging.debug(f"regularizer: {regularizer}")
    logging.debug(f"regularizer_kwargs: {regularizer_kwargs}")
    logging.debug(f"optimizer: {optimizer}")
    logging.debug(f"optimizer_kwargs: {optimizer_kwargs}")
    logging.debug(f"training_loop: {training_loop_instance}")
    if negative_sampler_cls is not None:
        logging.debug(f"negative_sampler: {negative_sampler_cls}")
        logging.debug(f"_negative_sampler_kwargs: {negative_sampler_kwargs}")
    logging.debug(f"_training_kwargs: {training_kwargs}")
    logging.debug(f"stopper: {stopper_instance}")
    logging.debug(f"stopper_kwargs: {stopper_kwargs}")
    logging.debug(f"evaluator: {evaluator}")
    logging.debug(f"evaluator_kwargs: {evaluator_kwargs}")

    # Train like Cristiano Ronaldo
    training_start_time = time.time()
    losses = training_loop_instance.train(
        triples_factory=training,
        stopper=stopper_instance,
        result_tracker=_result_tracker,
        clear_optimizer=clear_optimizer,
        **training_kwargs,
    )
    assert losses is not None  # losses is only none if it's doing search mode
    training_end_time = time.time() - training_start_time

    if use_testing_data:
        mapped_triples = testing.mapped_triples
    elif validation is None:
        raise ValueError('no validation triples available')
    else:
        mapped_triples = validation.mapped_triples

    # Build up a list of triples if we want to be in the filtered setting
    if evaluator_instance.filtered:
        additional_filter_triples: List[MappedTriples] = [
            training.mapped_triples,
        ]

        # If the user gave custom "additional_filter_triples"
        popped_additional_filter_triples = evaluation_kwargs.pop(
            'additional_filter_triples', [])
        if isinstance(popped_additional_filter_triples, (list, tuple)):
            additional_filter_triples.extend(popped_additional_filter_triples)
        elif torch.is_tensor(
                popped_additional_filter_triples):  # a single MappedTriple
            additional_filter_triples.append(popped_additional_filter_triples)
        else:
            raise TypeError(
                f'Invalid type for `evaluation_kwargs["additional_filter_triples"]`:'
                f' {type(popped_additional_filter_triples)}', )

        # Determine whether the validation triples should also be filtered while performing test evaluation
        if (use_testing_data and filter_validation_when_testing
                and validation is not None):
            if isinstance(stopper, EarlyStopper):
                logging.info(
                    "When evaluating the test dataset after running the pipeline with early stopping, the validation"
                    " triples are added to the set of known positive triples which are filtered out when performing"
                    " filtered evaluation following the approach described by (Bordes et al., 2013).",
                )
            else:
                logging.info(
                    "When evaluating the test dataset, validation triples are added to the set of known positive"
                    " triples which are filtered out when performing filtered evaluation following the approach"
                    " described by (Bordes et al., 2013).", )
            additional_filter_triples.append(validation.mapped_triples)

        # TODO consider implications of duplicates
        evaluation_kwargs[
            'additional_filter_triples'] = additional_filter_triples

    # Evaluate
    # Reuse optimal evaluation parameters from training if available, only if the validation triples are used again
    if ((evaluator_instance.batch_size is not None
         or evaluator_instance.slice_size is not None)
            and not use_testing_data):
        evaluation_kwargs['batch_size'] = evaluator_instance.batch_size
        evaluation_kwargs['slice_size'] = evaluator_instance.slice_size
    # Add logging about evaluator for debugging
    logging.debug("Evaluation will be run with following parameters:")
    logging.debug(f"evaluation_kwargs: {evaluation_kwargs}")
    evaluate_start_time = time.time()
    metric_results: MetricResults = _safe_evaluate(
        model=model_instance,
        mapped_triples=mapped_triples,
        evaluator=evaluator_instance,
        evaluation_kwargs=evaluation_kwargs,
        evaluation_fallback=evaluation_fallback,
    )
    evaluate_end_time = time.time() - evaluate_start_time
    _result_tracker.log_metrics(
        metrics=metric_results.to_dict(),
        step=training_kwargs.get('num_epochs'),
    )
    _result_tracker.end_run()

    return PipelineResult(
        random_seed=_random_seed,
        model=model_instance,
        training=training,
        training_loop=training_loop_instance,
        losses=losses,
        stopper=stopper_instance,
        metric_results=metric_results,
        metadata=metadata,
        train_seconds=training_end_time,
        evaluate_seconds=evaluate_end_time,
    )
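
A minimal invocation sketch for the pipeline above, assuming PyKEEN is installed; 'Nations' is one of its built-in datasets and the keyword arguments shown are illustrative rather than a recommended configuration:

from pykeen.pipeline import pipeline

result = pipeline(
    dataset='Nations',
    model='TransE',
    training_kwargs=dict(num_epochs=5, batch_size=128),
    random_seed=42,
)
print(result.metric_results.to_dict())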
Code example #46
    def __getitem__(self, index):
        if self.num_blocks is None:
            res = super(BlockLazyTensor, self).__getitem__(index)
            return res

        # Cases for when there's an inner batch
        else:
            batch_index = index if not isinstance(index, tuple) else index[0]
            first_tensor_index_dim = None

            # Keeping all batch dimensions - recursion base case
            if isinstance(batch_index, slice) and batch_index == slice(
                    None, None, None):
                res = super(BlockLazyTensor, self).__getitem__(index)
                return res

            # Construct a new lazy tensor
            # Get rid of sum_batch_index if we're choosing one batch tensor
            if isinstance(batch_index, int):
                batch_index = slice(batch_index * self.num_blocks,
                                    (batch_index + 1) * self.num_blocks, None)
                num_blocks = None

            # Keep sum_batch_index, because we still have an inner batch
            elif isinstance(batch_index, slice):
                start, stop, step = batch_index.indices(self.size(0))
                batch_index = slice(start * self.num_blocks,
                                    stop * self.num_blocks, step)
                num_blocks = self.num_blocks

            # Keep sum_batch_index, because we still have an inner batch
            # Also keep track that there has been tensor indexing
            elif torch.is_tensor(batch_index):
                block_index = torch.arange(0,
                                           self.num_blocks,
                                           dtype=torch.long,
                                           device=self.device)
                batch_index = (batch_index.unsqueeze(1).mul(self.num_blocks) +
                               block_index.unsqueeze(0)).view(-1)
                num_blocks = self.num_blocks
                first_tensor_index_dim = 0

            else:
                raise RuntimeError("Unknown batch index type")

            # Now construct a new sum batch lazy tensor, and recurse
            components = tuple(component[batch_index]
                               for component in self._args)
            new_var = self.__class__(*components, num_blocks=num_blocks)

            # If the index was only on the batch index, we're done
            if not isinstance(index, tuple) or len(index) == 1:
                return new_var

            # Else - recurse
            else:
                left_index = index[1]
                right_index = index[2] if len(index) >= 3 else slice(
                    None, None, None)

                # Normal case if we're indexing the LT with ints or slices
                # Also squeeze dimensions if we're indexing with tensors
                squeeze_left = False
                squeeze_right = False
                if isinstance(left_index, int):
                    left_index = slice(left_index, left_index + 1, None)
                    squeeze_left = True
                elif torch.is_tensor(left_index):
                    squeeze_left = True
                if isinstance(right_index, int):
                    right_index = slice(right_index, right_index + 1, None)
                    squeeze_right = True
                elif torch.is_tensor(right_index):
                    squeeze_right = True

                if torch.is_tensor(left_index) and torch.is_tensor(
                        right_index):
                    if left_index.numel() != right_index.numel():
                        raise RuntimeError(
                            "Expected the tensor indices to be the same size: got {} and {}"
                            .format(left_index.numel(), right_index.numel()))

                    if new_var.ndimension() == 2:
                        return new_var._get_indices(left_index, right_index)

                    else:
                        batch_index = torch.arange(0,
                                                   new_var.size(0),
                                                   dtype=torch.long,
                                                   device=self.device)
                        if first_tensor_index_dim is not None:
                            if batch_index.numel() != left_index.numel():
                                raise RuntimeError(
                                    "Expected the tensor indices to be the same size: got {}, {} and {}"
                                    .format(batch_index.numel(),
                                            left_index.numel(),
                                            right_index.numel()))
                            return new_var._batch_get_indices(
                                batch_index, left_index, right_index)
                        else:
                            batch_size = batch_index.numel()
                            row_col_size = left_index.numel()
                            batch_index = batch_index.unsqueeze(1).repeat(
                                1, row_col_size).view(-1)
                            left_index = left_index.unsqueeze(1).repeat(
                                batch_size, 1).view(-1)
                            right_index = right_index.unsqueeze(1).repeat(
                                batch_size, 1).view(-1)
                            res = new_var._batch_get_indices(
                                batch_index, left_index, right_index)
                            return res.view(batch_size, row_col_size)

                # Normal case: we have to do some processing on either the rows or columns
                res = new_var._getitem_nonbatch(left_index, right_index,
                                                first_tensor_index_dim)
                if (squeeze_left or squeeze_right) and isinstance(
                        res, LazyTensor):
                    res = res.evaluate()
                if squeeze_left:
                    res = res.squeeze(-2)
                if squeeze_right:
                    res = res.squeeze(-1)

                return res
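
The tensor-index branch above expands each requested outer-batch index into its num_blocks underlying block indices; a standalone sketch of that index arithmetic:

import torch

num_blocks = 3
batch_index = torch.tensor([0, 2])                      # outer batches to keep
block_index = torch.arange(0, num_blocks, dtype=torch.long)

expanded = (batch_index.unsqueeze(1) * num_blocks + block_index.unsqueeze(0)).view(-1)
print(expanded)   # tensor([0, 1, 2, 6, 7, 8]) -> all blocks of outer batches 0 and 2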
Code example #47
File: hvm.py  Project: stothe2/SimNeuroController
 def __getitem__(self, idx):
     if torch.is_tensor(idx):
         idx = idx.tolist()
     return self.data[idx]
Code example #48
 def _set_noise(self, value):
     if not torch.is_tensor(value):
         value = torch.tensor(value)
     self.initialize(raw_noise=self._inv_param_transform(value))
Code example #49
def is_xla_tensor(tensor):
    return torch.is_tensor(tensor) and tensor.device.type == "xla"
Code example #50
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, range=None, scale_each=False, pad_value=0):
    """Make a grid of images.
    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is (B / nrow, nrow). Default is 8.
        padding (int, optional): amount of padding. Default is 2.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by subtracting the minimum and dividing by the maximum pixel value.
        range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If True, scale each image in the batch of
            images separately rather than the (min, max) over all images.
        pad_value (float, optional): Value for the padded pixels.
    Example:
        See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
    """
    if not (torch.is_tensor(tensor) or
            (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)

    if tensor.dim() == 2:  # single image H x W
        tensor = tensor.view(1, tensor.size(0), tensor.size(1))
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        return tensor
    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = torch.cat((tensor, tensor, tensor), 1)

    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if range is not None:
            assert isinstance(range, tuple), \
                "range has to be a tuple (min, max) if specified. min and max are numbers"

        def norm_ip(img, min, max):
            img.clamp_(min=min, max=max)
            img.add_(-min).div_(max - min)

        def norm_range(t, range):
            if range is not None:
                norm_ip(t, range[0], range[1])
            else:
                norm_ip(t, t.min(), t.max())

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, range)
        else:
            norm_range(tensor, range)

    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
    grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(pad_value)
    k = 0
    for y in irange(ymaps):
        for x in irange(xmaps):
            if k >= nmaps:
                break
            grid.narrow(1, y * height + padding, height - padding)\
                .narrow(2, x * width + padding, width - padding)\
                .copy_(tensor[k])
            k = k + 1
    return grid
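
A small usage sketch for the grid helper above, assuming make_grid and its irange dependency (an alias for range) are in scope; the image shapes are illustrative:

import torch

images = torch.rand(16, 3, 32, 32)                     # B x C x H x W
grid = make_grid(images, nrow=4, padding=2, normalize=True)
print(grid.shape)                                      # torch.Size([3, 138, 138])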
Code example #51
def is_tensor(v):
    if type(v).__module__.startswith('torch'):
        import torch
        return torch.is_tensor(v)
    return False
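
A quick sketch of the duck-typed check above (assuming the is_tensor helper is in scope): torch is imported lazily, only when the value's type actually comes from a torch module, so the helper is cheap to call on plain Python or NumPy objects:

import numpy as np

print(is_tensor([1, 2, 3]))       # False; torch is never imported
print(is_tensor(np.zeros(3)))     # False; the type's module is numpy

import torch
print(is_tensor(torch.zeros(3)))  # True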
Code example #52
File: adalam.py  Project: youruncleda/AdaLAM
 def __to_torch(self, *args):
     return (a if a is None or torch.is_tensor(a) else torch.tensor(
         a, device=self.config['device'], dtype=torch.float32)
             for a in args)
Code example #53
 def __init__(self, dataset_dir):
     self._dataset_dir = dataset_dir
     # files = [name for name in os.listdir(self._dataset_dir)]
     files = sorted(
         glob(os.path.join(self._dataset_dir, 'pyprob_traces_sorted_*')))
     if len(files) > 0:
         self._sorted_on_disk = True
     else:
         self._sorted_on_disk = False
         files = sorted(
             glob(os.path.join(self._dataset_dir, 'pyprob_traces_*')))
     if len(files) == 0:
         raise RuntimeError(
             'Cannot find any data set files at {}'.format(dataset_dir))
     datasets = []
     for file in files:
         try:
             dataset = OfflineDatasetFile(file)
             datasets.append(dataset)
         except Exception as e:
             print(e)
             warnings.warn(
                 'Dataset file potentially corrupt, omitting: {}'.format(
                     file))
     super().__init__(datasets)
     print('OfflineDataset at: {}'.format(self._dataset_dir))
     print('Num. traces      : {:,}'.format(len(self)))
     print('Sorted on disk   : {}'.format(self._sorted_on_disk))
     if self._sorted_on_disk:
         self._sorted_indices = list(range(len(self)))
     else:
         file_name = os.path.join(self._dataset_dir, 'pyprob_hashes')
         try:
             hashes_file = shelve.open(file_name, 'r')
             hashes_exist = 'hashes' in hashes_file
             hashes_file.close()
         except:
             hashes_exist = False
         if hashes_exist:
             print('Using pre-computed hashes in: {}'.format(file_name))
             hashes_file = shelve.open(file_name, 'r')
             self._hashes = hashes_file['hashes']
             self._sorted_indices = hashes_file['sorted_indices']
             hashes_file.close()
             if torch.is_tensor(self._hashes):
                 self._hashes = self._hashes.cpu().numpy()
             if len(self._sorted_indices) != len(self):
                 raise RuntimeError(
                     'Length of pre-computed hashes ({}) and length of offline dataset ({}) do not match. Dataset files have been altered. Delete and re-generate pre-computed hash file: {}'
                     .format(len(self._sorted_indices), len(self),
                             file_name))
         else:
             print('No pre-computed hashes found, generating: {}'.format(
                 file_name))
             hashes_file = shelve.open(file_name, 'c')
             hashes, sorted_indices = self._compute_hashes()
             hashes_file['hashes'] = hashes
             hashes_file['sorted_indices'] = sorted_indices
             hashes_file.close()
             self._sorted_indices = sorted_indices
             self._hashes = hashes
         print('Num. trace types : {:,}'.format(len(set(self._hashes))))
         hashes_and_counts = OrderedDict(
             sorted(Counter(self._hashes).items()))
         print('Trace hash\tCount')
         for hash, count in hashes_and_counts.items():
             print('{:.8f}\t{}'.format(hash, count))
     print()
Code example #54
    def _set_period_length(self, value):
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.raw_period_length)

        self.initialize(raw_period_length=self.raw_period_length_constraint.
                        inverse_transform(value))
Code example #55
def roi_tanh_circular_restore(warped_images: torch.Tensor,
                              rois: torch.Tensor,
                              image_width: int,
                              image_height: int,
                              angular_offsets: Union[float,
                                                     torch.Tensor] = 0.0,
                              interpolation: str = 'bilinear',
                              padding: str = 'zeros',
                              keep_aspect_ratio: bool = False) -> torch.Tensor:
    warped_height, warped_width = warped_images.size()[-2:]
    roi_centers = (rois[:, 2:4] + rois[:, :2]) / 2.0
    rois_radii = (rois[:, 2:4] - rois[:, :2]) / math.pi**0.5

    grids = torch.zeros(warped_images.size()[:1] +
                        (image_height, image_width, 2),
                        dtype=warped_images.dtype,
                        device=warped_images.device)
    dest_x_indices = torch.arange(image_width,
                                  dtype=warped_images.dtype,
                                  device=warped_images.device)
    dest_y_indices = torch.arange(image_height,
                                  dtype=warped_images.dtype,
                                  device=warped_images.device)
    dest_indices = torch.cat(
        (dest_x_indices.unsqueeze(0).expand(
            (image_height, image_width)).unsqueeze(-1),
         dest_y_indices.unsqueeze(-1).expand(
             (image_height, image_width)).unsqueeze(-1)), -1)

    if torch.is_tensor(angular_offsets):
        cos_offsets, sin_offsets = angular_offsets.cos(), angular_offsets.sin()
    else:
        cos_offsets = [math.cos(angular_offsets)] * grids.size()[0]
        sin_offsets = [math.sin(angular_offsets)] * grids.size()[0]

    for roi_center, roi_radii, grid, cos_offset, sin_offset in zip(
            roi_centers, rois_radii, grids, cos_offsets, sin_offsets):
        normalised_dest_indices = dest_indices - roi_center
        normalised_dest_indices[..., 0], normalised_dest_indices[..., 1] = (
            cos_offset * normalised_dest_indices[..., 0] +
            sin_offset * normalised_dest_indices[..., 1],
            cos_offset * normalised_dest_indices[..., 1] -
            sin_offset * normalised_dest_indices[..., 0])
        if keep_aspect_ratio:
            radii = normalised_dest_indices.norm(dim=-1)
            orientation_x = normalised_dest_indices[...,
                                                    0] / radii.clamp(min=1e-9)
            orientation_y = normalised_dest_indices[...,
                                                    1] / radii.clamp(min=1e-9)
            radii *= torch.sqrt(roi_radii[1]**2 * orientation_x**2 +
                                roi_radii[0]**2 *
                                orientation_y**2) / roi_radii[0] / roi_radii[1]
        else:
            normalised_dest_indices /= roi_radii
            radii = normalised_dest_indices.norm(dim=-1)
            orientation_x = normalised_dest_indices[...,
                                                    0] / radii.clamp(min=1e-9)
            orientation_y = normalised_dest_indices[...,
                                                    1] / radii.clamp(min=1e-9)
        warped_radii = torch.tanh(radii)
        grid[...,
             0] = ((orientation_x * warped_radii + 1.0) * warped_width / 2.0 -
                   0.5) / (warped_width - 1.0) * 2.0 - 1.0
        grid[...,
             1] = ((orientation_y * warped_radii + 1.0) * warped_height / 2.0 -
                   0.5) / (warped_height - 1.0) * 2.0 - 1.0

    return tf.grid_sample(warped_images,
                          grids,
                          mode=interpolation,
                          padding_mode=padding,
                          align_corners=True)
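
A usage sketch for the restore function above, assuming it and its tf alias for torch.nn.functional are in scope; the batch, ROI coordinates and output size are illustrative:

import torch

warped = torch.rand(2, 3, 128, 128)                # two warped face crops
rois = torch.tensor([[32.0, 40.0, 160.0, 180.0],   # per-image boxes: x1, y1, x2, y2
                     [50.0, 60.0, 200.0, 220.0]])
restored = roi_tanh_circular_restore(warped, rois, image_width=256, image_height=256)
print(restored.shape)                              # torch.Size([2, 3, 256, 256])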
Code example #56
def train_bert(net, data_iter, lr, num_epochs, batch_size, tgt_vocab, device):
    """Train a model for sequence to sequence."""
    def xavier_init_weights(m):
        if type(m) == nn.Linear:
            torch.nn.init.xavier_uniform_(m.weight)
        if type(m) == nn.GRU:
            for param in m._flat_weights_names:
                if "weight" in param:
                    torch.nn.init.xavier_uniform_(m._parameters[param])
    # net.apply(xavier_init_weights)
    try:
        checkpoint_prefix = os.path.join("model_data/model_bert.pt")
        checkpoint = torch.load(checkpoint_prefix)
        net.load_state_dict(checkpoint['model_state_dict'])
        net.to(device)
        optimizer = torch.optim.Adam(net.parameters(), lr=lr)
        optimizer.load_state_dict(checkpoint['optimizer'])
        for state in optimizer.state.values():
            for k, v in state.items():
                if torch.is_tensor(v):
                    state[k] = v.to(device)
    except Exception as e:
        # Fall back to training from scratch if no usable checkpoint is found.
        net.apply(xavier_init_weights)
        net.to(device)
        optimizer = torch.optim.Adam(net.parameters(), lr=lr)
        print("Cannot load the checkpoint; starting from scratch. Error:", e)

    loss = am.MaskedSoftmaxCELoss()
    net.train()
    animator = am.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[1, num_epochs*batch_size])

    
    checkpoint_prefix = os.path.join("model_data/model_bert.pt")
    # ratio = 100 / len(data_iter)
    # print("ratio=", ratio)
    num_trained = 0
    for epoch in range(num_epochs):
        timer = Utility.Timer()
        metric = am.Accumulator(2)  # Sum of training loss, no. of tokens
        # print("epoch ...", epoch)
        for i, batch in enumerate(data_iter):
            # if random.random() < (1 - ratio * 1.5):
            #     continue
            num_trained += 1
            optimizer.zero_grad()
            X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]
            bos = torch.tensor([tgt_vocab['<bos>']] * Y.shape[0],
                               device=device).reshape(-1, 1)
            dec_input = torch.cat([bos, Y[:, :-1]], 1)  # Teacher forcing
            Y_hat, _ = net(X, dec_input, X_valid_len)
            l = loss(Y_hat, Y, Y_valid_len)
            l.sum().backward()  # Make the loss scalar for `backward`
            # Utility.grad_clipping(net, 1)
            num_tokens = Y_valid_len.sum()
            optimizer.step()
            with torch.no_grad():
                metric.add(l.sum(), num_tokens)
            # if (i + 1) % 100 == 0:
            # print("    batch>>>", i)
            if (num_trained + 1) % 100 == 0:
                animator.add(num_trained + 1, (metric[0] / metric[1],))
                # print(f'epoch = {epoch}, loss = {metric[0] / metric[1]:.3f}')
                torch.save({'model_state_dict': net.state_dict(),
                            'optimizer': optimizer.state_dict()},
                           checkpoint_prefix)
        # if (epoch + 1) % 10 == 0:
        # animator.add(epoch + 1, (metric[0] / metric[1],))
        # # print(f'epoch = {epoch}, loss = {metric[0] / metric[1]:.3f}')
        # torch.save({'model_state_dict': net.state_dict(), "optimizer": optimizer.state_dict()},checkpoint_prefix)
        # sys.stdout.flush()
    print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} '
          f'tokens/sec on {str(device)}')
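
A self-contained illustration of the teacher-forcing input built inside the training loop above; the bos index and target tensor are made up for the demo:

import torch

# Teacher forcing: prepend <bos> to the targets and drop their last token,
# so at each step the decoder conditions on the ground-truth previous token.
bos_idx = 1                               # assumed index of '<bos>' in tgt_vocab
Y = torch.tensor([[5, 6, 7, 2],           # 2 = <eos>
                  [8, 9, 2, 0]])          # 0 = <pad>
bos = torch.full((Y.shape[0], 1), bos_idx, dtype=Y.dtype)
dec_input = torch.cat([bos, Y[:, :-1]], dim=1)
print(dec_input)
# tensor([[1, 5, 6, 7],
#         [1, 8, 9, 2]])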
Code example #57
def roi_tanh_polar_warp(images: torch.Tensor,
                        rois: torch.Tensor,
                        target_width: int,
                        target_height: int,
                        angular_offsets: Union[float, torch.Tensor] = 0.0,
                        interpolation: str = 'bilinear',
                        padding: str = 'zeros',
                        keep_aspect_ratio: bool = False) -> torch.Tensor:
    image_height, image_width = images.size()[-2:]
    # ROI centres and half-axes; dividing by sqrt(pi) makes the area of the
    # resulting ellipse equal to the area of the bounding box.
    roi_centers = (rois[:, 2:4] + rois[:, :2]) / 2.0
    rois_radii = (rois[:, 2:4] - rois[:, :2]) / math.pi**0.5

    grids = torch.zeros(images.size()[:1] + (target_height, target_width, 2),
                        dtype=images.dtype,
                        device=images.device)
    warped_radii = arctanh(
        torch.arange(0.0,
                     1.0,
                     1.0 / target_width,
                     dtype=grids.dtype,
                     device=grids.device)).unsqueeze(0).expand(
                         (target_height, target_width))
    thetas = torch.arange(0.0,
                          2.0 * math.pi,
                          2.0 * math.pi / target_height,
                          dtype=grids.dtype,
                          device=grids.device).unsqueeze(-1).expand(
                              (target_height, target_width))
    orientation_x = torch.cos(thetas)
    orientation_y = torch.sin(thetas)
    if not keep_aspect_ratio:
        orientation_x *= warped_radii
        orientation_y *= warped_radii

    if torch.is_tensor(angular_offsets):
        cos_offsets, sin_offsets = angular_offsets.cos(), angular_offsets.sin()
    else:
        cos_offsets = [math.cos(angular_offsets)] * grids.size()[0]
        sin_offsets = [math.sin(angular_offsets)] * grids.size()[0]

    for roi_center, roi_radii, grid, cos_offset, sin_offset in zip(
            roi_centers, rois_radii, grids, cos_offsets, sin_offsets):
        if keep_aspect_ratio:
            src_radii = warped_radii * (
                roi_radii[0] * roi_radii[1] /
                torch.sqrt(roi_radii[1]**2 * orientation_x**2 +
                           roi_radii[0]**2 * orientation_y**2))
            warped_x_indices = src_radii * orientation_x
            warped_y_indices = src_radii * orientation_y
        else:
            warped_x_indices = roi_radii[0] * orientation_x
            warped_y_indices = roi_radii[1] * orientation_y
        src_x_indices, src_y_indices = (cos_offset * warped_x_indices -
                                        sin_offset * warped_y_indices,
                                        cos_offset * warped_y_indices +
                                        sin_offset * warped_x_indices)
        grid[..., 0] = (roi_center[0] + src_x_indices) / (image_width -
                                                          1.0) * 2.0 - 1.0
        grid[..., 1] = (roi_center[1] + src_y_indices) / (image_height -
                                                          1.0) * 2.0 - 1.0

    return tf.grid_sample(images,
                          grids,
                          mode=interpolation,
                          padding_mode=padding,
                          align_corners=True)
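
A usage sketch for roi_tanh_polar_warp. It assumes tf is torch.nn.functional and that arctanh is a small helper like the one below (recent PyTorch also provides torch.atanh); the image sizes and ROI boxes are made up:

import math
import torch
import torch.nn.functional as tf  # assumed alias used by the snippets above

def arctanh(x):  # assumed helper, equivalent to torch.atanh
    return 0.5 * torch.log((1.0 + x) / (1.0 - x))

images = torch.rand(2, 3, 256, 256)               # NCHW batch
rois = torch.tensor([[64.0, 64.0, 192.0, 192.0],  # x_min, y_min, x_max, y_max
                     [32.0, 48.0, 160.0, 200.0]])
warped = roi_tanh_polar_warp(images, rois, 128, 128, angular_offsets=0.1)
print(warped.shape)  # torch.Size([2, 3, 128, 128])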
Code example #58
def roi_tanh_circular_warp(images: torch.Tensor,
                           rois: torch.Tensor,
                           target_width: int,
                           target_height: int,
                           angular_offsets: Union[float, torch.Tensor] = 0.0,
                           interpolation: str = 'bilinear',
                           padding: str = 'zeros',
                           keep_aspect_ratio: bool = False) -> torch.Tensor:
    image_height, image_width = images.size()[-2:]
    roi_centers = (rois[:, 2:4] + rois[:, :2]) / 2.0
    rois_radii = (rois[:, 2:4] - rois[:, :2]) / math.pi**0.5

    grids = torch.zeros(images.size()[:1] + (target_height, target_width, 2),
                        dtype=images.dtype,
                        device=images.device)
    normalised_dest_x_indices = torch.arange(
        -1.0, 1.0, 2.0 / target_width, dtype=grids.dtype,
        device=grids.device) + 1.0 / target_width
    normalised_dest_y_indices = torch.arange(
        -1.0, 1.0, 2.0 / target_height, dtype=grids.dtype,
        device=grids.device) + 1.0 / target_height
    normalised_dest_indices = torch.cat(
        (normalised_dest_x_indices.unsqueeze(0).expand(
            (target_height, target_width)).unsqueeze(-1),
         normalised_dest_y_indices.unsqueeze(-1).expand(
             (target_height, target_width)).unsqueeze(-1)), -1)
    radii = normalised_dest_indices.norm(dim=-1)
    orientation_x = normalised_dest_indices[..., 0] / radii.clamp(min=1e-9)
    orientation_y = normalised_dest_indices[..., 1] / radii.clamp(min=1e-9)

    if torch.is_tensor(angular_offsets):
        cos_offsets, sin_offsets = angular_offsets.cos(), angular_offsets.sin()
    else:
        cos_offsets = [math.cos(angular_offsets)] * grids.size()[0]
        sin_offsets = [math.sin(angular_offsets)] * grids.size()[0]

    warped_radii = arctanh(radii)
    warped_x_indices = warped_radii * orientation_x
    warped_y_indices = warped_radii * orientation_y
    for roi_center, roi_radii, grid, cos_offset, sin_offset in zip(
            roi_centers, rois_radii, grids, cos_offsets, sin_offsets):
        if keep_aspect_ratio:
            src_radii = warped_radii * (
                roi_radii[0] * roi_radii[1] /
                torch.sqrt(roi_radii[1]**2 * orientation_x**2 +
                           roi_radii[0]**2 * orientation_y**2))
            src_x_indices, src_y_indices = src_radii * orientation_x, src_radii * orientation_y
        else:
            src_x_indices, src_y_indices = roi_radii[
                0] * warped_x_indices, roi_radii[1] * warped_y_indices
        src_x_indices, src_y_indices = (cos_offset * src_x_indices -
                                        sin_offset * src_y_indices,
                                        cos_offset * src_y_indices +
                                        sin_offset * src_x_indices)
        grid[..., 0] = (roi_center[0] + src_x_indices) / (image_width -
                                                          1.0) * 2.0 - 1.0
        grid[..., 1] = (roi_center[1] + src_y_indices) / (image_height -
                                                          1.0) * 2.0 - 1.0

    return tf.grid_sample(images,
                          grids,
                          mode=interpolation,
                          padding_mode=padding,
                          align_corners=True)
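
Both warps normalise sampling coordinates into grid_sample's [-1, 1] range (the "/ (image_width - 1.0) * 2.0 - 1.0" lines). A tiny self-contained check of that convention, assuming tf is torch.nn.functional:

import torch
import torch.nn.functional as tf  # assumed alias, matching the snippets

# With align_corners=True, grid value -1 addresses the first pixel of an axis
# and +1 addresses the last pixel.
image = torch.arange(16.0).reshape(1, 1, 4, 4)
grid = torch.tensor([[[[-1.0, -1.0], [1.0, 1.0]]]])  # top-left and bottom-right corners
print(tf.grid_sample(image, grid, align_corners=True))
# tensor([[[[ 0., 15.]]]])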
Code example #59
File: plot_utils.py  Project: zengwang430521/expose
    def __call__(self,
                 vertices,
                 faces,
                 intrinsics,
                 bg_imgs=None,
                 deg=0,
                 return_with_alpha=False,
                 **kwargs):
        ''' Returns a Bx3xHxW batch of mesh overlays
        '''

        if torch.is_tensor(vertices):
            vertices = vertices.detach().cpu().numpy()
        if torch.is_tensor(intrinsics):
            intrinsics = intrinsics.detach().cpu().numpy()
        batch_size = vertices.shape[0]

        body_color = COLORS['GT']
        output_imgs = []
        for bidx in range(batch_size):
            if bg_imgs is not None:
                _, H, W = bg_imgs[bidx].shape
                # Update the renderer's viewport
                self.renderer.viewport_height = H
                self.renderer.viewport_width = W
            self.update_camera(intrinsics[bidx])
            self.update_mesh(vertices[bidx],
                             faces,
                             body_color=body_color,
                             deg=deg)

            flags = (pyrender.RenderFlags.RGBA
                     | pyrender.RenderFlags.SKIP_CULL_FACES)
            color, depth = self.renderer.render(self.scene, flags=flags)
            color = np.transpose(color, [2, 0, 1]).astype(np.float32) / 255.0
            color = np.clip(color, 0, 1)

            if bg_imgs is None:
                if return_with_alpha:
                    output_imgs.append(color)
                else:
                    output_imgs.append(color[:-1])
            else:
                if return_with_alpha:
                    valid_mask = (color[3] > 0)[np.newaxis]

                    if bg_imgs[bidx].shape[0] < 4:
                        curr_bg_img = np.concatenate([
                            bg_imgs[bidx],
                            np.ones_like(bg_imgs[bidx, [0], :, :])
                        ],
                                                     axis=0)
                    else:
                        curr_bg_img = bg_imgs[bidx]

                    output_img = (color * valid_mask +
                                  (1 - valid_mask) * curr_bg_img)
                    output_imgs.append(np.clip(output_img, 0, 1))
                else:
                    valid_mask = (color[3] > 0)[np.newaxis]

                    output_img = (color[:-1] * valid_mask +
                                  (1 - valid_mask) * bg_imgs[bidx])
                    output_imgs.append(np.clip(output_img, 0, 1))
        return np.stack(output_imgs, axis=0)
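
A standalone NumPy sketch of the alpha-compositing step at the end of the method above; the array shapes are made up and this is not the renderer itself:

import numpy as np

# Keep rendered pixels where the alpha channel is positive,
# fall back to the background image everywhere else.
color = np.random.rand(4, 8, 8).astype(np.float32)   # RGBA render, CxHxW
bg_img = np.random.rand(3, 8, 8).astype(np.float32)  # RGB background, CxHxW
valid_mask = (color[3] > 0)[np.newaxis]               # 1xHxW boolean mask
overlay = color[:-1] * valid_mask + (1 - valid_mask) * bg_img
print(np.clip(overlay, 0, 1).shape)  # (3, 8, 8)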
Code example #60
    def add(self, output, target):
        """
        Args:
            output (Tensor): NxK tensor that for each of the N examples
                indicates the probability of the super_video belonging to each of
                the K classes, according to the model. The probabilities should
                sum to one over all classes
            target (Tensor): binary NxK tensor that encodes which of the K
                classes are associated with the N-th input
                    (e.g. a row [0, 1, 0, 1] indicates that the super_video is
                         associated with classes 2 and 4)
            weight (optional, Tensor): Nx1 tensor representing the weight for
                each super_video (each weight > 0)
        """
        if not torch.is_tensor(output):
            output = torch.from_numpy(output)
        if not torch.is_tensor(target):
            target = torch.from_numpy(target)

        if output.dim() == 1:
            output = output.view(-1, 1)
        else:
            assert output.dim() == 2, \
                'wrong output size (should be 1D or 2D with one column \
                per class)'

        if target.dim() == 1:
            target = target.view(-1, 1)
        else:
            assert target.dim() == 2, \
                'wrong target size (should be 1D or 2D with one column \
                per class)'

        if self.scores.numel() > 0:
            assert target.size(1) == self.targets.size(1), \
                'dimensions for output should match previously added examples.'

        # make sure storage is of sufficient size
        if self.scores.storage().size() < self.scores.numel() + output.numel():
            new_size = math.ceil(self.scores.storage().size() * 1.5)
            self.scores.storage().resize_(int(new_size + output.numel()))
            self.targets.storage().resize_(int(new_size + output.numel()))

        # store scores and targets
        offset = self.scores.size(0) if self.scores.dim() > 0 else 0
        self.scores.resize_(offset + output.size(0), output.size(1))
        self.targets.resize_(offset + target.size(0), target.size(1))
        self.scores.narrow(0, offset, output.size(0)).copy_(output)
        self.targets.narrow(0, offset, target.size(0)).copy_(target)

        # Idx of correct preds
        B, C = target.size()
        list_idx_correct_preds = []
        for idx in range(B):
            correct_preds = True
            for j in range(C):
                # does not have the same sign so bad preds -> break
                target_idx_j = -1 if target[idx, j] == 0 else 1
                if target_idx_j * output[idx, j] < 0:
                    correct_preds = False
                    break
            # good preds! if finished the loop
            if correct_preds:
                list_idx_correct_preds.append(idx)

        return list_idx_correct_preds
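
A vectorised, self-contained equivalent of the sign-agreement check in the final loop; the scores and targets are made up:

import torch

# A sample counts as correct only if no class score disagrees in sign with its
# {0, 1} -> {-1, +1} remapped target (zero products still count as agreement).
output = torch.tensor([[0.8, -0.2, 0.1],
                       [-0.3, 0.4, -0.5]])
target = torch.tensor([[1, 0, 1],
                       [0, 1, 1]])
signed_target = target * 2 - 1
correct = (signed_target * output >= 0).all(dim=1)
print(correct.nonzero(as_tuple=True)[0].tolist())  # [0]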