Example 1
def tucker(X, n_components=None, tol=1E-6, max_iter=500, init_type="hosvd",
           random_state=None):
    """
    Tucker decomposition using an alternating least squares
    algorithm.

    Parameters
    ----------
    X : ndarray
        Input data to decompose

    n_components : int
        The number of components in the decomposition. Note that unlike PCA or
        SVD, the decomposition of n_components + 1 DOES NOT contain
        the basis from the decomposition of n_components.

    tol : float, optional (default=1E-6)
        Stopping tolerance for reconstruction error.

    max_iter : int, optional (default=500)
        Maximum number of iterations to perform before exiting.

    init_type : string, optional (default="hosvd")
        How to initialize the decomposition. Choices are "random" or "hosvd",
        where "random" initializes with uniform random values and "hosvd"
        initializes from the higher-order SVD (HOSVD) of the dataset.

    random_state : int, None, or np.random.RandomState instance
        Random seed information used when ``init_type`` == "random".


    Returns
    -------
    components : list
        Decomposition of X. The first entry is the core multiplier G,
        followed by one basis matrix per mode, each of shape
        [X.shape[idx], n_components] where idx indexes the corresponding mode.


    References
    ----------
    Kolda, T. G. & Bader, B. W.
        Tensor Decompositions and Applications. SIAM Rev. 51, 455-500 (2009).

    J.M. Landsberg, Tensors: Geometry and Applications. American Mathematical
        Society (2011).

    G. Golub and C. Van Loan. Matrix Computations, Third Edition, Chapter 5,
        Section 5.4.4, pp. 252-253.

    """
    if n_components is None:
        raise ValueError("n_components is a required argument!")

    check_tensor(X)
    return _tuckerN(X, n_components, tol=tol, max_iter=max_iter,
                    init_type=init_type, random_state=random_state)
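A minimal usage sketch for the function above (the G-then-factors layout is
taken from the docstring; `tucker` is the function defined above):

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(8, 9, 10)

# Decompose into a core multiplier G plus one basis matrix per mode.
components = tucker(X, n_components=3, init_type="hosvd", random_state=0)
G, factors = components[0], components[1:]
for idx, U in enumerate(factors):
    assert U.shape == (X.shape[idx], 3)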
Example 2
    def augment(self, sample, **kwargs):
        """Apply misalignment data augmentation."""

        ret = dict()

        if self.do_augment:
            for k, v in sample.items():
                # Ensure data is a 4D tensor.
                data = check_tensor(v)
                new_data = np.zeros(self.spec[k], dtype=data.dtype)
                new_data = check_tensor(new_data)
                # Dimension.
                z, y, x = v.shape[-3:]
                assert z > 1
                if self.slip:
                    # Copy whole box.
                    xmin = max(self.x_t, 0)
                    ymin = max(self.y_t, 0)
                    xmax = min(self.x_t, 0) + x
                    ymax = min(self.y_t, 0) + y
                    new_data[:, :, ...] = data[:, :, ymin:ymax, xmin:xmax]
                    # Slip.
                    xmin = max(-self.x_t, 0)
                    ymin = max(-self.y_t, 0)
                    xmax = min(-self.x_t, 0) + x
                    ymax = min(-self.y_t, 0) + y
                    pvot = self.pivot[k]
                    new_data[:, pvot, ...] = data[:, pvot, ymin:ymax,
                                                  xmin:xmax]
                else:
                    # Copy upper box.
                    xmin = max(self.x_t, 0)
                    ymin = max(self.y_t, 0)
                    xmax = min(self.x_t, 0) + x
                    ymax = min(self.y_t, 0) + y
                    pvot = self.pivot[k]
                    new_data[:, 0:pvot, ...] = data[:, 0:pvot, ymin:ymax,
                                                    xmin:xmax]
                    # Copy lower box.
                    xmin = max(-self.x_t, 0)
                    ymin = max(-self.y_t, 0)
                    xmax = min(-self.x_t, 0) + x
                    ymax = min(-self.y_t, 0) + y
                    pvot = self.pivot[k]
                    new_data[:, pvot:, ...] = data[:, pvot:, ymin:ymax,
                                                   xmin:xmax]
                # Augmented sample.
                ret[k] = new_data
        else:
            ret = sample

        return ret
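A small sketch of the crop-window arithmetic used above (hypothetical sizes;
t plays the role of self.x_t). For a translation t, the slice
[max(t, 0), min(t, 0) + x) selects a window of length x - abs(t), offset by t
when t > 0 and flush with the start when t < 0; the upper and lower boxes use
t and -t respectively, which misaligns them by t pixels.

import numpy as np

x = 8
src = np.arange(x)
for t in (3, -3):
    window = src[max(t, 0):min(t, 0) + x]
    assert window.size == x - abs(t)
# t = 3  -> [3, 4, 5, 6, 7]
# t = -3 -> [0, 1, 2, 3, 4]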
Example 3
    def augment(self, sample, **kwargs):
        """Apply misalignment data augmentation."""

        ret = dict()

        if self.do_augment:
            for k, v in sample.items():
                # Ensure data is 4D tensor.
                data = check_tensor(v)
                new_data = np.zeros(self.spec[k], dtype=data.dtype)
                new_data = check_tensor(new_data)
                # Dimension
                z, y, x = v.shape[-3:]
                assert z > 1
                # Copy upper box.
                xmin = max(self.x_t, 0)
                ymin = max(self.y_t, 0)
                xmax = min(self.x_t, 0) + x
                ymax = min(self.y_t, 0) + y
                pvot = self.pivot[k]
                new_data[:, 0:pvot, ...] = data[:, 0:pvot, ymin:ymax,
                                                xmin:xmax]
                # Copy lower box.
                xmin = max(-self.x_t, 0)
                ymin = max(-self.y_t, 0)
                xmax = min(-self.x_t, 0) + x
                ymax = min(-self.y_t, 0) + y
                pvot = self.pivot[k]
                new_data[:, pvot:, ...] = data[:, pvot:, ymin:ymax, xmin:xmax]
                # Augmented sample.
                ret[k] = new_data
        else:
            ret = sample

        return ret
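check_tensor itself does not appear in these examples. A plausible sketch of
the numpy-side helper, inferred from how it is used above (promoting arrays
to 4D by prepending singleton axes is an assumption):

import numpy as np

def check_tensor(arr):
    """Return arr viewed as a 4D ndarray (hypothetical reconstruction)."""
    arr = np.asarray(arr)
    if arr.ndim > 4:
        raise ValueError("expected at most 4 dimensions, got %d" % arr.ndim)
    return arr.reshape((1,) * (4 - arr.ndim) + arr.shape)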
Example 4
    def forward_sequence(self, obs, rnn_states, roles=None, terminals=None):
        """
        Input should be in

        :param obs: tensor of dims [N, B, n_players, * obs_shape] format.
        :param rnn_states:
        :param terminals:
        :return: dict containing "global_ext_value" and "global_int_value" of shape [N, B, n_players]
        """

        # check the input
        N, B, n_players, *obs_shape = obs.shape

        check_tensor("obs", obs, (N, B, n_players, *obs_shape), torch.uint8)
        check_tensor("rnn_states", rnn_states, (N, B, 2, self.memory_units),
                     torch.float32)
        if terminals is not None:
            check_tensor("terminals", terminals, (N, B), torch.int64)

        # upload to device
        obs = obs.to(self.device)
        rnn_states = rnn_states.to(self.device)
        if terminals is not None:
            terminals = terminals.to(self.device)

        # stack player observations together as channels
        C, H, W = obs_shape
        obs = torch.reshape(obs, [N, B, C * n_players, H, W])
        encoder_output, new_rnn_states = self._forward_sequence(
            obs, rnn_states, terminals)

        ext_value = self.global_ext_value_head(encoder_output)
        int_value = self.global_int_value_head(encoder_output)

        # these come out as [N, B, n_roles * n_players], so reshape
        ext_value = ext_value.reshape([N, B, self.n_roles, self.n_players])
        int_value = int_value.reshape([N, B, self.n_roles, self.n_players])

        result = {
            'global_role_ext_value': ext_value,
            'global_role_int_value': int_value
        }

        if roles is not None:
            result['global_ext_value'] = extract_roles(ext_value, roles)
            result['global_int_value'] = extract_roles(int_value, roles)

        if self.nan_check:
            self._check_for_nans([(k, v) for k, v in result.items()] +
                                 [('rnn_states', new_rnn_states)])

        return result, new_rnn_states
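The torch-side check_tensor is not shown either. A plausible sketch inferred
from the call sites above (the exact messages and failure behavior are
assumptions):

import torch

def check_tensor(name, tensor, expected_shape, expected_dtype):
    # Hypothetical reconstruction of the shape/dtype guard used above.
    assert tuple(tensor.shape) == tuple(expected_shape), \
        f"{name}: expected shape {tuple(expected_shape)}, got {tuple(tensor.shape)}"
    assert tensor.dtype == expected_dtype, \
        f"{name}: expected dtype {expected_dtype}, got {tensor.dtype}"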
Example 5
    def augment(self, sample, **kwargs):
        """Apply warp data augmentation."""

        if self.skip:
            return sample

        imgs = kwargs['imgs']

        # Apply warp to each tensor.
        for k, v in sample.items():
            v = check_tensor(v)
            v = np.transpose(v, (1, 0, 2, 3))
            if k in imgs:  # Images.
                v = warping.warp3d(v, self.spec[k][-3:], self.rot, self.shear,
                                   self.scale, self.stretch, self.twist)
            else:  # Labels and masks.
                v = warping.warp3dLab(v, self.spec[k][-3:], self.size,
                                      self.rot, self.shear, self.scale,
                                      self.stretch, self.twist)
            sample[k] = np.transpose(v, (1, 0, 2, 3))
        return sample
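A quick sketch of the axis round trip above (hypothetical shapes; that the
warping routines expect z-first input is an inference from the transpose):
samples are stored channel-first as (c, z, y, x), and swapping the first two
axes is its own inverse.

import numpy as np

v = np.zeros((3, 16, 64, 64))       # (c, z, y, x)
w = np.transpose(v, (1, 0, 2, 3))   # (z, c, y, x), presumably for warping
assert np.transpose(w, (1, 0, 2, 3)).shape == v.shape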
Example 6
    def forward_sequence(self, obs, rnn_states, roles=None, terminals=None):
        """
        Forward a sequence through the model. If roles are provided, the
        matching policy and value are returned as
            log_policy: tensor [N, B, *policy_shape]
            ext_value: tensor [N, B]
        Regardless, all per-role policies and values are returned as
            log_policies: tensor [N, B, R, *policy_shape]
            ext_values: tensor [N, B, R]

        Tensors will be uploaded to the correct device.

        :param obs:
        :param rnn_states:
        :param roles: Roles for each agent at each timestep (tensor) [N, B]
        :param terminals:
        :return: results dictionary, and updated rnn_states
        """

        # check the input
        N, B, *obs_shape = obs.shape
        check_tensor("obs", obs, (N, B, *obs_shape), torch.uint8)
        check_tensor("rnn_states", rnn_states, (N, B, 2, self.memory_units),
                     torch.float32)
        if roles is not None:
            check_tensor("roles", roles, (N, B), torch.int64)
        if terminals is not None:
            check_tensor("terminals", terminals, (N, B), torch.int64)

        # upload to device
        obs = obs.to(self.device)
        rnn_states = rnn_states.to(self.device)
        if terminals is not None:
            terminals = terminals.to(self.device)
        if roles is not None:
            roles = roles.to(self.device)

        encoder_output, new_rnn_states = self._forward_sequence(
            obs, rnn_states, terminals)

        policy_outputs = self.policy_head(encoder_output).reshape(
            N, B, self.n_roles, self.n_actions)
        log_policy = torch.log_softmax(policy_outputs, dim=-1)
        ext_value = self.local_ext_value_head(encoder_output)

        result = {
            'role_log_policy': log_policy,
            'role_ext_value': ext_value,
        }

        # these will come out as [N, B, n_players * n_roles] but we need
        # [N, B, n_players, n_roles] for normalization
        if self.role_prediction_head is not None:
            unnormalized_role_predictions = self.role_prediction_head(
                encoder_output).reshape(N, B, self.n_players, self.n_roles)
            role_prediction = torch.log_softmax(unnormalized_role_predictions,
                                                dim=-1)
            result['policy_role_prediction'] = role_prediction

        if roles is not None:
            result['log_policy'] = extract_roles(log_policy, roles)
            result['ext_value'] = extract_roles(ext_value, roles)

        if self.nan_check:
            self._check_for_nans([(k, v) for k, v in result.items()] +
                                 [('rnn_states', new_rnn_states)])

        return result, new_rnn_states
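extract_roles is not shown in these examples. A plausible sketch inferred
from its call sites (assuming roles has shape [N, B] and dtype int64, as
checked above): pick out, for every (N, B) entry, the slice along the role
dimension.

import torch

def extract_roles(values: torch.Tensor, roles: torch.Tensor) -> torch.Tensor:
    """values: [N, B, n_roles, *rest]; roles: [N, B] -> [N, B, *rest]."""
    N, B, n_roles, *rest = values.shape
    index = roles.reshape(N, B, 1, *([1] * len(rest))).expand(N, B, 1, *rest)
    return values.gather(2, index).squeeze(2)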
Example 7
    def forward_sequence(self, obs, rnn_states, terminals=None):
        """Forward a sequence through the model, returning the auxiliary
        predictions (roles, intrinsic value, observations, actions) and the
        updated rnn_states."""

        # check the input
        N, B, *obs_shape = obs.shape
        check_tensor("obs", obs, (N, B, *obs_shape), torch.uint8)
        check_tensor("rnn_states", rnn_states, (N, B, 2, self.memory_units),
                     torch.float)
        if terminals is not None:
            check_tensor("terminals", terminals, (N, B), torch.int64)

        # upload to device
        obs = obs.to(self.device)
        rnn_states = rnn_states.to(self.device)
        if terminals is not None:
            terminals = terminals.to(self.device)

        result = {}

        encoder_output, new_rnn_states = self._forward_sequence(
            obs, rnn_states, terminals)

        # ------------------------------
        # role prediction
        # ------------------------------
        # these will come out as [N, B, n_players * n_roles] but we need
        # [N, B, n_players, n_roles] for normalization
        unnormalized_role_predictions = self.role_prediction_head(
            encoder_output).reshape([N, B, self.n_players, self.n_roles])
        role_prediction = torch.log_softmax(unnormalized_role_predictions,
                                            dim=-1)
        result['role_prediction'] = role_prediction

        if self.role_backwards_prediction_head is not None:
            unnormalized_role_backwards_predictions = self.role_backwards_prediction_head(
                encoder_output).reshape([N, B, self.n_players, self.n_roles])
            role_backwards_prediction = torch.log_softmax(
                unnormalized_role_backwards_predictions, dim=-1)
            result['role_backwards_prediction'] = role_backwards_prediction

        # ------------------------------
        # int value prediction
        # ------------------------------
        int_value = self.local_int_value_head(encoder_output)
        result['role_int_value'] = int_value

        # ------------------------------
        # obs forward prediction
        # ------------------------------

        def restack_observations(x: torch.Tensor):
            """
            Take input of (N*B, c, h*n_players, w) and return (N, B, n_players, c, h, w)
            :return:
            """
            c, h, w = self.input_shape
            assert x.shape == (N * B, c, h * self.n_players, w)
            x = x.reshape(N, B, c, self.n_players * h, w)
            x = x.split(h, dim=3)
            x = torch.stack(x, dim=2)
            return x

        if self.observation_prediction_head is not None:
            # predictions will come out as (N*B, c, h*n_players, w)
            # but we need (N, B, n_players, c, h, w)
            obs_prediction = self.observation_prediction_head(
                encoder_output.reshape(N * B, self.encoder_output_features))
            result['obs_prediction'] = restack_observations(obs_prediction)

        # ------------------------------
        # obs backward prediction
        # ------------------------------

        if self.observation_backwards_prediction_head is not None:
            obs_pp = self.observation_backwards_prediction_head(
                encoder_output.reshape(N * B, self.encoder_output_features))
            result["obs_backwards_prediction"] = restack_observations(obs_pp)

        # ------------------------------
        # action forward prediction
        # ------------------------------

        if self.action_prediction_head is not None:
            unnormalized_action_predictions = self.action_prediction_head(
                encoder_output.reshape(N * B, self.encoder_output_features))
            # result will be [N*B, n_predictions * n_roles * n_actions]
            unnormalized_action_predictions = unnormalized_action_predictions.reshape(
                N, B, self.n_predictions, self.n_roles, self.n_actions)
            action_predictions = torch.log_softmax(
                unnormalized_action_predictions, dim=-1)
            result["action_prediction"] = action_predictions

        if self.action_backwards_prediction_head is not None:
            unnormalized_action_backwards_predictions = self.action_backwards_prediction_head(
                encoder_output.reshape(N * B, self.encoder_output_features))
            # result will be [N*B, n_predictions * n_roles * n_actions]
            unnormalized_action_backwards_predictions = unnormalized_action_backwards_predictions.reshape(
                N, B, self.n_predictions, self.n_roles, self.n_actions)
            action_backwards_predictions = torch.log_softmax(
                unnormalized_action_backwards_predictions, dim=-1)
            result[
                "action_backwards_prediction"] = action_backwards_predictions

        if self.nan_check:
            self._check_for_nans([(k, v) for k, v in result.items()] +
                                 [('rnn_states', new_rnn_states)])

        return result, new_rnn_states
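A quick shape check of the restacking above (hypothetical sizes): splitting
the stacked height h * n_players back out into a player axis.

import torch

N, B, n_players, c, h, w = 5, 2, 4, 3, 16, 16
x = torch.zeros(N * B, c, h * n_players, w)
x = x.reshape(N, B, c, n_players * h, w)
x = torch.stack(x.split(h, dim=3), dim=2)
assert x.shape == (N, B, n_players, c, h, w)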