Example #1
def complex_to_channels(image, requires_grad=False):
    """Convert data from complex to channels."""
    image_out = torch.stack([torch.real(image), torch.imag(image)], dim=-1)
    shape_out = list(image.shape[:-1]) + [image.shape[-1] * 2]
    image_out = torch.reshape(image_out, shape_out)
    return image_out
Example #2
def fft2c(im, name="fft2c", do_orthonorm=True):
    """Centered FFT2 on second and third dimensions."""
    im_out = im
    dims = im_out.shape
    if do_orthonorm:
        fftscale = float(dims[1] * dims[2]) ** 0.5
    else:
        fftscale = 1.0

    # permute the FFT dimensions to be the last ones (faster)
    tpdims = list(range(im_out.dim()))
    tpdims[-1], tpdims[1] = tpdims[1], tpdims[-1]
    tpdims[-2], tpdims[2] = tpdims[2], tpdims[-2]

    im_out = im_out.permute(*tpdims)
    im_out = torch.fft.fftshift(im_out, dim=-1)
    im_out = torch.fft.fftshift(im_out, dim=-2)

    im_out = torch.fft.fft2(im_out) / fftscale

    im_out = torch.fft.fftshift(im_out, dim=-1)
    im_out = torch.fft.fftshift(im_out, dim=-2)
    im_out = im_out.permute(*tpdims)

    return im_out
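A minimal usage sketch for fft2c above, assuming the input is a complex tensor laid out as [batch, H, W, channels] (the layout implied by the dim-1/dim-2 permutation); the tensor name and sizes are made up for illustration.

import torch

# hypothetical batch of 8 complex 256x256 images with 4 coil channels
im = torch.randn(8, 256, 256, 4, dtype=torch.complex64)
kspace = fft2c(im)      # centered 2D FFT over the H and W dimensions
print(kspace.shape)     # torch.Size([8, 256, 256, 4])
# with do_orthonorm=True (the default) the transform is unitary, so
# im.abs().pow(2).sum() and kspace.abs().pow(2).sum() agree up to rounding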
Example #3
def interleave(pt_output, data_format):
    if data_format == "channels_last":
        output_shape = pt_output.shape
        s = output_shape[3]
        realOut = pt_output[:, :, :, 0:s // 2]
        imagOut = pt_output[:, :, :, s // 2:s]
        pt_output = torch.cat([realOut, imagOut], 2)
        pt_output = torch.reshape(pt_output, output_shape)
    else:
        output_shape = pt_output.shape
        s = output_shape[1]
        realOut = pt_output[:, 0:s // 2, :, :]
        imagOut = pt_output[:, s // 2:s, :, :]
        pt_output = torch.cat([realOut, imagOut], 0)
        pt_output = torch.reshape(pt_output, output_shape)
    return pt_output
Example #4
    def call(self, q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat,
             r_w_bias, r_r_bias, r_s_bias, attn_mask):
        # content-based attention score
        # einsum indices: i = query len, j = key len, b = batch, n = heads, d = head dim
        ac = torch.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h)

        # position based attention score
        bd = torch.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r)
        bd = rel_shift(bd, klen=ac.shape[1])

        # segment-based attention score
        if seg_mat is None:
            ef = 0
        else:
            ef = torch.einsum('ibnd,snd->isbn', q_head + r_s_bias, seg_embed)
            seg_mask = seg_mat.unsqueeze(3).to(torch.bool).expand_as(bd)
            ef = torch.where(seg_mask,
                             ef[:, 1:, :, :].expand_as(bd),
                             ef[:, :1, :, :].expand_as(bd))

        # merges attention scores and performs masking
        attn_score = (ac + bd + ef) * self.scale
        if attn_mask is not None:
            attn_score = attn_score - 1e30 * attn_mask

        # attention probability
        attn_prob = functional.softmax(attn_score, dim=1)
        attn_prob = self.attention_probs_dropout(attn_prob)

        # attention output
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h)
Example #5
File: deduce.py  Project: quantapix/qnarre
 def forward(self, inputs):
     cfg = self.cfg
     x, tgt = inputs
     if cfg.brackets:
         y = torch.zeros_like(tgt, dtype=torch.float32)
         bs = cfg.brackets + [cfg.num_toks]
         b = 0
         for i, e in enumerate(bs):
             msk = (tgt >= (b or 1)) & (tgt < e)
             mt = tgt[msk] - b
             rows = torch.arange(mt.shape[0], device=mt.device)
             if i == 0:
                 logp = torch.log_softmax(self.logits(x, i), dim=-1)
                 mp = logp[msk]
                 u = mp[rows, mt]
             else:
                 mp = logp[msk]
                 u = mp[:, bs[i - 1]]
                 mc = x[msk][None]
                 mp = torch.log_softmax(self.logits(mc, i), dim=-1)
                 mp = torch.squeeze(mp, 0)
                 u = u + mp[rows, mt]
             y = y.index_put(torch.where(msk), -u, accumulate=True)
             b = e
     else:
         y = self.logits(x)
         # F is torch.nn.functional; per-token loss over the full softmax
         loss = F.cross_entropy(y.reshape(-1, y.shape[-1]),
                                tgt.reshape(-1),
                                reduction='none')
     # self.add_loss(lambda: loss.mean())
     return y
Example #6
def batch_flatten(x):
    """
    Flatten the tensor except the first dimension.
    """
    # torch shapes are always concrete, so a single reshape suffices
    return torch.reshape(x, (x.shape[0], -1))
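A quick usage sketch for batch_flatten; the shapes are illustrative only.

import torch

x = torch.zeros(32, 3, 28, 28)   # e.g. a batch of images [N, C, H, W]
flat = batch_flatten(x)
print(flat.shape)                # torch.Size([32, 2352]) since 3 * 28 * 28 = 2352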
Example #7
def channels_to_complex(image, requires_grad=False):
    """Convert data from channels to complex."""
    image_out = torch.reshape(image, [-1, 2])
    image_out = torch.complex(image_out[:, 0], image_out[:, 1])
    shape_out = list(image.shape[:-1]) + [image.shape[-1] // 2]
    image_out = torch.reshape(image_out, shape_out)
    return image_out
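A round-trip sanity check, assuming the complex_to_channels helper from Example #1 is also in scope; the shapes are made up for illustration.

import torch

img = torch.randn(2, 64, 64, 8, dtype=torch.complex64)
chans = complex_to_channels(img)       # real/imag interleaved along the last axis
print(chans.shape)                     # torch.Size([2, 64, 64, 16])
restored = channels_to_complex(chans)
print(torch.equal(restored, img))      # True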
Example #8
 def forward(self, t_img1, t_img2):
     t_pyr1 = self.make_laplacian_pyramid(t_img1, self.max_levels)
     t_pyr2 = self.make_laplacian_pyramid(t_img2, self.max_levels)
     t_losses = [
         torch.sum(torch.abs(a - b)) / a.numel()
         for a, b in zip(t_pyr1, t_pyr2)
     ]
     t_loss = sum(t_losses) * t_img1.shape[0]
     return t_loss
Example #9
 def get_data_shape(self):
   """
   Gets array shape of datapoints in this dataset.
   """
   if not len(self.metadata_df):
     raise ValueError("No data in dataset.")
   sample_X = load_from_disk(
       os.path.join(self.data_dir,
                    next(self.metadata_df.iterrows())[1]['X']))
    return sample_X.shape[1:]
Example #10
def mmd2_rbf(X, t, p, sig):
    """ Computes the l2-RBF MMD for X given t """
    X = X.squeeze(0)

    # row indices of treated (t > 0) and control (t < 1) samples
    it = torch.where(t > 0)[0]
    ic = torch.where(t < 1)[0]

    Xc = torch.index_select(X, 0, ic)
    Xt = torch.index_select(X, 0, it)

    Kcc = torch.exp(-pdist2sq(Xc, Xc) / np.square(sig))
    Kct = torch.exp(-pdist2sq(Xc, Xt) / np.square(sig))
    Ktt = torch.exp(-pdist2sq(Xt, Xt) / np.square(sig))
    m = float(Xc.shape[0])
    n = float(Xt.shape[0])

    mmd = np.square(1.0 - p) / (m * (m - 1.0)) * (torch.sum(Kcc) - m)
    mmd = mmd + np.square(p) / (n * (n - 1.0)) * (torch.sum(Ktt) - n)
    mmd = mmd - 2.0 * p * (1.0 - p) / (m * n) * torch.sum(Kct)
    mmd = 4.0 * mmd

    return mmd
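mmd2_rbf relies on a pdist2sq helper that is not shown in the snippet. A minimal sketch of the usual definition (pairwise squared Euclidean distances between the rows of two matrices) is below; the implementation in the source project may differ.

import torch

def pdist2sq(A, B):
    # pairwise squared distances between rows of A [m, d] and rows of B [n, d]:
    # ||a||^2 - 2 a.b + ||b||^2, returned as an [m, n] matrix
    na = torch.sum(A ** 2, dim=1, keepdim=True)        # [m, 1]
    nb = torch.sum(B ** 2, dim=1, keepdim=True).t()    # [1, n]
    return na - 2.0 * torch.matmul(A, B.t()) + nb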
Example #11
File: embed.py  Project: quantapix/qnarre
 def lookup(self, x, i):
     t = self.weights[i]
     if self.one_hot:
         y = F.one_hot(x, t.shape[0]).to(t.dtype)
         y = torch.einsum("np,in->ip", t, y)
     else:
         cfg = self.cfg
         y = F.embedding(x, t, cfg.PAD, cfg.max_norm, cfg.norm_type,
                         cfg.scale_grad, cfg.sparse)
     a = self.adjusts[i]
     if a is not None:
         y = torch.einsum("ip,ph->ih", y, a)
     return y
Example #12
    def get_features(self, x):
        x_has_timesteps = (len(x.shape) == 5)
        if x_has_timesteps:
            sh = x.shape
            x = flatten_two_dims(x)

        x = (x - self.ob_mean) / self.ob_std
        x = np.transpose(x, [i for i in range(len(x.shape) - 3)] +
                         [-1, -3, -2])  # [N, H, W, C] --> [N, C, H, W]
        x = self.features_model(torch.tensor(x))

        if x_has_timesteps:
            x = unflatten_first_dim(x, sh)
        return x
Example #13
 def _build_likelihood(self):
     L = th.sum(self.E_log_p_Y(self.X, self.Y))
     KL = sum(layer.KL() for layer in self.layers)
     # num_data / minibatch size rescales the minibatch log-likelihood
     # to an unbiased estimate of the full-data sum
     scale = th.as_tensor(self.num_data, dtype=float_type)
     scale = scale / self.X.shape[0]  # minibatch size
     return L * scale - KL
Example #14
    def call(self, inputs):
        """Implements call() for the layer."""
        inp_k = inputs['inp_k']
        seg_id = inputs['seg_id']
        input_mask = inputs['input_mask']
        mems = inputs['mems']
        perm_mask = inputs['perm_mask']
        target_mapping = inputs['target_mapping']
        inp_q = inputs['inp_q']

        new_mems = []

        bsz = inp_k.shape[1]

        qlen = inp_k.shape[0]

        mlen = mems[0].shape[0] if mems is not None else 0
        klen = mlen + qlen

        ##### Attention mask
        # causal attention mask
        if self.attn_type == 'uni':
            attn_mask = _create_mask(qlen, mlen, self.tf_float,
                                     self.same_length)
            # pylint: enable=protected-access
            attn_mask = attn_mask[:, :, None, None]
        elif self.attn_type == 'bi':
            attn_mask = None
        else:
            raise ValueError('Unsupported attention type: {}'.format(
                self.attn_type))

        # data mask: input mask & perm mask
        if input_mask is not None and perm_mask is not None:
            data_mask = input_mask[None] + perm_mask

        elif input_mask is not None and perm_mask is None:
            data_mask = input_mask[None]
        elif input_mask is None and perm_mask is not None:
            data_mask = perm_mask
        else:
            data_mask = None

        if data_mask is not None:
            # all mems can be attended to
            mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz],
                                    dtype=self.tf_float)
            data_mask = torch.cat([mems_mask, data_mask], 1)
            if attn_mask is None:
                attn_mask = data_mask[:, :, :, None]
            else:
                attn_mask += data_mask[:, :, :, None]

        if attn_mask is not None:
            attn_mask = (attn_mask > 0).to(self.tf_float)

        if attn_mask is not None:
            non_tgt_mask = -torch.eye(qlen, dtype=self.tf_float)
            non_tgt_mask = torch.cat(
                [torch.zeros([qlen, mlen], dtype=self.tf_float), non_tgt_mask],
                dim=-1)
            non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) >
                            0).to(self.tf_float)
        else:
            non_tgt_mask = None

        word_emb_k = self.embedding_lookup(inp_k)

        if inp_q is not None:
            if target_mapping is not None:
                word_emb_q = torch.tile(self.mask_emb,
                                        (target_mapping.shape[0], bsz, 1))
            else:
                inp_q_ext = inp_q[:, :, None]
                word_emb_q = inp_q_ext * self.mask_emb + (
                    1 - inp_q_ext) * word_emb_k

        output_h = self.h_dropout(word_emb_k)
        output_g = None
        if inp_q is not None:
            output_g = self.g_dropout(word_emb_q)

        ##### Segment embedding
        if seg_id is not None:

            # Convert `seg_id` to one-hot `seg_mat`

            mem_pad = torch.zeros([mlen, bsz], dtype=seg_id.dtype)

            cat_id = torch.cat([mem_pad, seg_id], 0)

            if self.use_cls_mask:
                # `1` indicates not in the same segment [qlen x klen x bsz]
                # seg_id: [qlen x bsz] & cat_id: [klen x bsz]
                cls_mat = torch.logical_or(
                    torch.eq(seg_id, data_utils.SEG_ID_CLS)[:, None],
                    torch.eq(cat_id, data_utils.SEG_ID_CLS)[None, :])
                seg_mat = torch.eq(seg_id[:, None], cat_id[None, :])
                seg_mat = torch.logical_or(cls_mat, seg_mat)
            else:
                seg_mat = torch.logical_not(
                    torch.eq(seg_id[:, None], cat_id[None, :]))
        else:
            seg_mat = None

        dtype = self.tf_float
        freq_seq = torch.arange(0, self.d_model, 2.0)
        if dtype is not None and dtype != torch.float32:
            freq_seq = freq_seq.to(dtype)

        if self.attn_type == 'bi':
            beg, end = klen, -qlen
        elif self.attn_type == 'uni':
            beg, end = klen, -1
        else:
            raise ValueError('Unknown `attn_type` {}.'.format(self.attn_type))

        if self.bi_data:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            bwd_pos_seq = torch.arange(-beg, -end, 1.0)

            if dtype is not None and dtype != torch.float32:
                fwd_pos_seq = fwd_pos_seq.to(dtype)
                bwd_pos_seq = bwd_pos_seq.to(dtype)

            if self.clamp_len > 0:
                fwd_pos_seq = torch.clamp(fwd_pos_seq, -self.clamp_len,
                                          self.clamp_len)
                bwd_pos_seq = torch.clamp(bwd_pos_seq, -self.clamp_len,
                                          self.clamp_len)

            if bsz is not None:
                fwd_pos_emb = self.fwd_position_embedding(
                    fwd_pos_seq, bsz // 2)
                bwd_pos_emb = self.bwd_position_embedding(
                    bwd_pos_seq, bsz // 2)
            else:
                fwd_pos_emb = self.fwd_position_embedding(fwd_pos_seq, None)
                bwd_pos_emb = self.bwd_position_embedding(bwd_pos_seq, None)

            pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
        else:
            fwd_pos_seq = torch.arange(beg, end, -1.0)
            if dtype is not None and dtype != torch.float32:
                fwd_pos_seq = fwd_pos_seq.to(dtype)
            if self.clamp_len > 0:
                fwd_pos_seq = torch.clamp(fwd_pos_seq, -self.clamp_len,
                                          self.clamp_len)

            pos_emb = self.fwd_position_embedding(fwd_pos_seq, bsz)

        pos_emb = self.emb_dropout(pos_emb)

        if mems is None:
            mems = [None] * self.n_layer
        for i in range(self.n_layer):
            # cache new mems
            new_mems.append(
                _cache_mem(output_h, mems[i], self.mem_len, self.reuse_len))
            # pylint: enable=protected-access

            # segment bias
            if seg_id is None:
                r_s_bias_i = None
                seg_embed_i = None
            else:
                r_s_bias_i = (self.r_s_bias
                              if not self.untie_r else self.r_s_bias[i])
                seg_embed_i = self.seg_embed[i]

            ffn_layer = self.h_positionwise_ffn_layers[i]
            attention_layer = self.rel_multihead_layers[i]
            output_h, output_g = attention_layer(
                h=output_h,
                g=output_g,
                r=pos_emb,
                r_w_bias=self.r_w_bias
                if not self.untie_r else self.r_w_bias[i],
                r_r_bias=self.r_r_bias
                if not self.untie_r else self.r_r_bias[i],
                seg_mat=seg_mat,
                r_s_bias=r_s_bias_i,
                seg_embed=seg_embed_i,
                attn_mask_h=non_tgt_mask,
                attn_mask_g=attn_mask,
                mems=mems[i],
                target_mapping=target_mapping)
            output_h = ffn_layer(output_h)
            if output_g is not None:
                output_g = ffn_layer(output_g)

        if inp_q is not None:
            output = output_g
        else:
            output = output_h

        return output
Example #15
File: saliency.py  Project: wnstlr/LNets
def auc_degredation_measure(model, x, sal):
    # Compute the AUC measure of the performance drop using the saliency map
    D = sal.shape