Code example #1
    def __getitem__(self, index):
        A_path = self.files_A[index % len(self.files_A)]
        image_A = Image.open(A_path)

        # Convert grayscale images to rgb
        if image_A.mode != "RGB":
            image_A = to_rgb(image_A)

        item_A = self.transform(image_A)
        item_A = jt.array(item_A)

        item_A_l = {}
        regions = ['eyel','eyer','nose','mouth']
        basen = os.path.basename(A_path)[:-4]
        lm_path = os.path.join(self.lmdir, basen+'.txt')
        feats = getfeats(lm_path)
        mouth_x = int((feats[3,0]+feats[4,0])/2.0)
        mouth_y = int((feats[3,1]+feats[4,1])/2.0)
        ratio = self.load_h // 256
        rhs = np.array([EYE_H,EYE_H,NOSE_H,MOUTH_H]) * ratio
        rws = np.array([EYE_W,EYE_W,NOSE_W,MOUTH_W]) * ratio
        center = np.array([[feats[0,0],feats[0,1]-4*ratio],[feats[1,0],feats[1,1]-4*ratio],[feats[2,0],feats[2,1]-rhs[2]//2+16*ratio],[mouth_x,mouth_y]])
        soft_border_mask4 = []
        for i in range(4):
            xb = [np.zeros(rhs[i]),np.ones(rhs[i])*(rws[i]-1)]
            yb = [np.zeros(rws[i]),np.ones(rws[i])*(rhs[i]-1)]
            soft_border_mask = getSoft([rhs[i],rws[i]],xb,yb)
            soft_border_mask = jt.array(soft_border_mask).unsqueeze(0).float()
            soft_border_mask4.append(soft_border_mask)
        for i in range(4):
            item_A_l[regions[i]+'_A'] = item_A[:,int(center[i,1]-rhs[i]/2):int(center[i,1]+rhs[i]/2),int(center[i,0]-rws[i]/2):int(center[i,0]+rws[i]/2)] * soft_border_mask4[i].repeat(3,1,1)
        
        cmasks = []
        for i in range(4):
            cmaskpath = os.path.join(self.cmaskdir.format(regions[i]),basen+'.png')
            im_cmask = Image.open(cmaskpath)
            cmask0 = self.transform_mask(im_cmask)
            cmask0 = jt.array(cmask0)
            cmask0 = (cmask0 >= 0.5).float()
            cmask = cmask0.clone()
            cmask = cmask[:,int(center[i,1]-rhs[i]/2):int(center[i,1]+rhs[i]/2),int(center[i,0]-rws[i]/2):int(center[i,0]+rws[i]/2)]
            cmasks.append(cmask)

        mask = jt.ones([1,item_A.shape[1],item_A.shape[2]]) # mask out eyes, nose, mouth
        for i in range(4):
            mask[:,int(center[i,1]-rhs[i]/2):int(center[i,1]+rhs[i]/2),int(center[i,0]-rws[i]/2):int(center[i,0]+rws[i]/2)] = 0
        imgsize = self.load_h
        maskn = mask[0].numpy()
        masks = [np.ones([imgsize,imgsize]),np.ones([imgsize,imgsize]),np.ones([imgsize,imgsize]),np.ones([imgsize,imgsize])]
        masks[0][1:] = maskn[:-1]
        masks[1][:-1] = maskn[1:]
        masks[2][:,1:] = maskn[:,:-1]
        masks[3][:,:-1] = maskn[:,1:]
        masks2 = [maskn-e for e in masks]
        bound = np.minimum.reduce(masks2)
        bound = -bound
        xb = []
        yb = []
        for i in range(4):
            xbi = [center[i,0]-rws[i]/2, center[i,0]+rws[i]/2-1]
            ybi = [center[i,1]-rhs[i]/2, center[i,1]+rhs[i]/2-1]
            for j in range(2):
                maskx = bound[:,int(xbi[j])]
                masky = bound[int(ybi[j]),:]
                tmp_a = maskx * xbi[j]
                tmp_b = 1-maskx
                xb += [tmp_b*10000 + tmp_a]

                tmp_a = masky * ybi[j]
                tmp_b = 1-masky
                yb += [tmp_b*10000 + tmp_a]
        soft = 1-getSoft([imgsize,imgsize],xb,yb)
        soft = jt.array(soft).unsqueeze(0).float()
        mask = (jt.ones(mask.shape)-mask)*soft + mask

        bgpath = os.path.join(self.maskdir, basen+'.png')
        im_bg = Image.open(bgpath)
        mask2 = self.transform_mask(im_bg) # mask out background
        mask2 = jt.array(mask2)
        mask2 = (mask2 >= 0.5).float() # foreground: 1, background: 0
        item_A_l['hair_A'] = (item_A/2+0.5) * mask.repeat(3,1,1) * mask2.repeat(3,1,1) * 2 - 1
        item_A_l['bg_A'] = (item_A/2+0.5) * (jt.ones(mask2.shape)-mask2).repeat(3,1,1) * 2 - 1

        return item_A, item_A_l['eyel_A'], item_A_l['eyer_A'], item_A_l['nose_A'], item_A_l['mouth_A'], item_A_l['hair_A'], item_A_l['bg_A'], mask, mask2, center, cmasks[0], cmasks[1], cmasks[2], cmasks[3]
Code example #2
File: test_setitem.py  Project: lzhengning/jittor
    def test_setitem_inplace_case2(self):
        # test setitem on a non-contiguous slice of the first dim
        a = jt.zeros((3, ))
        a[0::2] = jt.ones((2, ))
        assert a.data[2] == 1
Code example #3
def main():
    model = PSPNet()
    x = jt.ones([2, 3, 513, 513])
    y = model(x)
    print(y.shape)
    _ = y.data  # fetching .data forces Jittor's lazy execution to actually run the graph
Code example #4
File: cgan.py  Project: zhourunlong/ComputerGraphics
        np.array([num for _ in range(n_row)
                  for num in range(n_row)])).float32().stop_grad()
    gen_imgs = generator(z, labels)
    save_image(gen_imgs.numpy(), "images/%d.png" % batches_done, nrow=n_row)


# ----------
#  Training
# ----------

for epoch in range(opt.n_epochs):
    for i, (imgs, labels) in enumerate(dataloader):
        batch_size = imgs.shape[0]

        # Adversarial ground truths (constant labels; stop_grad keeps them out of autograd)
        valid = jt.ones([batch_size, 1]).float32().stop_grad()
        fake = jt.zeros([batch_size, 1]).float32().stop_grad()

        # Configure input
        real_imgs = jt.array(imgs)
        labels = jt.array(labels)

        # -----------------
        #  Train Generator
        # -----------------

        # Sample noise and labels as generator input
        z = jt.array(np.random.normal(0, 1,
                                      (batch_size, opt.latent_dim))).float32()
        gen_labels = jt.array(np.random.randint(0, opt.n_classes,
                                                batch_size)).float32()
Code example #5
def read_planetoid_data(folder, prefix):
    names = ['x', 'tx', 'allx', 'y', 'ty', 'ally', 'graph', 'test.index']
    items = [read_file(folder, prefix, name) for name in names]
    x, tx, allx, y, ty, ally, graph, test_index = items
    train_index = jt.arange(y.size(0), dtype=Var.int32)
    val_index = jt.arange(y.size(0), y.size(0) + 500, dtype=Var.int32)
    sorted_test_index = test_index.argsort()[1]

    if prefix.lower() == 'citeseer':
        # There are some isolated nodes in the Citeseer graph, resulting in
        # non-consecutive test indices. We need to identify them and add them
        # as zero vectors to `tx` and `ty`.
        len_test_indices = (test_index.max() - test_index.min()).item() + 1

        tx_ext = jt.zeros((len_test_indices, tx.size(1)))
        tx_ext[sorted_test_index - test_index.min(), :] = tx
        ty_ext = jt.zeros((len_test_indices, ty.size(1)))
        ty_ext[sorted_test_index - test_index.min(), :] = ty

        tx, ty = tx_ext, ty_ext

    if prefix.lower() == 'nell.0.001':
        tx_ext = jt.zeros((len(graph) - allx.size(0), x.size(1)))
        tx_ext[sorted_test_index - allx.size(0)] = tx

        ty_ext = jt.zeros((len(graph) - ally.size(0), y.size(1)))
        ty_ext[sorted_test_index - ally.size(0)] = ty

        tx, ty = tx_ext, ty_ext

        x = jt.concat([allx, tx], dim=0)
        x[test_index] = x[sorted_test_index]

        # Creating feature vectors for relations.
        row, col, value = SparseTensor.from_dense(x).coo()
        rows, cols, values = [row], [col], [value]

        mask1 = index_to_mask(test_index, size=len(graph))
        mask2 = index_to_mask(jt.arange(allx.size(0), len(graph)),
                              size=len(graph))
        mask = jt.logical_or(jt.logical_not(mask1), jt.logical_not(mask2))
        isolated_index = mask.nonzero(as_tuple=False).view(-1)[allx.size(0):]

        rows += [isolated_index]
        cols += [jt.arange(isolated_index.size(0)) + x.size(1)]
        values += [jt.ones((isolated_index.size(0)))]

        x = SparseTensor(row=jt.concat(rows),
                         col=jt.concat(cols),
                         value=jt.concat(values))
    else:
        x = jt.concat([allx, tx], dim=0)
        x[test_index] = x[sorted_test_index]
    y = jt.concat([ally, ty], dim=0).argmax(dim=1)[0]
    y[test_index] = y[sorted_test_index]

    train_mask = index_to_mask(train_index, size=y.size(0))
    val_mask = index_to_mask(val_index, size=y.size(0))
    test_mask = index_to_mask(test_index, size=y.size(0))

    edge_index = edge_index_from_dict(graph, num_nodes=y.size(0))

    data = Data(x=x, edge_index=edge_index, y=y)
    data.train_mask = train_mask
    data.val_mask = val_mask
    data.test_mask = test_mask
    return data
Code example #6
File: nn.py  Project: pc8504/jittor
def sign(x):
    one = jt.ones(x.shape)
    x = jt.ternary(x > 0, one, x)
    return jt.ternary(x < 0, -one, x)
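A quick usage sketch to make the two-step jt.ternary construction concrete: positives are mapped to 1 first, then negatives to -1, while zero passes through unchanged (a minimal illustration, not from the source project):

import jittor as jt

x = jt.array([-2.0, 0.0, 3.5])
print(sign(x).data)  # [-1.  0.  1.]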
Code example #7
def addone_with_mask(A, mask):
    return ((A/2+0.5)*mask + (jt.ones(mask.shape)-mask))*2-1
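The function assumes images normalized to [-1, 1]: A is shifted into [0, 1], pixels where mask is 0 are replaced by 1 (white), and the result is scaled back to [-1, 1]. A tiny hand-checkable sketch (illustrative values only):

import jittor as jt

A = jt.array([-1.0, 0.0, 1.0])
mask = jt.array([1.0, 0.0, 1.0])
print(addone_with_mask(A, mask).data)  # [-1.  1.  1.]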
Code example #8
    def test_view(self):
        a = jt.ones([2,3,4])
        assert a.view(2,-1).shape == [2,12]
Code example #9
    def test_permute(self):
        a = jt.ones([2, 3, 4])
        assert a.permute().shape == [4, 3, 2]
        assert a.permute(0, 2, 1).shape == [2, 4, 3]
Code example #10
def inverse_mask(mask):
    return jt.ones(mask.shape)-mask
Code example #11
    def execute(self, x, img_size, scale=1.0):
        """Forward Region Proposal Network.

        Here are notations.

        * :math:`N` is batch size.
        * :math:`C` channel size of the input.
        * :math:`H` and :math:`W` are height and width of the input feature.
        * :math:`A` is number of anchors assigned to each pixel.

        Args:
            x : The features extracted from the images.
                Its shape is :math:`(N, C, H, W)`.
            img_size (tuple of ints): A tuple :obj:`height, width`,
                which contains image size after scaling.

        Returns:
            This is a tuple of the following five values.

            * **rpn_locs**: Predicted bounding box offsets and scales for \
                anchors. Its shape is :math:`(N, H W A, 4)`.
            * **rpn_scores**:  Predicted foreground scores for \
                anchors. Its shape is :math:`(N, H W A, 2)`.
            * **rois**: A bounding box array containing coordinates of \
                proposal boxes.  This is a concatenation of bounding box \
                arrays from multiple images in the batch. \
                Its shape is :math:`(R', 4)`. Given :math:`R_i` predicted \
                bounding boxes from the :math:`i` th image, \
                :math:`R' = \\sum _{i=1} ^ N R_i`.
            * **roi_indices**: An array containing indices of the images \
                to which the RoIs correspond. Its shape is :math:`(R',)`.
            * **anchor**: Coordinates of enumerated shifted anchors. \
                Its shape is :math:`(H W A, 4)`.

        """
        n, _, hh, ww = x.shape
        anchor = _enumerate_shifted_anchor(self.anchor_base, self.feat_stride,
                                           hh, ww)
        anchor = jt.array(anchor)

        n_anchor = anchor.shape[0] // (hh * ww)
        h = nn.relu(self.conv1(x))

        rpn_locs = self.loc(h)

        rpn_locs = rpn_locs.permute(0, 2, 3, 1).view(n, -1, 4)
        rpn_scores = self.score(h)
        rpn_scores = rpn_scores.permute(0, 2, 3, 1)
        rpn_softmax_scores = nn.softmax(rpn_scores.view(
            n, hh, ww, n_anchor, 2),
                                        dim=4)
        rpn_fg_scores = rpn_softmax_scores[:, :, :, :, 1]
        rpn_fg_scores = rpn_fg_scores.view(n, -1)
        rpn_scores = rpn_scores.view(n, -1, 2)
        rois = []
        roi_indices = []
        for i in range(n):
            roi = self.proposal_layer(rpn_locs[i], rpn_fg_scores[i], anchor,
                                      img_size, scale)
            batch_index = i * jt.ones((len(roi), ), dtype='int32')
            rois.append(roi)
            roi_indices.append(batch_index)

        rois = jt.contrib.concat(rois, dim=0)
        roi_indices = jt.contrib.concat(roi_indices, dim=0)
        return rpn_locs, rpn_scores, rois, roi_indices, anchor
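To make the documented shapes concrete, here is a hedged sanity check against the docstring. It assumes an already-constructed network instance named rpn with 9 anchors per location; the feature size and channel count are illustrative, not from the source:

import jittor as jt

feat = jt.ones([1, 512, 38, 38])  # (N, C, H, W)
rpn_locs, rpn_scores, rois, roi_indices, anchor = rpn(feat, img_size=(600, 600))
assert rpn_locs.shape == [1, 38 * 38 * 9, 4]    # (N, H W A, 4)
assert rpn_scores.shape == [1, 38 * 38 * 9, 2]  # (N, H W A, 2)
assert anchor.shape == [38 * 38 * 9, 4]         # (H W A, 4)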
Code example #12
def build_targets(p, targets, model):
    # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
    det = model.model[-1]  # Detect() module
    na, nt = det.na, targets.shape[0]  # number of anchors, targets
    tcls, tbox, indices, anch = [], [], [], []
    gain = jt.ones((7, ))  # normalized to gridspace gain
    ai = jt.index(
        (na, ),
        dim=0).float().view(na, 1).repeat(1,
                                          nt)  # same as .repeat_interleave(nt)

    targets = jt.contrib.concat((targets.repeat(na, 1, 1), ai[:, :, None]),
                                2)  # append anchor indices

    g = 0.5  # bias
    off = jt.array(
        [
            [0, 0],
            # [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
        ], ).float() * g  # offsets

    for i in range(det.nl):
        anchors = det.anchors[i]
        gain[2:6] = jt.array(
            [p[i].shape[3], p[i].shape[2], p[i].shape[3],
             p[i].shape[2]])  # xyxy gain

        # Match targets to anchors
        t = targets * gain

        if nt:
            # Matches
            r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
            j = jt.maximum(r, 1. / r).max(2) < model.hyp['anchor_t']  # compare
            # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
            t = t[j]  # filter

            # Offsets
            gxy = t[:, 2:4]  # grid xy
            gxi = gain[jt.array([2, 3])] - gxy  # inverse
            # j, k = jt.logical_and((gxy % 1. < g), (gxy > 1.)).int().transpose(1,0).bool()
            # l, m = jt.logical_and((gxi % 1. < g),(gxi > 1.)).int().transpose(1,0).bool()
            jk = jt.logical_and((gxy % 1. < g), (gxy > 1.))
            lm = jt.logical_and((gxi % 1. < g), (gxi > 1.))
            j, k = jk[:, 0], jk[:, 1]
            l, m = lm[:, 0], lm[:, 1]

            j = jt.stack((jt.ones_like(j), ))
            t = t.repeat((off.shape[0], 1, 1))[j]
            offsets = (jt.zeros_like(gxy)[None] + off[:, None])[j]
        else:
            t = targets[0]
            offsets = 0

        # Define
        b = t[:, 0].int32()
        c = t[:, 1].int32()  # image, class
        gxy = t[:, 2:4]  # grid xy
        gwh = t[:, 4:6]  # grid wh
        gij = (gxy - offsets).int32()
        gi, gj = gij[:, 0], gij[:, 1]  # grid xy indices

        # Append
        a = t[:, 6].int32()  # anchor indices
        indices.append((b, a, gj.clamp(0, gain[3] - 1),
                        gi.clamp(0,
                                 gain[2] - 1)))  # image, anchor, grid indices
        tbox.append(jt.contrib.concat((gxy - gij, gwh), 1))  # box
        anch.append(anchors[a])  # anchors
        tcls.append(c)  # class

    return tcls, tbox, indices, anch
Code example #13
    def __getitem__(self, index):

        AB_path = self.files[index % len(self.files)]
        img = Image.open(AB_path)
        w, h = img.size
        img_A = img.crop((0, 0, w / 2, h))
        img_B = img.crop((w / 2, 0, w, h))

        flip = random.random() > 0.5

        params = {'load_h': self.load_h, 'load_w': self.load_w, 'flip': flip}
        transform_A = get_transform(params)
        transform_B = get_transform(params, gray=True)
        transform_mask = get_transform(params, gray=True, mask=True)

        item_A = transform_A(img_A)
        item_A = jt.array(item_A)
        item_B = transform_B(img_B)
        item_B = jt.array(item_B)

        item_A_l = {}
        item_B_l = {}
        regions = ['eyel','eyer','nose','mouth']
        basen = os.path.basename(AB_path)[:-4]
        lm_path = os.path.join(self.lmdir, basen+'.txt')
        feats = getfeats(lm_path)
        if flip:
            for i in range(5):
                feats[i,0] = self.load_w - feats[i,0] - 1
            tmp = [feats[0,0],feats[0,1]]
            feats[0,:] = [feats[1,0],feats[1,1]]
            feats[1,:] = tmp
        mouth_x = int((feats[3,0]+feats[4,0])/2.0)
        mouth_y = int((feats[3,1]+feats[4,1])/2.0)
        ratio = self.load_h // 256
        rhs = np.array([EYE_H,EYE_H,NOSE_H,MOUTH_H]) * ratio
        rws = np.array([EYE_W,EYE_W,NOSE_W,MOUTH_W]) * ratio
        center = np.array([[feats[0,0],feats[0,1]-4*ratio],[feats[1,0],feats[1,1]-4*ratio],[feats[2,0],feats[2,1]-rhs[2]//2+16*ratio],[mouth_x,mouth_y]])
        soft_border_mask4 = []
        for i in range(4):
            xb = [np.zeros(rhs[i]),np.ones(rhs[i])*(rws[i]-1)]
            yb = [np.zeros(rws[i]),np.ones(rws[i])*(rhs[i]-1)]
            soft_border_mask = getSoft([rhs[i],rws[i]],xb,yb)
            soft_border_mask = jt.array(soft_border_mask).unsqueeze(0).float()
            soft_border_mask4.append(soft_border_mask)
        for i in range(4):
            item_A_l[regions[i]+'_A'] = item_A[:,int(center[i,1]-rhs[i]/2):int(center[i,1]+rhs[i]/2),int(center[i,0]-rws[i]/2):int(center[i,0]+rws[i]/2)] * soft_border_mask4[i].repeat(3,1,1)
            item_B_l[regions[i]+'_B'] = item_B[:,int(center[i,1]-rhs[i]/2):int(center[i,1]+rhs[i]/2),int(center[i,0]-rws[i]/2):int(center[i,0]+rws[i]/2)] * soft_border_mask4[i]
        
        cmasks = []
        for i in range(4):
            if not flip or i in [2,3]:
                cmaskpath = os.path.join(self.cmaskdir.format(regions[i]),basen+'.png')
            else:
                # horizontally flipped image: the left/right eye crops swap, so load the swapped mask
                cmaskpath = os.path.join(self.cmaskdir.format(regions[1-i]),basen+'.png')
            im_cmask = Image.open(cmaskpath)
            cmask0 = transform_mask(im_cmask)
            cmask0 = jt.array(cmask0)
            cmask0 = (cmask0 >= 0.5).float()
            cmask = cmask0.clone()
            cmask = cmask[:,int(center[i,1]-rhs[i]/2):int(center[i,1]+rhs[i]/2),int(center[i,0]-rws[i]/2):int(center[i,0]+rws[i]/2)]
            cmasks.append(cmask)
        
        mask = jt.ones([1,item_A.shape[1],item_A.shape[2]]) # mask out eyes, nose, mouth
        for i in range(4):
            mask[:,int(center[i,1]-rhs[i]/2):int(center[i,1]+rhs[i]/2),int(center[i,0]-rws[i]/2):int(center[i,0]+rws[i]/2)] = 0
        imgsize = self.load_h
        maskn = mask[0].numpy()
        masks = [np.ones([imgsize,imgsize]),np.ones([imgsize,imgsize]),np.ones([imgsize,imgsize]),np.ones([imgsize,imgsize])]
        masks[0][1:] = maskn[:-1]
        masks[1][:-1] = maskn[1:]
        masks[2][:,1:] = maskn[:,:-1]
        masks[3][:,:-1] = maskn[:,1:]
        masks2 = [maskn-e for e in masks]
        bound = np.minimum.reduce(masks2)
        bound = -bound
        xb = []
        yb = []
        for i in range(4):
            xbi = [center[i,0]-rws[i]/2, center[i,0]+rws[i]/2-1]
            ybi = [center[i,1]-rhs[i]/2, center[i,1]+rhs[i]/2-1]
            for j in range(2):
                maskx = bound[:,int(xbi[j])]
                masky = bound[int(ybi[j]),:]
                tmp_a = maskx * xbi[j]
                tmp_b = 1-maskx
                xb += [tmp_b*10000 + tmp_a]

                tmp_a = masky * ybi[j]
                tmp_b = 1-masky
                yb += [tmp_b*10000 + tmp_a]
        soft = 1-getSoft([imgsize,imgsize],xb,yb)
        soft = jt.array(soft).unsqueeze(0).float()
        mask = (jt.ones(mask.shape)-mask)*soft + mask

        bgpath = os.path.join(self.maskbgdir, basen+'.png')
        im_bg = Image.open(bgpath)
        mask2 = transform_mask(im_bg) # mask out background
        mask2 = jt.array(mask2)
        mask2 = (mask2 >= 0.5).float() # foreground: 1, background: 0
        item_A_l['hair_A'] = (item_A/2+0.5) * mask.repeat(3,1,1) * mask2.repeat(3,1,1) * 2 - 1
        item_A_l['bg_A'] = (item_A/2+0.5) * (jt.ones(mask2.shape)-mask2).repeat(3,1,1) * 2 - 1
        item_B_l['hair_B'] = (item_B/2+0.5) * mask * mask2 * 2 - 1
        item_B_l['bg_B'] = (item_B/2+0.5) * (jt.ones(mask2.shape)-mask2) * 2 - 1

        facepath = os.path.join(self.maskfacedir, basen+'.png')
        im_face = Image.open(facepath)
        maskface = transform_mask(im_face)
        maskface = jt.array(maskface)

        img = tocv2(item_B)
        dt1, dt2 = dt(img)
        dt1 = jt.array(dt1)
        dt2 = jt.array(dt2)
        dt1 = dt1.unsqueeze(0)
        dt2 = dt2.unsqueeze(0)

        return item_A, item_A_l['eyel_A'], item_A_l['eyer_A'], item_A_l['nose_A'], item_A_l['mouth_A'], item_A_l['hair_A'], item_A_l['bg_A'], item_B, item_B_l['eyel_B'], item_B_l['eyer_B'], item_B_l['nose_B'], item_B_l['mouth_B'], item_B_l['hair_B'], item_B_l['bg_B'], mask, mask2, center, dt1, dt2, cmasks[0], cmasks[1], cmasks[2], cmasks[3], maskface
Code example #14
File: test_setitem.py  Project: lzhengning/jittor
    def test_getitem(self):
        # test for different slice type
        arr0 = jt.random((4, 3))
        arr0_res = arr0[2, :]
        arr0_res.data[1] = 1
        assert arr0[2, 1] == 1

        arr1 = jt.array([1, 2, 3, 4])
        arr1_res = arr1[None]
        arr1_res.data[0, 2] = -1
        assert arr1[2] == -1

        arr2 = jt.array([1, 2, 3, 4])
        arr2_res = arr2[...]
        arr2_res.data[2] = -1
        assert arr2[2] == -1

        arr3 = jt.array([1, 2, 3, 4])
        arr3_res = arr3[3]
        arr3_res.data[0] = -1
        assert arr3[3] == -1

        arr4 = jt.random((4, 2, 3, 3))
        arr4_res = arr4[..., :, :]
        arr4_res.data[0, 0, 1, 1] = 1
        assert arr4[0, 0, 1, 1] == 1

        arr4 = jt.random((4, 2, 3, 3))
        arr4_res = arr4[..., :, :2]
        arr4_res.data[0, 0, 1, 1] = 1
        assert arr4[0, 0, 1, 1] != 1

        arr4 = jt.random((3, 3))
        arr4_res = arr4[..., :, :2]
        arr4_res.data[1, 1] = 1
        assert arr4[1, 1] != 1

        arr5 = jt.random((4, 2, 3, 3))
        arr5_res = arr5[1:3, :, :, :]
        arr5_res.data[1, 0, 1, 1] = 1
        assert arr5[2, 0, 1, 1] == 1

        arr6 = jt.random((4, 2, 3, 3))
        arr6_res = arr6[1]
        arr6_res.data[0, 1, 1] = 1
        assert arr6[1, 0, 1, 1] == 1

        # test for different data types (float32/float64/bool/int32)
        arr_float32 = jt.random((4, 2, 3))
        arr_float32_res = arr_float32[1:3, :, :]
        arr_float32_res.data[0, 0, 0] = 1
        assert arr_float32[1, 0, 0] == 1
        arr_float32_res.data[1, 1, 2] = 1
        assert arr_float32[2, 1, 2] == 1
        arr_float32[1, 0, 0] = 0
        # getitem and setitem do not conflict
        assert arr_float32_res[0, 0, 0] == 1

        arr_bool = jt.bool(np.ones((4, 2, 3)))
        arr_bool_res = arr_bool[1:3, :, :]
        arr_bool_res.data[0, 0, 0] = False
        assert arr_bool[1, 0, 0] == False
        arr_bool_res.data[0, 0, 1] = False
        assert arr_bool[1, 0, 1] == False

        arr_float64 = jt.random((4, 2, 3), dtype='float64')
        arr_float64_res = arr_float64[1:3, :, :]
        arr_float64_res.data[0, 0, 0] = 1
        assert arr_float64[1, 0, 0] == 1
        arr_float64_res.data[1, 1, 2] = 1
        assert arr_float64[2, 1, 2] == 1

        arr_int32 = jt.ones((4, 2, 3), dtype='int32')
        arr_int32_res = arr_int32[1:3, :, :]
        arr_int32_res.data[0, 0, 0] = 0
        assert arr_int32[1, 0, 0] == 0
        arr_int32_res.data[1, 1, 2] = 0
        assert arr_int32[2, 1, 2] == 0
Code example #15
def add_with_mask(A, B, mask):
    return ((A/2+0.5)*mask + (B/2+0.5)*(jt.ones(mask.shape)-mask))*2-1
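Same [-1, 1] convention as addone_with_mask in example #7, but the masked-out region is filled from a second image B, i.e. a per-pixel composite of A over B. A small sketch with assumed toy values:

import jittor as jt

A = jt.ones(4)       # all-white image in [-1, 1]
B = -jt.ones(4)      # all-black image
mask = jt.array([1., 1., 0., 0.])
print(add_with_mask(A, B, mask).data)  # [ 1.  1. -1. -1.]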
Code example #16
    def __init__(self,
                 vertices,
                 faces,
                 textures=None,
                 texture_res=1,
                 texture_type='surface',
                 dr_type='softras',
                 metallic_textures=None,
                 roughness_textures=None):
        '''
        vertices, faces and textures (if not None) are expected to be Tensor objects
        '''
        self._vertices = vertices
        self._faces = faces

        if isinstance(self._vertices, np.ndarray):
            self._vertices = jt.array(self._vertices).float()
        if isinstance(self._faces, np.ndarray):
            self._faces = jt.array(self._faces).int()
        if len(self._vertices.shape) == 2:
            self._vertices = self._vertices.unsqueeze(0)
        if len(self._faces.shape) == 2:
            self._faces = self._faces.unsqueeze(0)

        self.texture_type = texture_type

        self.batch_size = self._vertices.shape[0]
        self.num_vertices = self._vertices.shape[1]
        self.num_faces = self._faces.shape[1]

        self._face_vertices = None
        self._face_vertices_update = True
        self._surface_normals = None
        self._surface_normals_update = True
        self._vertex_normals = None
        self._vertex_normals_update = True
        self._with_specular = True

        self._fill_back = False
        self.dr_type = dr_type

        if texture_type == 'surface':
            if self.dr_type == 'softras':
                self._metallic_textures = jt.zeros(
                    (self.batch_size, self.num_faces, texture_res**2, 1))
                self._roughness_textures = jt.ones(
                    (self.batch_size, self.num_faces, texture_res**2, 1))
            elif self.dr_type == 'n3mr':
                self._metallic_textures = jt.zeros(
                    (self.batch_size, self.num_faces, texture_res, texture_res,
                     texture_res, 1))
                self._roughness_textures = jt.ones(
                    (self.batch_size, self.num_faces, texture_res, texture_res,
                     texture_res, 1))
        elif texture_type == 'vertex':
            self._metallic_textures = jt.zeros(
                (self.batch_size, self.num_vertices, 1))
            self._roughness_textures = jt.ones(
                (self.batch_size, self.num_vertices, 1))

        if metallic_textures is not None:
            self._metallic_textures = metallic_textures
        if roughness_textures is not None:
            self._roughness_textures = roughness_textures

        # create textures
        if textures is None:
            if texture_type == 'surface':
                if self.dr_type == 'softras':
                    self._textures = jt.ones(
                        (self.batch_size, self.num_faces, texture_res**2, 3))
                elif self.dr_type == 'n3mr':
                    self._textures = jt.ones(
                        (self.batch_size, self.num_faces, texture_res,
                         texture_res, texture_res, 3))
                self.texture_res = texture_res
            elif texture_type == 'vertex':
                self._textures = jt.ones(
                    (self.batch_size, self.num_vertices, 3))
                self.texture_res = 1
        else:
            if isinstance(textures, np.ndarray):
                textures = jt.array(textures).float()
            if len(textures.shape) == 3 and texture_type == 'surface':
                textures = textures.unsqueeze(0)
            if len(textures.shape) == 2 and texture_type == 'vertex':
                textures = textures.unsqueeze(0)
            if len(textures.shape) == 5:
                textures = textures.unsqueeze(0)
            self._textures = textures
            self.texture_res = int(np.sqrt(self._textures.shape[2]))

        self._origin_vertices = self._vertices
        self._origin_faces = self._faces
        self._origin_textures = self._textures
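A hedged construction sketch, assuming the enclosing class is the renderer's Mesh (a single triangle with the default white surface textures; the names here are illustrative):

import numpy as np

vertices = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]], dtype=np.float32)
faces = np.array([[0, 1, 2]], dtype=np.int32)
mesh = Mesh(vertices, faces)  # numpy inputs are converted and a batch dim is added
print(mesh.batch_size, mesh.num_vertices, mesh.num_faces)  # 1 3 1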
Code example #17
File: batch_norm.py  Project: li-xl/detectron.jittor
    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        self.weight = jt.ones(n).stop_grad()
        self.bias = jt.zeros(n).stop_grad()
        self.running_mean = jt.zeros(n).stop_grad()
        self.running_var = jt.ones(n).stop_grad()
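The matching execute is not part of this excerpt; below is a minimal sketch of how such frozen buffers are typically applied, following the usual Detectron formulation. The eps value and broadcasting layout are assumptions, not the project's actual code:

    def execute(self, x):
        # Hypothetical forward: affine transform with frozen statistics.
        scale = self.weight / jt.sqrt(self.running_var + 1e-5)  # eps assumed
        bias = self.bias - self.running_mean * scale
        return x * scale.reshape(1, -1, 1, 1) + bias.reshape(1, -1, 1, 1)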
Code example #18
    def test_flatten(self):
        a = jt.ones([2,3,4])
        assert a.flatten().shape == [24]
        assert a.flatten(1).shape == [2,12]
        assert a.flatten(0,-2).shape == [6,4]
Code example #19
    def test_tensor_bad_types_to_pil_image(self):
        with self.assertRaises(ValueError):
            transform.ToPILImage()(jt.ones((1, 3, 4, 4)))
Code example #20
File: aae.py  Project: whuyyc/gan-jittor
    # Sample noise
    z = jt.array(np.random.normal(
        0, 1, (n_row**2, opt.latent_dim))).float32().stop_grad()
    gen_imgs = decoder(z)
    save_image(gen_imgs.numpy(), "images/%d.png" % batches_done, nrow=n_row)


# ----------
#  Training
# ----------

for epoch in range(opt.n_epochs):
    for i, (imgs, _) in enumerate(train_loader):
        sta = time.time()
        # Adversarial ground truths
        valid = jt.ones([imgs.shape[0], 1]).stop_grad()
        fake = jt.zeros([imgs.shape[0], 1]).stop_grad()

        # Configure input
        real_imgs = jt.array(imgs).stop_grad()

        # -----------------
        #  Train Generator
        # -----------------
        encoded_imgs = encoder(real_imgs)
        decoded_imgs = decoder(encoded_imgs)

        # Loss measures generator's ability to fool the discriminator
        g_loss = (
            0.001 * adversarial_loss(discriminator(encoded_imgs), valid) +
            0.999 * pixelwise_loss(decoded_imgs, real_imgs))
Code example #21
File: infogan.py  Project: whuyyc/gan-jittor
               nrow=n_row)
    save_image(sample2.numpy(),
               "images/varying_c2/%d.png" % batches_done,
               nrow=n_row)


# ----------
#  Training
# ----------
for epoch in range(opt.n_epochs):
    for i, (imgs, labels) in enumerate(dataloader):

        batch_size = imgs.shape[0]

        # Adversarial ground truths
        valid = jt.ones((batch_size, 1)).float32().stop_grad()
        fake = jt.zeros((batch_size, 1)).float32().stop_grad()

        # Configure input
        real_imgs = jt.array(imgs).float32()
        labels = to_categorical(labels.numpy(), num_columns=opt.n_classes)

        # -----------------
        #  Train Generator
        # -----------------

        # Sample noise and labels as generator input
        z = jt.array(np.random.normal(0, 1,
                                      (batch_size, opt.latent_dim))).float32()
        label_input = to_categorical(np.random.randint(0, opt.n_classes,
                                                       batch_size),
Code example #22
    loss = jt.mean(mu_2)
    return loss


# ----------
#  Training
# ----------
prev_time = time.time()
for epoch in range(opt.epoch, opt.n_epochs):
    for i, batch in enumerate(dataloader):
        # Set model input
        X1 = batch[0].stop_grad()
        X2 = batch[1].stop_grad()

        # Adversarial ground truths
        valid = jt.ones((X1.size(0), *D1.output_shape)).stop_grad()
        fake = jt.zeros((X1.size(0), *D1.output_shape)).stop_grad()
        # -------------------------------
        #  Train Encoders and Generators
        # -------------------------------

        E1.train()
        E2.train()
        G1.train()
        G2.train()

        # Get shared latent representation
        mu1, Z1 = E1(X1)
        mu2, Z2 = E2(X2)

        # Reconstruct images
Code example #23
File: test_optimizer.py  Project: lzhengning/jittor
    def test_state_dict(self):
        a = jt.ones(2)
        opt = jt.optim.SGD([a], 0.1)
        s = opt.state_dict()
        # print(s)
        opt.load_state_dict(s)
Code example #24
    def execute(self, x):
        """ The input should be of size [batch_size, 3, img_h, img_w] """
        _, _, img_h, img_w = x.shape
        cfg._tmp_img_h = img_h
        cfg._tmp_img_w = img_w

        with timer.env('backbone'):
            outs = self.backbone(x)

        if cfg.fpn is not None:
            with timer.env('fpn'):
                # Use backbone.selected_layers because we overwrote self.selected_layers
                outs = [outs[i] for i in cfg.backbone.selected_layers]
                outs = self.fpn(outs)
        proto_out = None
        if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch:
            with timer.env('proto'):
                proto_x = x if self.proto_src is None else outs[self.proto_src]

                if self.num_grids > 0:
                    grids = self.grid.repeat(proto_x.shape[0], 1, 1, 1)
                    proto_x = jt.contrib.concat([proto_x, grids], dim=1)

                proto_out = self.proto_net(proto_x)
                proto_out = cfg.mask_proto_prototype_activation(proto_out)

                if cfg.mask_proto_prototypes_as_features:
                    # Clone here because we don't want to permute this, though idk if contiguous makes this unnecessary
                    proto_downsampled = proto_out.clone()

                    if cfg.mask_proto_prototypes_as_features_no_grad:
                        proto_downsampled = proto_out.detach()

                # Move the features last so the multiplication is easy
                proto_out = proto_out.permute(0, 2, 3, 1)

                if cfg.mask_proto_bias:
                    bias_shape = [x for x in proto_out.shape]
                    bias_shape[-1] = 1
                    proto_out = jt.contrib.concat(
                        [proto_out, jt.ones(bias_shape)], -1)

        with timer.env('pred_heads'):
            pred_outs = {'loc': [], 'conf': [], 'mask': [], 'priors': []}

            if cfg.use_mask_scoring:
                pred_outs['score'] = []

            if cfg.use_instance_coeff:
                pred_outs['inst'] = []

            for idx, pred_layer in zip(self.selected_layers,
                                       self.prediction_layers):
                pred_x = outs[idx]

                if cfg.mask_type == mask_type.lincomb and cfg.mask_proto_prototypes_as_features:
                    # Scale the prototypes down to the current prediction layer's size and add it as inputs
                    proto_downsampled = nn.interpolate(
                        proto_downsampled,
                        size=outs[idx].shape[2:],
                        mode='bilinear',
                        align_corners=False)
                    # proto_downsampled = interpolate(proto_downsampled, size=outs[idx].shape[2:], mode='bilinear', align_corners=False)

                    pred_x = jt.contrib.concat([pred_x, proto_downsampled],
                                               dim=1)

                # A hack for the way dataparallel works
                if cfg.share_prediction_module and pred_layer is not self.prediction_layers[
                        0]:
                    pred_layer.parent = [self.prediction_layers[0]]

                p = pred_layer(pred_x)

                for k, v in p.items():
                    pred_outs[k].append(v)

        for k, v in pred_outs.items():
            pred_outs[k] = jt.contrib.concat(v, -2)

        if proto_out is not None:
            pred_outs['proto'] = proto_out

        #print('hh',pred_outs)
        #print()
        if self.is_training():
            # For the extra loss functions
            if cfg.use_class_existence_loss:
                pred_outs['classes'] = self.class_existence_fc(
                    outs[-1].mean(dim=(2, 3)))

            if cfg.use_semantic_segmentation_loss:
                pred_outs['segm'] = self.semantic_seg_conv(outs[0])

            return pred_outs
        else:
            if cfg.use_mask_scoring:
                pred_outs['score'] = jt.sigmoid(pred_outs['score'])
            if cfg.use_focal_loss:
                if cfg.use_sigmoid_focal_loss:
                    # Note: even though conf[0] exists, this mode doesn't train it so don't use it
                    pred_outs['conf'] = jt.sigmoid(pred_outs['conf'])
                    if cfg.use_mask_scoring:
                        pred_outs['conf'] *= pred_outs['score']
                elif cfg.use_objectness_score:
                    # See focal_loss_sigmoid in multibox_loss.py for details
                    objectness = jt.sigmoid(pred_outs['conf'][:, :, 0])
                    pred_outs['conf'][:, :, 1:] = objectness.unsqueeze(
                        2) * nn.softmax(pred_outs['conf'][:, :, 1:], -1)
                    pred_outs['conf'][:, :, 0] = 1 - objectness
                else:
                    pred_outs['conf'] = nn.softmax(pred_outs['conf'], -1)
            else:

                if cfg.use_objectness_score:
                    objectness = jt.sigmoid(pred_outs['conf'][:, :, 0])

                    pred_outs['conf'][:, :, 1:] = (objectness > 0.10).unsqueeze(-1) \
                        * nn.softmax(pred_outs['conf'][:, :, 1:], dim=-1)

                else:
                    pred_outs['conf'] = nn.softmax(pred_outs['conf'], -1)
            return self.detect(pred_outs, self)
Code example #25
File: test_optimizer.py  Project: lzhengning/jittor
    def test_opt_grad(self):
        a = jt.ones(2)
        opt = jt.optim.SGD([a], 0.1)
        opt.backward(a**2)
        g = a.opt_grad(opt)
        np.testing.assert_allclose(g.data, 2)
Code example #26
File: pix2pix.py  Project: whuyyc/gan-jittor
warmup_times = -1
run_times = 3000
total_time = 0.
cnt = 0

# ----------
#  Training
# ----------

prev_time = time.time()

for epoch in range(opt.epoch, opt.n_epochs):
    for i, (real_B, real_A) in enumerate(dataloader):
        # Adversarial ground truths
        valid = jt.ones([real_A.shape[0], 1]).stop_grad()
        fake = jt.zeros([real_A.shape[0], 1]).stop_grad()

        # ------------------
        #  Train Generators
        # ------------------
        # GAN loss
        fake_B = generator(real_A)
        pred_fake = discriminator(fake_B, real_A)
        loss_GAN = criterion_GAN(pred_fake, valid)
        # Pixel-wise loss
        loss_pixel = criterion_pixelwise(fake_B, real_B)
        # Total loss
        loss_G = loss_GAN + lambda_pixel * loss_pixel
        optimizer_G.step(loss_G)
Code example #27
    def __getitem__(self, index):

        AB_path = self.files[index % len(self.files)]
        img = Image.open(AB_path)
        w, h = img.size
        img_A = img.crop((0, 0, w / 2, h))
        img_B = img.crop((w / 2, 0, w, h))

        flip = random.random() > 0.5

        params = {'load_h': self.load_h, 'load_w': self.load_w, 'flip': flip}
        transform_A = get_transform(params)
        transform_B = get_transform(params, gray=True)
        transform_mask = get_transform(params, gray=True, mask=True)

        item_A = transform_A(img_A)
        item_A = jt.array(item_A)
        item_B = transform_B(img_B)
        item_B = jt.array(item_B)

        item_A_l = {}
        regions = ['eyel', 'eyer', 'nose', 'mouth']
        basen = os.path.basename(AB_path)[:-4]
        lm_path = os.path.join(self.lmdir, basen + '.txt')
        feats = getfeats(lm_path)
        if flip:
            for i in range(5):
                feats[i, 0] = self.load_w - feats[i, 0] - 1
            tmp = [feats[0, 0], feats[0, 1]]
            feats[0, :] = [feats[1, 0], feats[1, 1]]
            feats[1, :] = tmp
        mouth_x = int((feats[3, 0] + feats[4, 0]) / 2.0)
        mouth_y = int((feats[3, 1] + feats[4, 1]) / 2.0)
        ratio = self.load_h // 256
        rhs = np.array([EYE_H, EYE_H, NOSE_H, MOUTH_H]) * ratio
        rws = np.array([EYE_W, EYE_W, NOSE_W, MOUTH_W]) * ratio
        center = np.array(
            [[feats[0, 0], feats[0, 1] - 4 * ratio],
             [feats[1, 0], feats[1, 1] - 4 * ratio],
             [feats[2, 0], feats[2, 1] - rhs[2] // 2 + 16 * ratio],
             [mouth_x, mouth_y]])

        for i in range(4):
            item_A_l[regions[i] + '_A'] = item_A[
                :,
                int(center[i, 1] - rhs[i] / 2):int(center[i, 1] + rhs[i] / 2),
                int(center[i, 0] - rws[i] / 2):int(center[i, 0] + rws[i] / 2)]

        mask = jt.ones([1, item_A.shape[1], item_A.shape[2]])  # mask out eyes, nose, mouth
        for i in range(4):
            mask[:,
                 int(center[i, 1] - rhs[i] / 2):int(center[i, 1] + rhs[i] / 2),
                 int(center[i, 0] - rws[i] / 2):int(center[i, 0] + rws[i] / 2)] = 0

        bgpath = os.path.join(self.maskdir, basen + '.png')
        im_bg = Image.open(bgpath)
        mask2 = transform_mask(im_bg)  # mask out background
        mask2 = jt.array(mask2)
        mask2 = (mask2 >= 0.5).float()  # foreground: 1, background: 0
        item_A_l['hair_A'] = (item_A / 2 + 0.5) * mask.repeat(3, 1, 1) * mask2.repeat(3, 1, 1) * 2 - 1
        item_A_l['bg_A'] = (item_A / 2 + 0.5) * (jt.ones(mask2.shape) - mask2).repeat(3, 1, 1) * 2 - 1

        img = tocv2(item_B)
        dt1, dt2 = dt(img)
        dt1 = jt.array(dt1)
        dt2 = jt.array(dt2)
        dt1 = dt1.unsqueeze(0)
        dt2 = dt2.unsqueeze(0)

        return (item_A, item_A_l['eyel_A'], item_A_l['eyer_A'], item_A_l['nose_A'],
                item_A_l['mouth_A'], item_A_l['hair_A'], item_A_l['bg_A'],
                mask, mask2, center, item_B, dt1, dt2)