def build_model(self):
        """ DataLoader """
        pad = int(30 * self.img_size // 256)
        train_transform = T.Compose([
            T.RandomHorizontalFlip(),
            T.Resize((self.img_size + pad, self.img_size + pad)),
            T.RandomCrop(self.img_size),
            T.ToTensor(),
            T.Normalize(mean=0.5, std=0.5),
        ])

        test_transform = T.Compose([
            T.Resize((self.img_size, self.img_size)),
            T.ToTensor(),
            T.Normalize(mean=0.5, std=0.5)
        ])

        self.trainA = ImageFolder('dataset/photo2cartoon/trainA', self.img_size, train_transform)
        self.trainB = ImageFolder('dataset/photo2cartoon/trainB', self.img_size, train_transform)
        self.testA = ImageFolder('dataset/photo2cartoon/testA', self.img_size, test_transform)
        self.testB = ImageFolder('dataset/photo2cartoon/testB', self.img_size, test_transform)

        self.trainA_loader = DataLoader(self.trainA, batch_size=self.batch_size, shuffle=True)
        self.trainB_loader = DataLoader(self.trainB, batch_size=self.batch_size, shuffle=True)
        self.testA_loader = DataLoader(self.testA, batch_size=1, shuffle=False)
        self.testB_loader = DataLoader(self.testB, batch_size=1, shuffle=False)

        """ Define Generator, Discriminator """
        self.genA2B = ResnetGenerator(ngf=self.ch, img_size=self.img_size, light=self.light)
        self.genB2A = ResnetGenerator(ngf=self.ch, img_size=self.img_size, light=self.light)
        self.disGA = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disGB = Discriminator(input_nc=3, ndf=self.ch, n_layers=7)
        self.disLA = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)
        self.disLB = Discriminator(input_nc=3, ndf=self.ch, n_layers=5)

        """ Define Loss """
        self.L1_loss = nn.L1Loss()
        self.MSE_loss = nn.MSELoss()
        self.BCE_loss = nn.BCEWithLogitsLoss()

        self.G_optim = paddle.optimizer.Adam(
            learning_rate=self.lr, beta1=0.5, beta2=0.999, weight_decay=0.0001,
            parameters=self.genA2B.parameters()+self.genB2A.parameters()
        )
        self.D_optim = paddle.optimizer.Adam(
            learning_rate=self.lr, beta1=0.5, beta2=0.999, weight_decay=0.0001,
            parameters=self.disGA.parameters()+self.disGB.parameters()+self.disLA.parameters()+self.disLB.parameters()
        )

        self.Rho_clipper = RhoClipper(0, self.rho_clipper)
        self.W_clipper = WClipper(0, self.w_clipper)
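RhoClipper and WClipper are only referenced here, not defined. As a hedged sketch (an assumption modeled on how U-GAT-IT-style models constrain the rho parameter of their ILN/AdaILN layers, not the project's actual class), a Paddle clipper applied after each optimizer step could look like:

import paddle

class RhoClipper:
    # Hypothetical sketch: clamp any `rho` parameter to [clip_min, clip_max].
    def __init__(self, clip_min, clip_max):
        self.clip_min = clip_min
        self.clip_max = clip_max

    def __call__(self, layer):
        # Applied via layer.apply(...), e.g. genA2B.apply(self.Rho_clipper).
        if hasattr(layer, 'rho'):
            with paddle.no_grad():
                layer.rho.set_value(paddle.clip(layer.rho, self.clip_min, self.clip_max))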
Example #2
    def test_to_tensor(self):
        trans = transforms.Compose([transforms.ToTensor()])
        fake_img = self.create_image((50, 100, 3))

        tensor = trans(fake_img)

        assert isinstance(tensor, paddle.Tensor)
        np.testing.assert_equal(tensor.shape, (3, 50, 100))
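The create_image helper used above is not part of this excerpt; a minimal stand-in (an assumption, not the original fixture) would build an HWC uint8 PIL image, which transforms.ToTensor() then converts to a CHW paddle.Tensor:

import numpy as np
from PIL import Image

def create_image(shape):
    # Hypothetical helper: random HWC uint8 image of the given (H, W, C) shape.
    h, w, c = shape
    return Image.fromarray(np.random.randint(0, 255, size=(h, w, c), dtype=np.uint8))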
Example #3
def test_det(
        opt,
        batch_size=12,
        img_size=(1088, 608),
        iou_thres=0.5,
        print_interval=40,
):
    data_cfg = opt.data_cfg
    with open(data_cfg) as f:
        data_cfg_dict = json.load(f)
    nC = 1
    test_path = data_cfg_dict['test']
    dataset_root = data_cfg_dict['root']
    if opt.gpus[0] >= 0:
        # opt.device = torch.device('cuda')
        opt.device = 'gpu'
    else:
        # opt.device = torch.device('cpu')
        opt.device = 'cpu'
    paddle.set_device(opt.device)
    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, opt.load_model)
    #model = torch.nn.DataParallel(model)
    # model = model.to(opt.device)
    model.eval()

    # Get dataloader
    transforms = T.Compose([T.ToTensor()])
    dataset = DetDataset(dataset_root, test_path, img_size, augment=False, transforms=transforms)
    dataloader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=False,
                                      num_workers=8, drop_last=False, collate_fn=collate_fn)
    mean_mAP, mean_R, mean_P, seen = 0.0, 0.0, 0.0, 0
    print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
    outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class, jdict = \
        [], [], [], [], [], [], [], [], []
    AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
    for batch_i, (imgs, targets, paths, shapes, targets_len) in enumerate(dataloader):
        t = time.time()
        #seen += batch_size

        output = model(imgs)[-1]  # device already selected via paddle.set_device
        origin_shape = shapes[0]
        width = origin_shape[1]
        height = origin_shape[0]
        inp_height = img_size[1]
        inp_width = img_size[0]
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {'c': c, 's': s,
                'out_height': inp_height // opt.down_ratio,
                'out_width': inp_width // opt.down_ratio}
        hm = output['hm'].sigmoid_()
        wh = output['wh']
        reg = output['reg'] if opt.reg_offset else None
        opt.K = 200
        detections, inds = mot_decode(hm, wh, reg=reg, ltrb=opt.ltrb, K=opt.K)
        # Compute average precision for each sample
        targets = [targets[i][:int(l)] for i, l in enumerate(targets_len)]
        for si, labels in enumerate(targets):
            seen += 1
            #path = paths[si]
            #img0 = cv2.imread(path)
            dets = detections[si]
            dets = dets.unsqueeze(0)
            dets = post_process(opt, dets, meta)
            dets = merge_outputs(opt, [dets])[1]

            #remain_inds = dets[:, 4] > opt.det_thres
            #dets = dets[remain_inds]
            if dets is None:
                # If there are labels but no detections mark as zero AP
                if labels.shape[0] != 0:
                    mAPs.append(0), mR.append(0), mP.append(0)
                continue

            # If no labels add number of detections as incorrect
            correct = []
            if labels.shape[0] == 0:
                # correct.extend([0 for _ in range(len(detections))])
                mAPs.append(0), mR.append(0), mP.append(0)
                continue
            else:
                target_cls = labels[:, 0]

                # Extract target boxes as (x1, y1, x2, y2)
                target_boxes = xywh2xyxy(labels[:, 2:6])
                target_boxes[:, 0] *= width
                target_boxes[:, 2] *= width
                target_boxes[:, 1] *= height
                target_boxes[:, 3] *= height

                '''
                path = paths[si]
                img0 = cv2.imread(path)
                img1 = cv2.imread(path)
                for t in range(len(target_boxes)):
                    x1 = target_boxes[t, 0]
                    y1 = target_boxes[t, 1]
                    x2 = target_boxes[t, 2]
                    y2 = target_boxes[t, 3]
                    cv2.rectangle(img0, (x1, y1), (x2, y2), (0, 255, 0), 4)
                cv2.imwrite('gt.jpg', img0)
                for t in range(len(dets)):
                    x1 = dets[t, 0]
                    y1 = dets[t, 1]
                    x2 = dets[t, 2]
                    y2 = dets[t, 3]
                    cv2.rectangle(img1, (x1, y1), (x2, y2), (0, 255, 0), 4)
                cv2.imwrite('pred.jpg', img1)
                abc = ace
                '''

                detected = []
                for *pred_bbox, conf in dets:
                    obj_pred = 0
                    pred_bbox = paddle.to_tensor(pred_bbox, dtype='float32').reshape([1, -1])
                    # Compute iou with target boxes
                    iou = bbox_iou(pred_bbox, target_boxes, x1y1x2y2=True)[0]
                    # Extract index of largest overlap
                    best_i = np.argmax(iou)
                    # If overlap exceeds threshold and classification is correct mark as correct
                    if iou[best_i] > iou_thres and obj_pred == labels[best_i, 0] and best_i not in detected:
                        correct.append(1)
                        detected.append(best_i)
                    else:
                        correct.append(0)

            # Compute Average Precision (AP) per class
            AP, AP_class, R, P = ap_per_class(tp=correct,
                                              conf=dets[:, 4],
                                              pred_cls=np.zeros_like(dets[:, 4]),  # detections[:, 6]
                                              target_cls=target_cls)

            # Accumulate AP per class
            AP_accum_count += np.bincount(AP_class, minlength=nC)
            AP_accum += np.bincount(AP_class, minlength=nC, weights=AP)

            # Compute mean AP across all classes in this image, and append to image list
            mAPs.append(AP.mean())
            mR.append(R.mean())
            mP.append(P.mean())

            # Means of all images
            mean_mAP = np.sum(mAPs) / (AP_accum_count + 1E-16)
            mean_R = np.sum(mR) / (AP_accum_count + 1E-16)
            mean_P = np.sum(mP) / (AP_accum_count + 1E-16)

        if batch_i % print_interval == 0:
            # Print image mAP and running mean mAP
            print(('%11s%11s' + '%11.3g' * 4 + 's') %
                  (seen, dataloader.dataset.nF, mean_P, mean_R, mean_mAP, time.time() - t))
    # Print mAP per class
    print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))

    print('AP: %-.4f\n\n' % (AP_accum[0] / (AP_accum_count[0] + 1E-16)))

    # Return mAP
    return mean_mAP, mean_R, mean_P
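xywh2xyxy is referenced but not defined in this excerpt. A typical implementation (an assumption consistent with its use on normalized center-format label boxes above) is:

import numpy as np

def xywh2xyxy(x):
    # Assumed helper: convert (cx, cy, w, h) boxes to (x1, y1, x2, y2) corners.
    x = np.asarray(x)
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y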
Example #4
    def __len__(self):
        return len(self.img_names)


if __name__ == '__main__':
    from paddle.vision.transforms import transforms as T
    from paddle.io import DataLoader

    img_size = 256
    pad = 30

    train_transform = T.Compose([
        T.RandomHorizontalFlip(),
        T.Resize((img_size + pad, img_size + pad)),
        T.RandomCrop(img_size),
        T.ToTensor(),
        T.Normalize(mean=0.5, std=0.5)
    ])

    dataset = ImageFolder('dataset/photo2cartoon/trainB',
                          transform=train_transform)

    train_loader = DataLoader(dataset, batch_size=1, shuffle=True)
    print('num: ', len(train_loader))
    for i in range(300):
        print(i)
        try:
            real_A, _ = next(trainA_iter)
        except (NameError, StopIteration):
            # (Re)create the iterator on first use or once the loader is exhausted.
            trainA_iter = iter(train_loader())
            real_A, _ = next(trainA_iter)
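ImageFolder here is the project's own dataset class (the __len__ shown at the top of this example belongs to it), not a Paddle built-in. A minimal sketch of such a dataset, assuming it simply lists and transforms the images in one directory, might be:

import os
from PIL import Image
from paddle.io import Dataset

class SimpleImageFolder(Dataset):
    # Hypothetical stand-in for the project's ImageFolder, not the original class.
    def __init__(self, root, transform=None):
        super().__init__()
        self.root = root
        self.img_names = sorted(os.listdir(root))
        self.transform = transform

    def __getitem__(self, idx):
        img = Image.open(os.path.join(self.root, self.img_names[idx])).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, 0  # dummy label so the (image, label) unpacking above still works

    def __len__(self):
        return len(self.img_names)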
Example #5
    def do_transform(self, trans):
        trans.transforms.insert(0, transforms.ToTensor(data_format='CHW'))
        trans.transforms.append(transforms.Transpose(order=(1, 2, 0)))
        dataset_folder = DatasetFolder(self.data_dir, transform=trans)
        for _ in dataset_folder:
            pass
Example #6
"""
Created on : 2021/5/18 21:15

@author: Jeremy
"""
'''
Paddle added the high-level paddle.Model API; for most tasks it can be used to simplify
training, evaluation, and prediction code. Note the distinction between a Model and a Net:
a Net is a network structure that subclasses paddle.nn.Layer, while a Model is an instance
that holds a Net object together with a loss function, an optimizer, and metrics, and can
be trained, evaluated, and used for prediction.
'''

import paddle

from paddle.vision.transforms import transforms
from paddle.io import DataLoader
#
transform = transforms.Compose([transforms.ToTensor()])
#
# train_data = paddle.vision.datasets.MNIST(mode='train',transform = transform)
# test_data = paddle.vision.datasets.MNIST(mode='test',transform= transform)
# lenet = paddle.vision.models.LeNet()
#
# # Mnist继承paddle.nn.Layer属于Net,model包含了训练功能
#
# model = paddle.Model(lenet)
# # 设置训练模型所需的optimizer, loss, metric
# model.prepare(
#     paddle.optimizer.Adam(learning_rate=0.001,parameters=model.parameters()),
#     paddle.nn.CrossEntropyLoss(),
#     paddle.metric.Accuracy()
# )
#
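A runnable continuation of the commented-out sketch above (same MNIST/LeNet setup; the epoch, batch-size, and verbosity values below are illustrative, not from the original file) would be:

train_data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
test_data = paddle.vision.datasets.MNIST(mode='test', transform=transform)

# LeNet subclasses paddle.nn.Layer (a "Net"); wrapping it in paddle.Model adds the
# high-level train/evaluate/predict utilities described above.
model = paddle.Model(paddle.vision.models.LeNet())
model.prepare(
    paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()),
    paddle.nn.CrossEntropyLoss(),
    paddle.metric.Accuracy()
)
model.fit(train_data, epochs=1, batch_size=64, verbose=1)
model.evaluate(test_data, batch_size=64, verbose=1)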