Example #1
import cv2

RED = (0, 0, 255)  # BGR red for drawing (assumed; not defined in the snippet)

def getWhiteBoxLocations(img, preview=False):
    if isinstance(img, str):  # accept either a path or an already-loaded image
        img = cv2.imread(img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # THRESH_OTSU computes the threshold itself, so the 30 passed here is ignored
    thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_OTSU)[1]
    contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    # area_thresh = 0
    min_area = 0.95*180*29
    max_area = 1.05*180*35
    result = img.copy()
    boxLocations = []
    boxes = []
    for c in contours:
        area = cv2.contourArea(c)
        if min_area <= area <= max_area:
            x, y, w, h = cv2.boundingRect(c)
            # print(x, y, x + w, y + h)
            x, y, w, h = x - 5, y - 5, w + 7, h + 7
            # print(x, y, x + w, y + h)
            cv2.rectangle(result, (x, y), (x + w, y + h), RED, 2)
            boxLocations.append((x, y, x + w, y + h))  # left, top, right, bottom
            boxes.append(img[y : y + h, x : x + w])
            # cv2.drawContours(result, [c], -1, RED, 3)
    if preview:
        showImages(['main', 'gray', 'thresh', 'result'], [img, gray, thresh, result])
    return boxes, boxLocations
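Every example on this page calls a project-local showImages helper that is not shown, and its signature differs between projects (Example #1 passes titles first, Examples #2 and #3 pass images first, Examples #4 and #8 use keyword arguments). A minimal matplotlib sketch matching Example #1's showImages(titles, images) call, purely an assumption rather than any project's actual helper:

import cv2
import numpy as np
import matplotlib.pyplot as plt

def showImages(titles, images):
    # Sketch only: show BGR/grayscale images side by side with titles.
    fig, axes = plt.subplots(1, len(images))
    for ax, title, image in zip(np.atleast_1d(axes), titles, images):
        if image.ndim == 3:
            # OpenCV images are BGR; matplotlib expects RGB
            ax.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        else:
            ax.imshow(image, cmap='gray')
        ax.set_title(title)
        ax.axis('off')
    plt.show()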
Example #2
 def showAllImages(self):
     currFrame = self.vs.getFrames("BGR")[0]
     # bgModel = self.hd.getBackgroundModel()
     # skinBgModel = self.hd.getSkinBackgroundModel()
     images, titles = self.hd.getState()
     showImages(images, titles)
     showImages((currFrame, ), ('Camera', ))
Example #3
 def d(cls, images, titles=None, preFunc=()):
     '''
         Show the images if config 'debug' is True; otherwise do nothing.
     '''
     if not debug:
         return
     # Normalize both arguments to sequences so a single image/title works too.
     images = list(images) if isinstance(images, (tuple, list)) else [images]
     if titles is not None and not isinstance(titles, (tuple, list)):
         titles = (titles,)
     # Apply each preprocessing function to every image before display.
     for i, _ in enumerate(images):
         for f in preFunc:
             images[i] = f(images[i])
     showImages(images, titles)
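This is the ILog.d debug helper that Example #10 below calls. A hypothetical usage pattern, inferred from the signature above (the image names are illustrative only):

# Hypothetical calls; `debug` comes from the project's config:
ILog.d(frame, 'raw frame')                          # one image, one title
ILog.d((gray, thresh), ('gray', 'thresh'))          # tuples of images/titles
ILog.d(mask, 'mask', preFunc=(cv2.equalizeHist,))   # preprocess before showing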
Example #4
    def testMerge():
        input_data, label_data, (nx, ny) = testSrcnnDataLoader(shuffle=False)
        stride = 16

        # Merge the predicted image patches back into a single image.
        # mergeImages handles the 1 -> 255 scaling, uint8 conversion, etc.
        # torch.Size([N, 3, 32, 32])
        merged_result = mergeImages(input_data, stride, (nx[0], ny[0]))
        print("merged result.shape:", merged_result.shape)

        # numpy squeeze: drop size-1 dimensions from the shape, e.g. (1, 1, 10) -> (10,)
        squeeze_result = merged_result.squeeze()
        squeeze_result = squeeze_result.transpose(1, 2, 0)

        showImages(transpose=squeeze_result)
        print("squeezed result.shape:", squeeze_result.shape)

        origin = mergeImages(label_data, stride, (nx[0], ny[0]))
        # origin = origin.squeeze()
        origin = origin.transpose(1, 2, 0)
        # origin = cv2.cvtColor(origin, cv2.COLOR_RGB2BGR)

        multichannel = squeeze_result.ndim == 3
        print("multichannel:", multichannel)
        if not multichannel:
            origin = cv2.cvtColor(origin, cv2.COLOR_BGR2GRAY)

        print("origin.shape:", origin.shape)

        sim = metrics.structural_similarity(origin,
                                            squeeze_result,
                                            data_range=origin.max() - origin.min(),
                                            multichannel=multichannel)
        print("metrics error:", 1 - sim)

        # Compute the loss.
        loss = 1 - PyTorchLoss.ssim4(label_data, input_data, is_normalized=True)
        print("Loss:", loss.item())

        showImages(origin=origin, squeeze_result=squeeze_result)
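To make the structural_similarity call above concrete, here is a self-contained comparison on synthetic arrays. Note that newer scikit-image releases replace the multichannel flag used above with channel_axis; the sketch below uses the newer spelling:

import numpy as np
from skimage import metrics

rng = np.random.default_rng(0)
a = rng.random((64, 64, 3)).astype(np.float32)
b = np.clip(a + 0.05 * rng.standard_normal((64, 64, 3)), 0, 1).astype(np.float32)

sim = metrics.structural_similarity(a, b,
                                    data_range=float(a.max() - a.min()),
                                    channel_axis=-1)
print("SSIM:", sim, "error:", 1 - sim)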
Example #5
def main():
    config = parse_args()
    using_mask = (config.model in ["PConvSR", "PConvResNet"]
                  or "Partial" in config.model)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    div2k = datasets.Div2K(config.lr_size, config.up_factor,
                           config.num_patches, config.cache_size)

    d = config.num_patches
    training_set, validation_set, test_set = torch.utils.data.random_split(
        div2k, [640 * d, 80 * d, 80 * d])
    hr_size = config.lr_size * config.up_factor

    if config.show_example:
        lr, hr = div2k[6]
        print(lr.shape)
        print(hr.shape)
        utils.showImages([lr, hr])

    # model
    if config.model == "PixelCNN":
        model = models.PixelCNN(3, 3).to(device)
    elif config.model == "PartialSR" or config.model == "ConvSR":
        if "BN" in config.ckpt_name:
            model = getattr(models, config.model)().to(device)
        else:
            model = getattr(models, config.model)(batch_norm=False).to(device)
    else:
        try:
            model = getattr(models, config.model)().to(device)
        except AttributeError:
            print(f"Model {config.model} not found. Quitting...")
            sys.exit(1)

    if config.show_summary:
        input_size = (3, hr_size, hr_size)
        if using_mask:
            input_size = [input_size, input_size]
        torchsummary.summary(model, input_size=input_size)

    # for pretrained srresnet
    # weights = torch.load(
    #     f"./models/outputs/{config.ckpt_name}/" + config.ckpt_name + ".pt"
    # )
    # model = weights["model"]

    # try:
    assert config.ckpt_name
    checkpoint = torch.load(f"./models/outputs/{config.ckpt_name}/" +
                            config.ckpt_name + ".pt")
    model.load_state_dict(checkpoint["model_state_dict"])
    # start_epoch = checkpoint["epoch"]
    # training_loss = checkpoint["training_loss"]
    # validation_loss = checkpoint["validation_loss"]
    print("Loading from previous checkpoint")
    # except:
    #     print("Must load a model from a checkpoint or error loading model")
    #     sys.exit(1)

    # for i, batch in enumerate(dataloader):

    #     lr, hr = batch
    #     lr, hr = lr.to(device), hr.to(device)

    #     if using_mask:
    #         with torch.no_grad():
    #             upscaled, mask_in = image_mask(lr, config.up_factor)
    #         pred, mask_out = model(upscaled.to(device), mask_in.to(device))
    #     else:
    #         with torch.no_grad():
    #             upscaled = transforms.functional.resize(lr, (hr_size, hr_size))
    #         pred = model(upscaled)

    #     if config.loss == "VGG19":
    #         loss, _, _ = loss_func(pred, hr)  # VGG style loss
    #     else:
    #         loss = loss_func(pred, hr)

    #     break

    if config.img_path:
        try:
            img = Image.open(config.img_path)
        except OSError:  # covers FileNotFoundError and unreadable images
            print(f"Image {config.img_path} not found")
            sys.exit(1)
        with torch.no_grad():
            img = transforms.ToTensor()(img)
            c, height, width = img.shape
            s = config.lr_size
            rows = []
            for h in range(height // s):
                row = []
                for w in range(width // s):
                    patch = transforms.functional.crop(img, h * s, w * s, s,
                                                       s).to(device)
                    batch = utils.example_batch(patch, config.batch_size)
                    if using_mask:
                        upscaled, mask_in = image_mask(batch, config.up_factor)
                        pred = model(upscaled.to(device), mask_in.to(device))
                    elif config.pre_upscale:
                        upscaled = torchvision.transforms.functional.resize(
                            batch, (hr_size, hr_size))  # batch is already a tensor
                        pred = model(upscaled)
                    else:
                        pred = model(batch)
                    out = pred.cpu()[0]  # 3, 64, 64
                    row.append(out)
                full_row = torch.cat(row, -1)  # 3, 64, 1984
                rows.append(full_row)
            img_out = torch.cat(rows, 1).permute(1, 2, 0)
            img_out = np.uint8(np.array(img_out * 255))
            img_out = Image.fromarray(img_out)
            img_out.save(f"./models/outputs/{config.ckpt_name}/out.png")

    if config.patches:
        for i in range(0, 48):
            with torch.no_grad():
                lr, hr = test_set[i]
                batch = utils.example_batch(
                    torch.tensor(lr).to(device), config.batch_size)
                if using_mask:
                    upscaled, mask_in = image_mask(batch, config.up_factor)
                    pred = model(upscaled.to(device), mask_in.to(device))
                elif config.pre_upscale:
                    upscaled = torchvision.transforms.functional.resize(
                        batch, (hr_size, hr_size))  # batch is already a tensor
                    pred = model(upscaled)
                else:
                    upscaled = torchvision.transforms.functional.resize(
                        batch, (hr_size, hr_size))
                    pred = model(batch)
                bicubic = torchvision.transforms.functional.resize(
                    torch.tensor(lr), (hr_size, hr_size))
                utils.saveImages(
                    [
                        np.asarray(lr),
                        np.array(upscaled[0].cpu()),
                        np.array(bicubic),
                        np.array(pred[0].cpu()),
                        hr,
                    ],
                    f"./models/outputs/{config.ckpt_name}/output_{i}.png",
                )
        print(f"Outputs saved to ./models/outputs/{config.ckpt_name}/")

    if config.unsupervised:
        with torch.no_grad():
            model(
                torch.ones(config.batch_size, 3, config.lr_size,
                           config.lr_size).to(device))
        num_imgs = 16
        imgs = model.sample(n_samples=num_imgs)
        for i in range(num_imgs):
            img = imgs[i]
            img = (img * 255).permute(1, 2, 0).cpu()
            img = np.uint8(np.array(img))
            img_out = Image.fromarray(img)
            img_out.save(f"./models/outputs/{config.ckpt_name}/sample{i}.png")
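Two helpers used above, utils.example_batch and image_mask, are not defined in this example. The sketches below are plausible reconstructions inferred from the call sites only, not the project's actual code:

import torch

def example_batch(x: torch.Tensor, batch_size: int) -> torch.Tensor:
    # Tile one CHW patch into an NCHW batch; only pred[0] is read back,
    # so repeating the same patch just satisfies a fixed batch size.
    return x.unsqueeze(0).expand(batch_size, *x.shape).contiguous()

def image_mask(lr: torch.Tensor, factor: int):
    # Zero-fill upscale plus validity mask, the usual input pair for
    # partial-convolution super-resolution (an assumption for PConvSR).
    n, c, h, w = lr.shape
    upscaled = torch.zeros(n, c, h * factor, w * factor, device=lr.device)
    mask = torch.zeros_like(upscaled)
    upscaled[..., ::factor, ::factor] = lr
    mask[..., ::factor, ::factor] = 1.0
    return upscaled, mask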
Example #6
debug = False
test_pipeline = False
test_pipeline_path = test_images_path
#test_pipeline_path = video_frame_path

image_paths = glob.glob(calibration_path + '*.jpg')
camera = Camera()
camera.calibrate(image_paths)

if test_pipeline:
    images = utils.loadImages(test_pipeline_path, cv2.COLOR_BGR2RGB)
    hud = camera.pipeline(images[0], debug=debug, dump_partials=False)
    utils.showImage(hud)
else:
    camera.processVideo('project_video.mp4', debug=debug, live=False)
'''
test_images = list(map(lambda image_path: cv2.imread(image_path), test_images_paths))
test_images = list(map(lambda image: cv2.cvtColor(image, cv2.COLOR_BGR2RGB), test_images))
test_images_grid = list(map(lambda image: utils.drawGrid(image), test_images))

test_images_undist = list(map(lambda img: camera.undistort(img), test_images_grid))

interleaved = []
for i in range(len(test_images)):
    interleaved.append(test_images_grid[i])
    interleaved.append(test_images_undist[i])

utils.showImages(interleaved)
'''
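Camera.calibrate is opaque here. A minimal sketch of the standard OpenCV chessboard calibration such a method typically wraps (the 9x6 board size is an assumption):

import cv2
import numpy as np

def calibrate(image_paths, pattern=(9, 6)):
    # One set of 3D board corners, reused for every view.
    objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)
    objpoints, imgpoints = [], []
    image_size = None
    for path in image_paths:
        gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
        image_size = gray.shape[::-1]
        found, corners = cv2.findChessboardCorners(gray, pattern, None)
        if found:
            objpoints.append(objp)
            imgpoints.append(corners)
    # Returns rms error, camera matrix, distortion coeffs, rvecs, tvecs.
    return cv2.calibrateCamera(objpoints, imgpoints, image_size, None, None)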
Example #7
            print(i, loss)

# In[5]:

# basic testing loop
print("Testing")
for i, batch in enumerate(test_dataloader):
    with torch.no_grad():
        lr, hr = batch
        lr, hr = lr.to(device), hr.to(device)
        # already inside no_grad; no need to nest another context
        upscaled = torchvision.transforms.functional.resize(lr, (64, 64))

        pred = model(upscaled)
        loss = loss_func(pred, hr)
        print(i, loss)

# In[16]:

for i in range(8):
    with torch.no_grad():
        lr, hr = test_set[i]
        upscaled = torchvision.transforms.functional.resize(
            torch.Tensor(lr), (64, 64)).to(device)
        pred = np.asarray(
            model(utils.example_batch(upscaled, batch_size))[0].cpu())
        upscaled = np.asarray(upscaled.cpu())
        utils.showImages([lr, upscaled, pred, hr])

# In[ ]:
Example #8
    # # files = prepare_data("Train")
    # files = prepare_data("Test")
    # files = prepareData()

    # img = imRead(files[0], False)
    # showImage(img)
    # img_mod3 = modCrop(img, scale=3)
    # print("img_mod3.shape:", img_mod3.shape)
    # img_mod5 = modCrop(img, scale=5)
    # print("img_mod5.shape:", img_mod5.shape)
    # showImages(img=img, img_mod3=img_mod3, img_mod5=img_mod5)

    # input_data, label_data = differentResolution(files[0], scale=3)

    # input_, label_ = preprocess(files[0])
    # temp_input = np.uint8(input_data.copy() * 255.)
    # temp_label = np.uint8(label_data.copy() * 255.)
    # showImages(img=img, temp_input=temp_input, temp_label=temp_label)

    # subData(data, scale, image_size, stride, is_gray=False)
    # sub_input_sequence, sub_label_sequence, (nx, ny) = subData(files, scale, image_size, stride, is_gray=False)
    # input_data, label_data, _ = setupInput(idx=-1, image_size=image_size, scale=scale, stride=stride)
    input_data, label_data, (nx, ny) = readData(idx=1,
                                                image_size=image_size,
                                                scale=scale,
                                                stride=stride)
    result = mergeImages(input_data, stride, (nx, ny))
    origin = mergeImages(label_data, stride, (nx, ny))

    showImages(origin=origin, result=result)
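mergeImages reassembles the sub-image patches onto an (nx, ny) grid; its implementation is not shown. A plausible reconstruction inferred from the call sites (including the 1 -> 255 / uint8 handling noted in Example #4), offered only as an assumption:

import numpy as np

def merge_images_sketch(patches, stride, grid):
    # patches: (N, C, h, w) array in [0, 1]; grid: (nx, ny) patch counts.
    patches = np.asarray(patches)
    nx, ny = grid
    n, c, h, w = patches.shape
    out = np.zeros((c, (nx - 1) * stride + h, (ny - 1) * stride + w),
                   dtype=np.float32)
    for idx in range(n):
        i, j = idx // ny, idx % ny
        # Overlapping regions are simply overwritten for brevity.
        out[:, i * stride:i * stride + h, j * stride:j * stride + w] = patches[idx]
    return np.uint8(out * 255.0)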
Example #9
def main():
    config = parse_args()
    using_mask = (config.model in [
        "PConvSR",
        "PConvResNet",
        "PartialConv",
        "PartialAttention",
        "PartialNoAttention",
        "PartialSR",
    ] or "Partial" in config.model)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    div2k = datasets.Div2K(
        size=config.lr_size,
        factor=config.up_factor,
        mult=config.num_patches,
        cache_size=config.cache_size,
    )
    d = config.num_patches
    training_set, validation_set, test_set = torch.utils.data.random_split(
        div2k, [640 * d, 80 * d, 80 * d])

    hr_size = config.lr_size * config.up_factor

    if config.show_example:
        lr, hr = div2k[6]
        print(lr.shape)
        print(hr.shape)
        utils.showImages([lr, hr])

    training_dataloader = torch.utils.data.DataLoader(
        training_set, batch_size=config.batch_size)
    validation_dataloader = torch.utils.data.DataLoader(
        validation_set, batch_size=config.batch_size)
    test_dataloader = torch.utils.data.DataLoader(test_set,
                                                  batch_size=config.batch_size)

    # model
    if config.model == "PixelCNN":
        model = models.PixelCNN(3, 3).to(device)
    elif config.model == "PartialSR" or config.model == "ConvSR":
        if "BN" in config.ckpt_name:
            model = getattr(models, config.model)().to(device)
        else:
            model = getattr(models, config.model)(batch_norm=False).to(device)
    else:
        try:
            model = getattr(models, config.model)().to(device)
        except AttributeError:
            print(f"Model {config.model} not found. Quitting...")
            sys.exit(1)

    # optimizer
    lr = config.learning_rate
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.95)

    # loss
    if config.loss == "L1":
        loss_func = torch.nn.L1Loss()
    elif config.loss == "L2" or config.loss == "MSE":
        loss_func = torch.nn.MSELoss()
    elif config.loss == "VGG16Partial":
        loss_func = losses.VGG16PartialLoss().to(device)
        if "VGG16Partial" in config.metrics:
            vgg = loss_func
    elif config.loss == "VGG19":
        loss_func = losses.VGG19Loss().to(device)
    elif config.loss == "DISTS":
        loss_func = losses.DISTS().to(device)
    else:
        if "VGG16Partial" in config.metrics:
            vgg = losses.VGG16PartialLoss().to(device)
        try:
            loss_func = getattr(losses, config.loss)
        except AttributeError:
            print(f"loss {config.loss} not found.")
            sys.exit(1)

    if config.show_summary:
        input_size = (3, hr_size, hr_size)
        if using_mask:
            input_size = [input_size, input_size]
        torchsummary.summary(model, input_size=input_size)

    writer = SummaryWriter(f"./models/outputs/{config.ckpt_name}")

    try:  # hacky workaround: skip loading when the checkpoint doesn't exist
        assert config.ckpt_name
        checkpoint = torch.load(f"./models/outputs/{config.ckpt_name}/" +
                                config.ckpt_name + ".pt")
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        start_epoch = checkpoint["epoch"]
        training_loss = checkpoint["training_loss"]
        validation_loss = checkpoint["validation_loss"]
        print("Loading from previous checkpoint")
    except (AssertionError, FileNotFoundError):
        start_epoch = 0
        training_loss = AverageMeter("Training")
        validation_loss = AverageMeter("Validation")
        print("Not loading from checkpoint")

    def loop(dataloader, epoch, loss_meter, back=True):
        for i, batch in enumerate(dataloader):
            step = epoch * len(dataloader) + i

            if back:
                optimizer.zero_grad()

            lr, hr = batch
            lr, hr = lr.to(device), hr.to(device)

            if using_mask:
                with torch.no_grad():
                    if config.over_upscale:
                        factor = 4
                    else:
                        factor = 1
                    upscaled, mask_in = image_mask(lr,
                                                   config.up_factor * factor)
                pred = model(upscaled.to(device), mask_in.to(device))
            elif config.unsupervised:
                pred = model(lr)
            elif config.pre_upscale:
                with torch.no_grad():
                    upscaled = transforms.functional.resize(
                        lr, (hr_size, hr_size))
                pred = model(upscaled)
            else:
                pred = model(lr)

            if config.loss == "VGG16Partial":
                loss, _, _ = loss_func(pred, hr)  # VGG style loss
            elif config.loss == "DISTS":
                loss = loss_func(pred,
                                 hr,
                                 require_grad=True,
                                 batch_average=True)
            else:
                loss = loss_func(pred, hr)

            if back:
                loss.backward()
                optimizer.step()

            loss_meter.update(loss.item(), writer, step, name=config.loss)

            if config.metrics:
                with torch.no_grad():
                    for metric in config.metrics:
                        tag = loss_meter.name + "/" + metric
                        if metric == "PSNR":
                            writer.add_scalar(tag, losses.psnr(pred, hr), step)
                        elif metric == "SSIM":
                            writer.add_scalar(tag, losses.ssim(pred, hr), step)
                        elif metric == "consistency":
                            downscaled_pred = transforms.functional.resize(
                                pred, (config.lr_size, config.lr_size))
                            writer.add_scalar(
                                tag,
                                torch.nn.functional.mse_loss(
                                    downscaled_pred, lr).item(),
                                step,
                            )
                        elif metric == "lr":
                            writer.add_scalar(tag,
                                              lr_scheduler.get_last_lr()[0],
                                              step)
                        elif metric == "sample":
                            model.eval()
                            if step % config.sample_step == 0:
                                writer.add_image("sample/hr",
                                                 hr[0],
                                                 global_step=step)
                                writer.add_image("sample/lr",
                                                 lr[0],
                                                 global_step=step)
                                writer.add_image("sample/bicubic",
                                                 upscaled[0],
                                                 global_step=step)
                                writer.add_image("sample/pred",
                                                 pred[0],
                                                 global_step=step)
                            model.train()
                        elif metric == "VGG16Partial":
                            val, _, _ = vgg(pred, hr)
                            writer.add_scalar(tag, val.item(), step)

    print(f"Training starting at epoch {start_epoch}")
    for epoch in range(start_epoch, start_epoch + config.epochs):
        model.train()
        loop(training_dataloader, epoch, training_loss)
        lr_scheduler.step()
        print(f"Epoch {epoch}: {training_loss}")
        model.eval()
        with torch.no_grad():
            loop(validation_dataloader, epoch, validation_loss, back=False)
            print(f"Epoch {epoch}: {validation_loss}")

        if config.ckpt_every != -1 and epoch % config.ckpt_every == 0:
            torch.save(
                {
                    "epoch": epoch + 1,
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "loss": validation_loss.val,
                },
                f"./models/outputs/{config.ckpt_name}/" + config.ckpt_name +
                f"_{epoch}.pt",
            )
            torch.save(
                {
                    "epoch": epoch + 1,
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "loss": validation_loss.val,
                },
                f"./models/outputs/{config.ckpt_name}/" + config.ckpt_name +
                ".pt",
            )

    testing_loss = AverageMeter("Testing")
    loop(test_dataloader, 0, testing_loss)
    print(f"Test: {testing_loss}")

    if config.ckpt_name:
        torch.save(
            {
                "epoch": epoch + 1,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "training_loss": training_loss,
                "validation_loss": validation_loss,
            },
            f"./models/outputs/{config.ckpt_name}/" + config.ckpt_name + ".pt",
        )
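AverageMeter is referenced throughout this example but never defined. A common shape for such a meter, consistent with the .update(value, writer, step, name=...), .val, .name, and str() usage above (an assumption, not the project's actual class):

class AverageMeter:
    # Tracks the last and running-average value of a named loss/metric.
    def __init__(self, name):
        self.name = name
        self.val = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, writer=None, step=None, name=""):
        self.val = val
        self.sum += val
        self.count += 1
        if writer is not None:
            writer.add_scalar(f"{self.name}/{name}", val, step)

    def __str__(self):
        avg = self.sum / max(self.count, 1)
        return f"{self.name} avg: {avg:.6f} (last: {self.val:.6f})"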
Example #10
        return final_img

    #omar trial
    def __combine2__(self, MD, SCD, sB, Hand):
        skinDifference = np.maximum(
            SCD.astype(float) + Hand -
            cv2.dilate(sB, np.ones((7, 7), dtype='float'), iterations=3), 0)
        ILog.d(skinDifference, 'skindiff')
        totalDifference = np.minimum((skinDifference + 0.9 * MD) * 255,
                                     255).astype('uint8')
        ILog.d(totalDifference, 'before otsu')
        _, final_img = cv2.threshold(totalDifference, 0, 255,
                                     cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        final_img = cv2.erode(final_img, np.ones((2, 2)), iterations=1)
        final_img = cv2.medianBlur(final_img, 5)
        return final_img


if __name__ == "__main__":
    from VideoSequence import VideoSequence as Vs
    from utils import showImages
    vs = Vs().start()
    while True:
        frames = vs.process()
        handDetector = HandDetector(frames("BGR")[-2])
        images, titles = handDetector.detect(frames('gray'), frames('BGR')[0])
        showImages(images, titles)
        key = cv2.waitKey(10)
        if key & 0xFF == 27:  # Esc quits
            break
Example #11
    def threasholdLaneLines(self, image, kernel_size=(3, 2), debug=False):
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        hls = cv2.split(cv2.cvtColor(image, cv2.COLOR_RGB2HLS))

        hue = hls[0]
        lig = hls[1]
        sat = hls[2]

        gray = cv2.equalizeHist(gray, gray)
        #hue = cv2.equalizeHist(hue, hue) # hue should not be equalized (or it would detect a different color)
        sat = cv2.equalizeHist(sat, sat)
        lig = cv2.equalizeHist(lig, lig)

        yellow = np.zeros_like(hue)
        bright = np.zeros_like(gray)
        saturation = np.zeros_like(sat)
        light = np.zeros_like(lig)

        yellow[(hue > 20) & (hue < 30)] = 1
        bright[(gray > 250)] = 1
        saturation[sat > 240] = 1
        light[lig > 127] = 1

        color = (yellow & light) | (bright & light) | (saturation & light)

        if debug:
            utils.showImages((yellow, bright, saturation, light, color),
                             cmap='gray')

        thresh = (100, 200)
        hue_sx = utils.gradientAbolutes(hue, orient='x', thresh=thresh)
        gray_sx = utils.gradientAbolutes(gray, orient='x', thresh=thresh)
        sat_sx = utils.gradientAbolutes(sat, orient='x', thresh=thresh)

        hue_sy = utils.gradientAbolutes(hue, orient='y', thresh=thresh)
        gray_sy = utils.gradientAbolutes(gray, orient='y', thresh=thresh)
        sat_sy = utils.gradientAbolutes(sat, orient='y', thresh=thresh)

        if debug:
            utils.showImages(
                (hue_sx, hue_sy, gray_sx, gray_sy, sat_sx, sat_sy),
                cmap='gray')

        gradient = (hue_sx | gray_sx | sat_sx | hue_sy | gray_sy | sat_sy)

        # density based noise reduction
        # opening: erosion and dilation, see : http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
        kernel = np.ones(kernel_size, np.uint8)
        #kernel = [[0,1,1,1,0],[0,1,1,1,0],[0,1,1,1,0]]
        #kernel = np.array(kernel, np.uint8)
        #gradient_clean = cv2.morphologyEx(gradient, cv2.MORPH_OPEN, kernel, iterations=1)
        # it turned out that noise helps the interpolation; I'll revisit opening later

        if debug:
            utils.showImages((color, gradient))
        mask = color  #| gradient

        if debug:
            utils.showImages((mask, color, gradient))

        return mask
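A standalone illustration of the hue band used above: OpenCV stores 8-bit hue as 0..179 (degrees halved), so the 20..30 window corresponds to yellows around 40..60 degrees. A small sketch on a synthetic patch, not project code:

import cv2
import numpy as np

bgr = np.full((8, 8, 3), (0, 215, 255), np.uint8)  # a yellow patch (BGR)
hue = cv2.split(cv2.cvtColor(bgr, cv2.COLOR_BGR2HLS))[0]
yellow = ((hue > 20) & (hue < 30)).astype(np.uint8)
print(yellow.mean())  # ~1.0: every pixel falls in the yellow hue band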