def visualize_batch(x0, x, step_i):
    vis0 = np.hstack(common.to_rgb(x0).numpy())
    vis1 = np.hstack(common.to_rgb(x).numpy())
    vis = np.vstack([vis0, vis1])
    common.imwrite('train_log/batches_%04d.jpg' % step_i, vis)
    print('batch (before/after):')
    common.imshow(vis)
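common is a local helper module that is not shown here. This snippet (and Example no. 14 below) follows Google's Growing Neural Cellular Automata notebook, where to_rgb composites the alpha-premultiplied RGBA state channels over a white background; a sketch consistent with that usage, not necessarily the actual module:

import tensorflow as tf

def to_rgba(x):
    return x[..., :4]  # the first four state channels are RGBA

def to_rgb(x):
    # RGB is premultiplied by alpha; composite over a white background.
    rgb, a = x[..., :3], tf.clip_by_value(x[..., 3:4], 0.0, 1.0)
    return 1.0 - a + rgb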
Example no. 2
def main(
        draw_boxes=True,
        draw_ourboxes=True,
        equalize_color_dist=False,
        show=False,
        dataset='val',  # 'val' or 'test'
        skip=0,
):
    predictions = {}
    ground_truth = {}
    image_sets = get_image_sets(sets=100, filter=dataset)

    for i in range(skip):
        next(image_sets)

    for images, paths, boxes in tqdm(image_sets):
        if equalize_color_dist:
            images = equalize_color_distribution(images)
            print("Images color distribution equalized")

        # check that equalization worked
        # print(images[5,6])
        # print(images[5,6].min())
        # print(images[5,6].mean())
        # print(images[5,6].max())
        # print(np.percentile(images[0,0], 25, axis=(0,1))) # should be three 0s
        # print(np.percentile(images[0,0], 75, axis=(0,1))) # should be three 1s

        p = Paths.output / paths[3, 4]
        p.parent.mkdir(parents=True, exist_ok=True)
        heatmaps = get_temporal_diff_heatmaps(images, boxes, blur_color_diff=blur)
        detection_map, ourboxes = get_detection_map(heatmaps, images, boxes=boxes, thresh_quantile=0.98)
        im = images[3, 4].copy()
        im[np.where(detection_map > 0)] = [0, 0, 255]
        if draw_boxes:
            draw_bounding_boxes(im, boxes)
        if draw_ourboxes:
            draw_bounding_boxes(im, ourboxes, box_color=(255, 255, 0))
        cv2.imwrite(str(p), im)
        print(f'Image exported: {p}')
        if show:
            imshow(im)

        predictions[p.parent.name] = ourboxes.tolist()
        ground_truth[p.parent.name] = boxes.tolist()

    p = Paths.output / f'{dataset}.json'
    with open(p, 'w') as outfile:
        json.dump(predictions, outfile)
    print(f'JSON created: {p}')

    if len(ground_truth):
        ap = compute_AP(predictions, ground_truth)
        print(f"Average precision={ap:.5f} on {dataset} set.")
Example no. 3
def vgg_eval_model(dataset_root_dir, restore_model: str):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("DEVICE WILL BE USED: ", device)

    net = VggNet(torchvision.models.vgg16_bn(pretrained=True))
    net = net.to(device)

    classes = ('not hotdog', 'hotdog')

    if restore_model is not None and len(restore_model) > 0:
        # original saved file with DataParallel
        state_dict = torch.load(restore_model, map_location={'cuda:0': 'cpu'})
        net.load_state_dict(state_dict)
        print("Model {} restored".format(restore_model))
    else:
        print("ERROR: no restore model file found!")
        return

    #from torch.autograd import Variable
    #dummy_input = Variable(torch.randn(1, 3, 224, 224), requires_grad=True)
    # input_names = ["actual_input_1"] + ["learned_%d" % i for i in range(12)]
    # output_names = ["output1"]
    #torch.onnx.export(net, dummy_input, "vgg_hot_dog.onnx", export_params=True, verbose=True)
    #print("SUCCESS")

    hot_dog_dataset_test = HotDogsDatasetEval(
        root_dir=dataset_root_dir,
        transform=transforms.Compose([
            Rescale((224, 224)),  #normalize,
            ToTensor(),
        ]))
    test_dataloader = DataLoader(hot_dog_dataset_test,
                                 batch_size=4,
                                 shuffle=True,
                                 num_workers=4)

    for dl, split in zip([test_dataloader], ['test']):
        total = 0
        with torch.no_grad():
            for data in dl:
                images, names = data['image'].float(), data['name']
                images = images.to(device)

                outputs = net(images)
                total += len(names)
                for idx, prediction in enumerate(outputs.data):
                    res = torch.nn.functional.softmax(prediction, dim=0)
                    _, rid = torch.max(res, 0)
                    print('{} is {}'.format(names[idx], classes[rid]))
                    imshow(torchvision.utils.make_grid(images[idx]))
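The per-image softmax loop above can be batched; softmax is monotonic, so taking argmax over the raw logits selects the same class:

preds = outputs.argmax(dim=1)
for name, pred in zip(names, preds):
    print('{} is {}'.format(name, classes[int(pred)]))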
Example no. 4
def get_detection_map(
        heatmaps: np.ndarray,
        images: np.ndarray,
        kernel_size=19,
        thresh_quantile=0.95,
        thresh_factor=2,
        min_detections=7,
        boxes=(),
        debug=False,
) -> Tuple[np.ndarray, np.ndarray]:
    kernel = np.ones((kernel_size, kernel_size)) / kernel_size ** 2

    detection_maps = []
    for heatmap in heatmaps.reshape(-1, *heatmaps.shape[2:]):
        conv_heatmap = convolve2d(heatmap, kernel, mode='same')
        thresh = thresh_factor * np.nanquantile(conv_heatmap, thresh_quantile)
        detection = conv_heatmap > thresh
        detection_maps.append(detection)

    detection_maps = np.reshape(detection_maps, heatmaps.shape)

    # detection_map = detection_maps.sum(axis=(0, 1)) >= min_detections
    detection_map = detection_maps.sum(axis=(0, 1))

    if debug:
        detection_map_bb = detection_map.copy()
        draw_bounding_boxes(np.moveaxis([detection_map_bb] * 3, 0, 2), boxes, box_color=(0, 170, 255))
        imshow(detection_map_bb * 255 / 20)

    cluster_positions, cluster_intensities = find_clusters(detection_map)

    for i in reversed(range(len(cluster_intensities))):
        if cluster_intensities[i] < min_detections:
            cluster_positions.pop(i)
            cluster_intensities.pop(i)

    label = create_label_clusters(detection_map, cluster_positions, cluster_intensities, min_detections)

    ourboxes = find_boxes(label)

    return label, ourboxes
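find_clusters, create_label_clusters, and find_boxes are project helpers that are not shown. As a hypothetical illustration of the last step, a find_boxes could extract one box per positive cluster id from the label image (the (x_min, y_min, x_max, y_max) convention here is an assumption):

import numpy as np

def find_boxes_sketch(label: np.ndarray) -> np.ndarray:
    # One box per positive cluster id; 0 is assumed to be background.
    boxes = []
    for cluster_id in np.unique(label):
        if cluster_id <= 0:
            continue
        ys, xs = np.nonzero(label == cluster_id)
        boxes.append([xs.min(), ys.min(), xs.max(), ys.max()])
    return np.array(boxes)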
Example no. 5
def run(args):

    face1_file = args.face1
    face2_file = args.face2

    #face1_file, face2_file = face2_file, face1_file

    print("loading face 1: {}".format(face1_file))
    face1 = cv2.imread(face1_file)
    if face1 is None:
        print("can't read image ", face1_file)
        return

    print("loading face 2: {}".format(face2_file))
    face2 = cv2.imread(face2_file)
    if face2 is None:
        print("can't read image ", face2_file)
        return

    print("loading models...")
    models = load_models(args)

    result1, result2 = swap_faces(models, face1, face2)

    if args.output1 is not None:
        print("writing output result for image1...")
        cv2.imwrite(args.output1, result1)

    if args.output2 is not None:
        print("writing output result for image2...")
        cv2.imwrite(args.output2, result2)

    if args.show:
        imshow("result 1", result1)
        imshow("result 2", result2)
        cv2.waitKey()
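run expects an args object exposing face1, face2, output1, output2, and show; load_models(args) may read further attributes not shown here. A minimal CLI wiring consistent with that usage (the flag names are assumptions):

import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Swap faces between two images.')
    parser.add_argument('face1', help='path to the first face image')
    parser.add_argument('face2', help='path to the second face image')
    parser.add_argument('--output1', default=None, help='where to write result 1')
    parser.add_argument('--output2', default=None, help='where to write result 2')
    parser.add_argument('--show', action='store_true', help='display the results')
    return parser.parse_args()

if __name__ == '__main__':
    run(parse_args())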
Example no. 6
def swap_faces(models,
               image1,
               image2,
               params=build_params(),
               debug=False):

    if params['precomputed1'] is not None:
        pc = params['precomputed1']
        face1 = pc['face']
        landmarks1 = pc['landmarks']
        lp1 = pc['landmark_points']
    else:
        face1, landmarks1 = get_landmarks(models, image1)
        lp1 = get_landmarks_points(landmarks1)

    if params['precomputed2'] is not None:
        pc = params['precomputed2']
        face2 = pc['face']
        landmarks2 = pc['landmarks']
        lp2 = pc['landmark_points']
    else:
        face2, landmarks2 = get_landmarks(models, image2)
        lp2 = get_landmarks_points(landmarks2)

    if params['triplets'] is None:
        _, triplets = delunay(lp1)
    else:
        triplets = params['triplets']

    if params['smooth1']:
        if 'hist1' not in params:
            params['hist1'] = []
        params['hist1'].append(lp1)
        if len(params['hist1']) > 2:
            params['hist1'].pop(0)
        h1 = np.array(params['hist1'])
        h1 = np.round(np.median(h1, axis=0)).astype(np.int32)
        lp1 = list(h1)

    #warped1 = image1.copy()
    #warped2 = image2.copy()
    warped1 = np.zeros_like(image1)
    warped2 = np.zeros_like(image2)

    two_ways = params['two_ways']
    for triplet in triplets:

        warp_triangle(
            image1,
            image2,
            warped1,
            warped2,
            triplet,
            lp1,
            lp2,
            two_ways=two_ways
        )

    out1 = blend(image1, warped1, lp1, clone_mode=params['clone'])
    if two_ways:
        out2 = blend(image2, warped2, lp2, clone_mode=params['clone'])
    else:
        out2 = None

    if debug:
        imshow("landmarks 1", draw_landmarks(image1, landmarks1, face1))
        imshow("landmarks 2", draw_landmarks(image2, landmarks2, face2))

        imshow("triangles 1", draw_triangles(image1, triplets, lp1))
        imshow("triangles 2", draw_triangles(image2, triplets, lp2))

        imshow("result 1", warped1)
        imshow("result 2", warped2)

        imshow("out 1", out1)
        imshow("out 2", out2)

        cv2.waitKey()

    return out1, out2
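build_params() is not shown. Note that params defaults to a single dict created when the function is defined, so the 'hist1' landmark history persists across calls; that is what the smoothing path relies on. Judging from the keys swap_faces reads, a compatible default set could look like this (the concrete values are assumptions, not the project's actual defaults):

import cv2

def build_params():
    return {
        'precomputed1': None,   # optional cached {face, landmarks, landmark_points}
        'precomputed2': None,
        'triplets': None,       # reuse a fixed Delaunay triangulation if given
        'smooth1': False,       # median-filter the landmark history for image 1
        'two_ways': True,       # warp in both directions
        'clone': cv2.NORMAL_CLONE,  # mode passed through to blend()
    }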
Example no. 7
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 09:37:17 2018

@author: Lovro
"""

import cv2
import numpy as np

import common

# read image
img = cv2.imread('figures\\box.jpg')
common.imshow(img, 800, 600)

# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find corners
corners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)
corners = corners.astype(int)  # integer pixel coordinates for cv2.circle

#mark corners on image
for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 3, 255, -1)

common.imshow(img)
Example no. 8
import cv2
import common

img1 = common.imread('dog.jpg', 0)
img2 = cv2.blur(img1, (5, 5))

common.imshow('blur', img1, img2)
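These snippets come from more than one project, so the common helper module differs between them (compare common.imshow(img, 800, 600) in Example no. 7 with the title-plus-images calls here). A minimal sketch of the title-plus-images variant, assuming the images share height and dtype; the real helper presumably also handles mismatched sizes:

import cv2
import numpy as np

def imshow(title, *images):
    # Show one or more images side by side under a single window title.
    vis = np.hstack(images) if len(images) > 1 else images[0]
    cv2.imshow(title, vis)
    cv2.waitKey()
    cv2.destroyAllWindows()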

Example no. 9
import cv2
import common

img1 = common.imread('dog.jpg', 0)  # grayscale: item()/itemset() below use 2-D indices
img2 = img1.copy()
v = img2.item(10, 20)
print(v)
for y in range(10, 20):
    for x in range(20, 40):
        img2.itemset((y, x), (x + y) * 4)
v = img2.item(10, 20)
print(v)
v = img2.item(19, 39)
print(v)

common.imshow('pixel', img1, img2)
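item() and itemset() were removed in NumPy 2.0; plain indexing on img2 from the snippet above does the same work, vectorized:

import numpy as np

v = int(img2[10, 20])              # read a single pixel
ys, xs = np.mgrid[10:20, 20:40]    # index grids for the edited block
img2[10:20, 20:40] = ((xs + ys) * 4).astype(img2.dtype)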
Example no. 11
import cv2
import numpy
import common

img1 = numpy.zeros((60,360,3),numpy.uint8)
h,w,_ = img1.shape

for y in range(0,h):
    for x in range(0,w):
        img1.itemset((y,x,0),x/2)
        img1.itemset((y,x,1),255)
        img1.itemset((y,x,2),255)
img2 = cv2.cvtColor(img1,cv2.COLOR_HSV2BGR)

common.imshow('colors',img1,img2)
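The same hue ramp without the per-pixel loop, using NumPy broadcasting:

import cv2
import numpy as np

h, w = 60, 360
hsv = np.full((h, w, 3), 255, np.uint8)
hsv[..., 0] = (np.arange(w) // 2).astype(np.uint8)  # hue 0..179, broadcast down the rows
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)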
Example no. 12
import cv2
import common
import numpy

img1 = common.imread('shapes.jpg', 0)
ret, img1cp = cv2.threshold(img1, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(img1cp, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img2 = numpy.zeros(img1.shape, numpy.uint8)
cv2.drawContours(img2, contours, -1, 255, 1)
print("number of contours = %d" % len(contours))
for i, c in enumerate(contours):
    f = cv2.FONT_HERSHEY_SIMPLEX
    a = cv2.contourArea(c)
    l = cv2.arcLength(c, True)
    m = "%d" % i
    print("%d: len=%d, arcLen=%d, area=%d" % (i, len(c), l, a))
    p = tuple(c[0][0])
    cv2.putText(img2, m, p, f, 0.4, 255, 1, cv2.LINE_AA)

common.imshow('contours', img1, img2)

# See below for more details:
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html#contour-features
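The linked tutorial also covers contour moments; for example, marking each contour's centroid on img2 from above:

for c in contours:
    m = cv2.moments(c)
    if m['m00'] != 0:  # skip degenerate contours with zero area
        cx, cy = int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])
        cv2.circle(img2, (cx, cy), 2, 255, -1)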
Example no. 13
import cv2
import common

img1 = common.imread('dog.jpg', 0)
ret, img2 = cv2.threshold(img1, 127, 255, cv2.THRESH_BINARY)

common.imshow('thresh', img1, img2)
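cv2.threshold also supports Otsu's method, where the threshold is chosen from the histogram and returned as the first value:

otsu_t, img3 = cv2.threshold(img1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print('Otsu threshold: %d' % otsu_t)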
Example no. 14

def visualize_batch(x0, x, step_i):
    vis0 = np.hstack(common.to_rgb(x0).numpy())
    vis1 = np.hstack(common.to_rgb(x).numpy())
    vis = np.vstack([vis0, vis1])
    common.imwrite('train_log/batches_%04d.jpg' % step_i, vis)
    print('batch (before/after):')
    common.imshow(vis)


def plot_loss(loss_log):
    pl.figure(figsize=(10, 4))
    pl.title('Loss history (log10)')
    pl.plot(np.log10(loss_log), '.', alpha=0.1)
    pl.show()


target_img = common.load_emoji(url)
common.imshow(common.zoom(common.to_rgb(target_img), 2), fmt='png')

p = common.TARGET_PADDING
pad_target = tf.pad(target_img, [(p, p), (p, p), (0, 0)])
h, w = pad_target.shape[:2]
seed = np.zeros([h, w, common.CHANNEL_N], np.float32)
seed[h // 2, w // 2, 3:] = 1.0


def loss_f(x):
    return tf.reduce_mean(tf.square(common.to_rgba(x) - pad_target),
                          [-2, -3, -1])


ca = common.CAModel()
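This setup matches the Growing Neural Cellular Automata reference notebook; there, the training step that ties ca, seed, and loss_f together looks roughly like this (a sketch of the reference code, not part of this listing):

trainer = tf.keras.optimizers.Adam(learning_rate=2e-3)

@tf.function
def train_step(x):
    iter_n = tf.random.uniform([], 64, 96, tf.int32)
    with tf.GradientTape() as g:
        for i in tf.range(iter_n):
            x = ca(x)
        loss = tf.reduce_mean(loss_f(x))
    grads = g.gradient(loss, ca.weights)
    grads = [g / (tf.norm(g) + 1e-8) for g in grads]  # per-variable gradient normalization
    trainer.apply_gradients(zip(grads, ca.weights))
    return x, loss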
Example no. 15
import cv2
import common

img1 = common.imread('dog.jpg',0)
img2 = img1[100:180, 90:150]  
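# note: this slice is a view into img1, so the rectangle drawn on img2 below also appears in img1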
cv2.rectangle(img2,(0,0),(59,79),255,3)

common.imshow('roi',img1,img2)
Example no. 16
import cv2
import common

img1 = common.imread('dog.jpg', 0)

# do something, e.g. a copy as a stand-in for real processing:
img2 = img1.copy()

common.imshow('noname', img1, img2)
Example no. 17
import cv2
import common

img1 = common.imread('dog.jpg', 1)
img2 = img1.copy()
img2[:, :, 2] = 0  # Set the red channel to zero.

# b,g,r = cv2.split(img1)
# img2 = cv2.merge((b,g,r)) # Same image
# img2 = b # Grayscale image using red pixels

common.imshow('split', img1, img2)

# See below for details:
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.html#splitting-and-merging-image-channels
Example no. 18
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 22 08:28:36 2018

@author: Lovro
"""

#pro packages
import cv2

#my packages
import common

#read image
img = cv2.imread('figures\\box.jpg')

#display image
common.imshow(img, 800, 600)

#rgb2gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#display
common.imshow(gray, 800, 600)

#corner detection
corner = cv2.cornerHarris(gray, 2, 3, 0.04)
#make corner more visible
corner = cv2.dilate(corner, None)
#add corners to a copy of the original image
img_corner = img.copy()
img_corner[corner > 0.01 * corner.max()] = [0, 255, 0]

#display result
common.imshow(img_corner, 800, 600)