Example #1
def tbnail_raw(inputs, outputs):
    r = 1 / tbnail_scale
    imgs = [
        cv.resize(rgbg2srgb(imread(x), maximum=65535), None, fx=r, fy=r)
        for x in inputs[:4]
    ]
    for img, o in zip(imgs, outputs[:4]):
        imwrite(o, img)
    imwrite(
        outputs[4],
        cv.resize(rgbg2linref(imread(inputs[4]), maximum=65535) * 255,
                  None,
                  fx=r,
                  fy=r).astype(np.uint8))
Example #2
def crop(input_path, output_path, other=1):
    img = imread(input_path)
    if img is None:
        print(input_path)
        return
    h, w, _ = img.shape
    bd = [int(h * bds[0]), int(h * bds[1]), int(w * bds[2]), int(w * bds[3])]
    cv.imwrite(output_path, img[bd[0]:bd[1], bd[2]:bd[3]])
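A minimal usage sketch for the crop helper above. It assumes `bds` is a module-level list of fractional crop bounds and that `imread` is a thin OpenCV wrapper; the names, values, and file paths below are illustrative, not taken from the original project.

import cv2 as cv

# Hypothetical module-level crop bounds as fractions of the image size:
# [top, bottom, left, right].
bds = [0.1, 0.9, 0.2, 0.8]

def imread(path):
    # Assumed wrapper: read the image unchanged (keeps 16-bit data intact).
    return cv.imread(path, cv.IMREAD_UNCHANGED)

crop('input.png', 'output.png')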
Example #3
def get5rgbc(inputs, outputs, black_level=64):
    """inputs: gt, ab, f
        outputs: gt, ab, f, fo, m"""
    check_exif(inputs)
    rgbs = [raw_read_rgb(x) for x in inputs]
    h, w, c = rgbs[0].shape
    bd = [int(h * bds[0]), int(h * bds[1]), int(w * bds[2]), int(w * bds[3])]
    rgbsc = [x[bd[0]:bd[1], bd[2]:bd[3]] for x in rgbs]

    for out, rgb in zip(outputs[:3], rgbsc):
        imwrite(out, rgb)
    # return

    raws = [rawpy.imread(x) for x in inputs[1:3]]
    # Flash-only raw: subtract the ambient raw from the flash raw, re-add the
    # black level, and clip to the sensor's 10-bit range.
    raws[1].raw_image_visible[:] = np.clip(
        raws[1].raw_image_visible.astype(np.int64) -
        raws[0].raw_image_visible.astype(np.int64) + black_level, 0,
        1023).astype(np.uint16)
    fo = cv.resize(raws[1].postprocess(use_camera_wb=True,
                                       no_auto_bright=True),
                   None,
                   fx=0.5,
                   fy=0.5)
    imwrite(outputs[3], fo[bd[0]:bd[1], bd[2]:bd[3]])

    # Mask: relative difference (gt - ab) / (gt + 1), clipped to [0, 1].
    rgbs = [rgbg2rgb(imread(x)) for x in inputs[:2]]
    m = (np.clip((rgbs[0].astype(np.float32) - rgbs[1].astype(np.float32)) /
                 (rgbs[0] + 1), 0, 1) * 255).astype(np.uint8)
    imwrite(outputs[4], m[bd[0]:bd[1], bd[2]:bd[3]])
Example #4
def tbnail_rgb(inputs, outputs):
    for i, o in zip(inputs, outputs):
        im = imread(i)
        if im is None:
            print(i)
            continue
        # print(im.shape,im.dtype)
        imwrite(o, cv.resize(im, None, fx=1 / 16, fy=1 / 16))

def tbnail_raw(inputs, outputs):
    r = 1 / 16
    imgs = [
        cv.resize(rgbg2srgb(imread(x), maximum=65535), None, fx=r, fy=r)
        for x in inputs[:5]
    ]
    for img, o in zip(imgs, outputs[:5]):
        imwrite(o, img)
Example #6
def get_frame(self,
              frame_number,
              dtype=np.float32,
              sfactor=1.0,
              image_type='rgb'):
    return io.imread(self.frame_fullnames[frame_number],
                     dtype=dtype,
                     sfactor=sfactor,
                     image_type=image_type)
Example #7
File: core.py  Project: benwrk/SportVideo
def _set_correspondences(img1, img2, field_img_path='./demo/data/field.png'):

    field_img = io_utils.imread(field_img_path)

    h2, w2 = field_img.shape[0:2]
    W, H = 104.73, 67.74

    fig, ax = plt.subplots(nrows=2, ncols=2)
    ax[0, 0].imshow(img1)
    ax[1, 0].imshow(img2)
    ax[0, 1].imshow(field_img)
    ax[1, 1].imshow(field_img)

    ax[0, 0].axis('off')
    ax[1, 0].axis('off')
    ax[0, 1].axis('off')
    ax[1, 1].axis('off')

    points2d_img1 = []
    points2d_img2 = []
    points3d = []

    def onclick(event):
        # print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
        # (event.button, event.x, event.y, event.xdata, event.ydata))
        x, y = event.xdata, event.ydata
        print("Click : ", x, y)
        pos = event.inaxes.get_position()
        if pos.x0 < 0.5 and pos.y0 > 0.5:
            # top-left panel: ax[0, 0] (img1)
            ax[0, 0].plot(x, y, 'r.', ms=10)
            points2d_img1.append([x, y])
        elif pos.x0 < 0.5 and pos.y0 < 0.5:
            # bottom-left panel: ax[1, 0] (img2)
            ax[1, 0].plot(x, y, 'r.', ms=10)
            points2d_img2.append([x, y])
        else:
            ax[0, 1].plot(x, y, 'b+', ms=10)
            ax[1, 1].plot(x, y, 'b+', ms=10)
            points3d.append([x, 0, y])
        fig.canvas.draw()

    cid = fig.canvas.mpl_connect('button_press_event', onclick)
    plt.show()
    points2d_img1 = np.array(points2d_img1)
    points2d_img2 = np.array(points2d_img2)
    points3d = np.array(points3d)

    # Scale to H, W of soccer field
    points3d[:, 0] = ((points3d[:, 0] - w2 / 2.) / w2) * W
    points3d[:, 2] = ((points3d[:, 2] - h2 / 2.) / h2) * H

    print(points3d, points2d_img1, points2d_img2)

    return points2d_img1, points2d_img2, points3d
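The scaling at the end maps field-image pixel coordinates to coordinates centered at midfield, using the W and H values defined above (presumably the pitch dimensions in meters). A quick numeric check, assuming a hypothetical 1050x680 field image:

w2, h2 = 1050, 680          # assumed field-image size in pixels
W, H = 104.73, 67.74        # same field dimensions as in the function above
x_px, y_px = 1050.0, 340.0  # right edge of the image, vertical center
x_m = ((x_px - w2 / 2.) / w2) * W  # 52.365: right touchline
z_m = ((y_px - h2 / 2.) / h2) * H  # 0.0: halfway line
print(x_m, z_m)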
Example #8
File: core.py  Project: benwrk/SportVideo
def get_frame(self,
              vid_name,
              frame_number,
              dtype=np.float32,
              image_type='rgb',
              sfactor=1.0,
              flip=False):
    print("[#] Gather Frame : ", vid_name)
    return io.imread(self.frame_fullnames[vid_name][frame_number],
                     dtype=dtype,
                     sfactor=sfactor,
                     image_type=image_type,
                     flip=flip)
Example #9
File: core.py  Project: benwrk/SportVideo
def get_mask_from_detectron(self,
                            vid_name,
                            frame_number,
                            dtype=np.float32,
                            image_type='rgb',
                            sfactor=1.0,
                            flip=False):
    print("[#] Gather Detectron : ", vid_name)
    return io.imread(self.mask_fullnames[vid_name][frame_number],
                     dtype=dtype,
                     sfactor=sfactor,
                     image_type=image_type,
                     flip=flip)[..., 0]
Example #10
def get5rawc(inputs, outputs):
    """inputs: gt, ab, f
        outputs: gt, ab, f, fo, m"""
    raws = [imread(x) for x in inputs]
    h, w, _ = raws[0].shape
    bd = [int(h * bds[0]), int(h * bds[1]), int(w * bds[2]), int(w * bds[3])]
    rawsc = [x[bd[0]:bd[1], bd[2]:bd[3]] for x in raws]

    for out, raw in zip(outputs[:3], rawsc):
        imwrite(out, raw)
    # Flash-only image: flash minus ambient, clipped to the 16-bit range.
    fo = np.clip(rawsc[2].astype(np.int32) - rawsc[1].astype(np.int32), 0,
                 65535).astype(np.uint16)
    imwrite(outputs[3], fo)
    # Mask: relative difference (gt - ab) / (gt + 1), scaled to 16 bits.
    m = (np.clip((rawsc[0].astype(np.float32) - rawsc[1].astype(np.float32)) /
                 (rawsc[0] + 1), 0, 1) * 65535).astype(np.uint16)
    imwrite(outputs[4], m)
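A hedged usage sketch for get5rawc above, following the ordering given in its docstring; the file names are placeholders, and the module-level `bds` crop bounds must already be defined.

# Per the docstring: inputs are gt, ab, f; outputs are gt, ab, f, fo, m.
inputs = ['scene_gt.png', 'scene_ab.png', 'scene_f.png']
outputs = ['gt_c.png', 'ab_c.png', 'f_c.png', 'fo_c.png', 'm_c.png']
get5rawc(inputs, outputs)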
Example #11
def _set_correspondences(
    img,
    field_img_path='/home/krematas/Mountpoints/grail/data/Singleview/Soccer/field.png'
):

    field_img = io.imread(field_img_path)

    h2, w2 = field_img.shape[0:2]
    W, H = 104.73, 67.74

    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(img)
    ax[1].imshow(field_img)

    ax[0].axis('off')
    ax[1].axis('off')

    points2d = []
    points3d = []

    def onclick(event):
        # print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
        #       (event.button, event.x, event.y, event.xdata, event.ydata))
        x, y = event.xdata, event.ydata
        if event.inaxes.axes.get_position().x0 < 0.5:
            ax[0].plot(x, y, 'r.', ms=10)
            points2d.append([x, y])
        else:
            ax[1].plot(x, y, 'b+', ms=10)
            points3d.append([x, 0, y])
        plt.show()

    cid = fig.canvas.mpl_connect('button_press_event', onclick)
    plt.show()
    points2d = np.array(points2d)
    points3d = np.array(points3d)

    points3d[:, 0] = ((points3d[:, 0] - w2 / 2.) / w2) * W
    points3d[:, 2] = ((points3d[:, 2] - h2 / 2.) / h2) * H

    return points2d, points3d

def get5rawc(inputs, outputs, bds):
    """inputs: gt, ab, f
        outputs: gt, ab, f, fo, m"""
    raws = [imread(x) for x in inputs]
    w_start, h_start, w_end, h_end = [int(x) for x in bds[:4]]
    # print(h_start, w_start, h_end, w_end)
    h_offset = (h_end - h_start) // 32 * 32
    w_offset = (w_end - w_start) // 32 * 32
    rawsc = [
        x[h_start:h_start + h_offset, w_start:w_start + w_offset] for x in raws
    ]

    for out, raw in zip(outputs[:3], rawsc):
        imwrite(out, raw)
    fo = np.maximum(rawsc[2].astype(np.int32) - rawsc[1].astype(np.int32),
                    0).astype(np.uint16)
    imwrite(outputs[3], fo)

    tran = np.maximum(rawsc[1].astype(np.int32) - rawsc[0].astype(np.int32),
                      0).astype(np.uint16)
    imwrite(outputs[4], tran)
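The bds variant above rounds the crop size down to a multiple of 32 before slicing; a small arithmetic sketch with made-up pixel bounds (w_start, h_start, w_end, h_end):

bds = (100, 50, 1380, 770)  # hypothetical bounds in pixels
w_start, h_start, w_end, h_end = [int(x) for x in bds[:4]]
h_offset = (h_end - h_start) // 32 * 32  # 720 -> 704
w_offset = (w_end - w_start) // 32 * 32  # 1280 -> 1280
# The resulting crop is raw[50:754, 100:1380].
print(h_offset, w_offset)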
Example #13
def get_mask_from_detectron(self, frame_number):
    return io.imread(
        join(self.path_to_dataset, 'detectron',
             self.frame_basenames[frame_number] + '.png'))[:, :, 0]

field = gloo.Program(vertex_tex, fragment_tex)
field.bind(vertices_field)
field['position'] = vertices_field
field['u_texture'] = data.get(join(opt.path_to_data, 'texture.png'))
field['transform'] = trackball

all_programs = []

for fid, fname in enumerate(filenames):

    (basename, ext) = file_utils.extract_basename(fname)
    print('Loading model {0}/{1}: {2}'.format(fid, len(filenames), basename))

    path_to_pc = join(opt.path_to_data, 'scene3d')
    img = io.imread(join(path_to_pc, '{0}.jpg'.format(basename)),
                    dtype=np.float32)

    vertices, indices = objload(join(path_to_pc, '{0}.obj'.format(basename)))
    vertices['texcoord'][:, 1] = 1.0 - vertices['texcoord'][:, 1]

    tex_program = gloo.Program(vertex_tex, fragment_tex)
    tex_program.bind(vertices)
    tex_program['u_texture'] = img
    tex_program['transform'] = trackball

    all_programs.append(tex_program)

trackball.theta, trackball.phi, trackball.zoom = -10, 0, 15

window = app.Window(width=512, height=512, color=(0.30, 0.30, 0.35, 1.00))

parser = argparse.ArgumentParser(
    description='Track camera given an initial estimate')
parser.add_argument('--dataset', default='kth-0', help='Dataset folder')
parser.add_argument('--frame',
                    type=int,
                    default=0,
                    help='Specific frame to estimate camera parameters')
opt, _ = parser.parse_known_args()

path_to_data = file_utils.get_platform_datadir(
    join('Singleview/Soccer/', opt.dataset))

path_to_labels = join(path_to_data, 'cnn', 'youtube', 'labels')
path_to_masks = join(path_to_data, 'cnn', 'youtube', 'masks')

label_files = listdir(path_to_masks)
label_files = [i for i in label_files if '_dcrf_2.png' in i]
label_files.sort()

n_files = len(label_files)

for i in range(n_files):
    mask = io.imread(join(path_to_masks, label_files[i])) / 255.0
    h, w = mask.shape[:2]

    lbl = np.zeros((2, h, w), dtype=np.float32)
    lbl[0, :, :] = mask[:, :, 0]
    lbl[1, :, :] = mask[:, :, 0]

    savename = label_files[i].replace('_dcrf_2.png', '_r.npy')
    np.save(join(path_to_labels, savename), lbl)

cam = cam_utils.Camera('camera0', db[0].calib['00118']['A'],
                       db[0].calib['00118']['R'], db[0].calib['00118']['T'],
                       db[0].shape[0], db[0].shape[1])

# initialize figure
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# camera0
x = cam.get_position().item(0, 0)
y = cam.get_position().item(1, 0)
z = cam.get_position().item(2, 0)
ax.scatter(x, y, z, label='camera0', marker='o')

# demo field for x-y plane
field = io.imread('./demo/data/demo_field.png')

plot_field()

# set axis names and position
ax.set_xlabel('X', fontsize=25, rotation=0)
ax.set_ylabel('Y', fontsize=25, rotation=0)
ax.set_zlabel('Z', fontsize=25, rotation=0)
ax.xaxis._axinfo['juggled'] = (0, 0, 0)
ax.yaxis._axinfo['juggled'] = (1, 1, 1)
ax.zaxis._axinfo['juggled'] = (2, 2, 2)

# scale
ax.set_xlim3d(-100, 100)
ax.set_ylim3d(0, 200)
ax.set_zlim3d(-100, 100)

db.refine_poses(keypoint_thresh=7, score_thresh=0.4, neck_thresh=0.4)

file_utils.mkdir(os.path.join(db.path_to_dataset, 'players', 'masks'))
file_utils.mkdir(os.path.join(db.path_to_dataset, 'players', 'labels'))

# Merge masks for players
for sel_frame in tqdm(range(db.n_frames)):

    basename = db.frame_basenames[sel_frame]
    poses = db.poses[basename]

    for i in range(len(poses)):
        fname = os.path.join(
            db.path_to_dataset, 'players', 'cnn_masks',
            '{0}_{1:05d}.png'.format(db.frame_basenames[sel_frame], i))
        cnn_mask = io.imread(fname)
        fname = os.path.join(
            db.path_to_dataset, 'players', 'pose_masks',
            '{0}_{1:05d}.png'.format(db.frame_basenames[sel_frame], i))
        pose_mask = io.imread(fname)

        if len(cnn_mask.shape) == 1:
            cnn_mask = np.ones_like(pose_mask)
        mask = cnn_mask[:, :, 0] * pose_mask[:, :, 0] * 255
        cv2.imwrite(
            os.path.join(
                db.path_to_dataset, 'players', 'masks',
                '{0}_{1:05d}.png'.format(db.frame_basenames[sel_frame], i)),
            mask)

        label = {'mask': mask, 'depth': mask, 'billboard': mask}