Code Example #1
File: tests.py  Project: zyc00/keypoint-copycat
def test_flowfield():
    u = UniImageViewer()
    x = bad_monkey()

    theta = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]).expand(1, -1, -1)

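    # theta is the identity affine transform; affine_grid builds a normalized
    # sampling grid for x's shape, so grid_sample should return the image unchanged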
    grid = F.affine_grid(theta, x.shape)

    out = F.grid_sample(x, grid)

    u.render(out[0], block=True)
Code Example #2
def evaluate(args, env, policy, render=False):
    with torch.no_grad():

        def get_action(s, prepro, transform, view, policy, action_map, device):
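            # preprocess the frame, extract keypoints with the view network, then
            # sample an action from a softmax over a linear policy on the keypoints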
            s = prepro(s)
            s_t = transform(s).unsqueeze(0).type(policy.dtype).to(device)
            kp = view(s_t)
            p = softmax(kp.flatten().matmul(policy), dim=0)
            a = Categorical(p).sample()
            a = action_map(a)
            return a, kp

        v = UniImageViewer()

        datapack = ds.datasets[args.dataset]

        if args.model_type != 'nop':

            transporter_net = transporter.make(args, map_device='cpu')
            view = Keypoints(transporter_net).to(args.device)

        else:
            view = nop

        s = env.reset()
        a, kp = get_action(s, datapack.prepro, datapack.transforms, view,
                           policy, datapack.action_map, args.device)

        done = False
        reward = 0.0

        while not done:
            s, r, done, i = env.step(a)
            reward += r

            a, kp = get_action(s, datapack.prepro, datapack.transforms, view,
                               policy, datapack.action_map, args.device)
            if render:
                if args.model_keypoints:
                    s = datapack.prepro(s)
                    s = TVF.to_tensor(s).unsqueeze(0)
                    s = plot_keypoints_on_image(kp[0], s[0])
                    v.render(s)
                else:
                    env.render()

    return reward
Code Example #3
File: tests.py  Project: zyc00/keypoint-copycat
def test_tps_random():
    images = []
    u = UniImageViewer(screen_resolution=(2400, 1200))
    x = bad_monkey()

    for i in range(5, 10):

        set = []

        for _ in range(8):
            # collect a warped sample so torch.cat below has tensors to concatenate
            set.append(tps_random(x, num_control_points=20, var=1 / i))

        st = torch.cat(set, dim=2)
        images.append(st)

    img = torch.cat(images, dim=3)

    u.render(img, block=True)
Code Example #4
File: tests.py  Project: zyc00/keypoint-copycat
def test_tps():
    u = UniImageViewer()
    x = bad_monkey()

    theta = torch.tensor([[[0.0, 0.0], [0., 0.], [0., 0.], [0., 0.], [0., 0.],
                           [0., 0.], [0.0, 0.0]]])

    c = torch.tensor([
        [0., 0],
        [1., 0],
        [1., 1],
        [0, 1],
    ]).unsqueeze(0)

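    # build a thin-plate-spline sampling grid from the control points c and the
    # TPS parameters theta (all zeros here), then warp the image with grid_sample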
    grid = tps_grid(theta, c, x.shape)

    out = F.grid_sample(x, grid)

    u.render(out[0], block=True)
Code Example #5
def test_pong_fill():
    l = 6000
    display = True

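    # (assumed) AtariDataset rolls out Pong-v0 until at least l preprocessed
    # frame pairs have been collected, hence the length assertion below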
    ds = AtariDataset('Pong-v0', l, d.pong_prepro,
                      transforms=T.Compose([T.ToTensor(), T.ToPILImage()]),
                      end_trajectory=if_done_or_nonzero_reward)
    assert len(ds) >= l
    print(len(ds))

    disp = UniImageViewer(screen_resolution=(512, 512))

    for img1, img2 in ds:
        #i = np.concatenate((img[0], img[1]), axis=1)
        if display:
            disp.render(img1)
            time.sleep(0.03)
        else:
            plt.imshow(img1, cmap='gray', vmin=0, vmax=256.0)
            plt.show()
Code Example #6
File: tests.py  Project: zyc00/keypoint-copycat
def test_co_ords():
    height, width = 16, 16
    hm = torch.zeros(1, 1, height, width)
    hm[0, 0, 0, 15] = 20.0
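    # spike one cell of the heatmap; spacial_softmax should recover its coordinates
    # as a keypoint, and gaussian_like_function re-renders a Gaussian around it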
    k, p = MF.spacial_softmax(hm, probs=True)
    g = MF.gaussian_like_function(k, height, width)
    #plot_heightmap3d(hm[0, 0].detach().numpy())
    #plot_heightmap3d(g[0, 0].detach().numpy(), k[0, 0])
    #plot_single_channel(hm[0, 0])
    #plot_single_channel(g[0, 0])

    d = UniImageViewer()
Code Example #7
def test_pong():
    v = UniImageViewer()
    l = UniImageViewer(title='processed', screen_resolution=(32, 32))
    env = gym.make('Pong-v0')

    s = env.reset()
    done = False

    while not done:
        s, r, done, info = env.step(cma_es.sample())
        v.render(s)
        s = d.pong_color_prepro(s)
        #s = cv2.cvtColor(s, cv2.COLOR_RGB2GRAY)
        #s = s[34:168, :]
        #s = cv2.resize(s, dsize=(32, 32), interpolation=cv2.INTER_AREA)
        l.render(s)
Code Example #8
File: autoencode.py  Project: zyc00/keypoint-copycat
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from colorama import Fore, Style
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn as nn
import statistics as stats
from keypoints.models import vgg, knn, autoencoder
from utils import get_lr, UniImageViewer, make_grid
from keypoints.ds import datasets as ds
from apex import amp
from config import config

scale = 4
view_in = UniImageViewer('in',
                         screen_resolution=(128 * 2 * scale, 128 * scale))
view_z = UniImageViewer('z',
                        screen_resolution=(128 // 2 * 5 * scale,
                                           128 // 2 * 4 * scale))


def log(phase):
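    # logs the current loss each step and, every display_freq steps, a
    # reconstruction strip and latent grid (relies on module-level writer,
    # loss, x, x_, z, i, args and global_step)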
    writer.add_scalar(f'{phase}_loss', loss.item(), global_step)

    if i % args.display_freq == 0:
        recon = torch.cat((x[0], x_[0]), dim=2)
        latent = make_grid(z[0].unsqueeze(1), 4, 4)
        if args.display:
            view_in.render(recon)
            view_z.render(latent)
        writer.add_image(f'{phase}_recon', recon, global_step)
Code Example #9
File: atari_demo.py  Project: wx-b/keypoints
import gym
import cma_es
from utils import UniImageViewer, plot_keypoints_on_image
from keypoints.ds import datasets as ds
from keypoints.models import transporter, functional as KF
import torch
import config
import time
from torchvision.transforms import functional as TVF

if __name__ == '__main__':

    args = config.config()

    with torch.no_grad():
        v = UniImageViewer()

        datapack = ds.datasets[args.dataset]
        transporter_net = transporter.make(args).to(args.device)

        if args.load is not None:
            transporter_net.load(args.load)

        env = gym.make(datapack.env)

        while True:
            s = env.reset()
            done = False

            while not done:
                s, r, done, i = env.step(cma_es.sample())
Code Example #10
import pyrr
import gym
import pygame

import cma_es
import main
from keypoints.models import transporter
import config
from utils import UniImageViewer
import torch
from pyrr import matrix44, Vector3
from math import floor
from time import sleep
from PIL import Image

viewer = UniImageViewer()

pygame.init()

vertex_src = """
# version 330

layout(location = 0) in vec3 a_position;
layout(location = 1) in vec2 a_texture;

uniform mat4 model; // combined translation and rotation
uniform mat4 projection;

out vec3 v_color;
out vec2 v_texture;
Code Example #11
File: tests.py  Project: zyc00/keypoint-copycat
def test_dual_tps_random_batched():
    u = UniImageViewer(screen_resolution=(2400, 1200))
    x = bad_monkey(2)