Example #1
    def get_background_img(self):
        if self.isDone:
            output_bg = self.bg_img.copy()

            # Average the accumulated per-pixel color sums by the number of
            # frames that contributed to each pixel (count_map).
            output_bg[:, :, 0:1] /= self.count_map
            output_bg[:, :, 1:2] /= self.count_map
            output_bg[:, :, 2:] /= self.count_map
            return output_bg.astype(np.uint8)
        else:
            global_logger.info(f"Background image is not done yet: {self.update_num}/{self.max_update}")
            return None
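For context, the getter assumes some accumulation step has been summing background pixels into bg_img and counting contributions in count_map. A minimal sketch of what that counterpart could look like; the method name update, the fg_mask argument, and the (H, W, 1) shape of count_map are all assumptions, not shown in the source:

    def update(self, frame, fg_mask):
        # Hypothetical accumulator: add each frame's background pixels to the
        # running sum and count how often every pixel was background, so
        # get_background_img() can divide sums by counts.
        bg = ~fg_mask                       # (H, W) boolean background mask (assumed)
        self.bg_img[bg] += frame[bg].astype(np.float64)
        self.count_map[bg] += 1             # count_map assumed shape (H, W, 1)
        self.update_num += 1
        self.isDone = self.update_num >= self.max_update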
Example #2
# Train-time loader assumed to mirror the test loader in Example #3
# (mode='train' and shuffle=True are assumptions).
Dance_dataset = DanceDataset(mode='train')
Dance_dataloader = DataLoader(dataset=Dance_dataset,
                              batch_size=cfg.batch_size,
                              shuffle=True,
                              num_workers=cfg.num_thread)
R_optimizer = get_optimizer(refiner, mode='train', model_type='R')

for epoch in range(cfg.start_epoch, cfg.num_epoch):
    for i, (imgs, label_maps, bg_imgs) in enumerate(Dance_dataloader):
        real_imgs, label_maps, bg_imgs = imgs.cuda(), label_maps.cuda(), bg_imgs.cuda()
        input = torch.cat([label_maps, bg_imgs], dim=1)

        _, fake_imgs = model(input)
        # Detach the generator output so gradients flow only into the refiner.
        refined_imgs = refiner(fake_imgs.detach())

        # Supervise the refined images against the real frames; otherwise
        # R_optimizer (which only holds the refiner's parameters) gets no gradient.
        loss_L1 = L1_loss(real_imgs, refined_imgs)
        loss_MSSSIM = MSSSIM_loss(real_imgs, refined_imgs)
        R_loss = loss_MSSSIM + loss_L1

        R_optimizer.zero_grad()
        R_loss.backward()
        R_optimizer.step()

        global_logger.info(
            f"{epoch}/{cfg.num_epoch} epoch, iter: {i}/{len(Dance_dataset) // cfg.batch_size}, R_loss: {R_loss.item()}"
        )

    R_state = {
        'epoch': epoch,
        'network': refiner.state_dict(),
        'optimizer': R_optimizer.state_dict()
    }
    save_model(R_state, epoch, model_type='R')
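To resume refiner training from one of these checkpoints, the state dict can be loaded back; a minimal sketch, assuming the path layout of save_model in Example #6 and a hypothetical epoch number:

ckpt_path = os.path.join(cfg.output_dir, 'model_dump', 'R_9.pth.tar')  # epoch 9 assumed
checkpoint = torch.load(ckpt_path)
refiner.load_state_dict(checkpoint['network'])
R_optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1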
Example #3
import os

import cv2
import numpy as np
import torch

from main.config import cfg
from main.model import get_model
from data.dance_dataset import DanceDataset
from torch.utils.data import DataLoader
from utils.log_utils import global_logger

model = get_model(mode='test')
model.eval()

Dance_dataset = DanceDataset(mode='test')
Dance_dataloader = DataLoader(dataset=Dance_dataset,
                              batch_size=cfg.test_batch_size,
                              shuffle=False,
                              num_workers=cfg.num_thread)

for i, (label_maps, bg_imgs) in enumerate(Dance_dataloader):

    label_maps, bg_imgs = label_maps.cuda(), bg_imgs.cuda()
    input = torch.cat([label_maps, bg_imgs], dim=1)

    with torch.no_grad():
        _, output = model(input)

    # Take the first image in the batch as HWC and undo the dataset's
    # normalization; clip to [0, 1] before the uint8 conversion.
    output = output[0].permute(1, 2, 0).cpu().numpy()
    output = (output * cfg.pixel_std) + cfg.pixel_mean
    output = np.clip(output, 0., 1.)
    output = np.array(output * 255, dtype=np.uint8)
    cv2.imwrite(os.path.join(cfg.vis_dir, f'{i}.png'), output)

    global_logger.info(f"{i+1}/{Dance_dataset.len} img done")
Example #4
            global_logger.warning("Video aspect ratio is not 16(w) : 9(h)")
    else:
        global_logger.warning("VideoCapture is not opened")
        assert False, "VideoCapture is not opened"

    frame_num = 0

    annotations = []
    invalids = []
    while True:
        valid, frame = vcap.read()
        if not valid:
            global_logger.warning("VideoCapture doesn't return valid frame.")
            break

        frame_num += 1
        global_logger.info(f"frame_num : {frame_num}")

        frame = cv2.resize(frame, (cfg.w, cfg.h), interpolation=cv2.INTER_CUBIC)

        with torch.no_grad():
            paf, hm, _ = get_hm_paf(frame, detector)

        humans = paf_to_pose_cpp(hm, paf, pose_cfg)
        humans = humans2array(frame, humans)

        if np.shape(humans)[0] == 0:
            invalids.append(frame_num)
            continue

        # check ankle visibility
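The snippet ends at the ankle-visibility comment. A sketch of what that check might look like, continuing the loop above; the keypoint layout (humans as an (N, J, 3) array of x, y, confidence), the ankle indices, and the threshold are all assumptions that depend on the skeleton format paf_to_pose_cpp produces:

        # Hypothetical ankle-visibility check: drop frames where any person
        # lacks a confident ankle keypoint. Indices depend on the keypoint
        # format; 10 (RAnkle) and 13 (LAnkle) assume the OpenPose 18-part layout.
        R_ANKLE, L_ANKLE = 10, 13
        ankle_conf = humans[:, [R_ANKLE, L_ANKLE], 2]
        if not np.all(ankle_conf > 0.1):   # 0.1 confidence threshold (assumed)
            invalids.append(frame_num)
            continue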
Example #5
import os
import matplotlib.pyplot as plt
import numpy as np
import imageio
import glob
from main.config import cfg
from utils.log_utils import global_logger

img_path = os.path.join(cfg.output_dir, 'vis')
writer = imageio.get_writer(os.path.join(cfg.result_dir, 'result.mp4'),
                            fps=cfg.test_fps)

img_file_list = np.array(glob.glob(os.path.join(img_path, '*.png')))
# Sort numerically by the frame index encoded in each file name ('{i}.png').
frame_ids = np.array([
    int(os.path.splitext(os.path.basename(file_name))[0])
    for file_name in img_file_list
])
img_file_list = img_file_list[np.argsort(frame_ids)]

for i, img_file in enumerate(img_file_list):
    # plt.imread returns PNG data as float32 in [0, 1]; convert to uint8 frames.
    im = plt.imread(img_file)
    im = np.asarray(im * 255, dtype=np.uint8)
    writer.append_data(im)
    global_logger.info(f"{i+1}/{len(img_file_list)} img2video done")

writer.close()
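imageio's mp4 writer relies on the imageio-ffmpeg backend; if that plugin is unavailable, the same conversion can be done with OpenCV, which the other snippets already use. A minimal sketch, assuming the frames have the (cfg.w, cfg.h) size from Example #4:

import cv2

fourcc = cv2.VideoWriter_fourcc(*'mp4v')
vout = cv2.VideoWriter(os.path.join(cfg.result_dir, 'result.mp4'),
                       fourcc, cfg.test_fps, (cfg.w, cfg.h))
for img_file in img_file_list:
    vout.write(cv2.imread(str(img_file)))  # cv2 reads BGR uint8, as VideoWriter expects
vout.release()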
Example #6
def save_model(state, epoch, model_type):
    # Persist a checkpoint dict; model_type tags the network (e.g. 'R').
    save_path = os.path.join(cfg.output_dir, 'model_dump',
                             model_type + f'_{epoch}.pth.tar')
    torch.save(state, save_path)
    global_logger.info(f"{model_type} model saved to {save_path}")