Esempio n. 1
0
def evaluate(model, data_loader, device, draw_path=None, use_conf=False):
    """Run ``model`` over ``data_loader`` in eval mode and, when ``draw_path``
    is given, draw selective-search boxes scored against the class activation
    map of the first image of each batch.

    Args:
        model: network exposing ``detection(data)`` returning per-class
            activation maps of shape [B, N, H, W].
        data_loader: iterable of batches; ``batch[0]`` is the input tensor,
            ``batch[-2]`` the display images, ``batch[-1]`` the image ids.
        device: torch device the model and inputs are moved to.
        draw_path: output folder for rendered images; drawing is skipped
            when ``None``.
        use_conf: unused in the visible lines — presumably enables a
            confidence-weighted loss in the full version; TODO confirm.
    """

    ## set model
    model.eval()
    model = model.to(device)

    ## loss
    # NOTE(review): criterion/loss_avg/acc_avg/drawer are created but never
    # used in the visible lines — the loss/accuracy bookkeeping appears to
    # have been trimmed from this excerpt; verify against the original file.
    criterion = nn.CrossEntropyLoss(reduction='none')
    loss_avg = AverageMeter()
    acc_avg = AverageMeter()
    drawer = Drawer()

    with torch.no_grad():
        for batch_idx, batch in tqdm(
                enumerate(data_loader),
                total=len(data_loader),
                ncols=80,
                desc=f'testing',
        ):

            # if batch_idx == 20: break

            data = batch[0].to(device)
            images = batch[-2]
            image_ids = batch[-1]

            ## run forward pass
            batch_size = data.shape[0]
            out = model.detection(data)  ## [B,N,H,W]
            N = out.shape[1]
            # print(out.shape)

            if draw_path is not None:
                filename = os.path.join(draw_path, f"test_{image_ids[0]}")
                cam = out[0]
                ## min-max normalize the cam into [0, 1]
                max_val = torch.max(cam)
                min_val = torch.min(cam)
                cam = (cam - min_val) / (max_val - min_val)
                ## score selective-search proposals against the cam and keep
                ## only boxes above BOX_SCORE_THRESHOLD
                img_numpy = images[0].permute(1, 2, 0).numpy()
                boxes = np.array(ss_box_finder(img_numpy)).squeeze()
                scores, classes = box_cam_intersection(boxes, cam.numpy(),
                                                       (32, 30.6))

                img_numpy = Drawer.draw_boxes_on_image(
                    boxes[scores > BOX_SCORE_THRESHOLD], img_numpy,
                    classes[scores > BOX_SCORE_THRESHOLD], filename)
Esempio n. 2
0
    def __call__(self, image_path, draw_path):
        """Run weakly-supervised detection on a single image file.

        Selective-search proposals are scored against the normalized class
        activation map; boxes above ``BOX_SCORE_THRESHOLD`` are drawn and
        saved under ``draw_path`` (nothing is drawn when it is ``None``).
        """
        with torch.no_grad():
            sample = self.data_loader(image_path)
            tensor = sample[0].to(self.device)
            image = sample[-1]

            ## forward pass -> per-class activation maps [B, N, H, W]
            cam_stack = self.model.detection(tensor.unsqueeze(0))
            N = cam_stack.shape[1]

            if draw_path is None:
                return

            ## output file name derived from the image file stem
            stem = image_path.split('/')[-1].split('.')[0]
            filename = os.path.join(draw_path, f"detection_{stem}")

            ## min-max normalize the first CAM into [0, 1]
            cam = cam_stack[0]
            cam = (cam - torch.min(cam)) / (torch.max(cam) - torch.min(cam))

            ## score selective-search proposals against the CAM
            img_numpy = image.permute(1, 2, 0).numpy()
            boxes = np.array(ss_box_finder(img_numpy)).squeeze()
            scores, classes = box_cam_intersection(boxes, cam.numpy(),
                                                   (32, 30.6))

            ## keep confident boxes and render them onto the image
            keep = scores > BOX_SCORE_THRESHOLD
            img_numpy = Drawer.draw_boxes_on_image(boxes[keep], img_numpy,
                                                   classes[keep], filename)
Esempio n. 3
0
    def train_setup(self) -> None:
        """Create the output folder, archive the model sources and config,
        then switch the network into training mode on the target device.

        Raises:
            Whatever ``mkdir`` raised when the output folder cannot be
            created (re-raised unchanged after logging to stderr).
        """
        assert self._cfg is not None, "Create solver with config to train!"
        src_subfolder = "src"

        if self.output_folder is not None:
            # create output folder
            try:
                mkdir(self.output_folder)
                mkdir(join(self.output_folder, src_subfolder))
            except Exception:
                print("Can't create output folder!", file=stderr)
                # Re-raise the original exception with its traceback intact.
                # (The previous exc_info()/with_traceback() dance was an
                # unidiomatic equivalent of a bare `raise`.)
                raise

            # copy all python modules found in directory of trained model,
            # so the run is reproducible from the snapshot folder alone
            module_folder = dirname(getfile(self.net.__class__))
            files = [
                file for file in listdir(module_folder) if file.endswith(".py")
            ]

            for file in files:
                copyfile(join(module_folder, file),
                         join(self.output_folder, src_subfolder, file))

            # archive the abstract solver at the output root
            copyfile(join(dirname(module_folder), "abstract_cnn_solver.py"),
                     join(self.output_folder, "abstract_solver.py"))

            # save config
            with open(self.output_folder + '/config.ini', 'w') as f:
                self._cfg.write(f)

            self.drawer = Drawer(self.output_folder,
                                 scale_factor=self.upscale_factor)
            self.logger = Logger(self.output_folder)

        self.net.train()
        # TODO: create methods for moving nets and loss to device
        self.net.to(self.device)
Esempio n. 4
0
    def train_setup(self) -> None:
        """Create the output folder, archive the generator sources and config,
        then switch both GAN networks into training mode on the target device.

        Raises:
            Whatever ``mkdir`` raised when the output folder cannot be
            created (re-raised unchanged after logging to stderr).
        """
        assert self._cfg is not None, "Create solver with config to train!"
        src_subfolder = "src"

        if self.output_folder is not None:
            try:
                mkdir(self.output_folder)
                mkdir(join(self.output_folder, src_subfolder))
            except Exception:
                print("Can't create output folder!", file=stderr)
                # Re-raise the original exception with its traceback intact.
                # (The previous exc_info()/with_traceback() dance was an
                # unidiomatic equivalent of a bare `raise`.)
                raise

            # copy all python modules found in directory of trained model,
            # so the run is reproducible from the snapshot folder alone
            module_folder = dirname(getfile(self.generator.__class__))
            files = [file for file in listdir(module_folder) if file.endswith(".py")]

            for file in files:
                copyfile(join(module_folder, file), join(self.output_folder, src_subfolder, file))

            # archive the abstract solver alongside the copied sources
            copyfile(join(dirname(module_folder), "abstract_gan_solver.py"),
                     join(self.output_folder, src_subfolder, "abstract_solver.py"))

            # save config for reproducibility
            with open(self.output_folder + '/config.ini', 'w') as f:
                self._cfg.write(f)

            self.drawer = Drawer(self.output_folder, scale_factor=self.upscale_factor)
            self.logger = Logger(self.output_folder)
        else:
            print("Warning: Training results will not be saved.", file=stderr)

        self.generator.train()
        self.discriminator.train()

        self.generator.to(self.device)
        self.discriminator.to(self.device)
Esempio n. 5
0
    if guessed_onion == onion:
        print(f"What a prophet!")
        return True
    else:
        print(f"...?")
        return False

    return True


def main():
    """Run the three challenge levels in order; print the flag only when
    every level is passed."""
    for level in (level1, level2, level3):
        if not level():
            return
    print(flag)


drawer = Drawer()
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Surface the actual failure instead of silently discarding `e`;
        # the vague message alone made any error undiagnosable.
        print(f"Something went wrong... ({type(e).__name__}: {e})")
import cv2
import numpy as np
import glob
import os
from utils import Drawer

# Location of the raw videos.
# NOTE(review): hard-coded user path — consider making it a CLI argument.
root = '/home/yanglei/codes/WSOL/videos/'
vid_name = 'VID_20201207_150008.mp4'

vid_file = os.path.join(root, vid_name)
# Frames are dumped into a sibling folder named after the video with its
# extension stripped.  splitext is used instead of split('.')[0] so that a
# dot anywhere else in the path cannot truncate it.
save_path = os.path.splitext(vid_file)[0]

print(vid_file)
print(save_path)

# exist_ok avoids the check-then-create race of os.path.exists + makedirs
os.makedirs(save_path, exist_ok=True)

Drawer.get_frames_from_a_video(vid_file, save_path)
def main():
    """Extract skeletons from a labelled image folder for action recognition.

    For every valid training image: run the trtpose estimator, convert the
    predictions to openpose-format skeletons, save a rendered copy of the
    image, and dump the (image info + flattened keypoints) records as JSON
    text files for the downstream training pipeline.
    """
    t0 = time.time()
    # Settings
    cfg = Config(config_file='../configs/train_action_recogn_pipeline.yaml')
    cfg.merge_from_file('../configs/infer_trtpose_deepsort_dnn.yaml')
    # per-stage settings are keyed by this script's file name
    cfg_stage = cfg[os.path.basename(__file__)]
    img_format = cfg.img_format

    ## IO folders
    # config values may be either a ready path string or a list of path parts
    get_path = lambda x: os.path.join(*x) if isinstance(x,
                                                        (list, tuple)) else x
    src_imgs_folder = get_path(cfg_stage.input.imgs_folder)
    src_valid_imgs = get_path(cfg_stage.input.valid_imgs)
    dst_skeletons_folder = get_path(cfg_stage.output.skeletons_folder)
    dst_imgs_folder = get_path(cfg_stage.output.imgs_folder)
    dst_imgs_info_txt = get_path(cfg_stage.output.imgs_info_txt)

    # initiate pose estimator
    pose_estimator = get_pose_estimator(**cfg.POSE)
    drawer = Drawer(draw_numbers=True)

    # Init output path
    print(
        f"[INFO] Creating output folder -> {os.path.dirname(dst_skeletons_folder)}"
    )
    os.makedirs(dst_imgs_folder, exist_ok=True)
    os.makedirs(dst_skeletons_folder, exist_ok=True)
    os.makedirs(os.path.dirname(dst_imgs_info_txt), exist_ok=True)

    # train val images reader
    images_loader = ReadValidImagesAndActionTypesByTxt(src_imgs_folder,
                                                       src_valid_imgs,
                                                       img_format)
    images_loader.save_images_info(dst_imgs_info_txt)
    print(f'[INFO] Total Images -> {len(images_loader)}')

    # Read images and process
    loop = tqdm(range(len(images_loader)), total=len(images_loader))
    for i in loop:
        img_bgr, label, img_info = images_loader.read_image()
        img_disp = img_bgr.copy()
        # estimator expects RGB input while OpenCV reads BGR
        img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

        # predict trtpose skeleton and save to file as openpose format
        predictions = pose_estimator.predict(img_rgb, get_bbox=False)

        # images without any detected person are skipped entirely
        if len(predictions) == 0: continue
        predictions = utils.convert_to_openpose_skeletons(predictions)

        # save predicted image
        save_name = img_format.format(i)
        img_name = os.path.join(dst_imgs_folder, save_name)

        img_disp = drawer.render_frame(img_disp, predictions)
        cv2.imwrite(img_name, img_disp)
        # utils.show raises StopIteration when the user asks to quit
        try:
            utils.show(img_disp, wait=1)
        except StopIteration:
            break

        # save skeletons in text file (same stem as the image, .txt suffix)
        skeleton_txt = os.path.join(dst_skeletons_folder,
                                    save_name[:-4] + '.txt')
        save_data = [img_info + pred.flatten_keypoints for pred in predictions]
        with open(skeleton_txt, 'w') as f:
            json.dump(save_data, f)

        # update progress bar descriptions
        loop.set_description(f'action -> {label}')
        loop.set_postfix(num_of_person=len(predictions))

    loop.close()
    cv2.destroyAllWindows()
    t1 = time.gmtime(time.time() - t0)
    total_time = time.strftime("%H:%M:%S", t1)

    print('Total Extraction Time', total_time)
    # per-label image counts as a grid table
    print(
        tabulate([list(images_loader.labels_info.values())],
                 list(images_loader.labels_info.keys()), 'grid'))
Esempio n. 8
0
def evaluate(model, data_loader, device, draw_path=None, use_conf=False):
    """Evaluate a classifier on ``data_loader`` and optionally dump CAM
    heatmaps for the first image of each batch.

    Args:
        model: network exposing ``forward_eval(data)`` -> (logits [B, N],
            features [B, C, H, W]), a ``head`` linear layer whose weights
            project features into class activation maps, and (when
            ``use_conf``) ``compute_entropy_weight``.
        data_loader: batches of (data, labels, coarse labels, ..., images, ids).
        device: torch device to run on.
        draw_path: folder for heatmap images; drawing is skipped when None.
        use_conf: weight the loss by per-sample confidence.

    Returns:
        dict with the running-average "loss" and "acc".
    """

    ## set model
    model.eval()
    model = model.to(device)

    ## final fc weights [N, C]: row of the predicted class projects the
    ## feature maps into a class activation map
    fc_weights = model.head.weight.data

    ## loss
    criterion = nn.CrossEntropyLoss(reduction='none')
    loss_avg = AverageMeter()
    acc_avg = AverageMeter()
    drawer = Drawer()

    with torch.no_grad():
        for batch_idx, batch in tqdm(
                enumerate(data_loader),
                total=len(data_loader),
                ncols=80,
                desc='testing',
        ):

            data = batch[0].to(device)
            gt_lbls = batch[1].to(device)
            gt_gt_lbls = batch[2].to(device)
            images = batch[-2]
            image_ids = batch[-1]

            ## run forward pass
            batch_size = data.shape[0]
            out, feat = model.forward_eval(data)  ## out: [B, N]; feat: [B, C, H, W]
            preds = torch.max(out, dim=-1)[1]

            ## compute loss
            class_loss = criterion(out, gt_lbls)  ## [B]
            if use_conf:
                ## confidence-weighted loss: confident samples dominate,
                ## (1 - w)^2 pushes weights towards confidence
                weights = model.compute_entropy_weight(out)
                loss = (class_loss * (weights**2) + (1 - weights)**2).mean()
            else:
                # fix: `weights` was previously undefined on this path and
                # crashed with a NameError in the drawing branch below
                weights = None
                loss = class_loss.mean()

            ## record
            loss_avg.update(loss.item(), batch_size)
            # NOTE(review): samples with gt_gt_lbls > 2 count as correct
            # regardless of the prediction — presumably an "ignore" bucket;
            # confirm against the dataset's label semantics.
            positive = ((gt_lbls == preds) + (gt_gt_lbls > 2)).sum()
            batch_acc = positive.to(torch.float) / batch_size
            acc_avg.update(batch_acc.item(), batch_size)

            if draw_path is not None:
                ## get cam: weight feature maps by the predicted class' fc row
                preds = torch.max(out, dim=-1)[1]
                B, C, H, W = feat.shape
                cam = fc_weights[preds, :].unsqueeze(-1) * feat.reshape(B, C, -1)  ## [B, C, HW]
                cam = torch.sum(cam, dim=1).reshape(B, H, W)
                ## normalize the cam into [0, 1]
                max_val = torch.max(cam)
                min_val = torch.min(cam)
                cam = (cam - min_val) / (max_val - min_val)
                ## convert to heatmap image
                ## (the .item() calls assume batch size 1 when drawing)
                cam_numpy = cam.permute(1, 2, 0).numpy()
                img_numpy = images[0].permute(1, 2, 0).numpy()
                ## append the confidence tag only when it was computed
                conf_tag = f"_{weights.item():4.2f}" if weights is not None else ""
                filename = os.path.join(
                    draw_path, f"test_{image_ids[0]}_{preds.item():d}{conf_tag}")
                drawer.draw_heatmap(cam_numpy, img_numpy, filename)

    return {"loss": loss_avg.avg, "acc": acc_avg.avg}
Esempio n. 9
0
from utils import Random, Drawer, crand

# Demo: plot the distribution of a uniform integer generator backed by the
# C rand() wrapper, with an interactive slider controlling the sample count.
generator = Random(0, 20, generator=crand)
drawer = Drawer(
    generator,
    sliders=[
        # slider controlling how many samples are drawn per refresh
        {
            "label": "N",
            "valmin": 10,
            "valmax": 50_000,
            "valinit": 500,
            "valstep": 100,
        },
    ],
    title="Uniform distribution",
)
# initial render with the slider's default sample count
drawer.draw_distribution(n=500)
Esempio n. 10
0
import cv2
import numpy as np
import glob
import os
from utils import Drawer

# Assemble the per-frame images of one detection run back into an .avi video.
# test_path = 'test_result'
test_path = 'detection'
# timestamped run folder produced by the detection script
result_name = '2020-12-09_17-53-36'
filepath = f'{test_path}/{result_name}'
videoname = f'{test_path}/{result_name}/video_{result_name}.avi'
# deleteImg=True removes the source frames once the video is written
Drawer.write_video_from_images(filepath, videoname, deleteImg=True)
print('done')
Esempio n. 11
0
class AbstractGanSolver:
    """Base class for GAN-based super-resolution solvers.

    Owns the generator/discriminator pair together with their optimizers and
    schedulers, and implements the generic training, evaluation and
    snapshotting machinery.  Concrete subclasses supply the networks and the
    loss computations through the abstract methods below.
    """

    def __init__(self, config: Optional[ConfigParser], mode="train"):
        """Initialize the solver.

        Args:
            config: parsed solver configuration; may be ``None`` outside of
                training.
            mode: "train" (full setup), "eval" (metrics only) or "single"
                (bare inference on single images).

        Raises:
            Exception: when ``mode`` is not one of the three known modes.
        """
        self._cfg = config

        self.device = 'cuda'
        self.generator = None
        self.learning_rate_gen = None
        self.optimizer_gen = None
        self.scheduler_gen = None

        self.discriminator = None
        self.learning_rate_disc = None
        self.optimizer_disc = None
        self.scheduler_disc = None

        self.output_folder = None
        self.drawer = None
        self.logger = None
        self.test_iter = None

        if mode == "train":
            nn_cfg: SectionProxy = config["GAN"]

            self.device = device(nn_cfg['Device'])
            self.upscale_factor = nn_cfg.getint('UpscaleFactor')

            self.iteration = 0
            self.batch_size = nn_cfg.getint('BatchSize')
            self.generator_name = nn_cfg['Generator']

            self.learning_rate_disc = nn_cfg.getfloat('DiscriminatorLearningRate')
            self.learning_rate_gen = nn_cfg.getfloat('GeneratorLearningRate')

            self.iter_limit = nn_cfg.getint('IterationLimit')
            self.iter_per_snapshot = nn_cfg.getint('IterationsPerSnapshot')
            self.iter_per_image = nn_cfg.getint('IterationsPerImage')
            self.iter_to_eval = nn_cfg.getint('IterationsToEvaluation')
            self.test_iter = nn_cfg.getint('EvaluationIterations')

            # output folder is named after the concrete solver + timestamp
            if 'OutputFolder' in nn_cfg:
                timestamp = str(datetime.fromtimestamp(time()).strftime('%Y.%m.%d-%H%M%S'))
                self.output_folder = nn_cfg['OutputFolder'] + "/" + self.discriminator_name + "-" + timestamp

            self.test_identity_extractor = build_feature_extractor(config['FeatureExtractor'])
            self.mse_loss = MSELoss()

        elif mode == "eval":
            # NOTE(review): eval mode defines `self.mse` while evaluate()
            # uses `self.mse_loss` (defined only in train mode) — confirm
            # which attribute evaluate() is meant to use in eval mode.
            self.mse = MSELoss().to(self.device)
            self.test_identity_extractor = build_feature_extractor(config['FeatureExtractor'])
        elif mode == "single":
            ...
        else:
            raise Exception(f"Wrong mode \"{mode}\"!")
        # torch.manual_seed(self.seed)
        # torch.cuda.manual_seed(self.seed)

    @property
    @abstractmethod
    def discriminator_name(self):
        """Identifier of the concrete solver; used in folder/snapshot names."""

    @abstractmethod
    def build_discriminator(self, *args, **kwargs):
        pass

    @abstractmethod
    def build_generator(self, *args, **kwargs):
        pass

    @abstractmethod
    def compute_discriminator_loss(self, fake_img, real_img, precomputed=None, train=True, *args, **kwargs):
        pass

    @abstractmethod
    def compute_generator_loss(self, label, fake_img, real_img, precomputed=None, *args, **kwargs):
        pass

    def post_backward_generator(self):
        """
        May be used for gradient clipping
        """
        pass

    def post_backward_discriminator(self):
        """
        May be used for gradient clipping
        """
        pass

    def build_models(self):
        """Instantiate both networks with their optimizers and schedulers."""
        self.discriminator: Module = self.build_discriminator(self.upscale_factor).to(self.device)

        self.optimizer_disc = build_optimizer(self._cfg['DiscOptimizer'],
                                              self.discriminator.parameters(),
                                              self.learning_rate_disc)
        self.scheduler_disc = build_scheduler(self._cfg['DiscScheduler'], self.optimizer_disc)

        self.generator = self.build_generator(self.upscale_factor).to(self.device)

        self.optimizer_gen = build_optimizer(self._cfg['GenOptimizer'],
                                             self.generator.parameters(),
                                             self.learning_rate_gen)
        self.scheduler_gen = build_scheduler(self._cfg['GenScheduler'], self.optimizer_gen)

    def save_discriminator_model(self):
        """Snapshot discriminator weights/optimizer/scheduler state to disk."""
        checkpoint_path = self.output_folder + "/" + self.discriminator_name + "-disc-" + str(self.iteration) + ".mdl"
        dic = {
            'model_name': self.discriminator_name,
            'upscale': self.upscale_factor,
            'iteration': self.iteration,
            'model': self.discriminator.state_dict(),
            'optimizer': self.optimizer_disc.state_dict(),
            'scheduler': self.scheduler_disc.state_dict()
        }

        save(dic, checkpoint_path)
        self.discriminator.to(self.device)

    def save_generator_model(self):
        """Snapshot generator weights/optimizer/scheduler state to disk."""
        # NOTE(review): the file prefix deliberately uses the solver id
        # (discriminator_name); the "-gen-" infix distinguishes it from the
        # discriminator snapshot.
        checkpoint_path = self.output_folder + "/" + self.discriminator_name + "-gen-" + str(self.iteration) + ".mdl"
        dic = {
            'model_name': self.generator_name,
            'upscale': self.upscale_factor,
            'iteration': self.iteration,
            'model': self.generator.state_dict(),
            'optimizer': self.optimizer_gen.state_dict(),
            'scheduler': self.scheduler_gen.state_dict()
        }

        save(dic, checkpoint_path)
        self.generator.to(self.device)

    def save_model(self):
        """Snapshot both networks."""
        self.save_discriminator_model()
        self.save_generator_model()

    def load_discriminator_model(self, model_path: str, mode="train"):
        """Restore the discriminator from a snapshot; in "train" mode the
        optimizer and scheduler state are restored as well."""
        state = load(model_path)

        if state['model_name'] != self.discriminator_name:
            raise Exception("This snapshot is for model " + state['model_name'] + "!")

        # self.iteration = state['iteration']
        self.upscale_factor = state['upscale']

        if mode == "train":
            self.discriminator: Module = self.build_discriminator(self.upscale_factor).to(self.device)
            self.discriminator.load_state_dict(state['model'])

            self.optimizer_disc = build_optimizer(self._cfg['DiscOptimizer'], self.discriminator.parameters(),
                                                  self.learning_rate_disc)
            self.optimizer_disc.load_state_dict(state['optimizer'])

            self.scheduler_disc = build_scheduler(self._cfg['DiscScheduler'], self.optimizer_disc)
            self.scheduler_disc.load_state_dict(state['scheduler'])

    def load_generator_model(self, model_path: str, mode="train"):
        """Restore the generator from a snapshot; in "train" mode the
        optimizer and scheduler state are restored as well."""
        state = load(model_path)

        # self.iteration = state['iteration']
        self.upscale_factor = state['upscale']

        self.generator = self.build_generator(self.upscale_factor).to(self.device)
        self.generator.load_state_dict(state['model'])

        if mode == "train":
            self.optimizer_gen = build_optimizer(self._cfg['GenOptimizer'], self.generator.parameters(),
                                                 self.learning_rate_gen)
            self.optimizer_gen.load_state_dict(state['optimizer'])

            self.scheduler_gen = build_scheduler(self._cfg['GenScheduler'], self.optimizer_gen)
            self.scheduler_gen.load_state_dict(state['scheduler'])

    def train_setup(self) -> None:
        """Create the output folder, archive the sources and config, then
        switch both networks into training mode on the target device."""
        assert self._cfg is not None, "Create solver with config to train!"
        src_subfolder = "src"

        if self.output_folder is not None:
            try:
                mkdir(self.output_folder)
                mkdir(join(self.output_folder, src_subfolder))
            except Exception:
                print("Can't create output folder!", file=stderr)
                # Re-raise the original exception with its traceback intact.
                # (The previous exc_info()/with_traceback() dance was an
                # unidiomatic equivalent of a bare `raise`.)
                raise

            # copy all python modules found in directory of trained model,
            # so the run is reproducible from the snapshot folder alone
            module_folder = dirname(getfile(self.generator.__class__))
            files = [file for file in listdir(module_folder) if file.endswith(".py")]

            for file in files:
                copyfile(join(module_folder, file), join(self.output_folder, src_subfolder, file))

            copyfile(join(dirname(module_folder), "abstract_gan_solver.py"),
                     join(self.output_folder, src_subfolder, "abstract_solver.py"))

            with open(self.output_folder + '/config.ini', 'w') as f:
                self._cfg.write(f)

            self.drawer = Drawer(self.output_folder, scale_factor=self.upscale_factor)
            self.logger = Logger(self.output_folder)
        else:
            print("Warning: Training results will not be saved.", file=stderr)

        self.generator.train()
        self.discriminator.train()

        self.generator.to(self.device)
        self.discriminator.to(self.device)

    def train(self, train_set: DataLoader, test_set: DataLoader) -> None:
        """Run the adversarial training loop up to ``iter_limit`` iterations,
        periodically evaluating, logging and snapshotting."""
        self.train_setup()

        progress_bar = tqdm(total=self.iter_limit)

        gen_train_values = []
        disc_train_values = []
        loss_component_collector: DefaultDict[str, List] = defaultdict(list)

        while self.iteration < self.iter_limit:
            for _, (labels, input_img, target_img) in enumerate(train_set):
                # skip ragged final batches so losses see a constant batch size
                if len(labels) < self.batch_size:
                    continue
                input_img, target_img = input_img.to(self.device), target_img.to(self.device)

                fake_img = self.generator(input_img.contiguous()).contiguous()

                # ######## Train generator #########
                self.optimizer_gen.zero_grad()

                generator_train_loss, loss_components, precomputed = self.compute_generator_loss(labels, fake_img, target_img)

                gen_train_values.append(generator_train_loss.item())
                for loss_name, value in loss_components.items():
                    loss_component_collector[loss_name].append(value)

                # retain_graph: the discriminator pass below reuses fake_img's graph
                generator_train_loss.backward(retain_graph=True)
                self.post_backward_generator()
                self.optimizer_gen.step()

                # ######### Train discriminator ########
                self.optimizer_disc.zero_grad()

                discriminator_train_loss = self.compute_discriminator_loss(fake_img.detach(), target_img, precomputed)
                disc_train_values.append(discriminator_train_loss.item())

                discriminator_train_loss.backward()
                self.post_backward_discriminator()
                self.optimizer_disc.step()

                self.iteration += 1
                fake_img, target_img = fake_img.cpu(), target_img.cpu()

                # TODO: create function for sched step upfront to avoid more isinstance() calls
                if isinstance(self.scheduler_gen, optim.lr_scheduler.ReduceLROnPlateau):
                    old_lr = self.optimizer_gen.state_dict()['param_groups'][0]['lr']
                    self.scheduler_gen.step(generator_train_loss)
                    new_lr = self.optimizer_gen.state_dict()['param_groups'][0]['lr']

                    if old_lr != new_lr and self.logger:
                        self.logger.log("GeneratorLearningRateAdapted")
                elif self.scheduler_gen is not None:
                    self.scheduler_gen.step()

                if isinstance(self.scheduler_disc, optim.lr_scheduler.ReduceLROnPlateau):
                    old_lr = self.optimizer_disc.state_dict()['param_groups'][0]['lr']
                    self.scheduler_disc.step(discriminator_train_loss)
                    new_lr = self.optimizer_disc.state_dict()['param_groups'][0]['lr']

                    if old_lr != new_lr and self.logger:
                        self.logger.log("DiscriminatorLearningRateAdapted")
                elif self.scheduler_disc is not None:
                    self.scheduler_disc.step()

                # ######## Statistics #########
                if self.iteration > 0 and self.iteration % self.iter_to_eval == 0:

                    # store training collage
                    if self.drawer and self.iteration % self.iter_per_image == 0:
                        input_img = interpolate(input_img, size=fake_img.shape[-2:]).cpu().numpy().transpose(
                            (0, 2, 3, 1))
                        target_img = interpolate(target_img, size=fake_img.shape[-2:]).cpu().numpy().transpose((0, 2, 3, 1))
                        fake_img = fake_img.detach().cpu().numpy().transpose((0, 2, 3, 1))

                        self.drawer.save_images(input_img, fake_img, target_img, "Train-" + str(self.iteration))

                    (generator_test_loss_value, discriminator_test_loss_value), \
                    (psnr, psnr_diff, ssim_val), distances = self.evaluate(test_set)

                    if self.logger:
                        components_line = [f"{k}:{round(np.mean(v), 5):.5f}" for k, v
                                           in loss_component_collector.items()]
                        line = " ".join([f"Iter:{self.iteration:}",
                                         f"Gen_Train_loss:{np.round(np.mean(gen_train_values), 5):.5f}",
                                         f"Disc_Train_loss:{np.round(np.mean(disc_train_values), 5):.5f}",
                                         f"Gen_Test_loss:{round(generator_test_loss_value, 5):.5f}",
                                         f"Disc_Test_loss:{round(discriminator_test_loss_value, 5):.5f}",
                                         f"PSNR:{round(psnr, 3):.3f}",
                                         f"PSNR_diff:{round(psnr_diff, 3):.3f}",
                                         f"SSIM:{round(ssim_val, 5)}",
                                         f"Identity_dist_mean:{round(distances.mean(), 5):.5f}",
                                         ] + components_line)
                        self.logger.log(line)

                    gen_train_values.clear()
                    disc_train_values.clear()

                # snapshot
                if self.iteration % self.iter_per_snapshot == 0:
                    self.save_generator_model()
                    self.save_discriminator_model()

                # NOTE(review): `>` lets the loop run one iteration past
                # iter_limit before breaking — confirm whether `>=` was meant.
                if self.iteration > self.iter_limit:
                    break

                progress_bar.update()

        progress_bar.close()

    def evaluate(self, test_set: DataLoader, identity_only=False):
        """Compute test losses, PSNR/SSIM and identity distances on ``test_set``.

        Returns:
            ((generator_loss, discriminator_loss),
             (psnr, psnr_gain_over_interpolation, ssim), identity_dists)
        """
        assert self.generator is not None, "Generator model not loaded!"
        assert self.discriminator is not None or identity_only, "Discriminator model not loaded!"

        net_psnr = 0.
        interpolation_psnr = 0.
        ssim_val = 0.

        generator_loss_value = 0.
        discriminator_loss_value = 0.
        identity_dists = []

        iterations = 0

        self.generator.eval()
        # guard: with identity_only the discriminator may legitimately be None
        # (the assert above allows it); calling .eval() on None crashed here
        if self.discriminator is not None:
            self.discriminator.eval()

        with no_grad():
            for batch_num, (labels, input_img, target_img) in enumerate(test_set):
                input_img, target_img = input_img.to(self.device), target_img.to(self.device)

                fake_img = self.generator(input_img)
                fake_shape = fake_img.shape[-2:]

                # Compute discriminator
                if not identity_only:
                    if fake_shape != target_img.shape[-2:]:
                        target_img = interpolate(target_img, fake_shape, mode='bilinear', align_corners=True)

                    mse = self.mse_loss(fake_img, target_img).item()
                    net_psnr += 10 * log10(1 / mse)

                    fake_response, real_response = self.discriminator(cat((fake_img, target_img))).split(
                        fake_img.size(0))
                    # accumulate (was `=`, which kept only the last batch and
                    # made the average over `iterations` below wrong)
                    discriminator_loss_value += self.compute_discriminator_loss(fake_img, target_img, train=False).item()

                    # ##### Test generator #####
                    tmp, _, _ = self.compute_generator_loss(labels, fake_img, target_img, real_response)
                    generator_loss_value += tmp.item()

                resized_data = interpolate(input_img, fake_shape, mode='bilinear',
                                           align_corners=True)
                interpolation_loss = self.mse_loss(resized_data, target_img).item()
                interpolation_psnr += 10 * log10(1 / interpolation_loss)

                resized_data = resized_data.cpu().numpy()

                # TODO: rewrite so that whole batch can be passed
                for label, res_img, tar_img in zip(labels, fake_img, target_img):
                    target_identity, result_identity = self.test_identity_extractor(label, tar_img, res_img)

                    # extractor found no identity in the target -> skip sample
                    if target_identity is None:
                        continue

                    identity_dists.append(self.test_identity_extractor.identity_dist(result_identity, target_identity))

                fake_img = fake_img.cpu()
                target_img = target_img.cpu()

                # best-effort: ssim can fail on degenerate shapes
                try:
                    ssim_val += ssim(fake_img, target_img, data_range=1.0, nonnegative_ssim=True).mean().item()
                except RuntimeError:
                    pass

                fake_img = fake_img.numpy()
                target_img = target_img.numpy()

                iterations += 1
                if self.test_iter is not None and iterations % self.test_iter == 0:
                    break

        self.generator.train()
        if self.discriminator is not None:
            self.discriminator.train()

        net_psnr /= iterations
        interpolation_psnr /= iterations
        ssim_val /= iterations

        generator_loss_value /= iterations
        discriminator_loss_value /= iterations

        identity_dists = np.array(identity_dists)

        if self.drawer is not None and self.iteration % self.iter_per_image == 0:
            input_img = resized_data.transpose((0, 2, 3, 1))
            fake_img = fake_img.transpose((0, 2, 3, 1))
            target_img = target_img.transpose((0, 2, 3, 1))

            self.drawer.save_images(input_img, fake_img, target_img, "Test-" + str(self.iteration))

        return (generator_loss_value, discriminator_loss_value), \
               (net_psnr, net_psnr - interpolation_psnr, ssim_val), \
               identity_dists

    def test(self, test_set: DataLoader):
        pass

    def single_pass(self, image: Image, downscale: bool) -> Tuple[np.ndarray, np.ndarray]:
        """Super-resolve a single image; returns (reference, result) arrays
        in HWC, BGR channel order."""
        assert self.generator is not None, "Model is not loaded!"

        self.generator.to(self.device)

        factor = self.upscale_factor if downscale else 1

        # in_transform = Compose([CenterCrop((208, 176)), Resize((208 // factor, 176 // factor)), ToTensor()])
        # eval_transform = Compose([CenterCrop((216, 176)), ToTensor()])

        in_transform = Compose([ToTensor()])
        eval_transform = Compose([ToTensor()])

        # transform = Compose([Resize((256 // factor, 256 // factor)), ToTensor()])

        # add dimension so that tensor is 4d
        inp = in_transform(image).to(self.device).unsqueeze(0)

        with no_grad():
            result = self.generator(inp)

        # CHW -> HWC plus RGB -> BGR channel flip for OpenCV-style consumers
        result = result.cpu().detach().numpy()[0].transpose((1, 2, 0))[:, :, ::-1]

        if eval_transform is not None:
            image = eval_transform(image).numpy().transpose((1, 2, 0))[:, :, ::-1]

        return image, result
Esempio n. 12
0
class AbstractCnnSolver(ABC):
    """Base class for single-network CNN super-resolution solvers.

    Owns the network, optimizer, scheduler and training bookkeeping
    (snapshots, logging, image dumps), all driven by an INI-style config.
    """

    def __init__(self, config: ConfigParser, mode="train"):
        """Set up the solver.

        Args:
            config: parsed INI configuration; the "CNN" section is read in
                "train" mode, "FeatureExtractor" in "train"/"eval" modes.
            mode: one of "train", "eval" or "single".

        Raises:
            Exception: when ``mode`` is not an accepted value.
        """
        self._cfg = config

        # Defaults; "train" mode replaces self.device from the config.
        self.device = 'cuda'
        self.learning_rate = None
        self.net = None
        self.optimizer = None
        self.scheduler = None

        self.output_folder = None
        self.drawer = None
        self.logger = None
        self.test_iter = None

        if mode == "train":
            nn_cfg: SectionProxy = config["CNN"]
            self.device = device(nn_cfg['Device'])
            self.upscale_factor = nn_cfg.getint('UpscaleFactor')

            self.iteration = 0
            self.batch_size = nn_cfg.getint('BatchSize')
            self.learning_rate = nn_cfg.getfloat('LearningRate')

            # Training-schedule knobs (all counted in iterations).
            self.iter_limit = nn_cfg.getint('IterationLimit')
            self.iter_per_snapshot = nn_cfg.getint('IterationsPerSnapshot')
            self.iter_per_image = nn_cfg.getint('IterationsPerImage')
            self.iter_to_eval = nn_cfg.getint('IterationsToEvaluation')
            self.test_iter = nn_cfg.getint('EvaluationIterations')

            if 'OutputFolder' in nn_cfg:
                # Timestamped run folder, e.g. <out>/<name>-2020.01.31-235959
                timestamp = str(
                    datetime.fromtimestamp(time()).strftime('%Y.%m.%d-%H%M%S'))
                self.output_folder = nn_cfg[
                    'OutputFolder'] + "/" + self.name + "-" + timestamp

            self.identity_extractor = build_feature_extractor(
                config['FeatureExtractor'])
            self.mse = MSELoss().to(self.device)

        elif mode == "eval":
            # NOTE(review): in eval mode self.device is still the 'cuda'
            # default string — the config's Device entry is never read here;
            # confirm this is intended.
            self.mse = MSELoss().to(self.device)
            self.identity_extractor = build_feature_extractor(
                config['FeatureExtractor'])

        elif mode == "single":
            ...
        else:
            raise Exception(f"Wrong mode \"{mode}\"!")

        # torch.manual_seed(self.seed)
        # torch.cuda.manual_seed(self.seed)

    @property
    @abstractmethod
    def name(self):
        """Short model identifier; used to build snapshot and output-folder names."""

    @abstractmethod
    def get_net_instance(self, *args, **kwargs):
        """Build and return a fresh network module for this solver."""

    @abstractmethod
    def compute_loss(self, label, output, target):
        """Return ``(loss, components)``: total loss tensor plus a dict of named scalar parts."""

    def post_backward(self):
        """Hook invoked right after ``loss.backward()``.

        May be used for gradient clipping; no-op by default.
        """

    def build_models(self):
        """Instantiate the network, optimizer and LR scheduler from config."""
        network = self.get_net_instance(self.upscale_factor).to(self.device)
        self.net: Module = network
        self.optimizer = build_optimizer(
            self._cfg['Optimizer'], self.net.parameters(), self.learning_rate)
        self.scheduler = build_scheduler(self._cfg['Scheduler'], self.optimizer)

    def save_model(self):
        """Serialize model/optimizer/scheduler state into the output folder."""
        checkpoint_path = (self.output_folder + "/" + self.name + "-"
                           + str(self.iteration) + ".mdl")
        state = {
            'model_name': self.name,
            'upscale': self.upscale_factor,
            'iteration': self.iteration,
            'model': self.net.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
        }

        save(state, checkpoint_path)
        # Keep the net on its configured device after snapshotting.
        self.net.to(self.device)

    def load_model(self, model_path: str, mode="train"):
        """Restore solver state from a snapshot file.

        In "train" mode the optimizer and scheduler are rebuilt from the
        config and their saved state is restored as well.
        """
        checkpoint = load(model_path)

        if checkpoint['model_name'] != self.name:
            raise Exception(
                "This snapshot is for model " + checkpoint['model_name'] + "!")

        # self.iteration = checkpoint['iteration']
        self.upscale_factor = checkpoint['upscale']

        self.net: Module = self.get_net_instance(
            self.upscale_factor).to(self.device)
        self.net.load_state_dict(checkpoint['model'])

        if mode != "train":
            return

        self.optimizer = build_optimizer(
            self._cfg['Optimizer'], self.net.parameters(), self.learning_rate)
        self.optimizer.load_state_dict(checkpoint['optimizer'])

        self.scheduler = build_scheduler(self._cfg['Scheduler'], self.optimizer)
        self.scheduler.load_state_dict(checkpoint['scheduler'])

    def train_setup(self) -> None:
        """Prepare a training run.

        Creates the output folder, copies the trained model's python sources
        (and this abstract solver) into it for reproducibility, saves the
        config, builds the drawer/logger, and puts the net into train mode
        on the target device.

        Raises:
            Exception: re-raised when the output folder cannot be created.
        """
        assert self._cfg is not None, "Create solver with config to train!"
        src_subfolder = "src"

        if self.output_folder is not None:
            # create output folder
            try:
                mkdir(self.output_folder)
                mkdir(join(self.output_folder, src_subfolder))
            except Exception:
                print("Can't create output folder!", file=stderr)
                # Bare `raise` re-raises the active exception with its
                # original traceback; the previous
                # `raise ex_type.with_traceback(ex_inst, ex_tb)` dance via
                # exc_info() was an obscure equivalent of this.
                raise

            # copy all python modules found in directory of trained model
            module_folder = dirname(getfile(self.net.__class__))
            files = [
                file for file in listdir(module_folder) if file.endswith(".py")
            ]

            for file in files:
                copyfile(join(module_folder, file),
                         join(self.output_folder, src_subfolder, file))

            copyfile(join(dirname(module_folder), "abstract_cnn_solver.py"),
                     join(self.output_folder, "abstract_solver.py"))

            # save config
            with open(self.output_folder + '/config.ini', 'w') as f:
                self._cfg.write(f)

            self.drawer = Drawer(self.output_folder,
                                 scale_factor=self.upscale_factor)
            self.logger = Logger(self.output_folder)

        self.net.train()
        # TODO: create methods for moving nets and loss to device
        self.net.to(self.device)

    def train(self, train_set: DataLoader, test_set: DataLoader) -> None:
        """Main optimization loop.

        Runs until ``self.iter_limit`` iterations have been performed,
        periodically evaluating on ``test_set``, logging statistics,
        dumping image collages and saving snapshots.

        Args:
            train_set: loader yielding ``(labels, data, target)`` batches.
            test_set: loader forwarded to :meth:`evaluate`.
        """
        self.train_setup()

        progress_bar = tqdm(total=self.iter_limit)

        # running loss values and per-component loss history between logs
        train_values = []
        loss_component_collector: DefaultDict[str, List] = defaultdict(list)

        while self.iteration <= self.iter_limit:
            for _, (labels, data, target) in enumerate(train_set):
                # skip incomplete trailing batches
                if len(labels) < train_set.batch_size:
                    continue

                data, target = data.to(self.device), target.to(self.device)

                self.optimizer.zero_grad()

                result = self.net(data)
                loss, loss_components = self.compute_loss(
                    labels, result, target)

                train_values.append(loss.item())
                for loss_name, value in loss_components.items():
                    loss_component_collector[loss_name].append(value)

                loss.backward()
                self.post_backward()
                self.optimizer.step()

                self.iteration += 1

                # ReduceLROnPlateau needs the loss as a metric; other
                # schedulers step unconditionally.
                if isinstance(self.scheduler,
                              optim.lr_scheduler.ReduceLROnPlateau):
                    old_lr = self.optimizer.state_dict(
                    )['param_groups'][0]['lr']
                    self.scheduler.step(loss)
                    new_lr = self.optimizer.state_dict(
                    )['param_groups'][0]['lr']

                    if old_lr != new_lr and self.logger:
                        self.logger.log("LearningRateAdapted")
                else:
                    self.scheduler.step()

                # ######## Statistics #########
                if self.iteration > 0 and self.iteration % self.iter_to_eval == 0:

                    # store training collage
                    if self.drawer and self.iteration % self.iter_per_image == 0:
                        data = interpolate(data, scale_factor=self.upscale_factor) \
                            .cpu().numpy().transpose((0, 2, 3, 1))
                        target = target.cpu().numpy().transpose((0, 2, 3, 1))
                        result = result.detach().cpu().numpy().transpose(
                            (0, 2, 3, 1))

                        self.drawer.save_images(data, result, target,
                                                "Train-" + str(self.iteration))

                    (test_loss,
                     _), (psnr, psnr_diff,
                          ssim_val), distances = self.evaluate(test_set)

                    if self.logger:
                        components_line = [
                            f"{k}:{round(np.mean(v), 5):.5f}"
                            for k, v in loss_component_collector.items()
                        ]
                        line = " ".join([
                            f"Iter:{self.iteration}",
                            f"Train_loss:{np.round(np.mean(train_values), 5)}",
                            f"Test_loss:{round(test_loss, 5)}",
                            f"PSNR:{round(psnr, 5)}",
                            f"PSNR_diff:{round(psnr_diff, 5)}",
                            f"SSIM:{round(ssim_val, 5)}",
                            f"Identity_dist_mean:{round(distances.mean(), 5)}",
                        ] + components_line)
                        self.logger.log(line)

                    # reset accumulators for the next logging window
                    train_values.clear()
                    loss_component_collector.clear()

                # snapshot
                if self.iteration % self.iter_per_snapshot == 0 and self.iteration > 0:
                    self.save_model()

                if self.iteration > self.iter_limit:
                    break

                progress_bar.update()

    def evaluate(self, test_set, identity_only=False):
        """Evaluate the current net on ``test_set``.

        Args:
            test_set: loader yielding ``(labels, input_img, target_img)``.
            identity_only: when True, skip loss/PSNR of the net output and
                only collect identity distances.

        Returns:
            ``((test_loss, 0.), (net_psnr, psnr_gain_over_bilinear,
            ssim_val), identity_dists)`` with metrics averaged over the
            evaluated batches.

        Raises:
            ValueError: when ``test_set`` yields no batches — previously
            this surfaced as an opaque ``ZeroDivisionError`` (and an
            ``UnboundLocalError`` in the drawer branch).
        """
        assert self.net is not None, "Net model not loaded!"

        net_psnr = 0.
        bilinear_psnr = 0.
        test_loss = 0.
        ssim_val = 0.
        iterations = 0

        identity_dists = []

        self.net.eval()

        with no_grad():
            for batch_num, (labels, input_img,
                            target_img) in enumerate(test_set):
                input_img, target_img = input_img.to(
                    self.device), target_img.to(self.device)

                fake_img = self.net(input_img)

                if not identity_only:
                    loss, _ = self.compute_loss(labels, fake_img, target_img)
                    mse_loss = self.mse(fake_img, target_img).item()
                    net_psnr += 10 * log10(1 / mse_loss)

                    test_loss += loss.item()

                # bilinear upscale as the PSNR baseline
                resized_data = interpolate(input_img,
                                           scale_factor=self.upscale_factor,
                                           mode='bilinear',
                                           align_corners=True)
                bilinear_mse_loss = self.mse(resized_data, target_img).item()
                bilinear_psnr += 10 * log10(1 / bilinear_mse_loss)

                resized_data = resized_data.cpu().numpy()

                for label, res_img, tar_img in zip(labels, fake_img,
                                                   target_img):
                    target_identity, result_identity = self.identity_extractor(
                        label, tar_img, res_img)

                    # extractor found no identity in the target; skip pair
                    if target_identity is None:
                        continue

                    # TODO: verify for senet
                    # TODO: mtcnn detections
                    identity_dists.append(
                        self.identity_extractor.identity_dist(
                            result_identity, target_identity))

                fake_img = fake_img.cpu()
                target_img = target_img.cpu()

                ssim_val += ssim(fake_img,
                                 target_img,
                                 data_range=1.0,
                                 nonnegative_ssim=True).mean().item()

                fake_img = fake_img.numpy()
                target_img = target_img.numpy()

                iterations += 1
                if self.test_iter is not None and iterations >= self.test_iter:
                    break

        self.net.train()

        # Guard: an empty loader would otherwise divide by zero below and
        # reference loop variables that were never bound.
        if iterations == 0:
            raise ValueError("evaluate() received an empty test set")

        test_loss /= iterations
        net_psnr /= iterations
        bilinear_psnr /= iterations
        ssim_val /= iterations

        if self.drawer is not None and self.iteration % self.iter_per_image == 0:
            # arrays from the last batch, NCHW -> NHWC for drawing
            input_img = resized_data.transpose((0, 2, 3, 1))
            fake_img = fake_img.transpose((0, 2, 3, 1))
            target_img = target_img.transpose((0, 2, 3, 1))

            self.drawer.save_images(input_img, fake_img, target_img,
                                    "Test-" + str(self.iteration))

        return (test_loss, 0.), \
               (net_psnr, net_psnr - bilinear_psnr, ssim_val), \
               np.array(identity_dists)

    def test(self):
        """Placeholder for a dedicated test entry point; intentionally empty."""

    def single_pass(self, image: Image,
                    downscale: bool) -> Tuple[np.ndarray, np.ndarray]:
        """Run one image through the net and return reference and result.

        Args:
            image: input PIL image.
            downscale: kept for interface compatibility; with the current
                plain ``ToTensor`` transforms it has no effect on the pipeline.

        Returns:
            Tuple ``(image, result)`` of HWC float arrays with channels
            reversed (RGB -> BGR, for OpenCV-style consumers).
        """
        assert self.net is not None, "Model is not loaded!"

        self.net.to(self.device)

        # NOTE(review): `factor` is consumed only by the commented-out
        # crop/resize transforms below; with plain ToTensor it is unused.
        factor = self.upscale_factor if downscale else 1

        # in_transform = Compose([CenterCrop((216, 176)), Resize((216 // factor, 176 // factor)), ToTensor()])
        # eval_transform = Compose([CenterCrop((216, 176)), ToTensor()])

        in_transform = Compose([ToTensor()])
        eval_transform = Compose([ToTensor()])

        # transform = Compose([Resize((256 // factor, 256 // factor)), ToTensor()])

        # add batch dimension so that the tensor is 4d
        inp = in_transform(image).to(self.device).unsqueeze(0)

        with no_grad():
            result = self.net(inp)

        # CHW tensor -> HWC numpy, RGB -> BGR
        result = result.cpu().detach().numpy()[0].transpose(
            (1, 2, 0))[:, :, ::-1]

        # The previous `if eval_transform is not None` guard was vacuous:
        # eval_transform is always assigned just above.
        image = eval_transform(image).numpy().transpose(
            (1, 2, 0))[:, :, ::-1]

        return image, result