Example #1
    def refresh_info(self):
        info = {}
        info['mode'] = self._mode
        info['fps'] = self._fps
        if self.vid:
            info['cur_frame_index'] = self.vid.cur_frame_index
            info['num_frames'] = self.vid.vid_frames
        info['canny'] = self._canny_on
        info['komas'] = self._k
        info['onion'] = self._onion_num

        info_str = ' Playing Info \n\n'
        info_str += 'FPS of playing: {:.2f}\n'.format(self._fps)
        if self.vid:
            info_str += 'Time: {}/{}\n'.format(
                utils.sec2time(self.vid.cur_frame_index / self.vid.fps),
                utils.sec2time(self.vid.vid_frames / self.vid.fps))
            info_str += 'No. of frames: {}/{}\n'.format(
                self.vid.cur_frame_index, self.vid.vid_frames - 1)
            info_str += 'Range: {} - {}\n'.format(self.vid.start_index,
                                                  self.vid.end_index)
            info_str += '\n'
        info_str += 'Canny Mode: {}\n'.format(
            self._canny_on if self._canny_on else 'Off')
        info_str += 'Komas with \'j\'/\'k\': {}\n'.format(self._k)
        info_str += 'Onion num: {}\n'.format(
            self._onion_num if self._onion_num else 'Off')

        self.ext_print(info_str, info)
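
Every example on this page formats an elapsed duration through a `sec2time` helper, imported variously as `utils.sec2time`, `u.sec2time`, or plain `sec2time`. The implementations differ by project; a minimal sketch, assuming the helper only needs to render seconds as an H:MM:SS string, might look like:

def sec2time(sec):
    # Illustrative sketch, not any one project's exact helper:
    # render a duration in seconds as H:MM:SS.ff.
    m, s = divmod(float(sec), 60)
    h, m = divmod(int(m), 60)
    return '%d:%02d:%05.2f' % (h, m, s)

For instance, sec2time(3725.5) returns '1:02:05.50'.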
Example #2
 def optimize(self):
     """
     Train the network. For each iteration, call the optimization loop function.
     """
     print(colored('starting optimization with ADAM...', 'cyan'))
     self.optimizer = torch.optim.Adam(self.parameters, lr=self.args.lr)
     scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min',
                                                            factor=self.args.lr_factor,
                                                            threshold=self.args.lr_thresh,
                                                            patience=self.args.lr_patience)
     # stop after no improvements greater than a certain percentage of the previous loss
     stopper = u.EarlyStopping(patience=self.args.earlystop_patience,
                               min_delta=self.args.earlystop_min_delta,
                               percentage=True)
     start = time()
     for j in range(self.args.epochs):
         self.optimizer.zero_grad()
         loss = self.optimization_loop()
         self.optimizer.step()
         if self.args.reduce_lr:
             scheduler.step(loss)
         if stopper.step(loss):  # stopper is computed on loss, as we don't have any validation metrics
             break
     
     self.elapsed = time() - start
     print(colored(u.sec2time(self.elapsed), 'yellow'))
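
The comment above describes `u.EarlyStopping` (stop after no improvement greater than a percentage of the previous loss) without showing it. Below is a minimal sketch consistent with the constructor arguments used here (`patience`, `min_delta`, `percentage=True`); it is an assumption about the real class, not its actual source:

class EarlyStopping:
    def __init__(self, patience=10, min_delta=0.0, percentage=False):
        self.patience = patience
        self.min_delta = min_delta
        self.percentage = percentage
        self.best = None
        self.num_bad = 0

    def step(self, loss):
        # Returns True once the loss has failed to improve by min_delta
        # (interpreted as a percent of the best loss when percentage=True)
        # for `patience` consecutive calls.
        loss = float(loss)
        if self.best is None:
            self.best = loss
            return False
        delta = abs(self.best) * self.min_delta / 100.0 \
            if self.percentage else self.min_delta
        if loss < self.best - delta:
            self.best = loss
            self.num_bad = 0
        else:
            self.num_bad += 1
        return self.num_bad >= self.patience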
Example #3
 def run(self):
     """ This function shows the frame and all visualizations. It is also responsable for synchronization of stimulus and computer time."""
             
     """ scheduler configuration """
     cpu_time = time.time()
     self.snippets[self.snippet_id].frame_count += 1
     #act_frame_element = s.enterabs(sequence.time_started+1/stimulus.fps/SPEED*sequence.frame_count, 1, showFrame, (s,stimulus,sequence,gazess,player))    
     
     """ load image """
     frameImg = self.stimulus.get_next_frame()
     if frameImg is None:
         return False
     
     """ adjusting stimulus, due to slow computers """
     #skipped_frames = stimulus.resync(sequence.begin + (cpu_time - sequence.time_started)*SPEED)
     #sequence.skipped_frames += skipped_frames
     
     """ when snippet is over, stop scheduler through deleting next schedule element """
     if not self.snippets[self.snippet_id].contains(self.stimulus.act_pos):
         if self.snippet_id + 1 >= len(self.snippets):
             self.play_snippet(1)
         else:
             self.play_snippet(self.snippet_id + 1)
         if self.stop_after:
             self.qscheduler.stop()
         return
     
     """ bgr to gray convertion for using gray stimulus image """
     if self.gray:
         frameImg = cv2.cvtColor(frameImg,cv.CV_BGR2GRAY)
         frameImg = cv2.cvtColor(frameImg,cv.CV_GRAY2BGR)
     showPic = cv2.addWeighted(frameImg, self.movie_opacity, frameImg, 0, 0)     #TODO: find better implementation
     
     """ go through every option and do calculations """
     
     if self.show_gazes_each > 0:
         ovImg = self.gazes.get_each(self.stimulus)
         showPic = cv2.addWeighted(showPic, 1, ovImg, self.show_gazes_each, 0)
     
     if self.show_gazes_clustered > 0:
         heatMap = self.gazes.get_heat_map(self.stimulus, .5)
         ovImg = self.gazes.get_clustered(self.stimulus, heatMap)
         showPic = cv2.addWeighted(showPic, 1, ovImg, self.show_gazes_clustered, 0)
     
     """ shows final stimulus visualized image """
     self.stim_win.show_img(showPic)
     self.control_win.update_texts(self)
     self.stim_win.pos.setText('%(start)s / %(end)s' %{'start':utils.sec2time(self.stimulus.act_pos), 'end': self.stimulus.end_pos_str})
     #self.stim_win.debug.setText('skipped: %(fr)i' %{'fr':sequence.skipped_frames})
     cv2.waitKey(1)
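
The `cv2.addWeighted(frameImg, self.movie_opacity, frameImg, 0, 0)` call flagged with a TODO above computes src1*alpha + src2*0 + 0 with saturation, i.e. it only scales pixel intensities. A plain NumPy equivalent, assuming `frameImg` is a uint8 BGR array (names taken from the example):

import numpy as np

# scale intensities by the opacity factor, saturating at the uint8 range
showPic = np.clip(frameImg.astype(np.float32) * movie_opacity,
                  0, 255).astype(np.uint8)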
Example #4
def reconstruct_patches(args,
                        return_history=False,
                        verbose=False) -> Union[Tuple[np.ndarray, list], np.ndarray]:

    inputs = np.load(os.path.join(args.imgdir, args.imgname),
                     allow_pickle=True)

    pe = _get_patch_extractor(inputs.shape, args.patch_shape,
                              args.patch_stride, args.datadim, args.imgchannel)
    # this is necessary for setting pe attributes
    _ = pe.extract(inputs)
    patch_array_shape = u.patch_array_shape(inputs.shape, pe.dim, pe.stride)

    patches_out = []
    elapsed = []
    history = []
    for path in glob(os.path.join('./results', args.outdir) + '/*.npy'):
        out = np.load(path, allow_pickle=True).item()
        patches_out.append(out['output'])
        try:
            elapsed.append(out['elapsed'])
        except KeyError:
            elapsed.append(out['elapsed time'])
        history.append(out['history'])

    patches_out = np.asarray(patches_out)
    if args.datadim == '2.5d':
        patches_out = _transpose_patches_25d(patches_out, args.slice, adj=True)
    outputs = pe.reconstruct(
        patches_out.reshape(patch_array_shape)) / args.gain

    try:
        gpu_ = u.get_gpu_name(int(out['device']))
    except Exception:  # 'device' may already hold a name rather than an index
        gpu_ = out['device']

    if verbose:
        print('\n%d patches; total elapsed time on %s: %s' %
              (len(history), gpu_,
               u.sec2time(sum([u.time2sec(e) for e in elapsed]))))

    if return_history:
        return outputs, history
    else:
        return outputs
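
The verbose branch also relies on `u.time2sec` to invert the stored `sec2time` strings before summing them. A sketch of that inverse, assuming 'H:MM:SS[.ff]' input:

def time2sec(t):
    # Illustrative inverse of sec2time for 'H:MM:SS[.ff]' strings.
    h, m, s = t.split(':')
    return int(h) * 3600 + int(m) * 60 + float(s)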
Example #5
 def save_result(self):
     """
     Save the results and some run info to disk as a .npy file, and optionally the model as a .pth file.
     """
     np.save(os.path.join(self.outpath, self.image_name + '_run.npy'), {
         'device' : u.get_gpu_name(int(os.environ["CUDA_VISIBLE_DEVICES"])),
         'elapsed': u.sec2time(self.elapsed),
         'outpath': self.outpath,
         'history': self.history,
         'mask'   : self.mask,
         'image'  : self.img,
         'output' : self.out_best,
         'noise'  : self.input_list,
     })
     
     # save the model
     if self.args.savemodel:
         torch.save(self.net.state_dict(),
                    os.path.join(self.outpath, self.image_name + '_model.pth'))
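
Because `np.save` wraps a dict in a 0-d object array, reading this file back requires `allow_pickle=True` plus `.item()`, exactly the pattern `reconstruct_patches` uses in Example #4:

run = np.load(os.path.join(outpath, image_name + '_run.npy'),
              allow_pickle=True).item()  # outpath/image_name as saved above
print(run['device'], run['elapsed'])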
Example #6
 def __init__(self, movie_path):
     self.path = movie_path
     self.data = cv2.VideoCapture(self.path)
     self.fps = self.data.get(cv2.CAP_PROP_FPS)
     self.nFrames = int(self.data.get(cv2.CAP_PROP_FRAME_COUNT))
     self.length  = self.nFrames / self.fps
     self.width   = int(self.data.get(cv2.CAP_PROP_FRAME_WIDTH))
     self.height  = int(self.data.get(cv2.CAP_PROP_FRAME_HEIGHT))
     
     # current playback state
     self.act_pos = 0   # current position of the stimulus in seconds
     self.act_frame = None   # numpy array containing the last frame image
     self.speed = 1
     
     # synchronizing information
     self.start_pos = 0
     self.start_time = 0
     
     # performance caching
     self.end_pos_str = utils.sec2time(self.nFrames/self.fps)
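
Hypothetical usage of this wrapper (the class name `Movie` is an assumption; only `__init__` is shown above):

movie = Movie('stimulus.avi')
print('%dx%d @ %.2f fps, duration %s' %
      (movie.width, movie.height, movie.fps, movie.end_pos_str))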
Example #7
 def run(self):
     """
     System Entry
     """
     # ray.init()
     start_time = time.time()
     start_time_str = time.strftime('%Y-%m-%d-%H:%M:%S',
                                    time.localtime(start_time))
     logger.info(
         f'*******************************Dolphin Detection System: Running Environment [{self.cfg.env}] at '
         f'[{start_time_str}]********************************')
     # self.http_server.run()
     if self.cfg.run_direct:
         self.monitor.monitor()
     else:
         self.scheduler.start()
     end_time = time.time()
     end_time_str = time.strftime('%Y-%m-%d-%H:%M:%S',
                                  time.localtime(end_time))
     run_time = sec2time(end_time - start_time)
     logger.info(
         f'*******************************Dolphin Detection System: Shut down at [{end_time_str}].'
         f'Total Running Time '
         f'[{run_time}]********************************')
Example #8
def train(model,
          train_loader,
          test_loader,
          mode='EDSR_Baseline',
          save_image_every=50,
          save_model_every=10,
          test_model_every=1,
          epoch_start=0,
          num_epochs=1000,
          device=None,
          refresh=True):

    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

    today = datetime.datetime.now().strftime('%Y.%m.%d')

    result_dir = f'./results/{today}/{mode}'
    weight_dir = f'./weights/{today}/{mode}'
    logger_dir = f'./logger/{today}_{mode}'
    csv = f'./hist_{today}_{mode}.csv'
    if refresh:
        try:
            shutil.rmtree(result_dir)
            shutil.rmtree(weight_dir)
            shutil.rmtree(logger_dir)
        except FileNotFoundError:
            pass
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(weight_dir, exist_ok=True)
    os.makedirs(logger_dir, exist_ok=True)
    logger = SummaryWriter(log_dir=logger_dir, flush_secs=2)
    model = model.to(device)

    params = list(model.parameters())
    optim = torch.optim.Adam(params, lr=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optim,
                                                step_size=1000,
                                                gamma=0.99)
    criterion = torch.nn.L1Loss()
    GMSD = GMSD_quality().to(device)
    mshf = MSHF(3, 3).to(device)

    start_time = time.time()
    print(f'Training Start || Mode: {mode}')

    step = 0
    pfix = OrderedDict()
    pfix_test = OrderedDict()

    hist = dict()
    hist['mode'] = f'{today}_{mode}'
    for key in ['epoch', 'psnr', 'ssim', 'ms-ssim']:
        hist[key] = []

    blurs = {}
    for ksize in [3, 5, 7]:
        blurs[ksize] = {}
        for sigma in [0.4, 0.8, 1.0, 1.2, 1.6, 2.0]:
            blurs[ksize][sigma] = Blur(ksize=ksize, sigma=sigma).to(device)
    noise_sigma = 0.3

    for epoch in range(epoch_start, epoch_start + num_epochs):

        if epoch == 0:
            torch.save(model.state_dict(),
                       f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            with torch.no_grad():
                with tqdm(
                        test_loader,
                        desc=
                        f'{mode} || Warming Up || Test Epoch {epoch}/{num_epochs}',
                        position=0,
                        leave=True) as pbar_test:
                    psnrs = []
                    ssims = []
                    msssims = []
                    for lr, hr, fname in pbar_test:
                        lr = lr.to(device)
                        # hr = hr.to(device)

                        blur = blurs[7][2.0]

                        lr_input = blur(lr)
                        lr_input = lr_input + torch.rand_like(
                            lr, device=lr.device) * noise_sigma

                        _, features = model(lr_input)
                        dr = features[0]
                        # sr = quantize(sr)

                        psnr, ssim, msssim = evaluate(lr, dr)

                        psnrs.append(psnr)
                        ssims.append(ssim)
                        msssims.append(msssim)

                        psnr_mean = np.array(psnrs).mean()
                        ssim_mean = np.array(ssims).mean()
                        msssim_mean = np.array(msssims).mean()

                        pfix_test['psnr'] = f'{psnr:.4f}'
                        pfix_test['ssim'] = f'{ssim:.4f}'
                        pfix_test['msssim'] = f'{msssim:.4f}'
                        pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                        pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                        pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                        pbar_test.set_postfix(pfix_test)
                        if len(psnrs) > 1: break

        with tqdm(train_loader,
                  desc=f'{mode} || Epoch {epoch+1}/{num_epochs}',
                  position=0,
                  leave=True) as pbar:
            psnrs = []
            ssims = []
            msssims = []
            losses = []
            for lr, hr, _ in pbar:
                lr = lr.to(device)
                # hr = hr.to(device)

                # prediction
                ksize_ = random.choice([3, 5, 7])
                sigma_ = random.choice([0.4, 0.8, 1.0, 1.2, 1.6, 2.0])
                blur = blurs[ksize_][sigma_]

                dnd = random.choice(['blur', 'noise', 'blur_and_noise'])
                if dnd == 'blur':
                    lr_input = blur(lr)
                elif dnd == 'noise':
                    lr_input = lr + torch.rand_like(
                        lr, device=lr.device) * noise_sigma
                else:
                    lr_input = blur(lr)
                    lr_input = lr_input + torch.rand_like(
                        lr, device=lr.device) * noise_sigma

                _, features = model(lr_input)
                dr = features[0]

                gmsd = GMSD(lr, dr)

                # training
                loss = criterion(lr, dr)
                loss_tot = loss
                optim.zero_grad()
                loss_tot.backward()
                optim.step()
                scheduler.step()

                # training history
                elapsed_time = time.time() - start_time
                elapsed = sec2time(elapsed_time)
                pfix['Step'] = f'{step+1}'
                pfix['Loss'] = f'{loss.item():.4f}'

                psnr, ssim, msssim = evaluate(lr, dr)

                psnrs.append(psnr)
                ssims.append(ssim)
                msssims.append(msssim)

                psnr_mean = np.array(psnrs).mean()
                ssim_mean = np.array(ssims).mean()
                msssim_mean = np.array(msssims).mean()

                pfix['PSNR'] = f'{psnr:.2f}'
                pfix['SSIM'] = f'{ssim:.4f}'
                # pfix['MSSSIM'] = f'{msssim:.4f}'
                pfix['PSNR_mean'] = f'{psnr_mean:.2f}'
                pfix['SSIM_mean'] = f'{ssim_mean:.4f}'
                # pfix['MSSSIM_mean'] = f'{msssim_mean:.4f}'

                free_gpu = get_gpu_memory()[0]

                pfix['free GPU'] = f'{free_gpu}MiB'
                pfix['Elapsed'] = f'{elapsed}'

                pbar.set_postfix(pfix)
                losses.append(loss.item())

                if step % save_image_every == 0:

                    imsave([lr_input[0], dr[0], lr[0], gmsd[0]],
                           f'{result_dir}/epoch_{epoch+1}_iter_{step:05d}.jpg')

                step += 1

            logger.add_scalar("Loss/train", np.array(losses).mean(), epoch + 1)
            logger.add_scalar("PSNR/train", psnr_mean, epoch + 1)
            logger.add_scalar("SSIM/train", ssim_mean, epoch + 1)

            if (epoch + 1) % save_model_every == 0:
                torch.save(model.state_dict(),
                           f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            if (epoch + 1) % test_model_every == 0:

                with torch.no_grad():
                    with tqdm(
                            test_loader,
                            desc=f'{mode} || Test Epoch {epoch+1}/{num_epochs}',
                            position=0,
                            leave=True) as pbar_test:
                        psnrs = []
                        ssims = []
                        msssims = []
                        for lr, hr, fname in pbar_test:

                            fname = fname[0].split('/')[-1].split('.pt')[0]

                            lr = lr.to(device)
                            # hr = hr.to(device)

                            blur = blurs[7][2.0]
                            lr_input = blur(lr)
                            lr_input = lr_input + torch.rand_like(
                                lr, device=lr.device) * noise_sigma

                            _, features = model(lr_input)
                            dr = features[0]

                            mshf_lr = mshf(lr)
                            mshf_dr = mshf(dr)

                            gmsd = GMSD(lr, dr)

                            psnr, ssim, msssim = evaluate(lr, dr)

                            psnrs.append(psnr)
                            ssims.append(ssim)
                            msssims.append(msssim)

                            psnr_mean = np.array(psnrs).mean()
                            ssim_mean = np.array(ssims).mean()
                            msssim_mean = np.array(msssims).mean()

                            pfix_test['psnr'] = f'{psnr:.4f}'
                            pfix_test['ssim'] = f'{ssim:.4f}'
                            pfix_test['msssim'] = f'{msssim:.4f}'
                            pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                            pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                            pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                            pbar_test.set_postfix(pfix_test)

                            imsave([lr_input[0], dr[0], lr[0], gmsd[0]],
                                   f'{result_dir}/{fname}.jpg')

                            mshf_vis = torch.cat(
                                (torch.cat([
                                    mshf_dr[:, i, :, :]
                                    for i in range(mshf_dr.shape[1])
                                ],
                                           dim=-1),
                                 torch.cat([
                                     mshf_lr[:, i, :, :]
                                     for i in range(mshf_lr.shape[1])
                                 ],
                                           dim=-1)),
                                dim=-2)

                            imsave(mshf_vis, f'{result_dir}/MSHF_{fname}.jpg')

                        hist['epoch'].append(epoch + 1)
                        hist['psnr'].append(psnr_mean)
                        hist['ssim'].append(ssim_mean)
                        hist['ms-ssim'].append(msssim_mean)

                        logger.add_scalar("PSNR/test", psnr_mean, epoch + 1)
                        logger.add_scalar("SSIM/test", ssim_mean, epoch + 1)
                        logger.add_scalar("MS-SSIM/test", msssim_mean,
                                          epoch + 1)

                        df = pd.DataFrame(hist)
                        df.to_csv(csv)
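
The training and test loops above lean on an `evaluate(x, y)` helper that returns `(psnr, ssim, msssim)`. Its implementation is not shown; a minimal sketch, assuming image batches in [0, 1] and the third-party pytorch_msssim package (the original project may compute these differently):

import torch
from pytorch_msssim import ssim, ms_ssim

def evaluate(x, y):
    # PSNR from the mean squared error; SSIM / MS-SSIM via pytorch_msssim.
    mse = torch.mean((x - y) ** 2)
    psnr = -10.0 * torch.log10(mse + 1e-12)
    return (psnr.item(),
            ssim(x, y, data_range=1.0).item(),
            ms_ssim(x, y, data_range=1.0).item())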
Example #9
                final_boxes, final_labels, final_probs = final_boxes[good_ids], \
                    final_labels[good_ids], final_probs[good_ids]
                vis_boxes = np.asarray(
                    [[box[0], box[1], box[2] + box[0], box[3] + box[1]]
                     for box in final_boxes])
                vis_labels = [
                    "%s_%.2f" % (targetid2class[cat_id], prob)
                    for cat_id, prob in zip(final_labels, final_probs)
                ]
                newim = draw_boxes(im,
                                   vis_boxes,
                                   vis_labels,
                                   color=np.array([255, 0, 0]),
                                   font_scale=0.5,
                                   thickness=2)

                vis_file = os.path.join(vis_path, "%s.jpg" % (imgname))
                cv2.imwrite(vis_file, newim)

    if args.log_time_and_gpu:
        end_time = time.time()
        print(
            "total run time %s (%s), log gpu utilize every %s seconds and get "
            "median %.2f%% and average %.2f%%. GPU temperature median %.2f and "
            "average %.2f (C)" %
            (sec2time(end_time - start_time), end_time - start_time,
             gpu_log_interval, np.median(gpu_util_logs) * 100,
             np.mean(gpu_util_logs) * 100, np.median(gpu_temp_logs),
             np.mean(gpu_temp_logs)))
    cv2.destroyAllWindows()
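
Several of these loops also report free memory via `get_gpu_memory()[0]`. A plausible sketch of that helper, assuming `nvidia-smi` is on the PATH:

import subprocess

def get_gpu_memory():
    # Free memory in MiB for each visible GPU, parsed from nvidia-smi.
    out = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.free',
         '--format=csv,nounits,noheader'])
    return [int(line) for line in out.decode().strip().split('\n')]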
Example #10
def train(model,
          train_loader,
          test_loader,
          mode='EDSR_Baseline',
          save_image_every=50,
          save_model_every=10,
          test_model_every=1,
          epoch_start=0,
          num_epochs=1000,
          device=None,
          refresh=True,
          scale=2,
          today=None):

    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if today is None:
        today = datetime.datetime.now().strftime('%Y.%m.%d')

    result_dir = f'./results/{today}/{mode}'
    weight_dir = f'./weights/{today}/{mode}'
    logger_dir = f'./logger/{today}_{mode}'
    csv = f'./hist_{today}_{mode}.csv'
    if refresh:
        try:
            shutil.rmtree(result_dir)
            shutil.rmtree(weight_dir)
            shutil.rmtree(logger_dir)
        except FileNotFoundError:
            pass
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(weight_dir, exist_ok=True)
    os.makedirs(logger_dir, exist_ok=True)
    logger = SummaryWriter(log_dir=logger_dir, flush_secs=2)
    model = model.to(device)

    params = list(model.parameters())
    optim = torch.optim.Adam(params, lr=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optim,
                                                step_size=1000,
                                                gamma=0.99)
    criterion = torch.nn.L1Loss()
    GMSD = GMSD_quality().to(device)
    opening = Opening().to(device)
    blur = Blur().to(device)
    mshf = MSHF(3, 3).to(device)

    downx2_bicubic = nn.Upsample(scale_factor=1 / 2,
                                 mode='bicubic',
                                 align_corners=False)
    downx4_bicubic = nn.Upsample(scale_factor=1 / 4,
                                 mode='bicubic',
                                 align_corners=False)

    start_time = time.time()
    print(f'Training Start || Mode: {mode}')

    step = 0
    pfix = OrderedDict()
    pfix_test = OrderedDict()

    hist = dict()
    hist['mode'] = f'{today}_{mode}'
    for key in ['epoch', 'psnr', 'ssim', 'ms-ssim']:
        hist[key] = []

    soft_mask = False

    # hf_kernel = get_hf_kernel(mode='high')

    for epoch in range(epoch_start, epoch_start + num_epochs):

        if epoch == 0:
            torch.save(model.state_dict(),
                       f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            with torch.no_grad():
                with tqdm(
                        test_loader,
                        desc=
                        f'{mode} || Warming Up || Test Epoch {epoch}/{num_epochs}',
                        position=0,
                        leave=True) as pbar_test:
                    psnrs = []
                    ssims = []
                    msssims = []
                    for lr, hr, fname in pbar_test:
                        lr = lr.to(device)
                        hr = hr.to(device)

                        sr, srx2, srx1 = model(lr)

                        sr = quantize(sr)

                        psnr, ssim, msssim = evaluate(hr, sr)

                        psnrs.append(psnr)
                        ssims.append(ssim)
                        msssims.append(msssim)

                        psnr_mean = np.array(psnrs).mean()
                        ssim_mean = np.array(ssims).mean()
                        msssim_mean = np.array(msssims).mean()

                        pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                        pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                        pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                        pbar_test.set_postfix(pfix_test)
                        if len(psnrs) > 1: break

        with tqdm(train_loader,
                  desc=f'{mode} || Epoch {epoch+1}/{num_epochs}',
                  position=0,
                  leave=True) as pbar:
            psnrs = []
            ssims = []
            msssims = []
            losses = []
            for lr, hr, _ in pbar:
                lr = lr.to(device)
                hr = hr.to(device)

                hrx1 = downx4_bicubic(hr)
                hrx2 = downx2_bicubic(hr)

                # prediction
                sr, srx2, srx1 = model(lr)

                gmsd = GMSD(hr, sr)

                sr_ = quantize(sr)
                psnr, ssim, msssim = evaluate(hr, sr_)

                if psnr >= 40 - 2 * scale:
                    soft_mask = True
                else:
                    soft_mask = False

                if soft_mask:
                    # with torch.no_grad():
                    #     for _ in range(10): gmsd = opening(gmsd)
                    gmask = gmsd / gmsd.max()
                    gmask = (gmask > 0.2) * 1.0
                    gmask = blur(gmask)
                    gmask = (gmask - gmask.min()) / (gmask.max() -
                                                     gmask.min() + 1e-7)
                    gmask = (gmask + 0.25) / 1.25
                    gmask = gmask.detach()

                    gmaskx2 = downx2_bicubic(gmask)
                    gmaskx1 = downx4_bicubic(gmask)

                    # training
                    loss = criterion(sr * gmask, hr * gmask)
                    lossx2 = criterion(srx2 * gmaskx2, hrx2 * gmaskx2)
                    lossx1 = criterion(srx1 * gmaskx1, hrx1 * gmaskx1)
                else:
                    loss = criterion(sr, hr)
                    lossx2 = criterion(srx2, hrx2)
                    lossx1 = criterion(srx1, hrx1)

                # training
                loss_tot = loss + 0.25 * lossx2 + 0.125 * lossx1
                optim.zero_grad()
                loss_tot.backward()
                optim.step()
                scheduler.step()

                # training history
                elapsed_time = time.time() - start_time
                elapsed = sec2time(elapsed_time)
                pfix['Loss'] = f'{loss.item():.4f}'
                pfix['x2'] = f'{lossx2.item():.4f}'
                pfix['x1'] = f'{lossx1.item():.4f}'

                psnrs.append(psnr)
                ssims.append(ssim)
                msssims.append(msssim)

                psnr_mean = np.array(psnrs).mean()
                ssim_mean = np.array(ssims).mean()
                msssim_mean = np.array(msssims).mean()

                pfix['PSNR_mean'] = f'{psnr_mean:.2f}'
                pfix['SSIM_mean'] = f'{ssim_mean:.4f}'

                free_gpu = get_gpu_memory()[0]

                pfix['Elapsed'] = f'{elapsed}'
                pfix['free GPU'] = f'{free_gpu}MiB'

                pbar.set_postfix(pfix)
                losses.append(loss.item())

                if step % save_image_every == 0:

                    z = torch.zeros_like(lr[0])
                    _, _, llr, _ = lr.shape
                    _, _, hlr, _ = hr.shape
                    if hlr // 2 == llr:
                        xz = torch.cat((lr[0], z), dim=-2)
                    elif hlr // 4 == llr:
                        xz = torch.cat((lr[0], z, z, z), dim=-2)
                    imsave([xz, sr[0], hr[0], gmsd[0]],
                           f'{result_dir}/epoch_{epoch+1}_iter_{step:05d}.jpg')

                step += 1

            logger.add_scalar("Loss/train", np.array(losses).mean(), epoch + 1)
            logger.add_scalar("PSNR/train", psnr_mean, epoch + 1)
            logger.add_scalar("SSIM/train", ssim_mean, epoch + 1)

            if (epoch + 1) % save_model_every == 0:
                torch.save(model.state_dict(),
                           f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            if (epoch + 1) % test_model_every == 0:

                with torch.no_grad():
                    with tqdm(
                            test_loader,
                            desc=f'{mode} || Test Epoch {epoch+1}/{num_epochs}',
                            position=0,
                            leave=True) as pbar_test:
                        psnrs = []
                        ssims = []
                        msssims = []
                        for lr, hr, fname in pbar_test:

                            fname = fname[0].split('/')[-1].split('.pt')[0]

                            lr = lr.to(device)
                            hr = hr.to(device)

                            sr, _, _ = model(lr)

                            mshf_hr = mshf(hr)
                            mshf_sr = mshf(sr)

                            gmsd = GMSD(hr, sr)

                            sr = quantize(sr)

                            psnr, ssim, msssim = evaluate(hr, sr)

                            psnrs.append(psnr)
                            ssims.append(ssim)
                            msssims.append(msssim)

                            psnr_mean = np.array(psnrs).mean()
                            ssim_mean = np.array(ssims).mean()
                            msssim_mean = np.array(msssims).mean()

                            pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                            pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                            pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                            pbar_test.set_postfix(pfix_test)

                            z = torch.zeros_like(lr[0])
                            _, _, llr, _ = lr.shape
                            _, _, hlr, _ = hr.shape
                            if hlr // 2 == llr:
                                xz = torch.cat((lr[0], z), dim=-2)
                            elif hlr // 4 == llr:
                                xz = torch.cat((lr[0], z, z, z), dim=-2)
                            imsave([xz, sr[0], hr[0], gmsd[0]],
                                   f'{result_dir}/{fname}.jpg')

                            mshf_vis = torch.cat(
                                (torch.cat([
                                    mshf_sr[:, i, :, :]
                                    for i in range(mshf_sr.shape[1])
                                ],
                                           dim=-1),
                                 torch.cat([
                                     mshf_hr[:, i, :, :]
                                     for i in range(mshf_hr.shape[1])
                                 ],
                                           dim=-1)),
                                dim=-2)

                            imsave(mshf_vis, f'{result_dir}/MSHF_{fname}.jpg')

                        hist['epoch'].append(epoch + 1)
                        hist['psnr'].append(psnr_mean)
                        hist['ssim'].append(ssim_mean)
                        hist['ms-ssim'].append(msssim_mean)

                        logger.add_scalar("PSNR/test", psnr_mean, epoch + 1)
                        logger.add_scalar("SSIM/test", ssim_mean, epoch + 1)
                        logger.add_scalar("MS-SSIM/test", msssim_mean,
                                          epoch + 1)

                        df = pd.DataFrame(hist)
                        df.to_csv(csv)
    return model
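
`quantize` is applied to the network output before every evaluation in this example. In EDSR-style codebases it snaps a [0, 1] float tensor to the 8-bit grid; a sketch under that assumption:

def quantize(img):
    # Clamp to [0, 1] and round to 255 discrete levels (assumed behavior).
    return img.clamp(0, 1).mul(255).round().div(255)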
Example #11
 def run(self):
     """ This function shows the frame and all visualizations. It is also responsable for synchronization of stimulus and computer time."""
             
     """ scheduler configuration """
     cpu_time = time.time()
     self.snippets[self.snippet_id].frame_count += 1
     #act_frame_element = s.enterabs(sequence.time_started+1/stimulus.fps/SPEED*sequence.frame_count, 1, showFrame, (s,stimulus,sequence,gazess,player))    
     
     """ load image """
     frameImg = self.stimulus.get_next_frame()
     if frameImg is None:
         return False
     
     """ adjusting stimulus, due to slow computers """
     if self.qscheduler.isActive():
         skipped_frames = self.stimulus.resync()
         self.snippets[self.snippet_id].skipped_frames += skipped_frames
     
     """ when snippet is over, stop scheduler through deleting next schedule element """
     if not self.snippets[self.snippet_id].contains(self.stimulus.act_pos):
         if self.snippet_id + 1 >= len(self.snippets):
             self.play_snippet(1)
         else:
             self.play_snippet(self.snippet_id + 1)
         if self.stop_after:
             self.qscheduler.stop()
         return
     
     """ bgr to gray convertion for using gray stimulus image """
     if self.movie_gray > 0:
         frameImg = cv2.cvtColor(frameImg,cv.CV_BGR2GRAY)
         frameImg = cv2.cvtColor(frameImg,cv.CV_GRAY2BGR)
     showPic = cv2.addWeighted(frameImg, self.movie_opacity, frameImg, 0, 0)     #TODO: find better implementation
     
     for ts in self.timeseries:
         ts.show(self.stimulus.act_pos)
     
     for an in self.annotations:
         an.show(int(self.stimulus.act_pos*self.stimulus.fps))
     
     
     heatMap = None
     if self.show_aperture > 0:
         heatMap = self.gazes.get_heat_map(self.stimulus, .5)
         aperture = cv2.resize(heatMap, (self.stimulus.width, self.stimulus.height)) / 255.
         ap_pic = showPic.copy()
         for i in range(3):
             ap_pic[:,:,i] = showPic[:,:,i] * aperture
         showPic = ap_pic
         
     if self.show_gazes_each > 0:
         ovImg = self.gazes.get_each(self.stimulus)
         showPic = cv2.addWeighted(showPic, 1, ovImg, self.show_gazes_each, 0)
     
     if self.show_gazes_clustered > 0:
         ovImg = self.gazes.get_clustered(self.stimulus, heatMap)
         showPic = cv2.addWeighted(showPic, 1, ovImg, self.show_gazes_clustered, 0)
     
     """ shows final stimulus visualized image """
     self.stim_win.show_img(showPic)
     self.control_win.update_texts(self)
     self.stim_win.pos.setText('%(start)s / %(end)s' %{'start':utils.sec2time(self.stimulus.act_pos), 'end': self.stimulus.end_pos_str})
     self.stim_win.debug.setText('skipped: %(fr)i' %{'fr':self.snippets[self.snippet_id].skipped_frames})
     cv2.waitKey(1)
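
The resync step above calls `self.stimulus.resync()`, whose body is not shown. A sketch of what it might do, reusing the attributes of the stimulus wrapper from Example #6 (`act_pos`, `fps`, `start_pos`, `start_time`, `speed`, `data`); the logic is an assumption:

import time

def resync(self):
    # Drop frames until the stimulus position catches up with the
    # wall-clock playback position; return how many frames were skipped.
    target = self.start_pos + (time.time() - self.start_time) * self.speed
    skipped = 0
    while self.act_pos + 1.0 / self.fps < target:
        self.data.grab()  # decode-and-discard one frame
        self.act_pos += 1.0 / self.fps
        skipped += 1
    return skipped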
Example #12
def train(model,
          train_loader,
          test_loader,
          mode='EDSR_Baseline',
          save_image_every=50,
          save_model_every=10,
          test_model_every=1,
          num_epochs=1000,
          device=None,
          refresh=True):

    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

    today = datetime.datetime.now().strftime('%Y.%m.%d')

    result_dir = f'./results/{today}/{mode}'
    weight_dir = f'./weights/{today}/{mode}'
    logger_dir = f'./logger/{today}_{mode}'
    csv = f'./hist_{today}_{mode}.csv'
    if refresh:
        try:
            shutil.rmtree(result_dir)
            shutil.rmtree(weight_dir)
            shutil.rmtree(logger_dir)
        except FileNotFoundError:
            pass
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(weight_dir, exist_ok=True)
    os.makedirs(logger_dir, exist_ok=True)
    logger = SummaryWriter(log_dir=logger_dir, flush_secs=2)
    model = model.to(device)

    params = list(model.parameters())
    optim = torch.optim.Adam(params, lr=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optim,
                                                step_size=1000,
                                                gamma=0.99)
    criterion = torch.nn.L1Loss()

    ######

    ED = Edge().to(device)

    ######

    start_time = time.time()
    print(f'Training Start || Mode: {mode}')

    step = 0
    pfix = OrderedDict()
    pfix_test = OrderedDict()

    hist = dict()
    hist['mode'] = f'{today}_{mode}'
    for key in ['epoch', 'psnr', 'ssim', 'ms-ssim']:
        hist[key] = []

    for epoch in range(num_epochs):

        if epoch == 0:
            torch.save(model.state_dict(),
                       f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            with torch.no_grad():
                with tqdm(
                        test_loader,
                        desc=
                        f'Mode: {mode} || Warming Up || Test Epoch {epoch}/{num_epochs}',
                        position=0,
                        leave=True) as pbar_test:
                    psnrs = []
                    ssims = []
                    msssims = []
                    for lr, hr, fname in pbar_test:
                        lr = lr.to(device)
                        hr = hr.to(device)

                        sr, _, features = model(lr)
                        sr = quantize(sr)

                        psnr, ssim, msssim = evaluate(hr, sr)

                        psnrs.append(psnr)
                        ssims.append(ssim)
                        msssims.append(msssim)

                        psnr_mean = np.array(psnrs).mean()
                        ssim_mean = np.array(ssims).mean()
                        msssim_mean = np.array(msssims).mean()

                        pfix_test['psnr'] = f'{psnr:.4f}'
                        pfix_test['ssim'] = f'{ssim:.4f}'
                        pfix_test['msssim'] = f'{msssim:.4f}'
                        pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                        pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                        pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                        pbar_test.set_postfix(pfix_test)
                        if len(psnrs) > 1: break

        with tqdm(train_loader,
                  desc=f'Mode: {mode} || Epoch {epoch+1}/{num_epochs}',
                  position=0,
                  leave=True) as pbar:
            psnrs = []
            ssims = []
            msssims = []
            losses = []
            for lr, hr, _ in pbar:
                lr = lr.to(device)
                hr = hr.to(device)

                # prediction
                sr, lr_edge, features = model(lr)

                ####
                hr_edge = ED(hr)
                loss_edge = criterion(lr_edge, hr_edge)
                #####

                # training
                loss = criterion(hr, sr)
                loss_tot = loss + 0.1 * loss_edge
                optim.zero_grad()
                loss_tot.backward()
                optim.step()
                scheduler.step()

                # training history
                elapsed_time = time.time() - start_time
                elapsed = sec2time(elapsed_time)
                pfix['Step'] = f'{step+1}'
                pfix['Loss'] = f'{loss.item():.4f}'
                pfix['Loss Edge'] = f'{loss_edge.item():.4f}'

                sr = quantize(sr)
                psnr, ssim, msssim = evaluate(hr, sr)

                psnrs.append(psnr)
                ssims.append(ssim)
                msssims.append(msssim)

                psnr_mean = np.array(psnrs).mean()
                ssim_mean = np.array(ssims).mean()
                msssim_mean = np.array(msssims).mean()

                pfix['PSNR'] = f'{psnr:.2f}'
                pfix['SSIM'] = f'{ssim:.4f}'
                # pfix['MSSSIM'] = f'{msssim:.4f}'
                pfix['PSNR_mean'] = f'{psnr_mean:.2f}'
                pfix['SSIM_mean'] = f'{ssim_mean:.4f}'
                # pfix['MSSSIM_mean'] = f'{msssim_mean:.4f}'

                free_gpu = get_gpu_memory()[0]

                pfix['free GPU'] = f'{free_gpu}MiB'
                pfix['Elapsed'] = f'{elapsed}'

                pbar.set_postfix(pfix)
                losses.append(loss.item())

                if step % save_image_every == 0:

                    z = torch.zeros_like(lr[0])
                    _, _, llr, _ = lr.shape
                    _, _, hlr, _ = hr.shape
                    if hlr // 2 == llr:
                        xz = torch.cat((lr[0], z), dim=-2)
                    elif hlr // 4 == llr:
                        xz = torch.cat((lr[0], z, z, z), dim=-2)
                    imsave([xz, sr[0], hr[0]],
                           f'{result_dir}/epoch_{epoch+1}_iter_{step:05d}.jpg')

                step += 1

            logger.add_scalar("Loss/train", np.array(losses).mean(), epoch + 1)
            logger.add_scalar("PSNR/train", psnr_mean, epoch + 1)
            logger.add_scalar("SSIM/train", ssim_mean, epoch + 1)

            if (epoch + 1) % save_model_every == 0:
                torch.save(model.state_dict(),
                           f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            if (epoch + 1) % test_model_every == 0:

                with torch.no_grad():
                    with tqdm(
                            test_loader,
                            desc=
                            f'Mode: {mode} || Test Epoch {epoch+1}/{num_epochs}',
                            position=0,
                            leave=True) as pbar_test:
                        psnrs = []
                        ssims = []
                        msssims = []
                        for lr, hr, fname in pbar_test:

                            fname = fname[0].split('/')[-1].split('.pt')[0]

                            lr = lr.to(device)
                            hr = hr.to(device)

                            sr, _, features = model(lr)
                            sr = quantize(sr)

                            psnr, ssim, msssim = evaluate(hr, sr)

                            psnrs.append(psnr)
                            ssims.append(ssim)
                            msssims.append(msssim)

                            psnr_mean = np.array(psnrs).mean()
                            ssim_mean = np.array(ssims).mean()
                            msssim_mean = np.array(msssims).mean()

                            pfix_test['psnr'] = f'{psnr:.4f}'
                            pfix_test['ssim'] = f'{ssim:.4f}'
                            pfix_test['msssim'] = f'{msssim:.4f}'
                            pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                            pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                            pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                            pbar_test.set_postfix(pfix_test)

                            z = torch.zeros_like(lr[0])
                            _, _, llr, _ = lr.shape
                            _, _, hlr, _ = hr.shape
                            if hlr // 2 == llr:
                                xz = torch.cat((lr[0], z), dim=-2)
                            elif hlr // 4 == llr:
                                xz = torch.cat((lr[0], z, z, z), dim=-2)
                            imsave([xz, sr[0], hr[0]],
                                   f'{result_dir}/{fname}.jpg')

                        hist['epoch'].append(epoch + 1)
                        hist['psnr'].append(psnr_mean)
                        hist['ssim'].append(ssim_mean)
                        hist['ms-ssim'].append(msssim_mean)

                        logger.add_scalar("PSNR/test", psnr_mean, epoch + 1)
                        logger.add_scalar("SSIM/test", ssim_mean, epoch + 1)
                        logger.add_scalar("MS-SSIM/test", msssim_mean,
                                          epoch + 1)

                        df = pd.DataFrame(hist)
                        df.to_csv(csv)
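
This example's edge loss depends on an `Edge` module whose definition is not included. A minimal sketch, assuming a fixed (non-learned) per-channel Sobel gradient magnitude:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Edge(nn.Module):
    def __init__(self):
        super().__init__()
        kx = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]])
        self.register_buffer('kx', kx.view(1, 1, 3, 3))
        self.register_buffer('ky', kx.t().contiguous().view(1, 1, 3, 3))

    def forward(self, x):
        # apply the Sobel pair channel-wise and return the gradient magnitude
        b, c, h, w = x.shape
        x = x.reshape(b * c, 1, h, w)
        gx = F.conv2d(x, self.kx, padding=1)
        gy = F.conv2d(x, self.ky, padding=1)
        return torch.sqrt(gx ** 2 + gy ** 2 + 1e-12).reshape(b, c, h, w)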
Example #13
def train(config):
    self_summary_strs = []  # summary string to print out for later

    # first, read both datasets and filter them to get the word2vec indices
    train_data = read_data(config, 'train', config.load)
    val_data = read_data(
        config, 'val', True
    )  # dev should always load model shared data(word2idx etc.) from train

    config_vars = vars(config)
    str_ = "threshold setting--\n" + "\t" + " ,".join(
        ["%s:%s" % (key, config_vars[key]) for key in config.thresmeta])
    print(str_)
    self_summary_strs.append(str_)

    # cap the numbers
    # max sentence word count etc.
    update_config(config, [train_data, val_data],
                  showMeta=True)  # all word num is <= max_thres

    str_ = "renewed ----\n" + "\t" + " ,".join(
        ["%s:%s" % (key, config_vars[key]) for key in config.maxmeta])
    print(str_)
    self_summary_strs.append(str_)

    # now we initialize the embedding matrix for words not in GloVe
    word2vec_dict = train_data.shared['word2vec']
    word2idx_dict = train_data.shared[
        'word2idx']  # these are the words not in word2vec

    # we are not fine-tuning, so this should be empty
    idx2vec_dict = {
        word2idx_dict[word]: vec
        for word, vec in word2vec_dict.items() if word in word2idx_dict
    }

    # random initial embedding matrix for new words
    config.emb_mat = np.array([
        idx2vec_dict[idx]
        if idx in idx2vec_dict else np.random.multivariate_normal(
            np.zeros(config.word_emb_size), np.eye(config.word_emb_size))
        for idx in range(config.word_vocab_size)
    ],
                              dtype="float32")

    model = get_model(config)  # construct model under gpu0

    trainer = Trainer(model, config)
    tester = Tester(model, config)
    saver = tf.train.Saver(max_to_keep=5)  # how many checkpoints to keep
    bestsaver = tf.train.Saver(max_to_keep=5)  # just for saving the best model

    save_period = config.save_period  # also the eval period

    # for debug, show the batch content
    if (config.showspecs):
        for batch in train_data.get_batches(2, num_steps=20):

            batchIdx, batchDs = batch

            print "showing a batch with batch_size=2"
            # show each data point
            print "keys:%s" % batchDs.data.keys()
            for key in sorted(batchDs.data.keys()):
                print "\t%s:%s" % (key, batchDs.data[key])

            # show some image feature
            photo_idx1 = batchDs.data['photo_idxs'][0][0][
                0]  # [batch_num][album_num][photo_num]
            photo_id1 = batchDs.data['photo_ids'][0][0][0]
            photo_idx2 = batchDs.data['photo_idxs'][1][0][0]
            photo_id2 = batchDs.data['photo_ids'][1][0][0]

            print "pidx:%s,pid:%s,feature:\n %s (%s)\n,should be:\n %s (%s)" % (
                photo_idx1, photo_id1,
                batchDs.data['pidx2feat'][photo_idx1][:10],
                batchDs.data['pidx2feat'][photo_idx1].shape,
                train_data.shared['pid2feat'][photo_id1][:10],
                train_data.shared['pid2feat'][photo_id1].shape)

            print "pidx:%s,pid:%s,feature:\n %s (%s)\n,should be:\n %s (%s)" % (
                photo_idx2, photo_id2,
                batchDs.data['pidx2feat'][photo_idx2][:10],
                batchDs.data['pidx2feat'][photo_idx2].shape,
                train_data.shared['pid2feat'][photo_id2][:10],
                train_data.shared['pid2feat'][photo_id2].shape)

            # get the feed_dict to check
            #feed_dict = model.get_feed_dict(batchDs,is_train=True)

            feed_dict = model.get_feed_dict(batchDs, is_train=False)

            sys.exit()

    # start training!
    # allow_soft_placement: TF will automatically pick another device if the
    # one requested via tf.device(*) is unavailable
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True  # allocate GPU memory as needed rather than grabbing it all
    with tf.Session(config=tfconfig) as sess:

        # calculate total parameters
        totalParam = cal_total_param()
        str_ = "total parameters: %s" % (totalParam)
        print(str_)
        self_summary_strs.append(str_)

        initialize(load=config.load,
                   load_best=config.load_best,
                   model=model,
                   config=config,
                   sess=sess)

        # the total step (iteration) the model will run
        last_time = time.time()
        # total / batchSize  * epoch
        num_steps = int(
            math.ceil(train_data.num_examples /
                      float(config.batch_size))) * config.num_epochs
        # get_batches is a generator, run on the fly
        # there will be num_steps batch
        str_ = " batch_size:%s, epoch:%s,total step:%s,eval/save every %s steps" % (
            config.batch_size, config.num_epochs, num_steps,
            config.save_period)
        print str_
        self_summary_strs.append(str_)

        best = {
            "acc": 0.0,
            "step": -1
        }  # remember the best eval acc during training

        finalAcc = None
        isStart = True

        for batch in tqdm(train_data.get_batches(config.batch_size,
                                                 num_steps=num_steps),
                          total=num_steps):
            # each batch has (batch_idxs,Dataset(batch_data, full_shared))
            # batch_data has {"q":,"y":..."pidx2feat",.."photo_idxs"..}

            global_step = sess.run(model.global_step) + 1  # start from 0

            # if loaded from an existing model, save it first
            if config.load and isStart:
                tqdm.write("saving original model...")
                tqdm.write("\tsaving model...")
                saver.save(sess,
                           config.save_dir_model,
                           global_step=global_step)
                tqdm.write("\tdone")
                isStart = False

                id2predanswers = {}
                id2realanswers = {}
                for evalbatch in val_data.get_batches(
                        config.batch_size,
                        num_steps=config.val_num_batches,
                        shuffle=False,
                        cap=True):
                    yp = tester.step(
                        sess, evalbatch
                    )  # [N,4] # id2realanswersprob for each answer
                    pred, gt = getAnswers(
                        yp, evalbatch)  # from here we get the qid:yindx,
                    id2predanswers.update(pred)
                    id2realanswers.update(gt)
                evalAcc = getEvalScore(id2predanswers, id2realanswers)

                tqdm.write(
                    "\teval on validation %s batches Acc:%s, (best:%s at step %s) "
                    % (config.val_num_batches, evalAcc, best['acc'],
                       best['step']))
                # remember the best acc
                if (evalAcc > best['acc']):
                    best['acc'] = evalAcc
                    best['step'] = global_step
                    # save the best model
                    tqdm.write("\t saving best model...")
                    bestsaver.save(sess,
                                   config.save_dir_best_model,
                                   global_step=global_step)
                    tqdm.write("\t done.")

                finalAcc = evalAcc

            loss, summary, train_op = trainer.step(sess,
                                                   batch,
                                                   get_summary=False)

            if global_step % save_period == 0:  # time to save model

                duration = time.time() - last_time  # in seconds
                sec_per_step = duration / float(save_period)
                last_time = time.time()
                #use tqdm to print
                tqdm.write(
                    "step:%s/%s (epoch:%.3f), took %s, loss:%s, estimate remaining:%s"
                    % (global_step, num_steps,
                       (config.num_epochs * global_step / float(num_steps)),
                       sec2time(duration), loss,
                       sec2time((num_steps - global_step) * sec_per_step)))
                tqdm.write("\tsaving model...")
                saver.save(sess,
                           config.save_dir_model,
                           global_step=global_step)
                tqdm.write("\tdone")

                id2predanswers = {}
                id2realanswers = {}
                for evalbatch in val_data.get_batches(
                        config.batch_size,
                        num_steps=config.val_num_batches,
                        shuffle=False,
                        cap=True):
                    yp = tester.step(
                        sess, evalbatch
                    )  # [N,4] # id2realanswersprob for each answer
                    pred, gt = getAnswers(
                        yp, evalbatch)  # from here we get the qid:yindx,
                    id2predanswers.update(pred)
                    id2realanswers.update(gt)
                evalAcc = getEvalScore(id2predanswers, id2realanswers)

                tqdm.write(
                    "\teval on validation %s batches Acc:%s, (best:%s at step %s) "
                    % (config.val_num_batches, evalAcc, best['acc'],
                       best['step']))
                # remember the best acc
                if (evalAcc > best['acc']):
                    best['acc'] = evalAcc
                    best['step'] = global_step
                    # save the best model
                    tqdm.write("\t saving best model...")
                    bestsaver.save(sess,
                                   config.save_dir_best_model,
                                   global_step=global_step)
                    tqdm.write("\t done.")

                finalAcc = evalAcc

        if global_step % save_period != 0:  # time to save model
            saver.save(sess, config.save_dir_model, global_step=global_step)
        str_ = "best eval on val Accurucy: %s at %s step, final step %s Acc is %s" % (
            best['acc'], best['step'], global_step, finalAcc)
        print str_
        self_summary_strs.append(str_)
        if config.write_self_sum:
            f = open(config.self_summary_path, "w")
            f.write("\n".join(self_summary_strs))
            f.close()
Example #14
                        video_writer.write(frame)
                    video_writer.release()
                    cv2.destroyAllWindows()

                    del cube_track_datas[gid]
                    del frame_data
                    gc.collect()
    print("Total [ori_track/global]: Person %s, Vehicle %s" %
          (total_person_num, total_vehicle_num))
    for cat_name in ["Person", "Vehicle"]:
        g_lengths_sorted = sorted(global_track_lengths[cat_name],
                                  key=lambda x: x[0])
        print(
            "\t %s: %s global track total length %s. shortest %.1f, longest %.1f (g%s), median %.1f"
            % (cat_name, len(global_track_lengths[cat_name]),
               sec2time(sum([o[0] for o in global_track_lengths[cat_name]
                             ])), g_lengths_sorted[0][0],
               g_lengths_sorted[-1][0], g_lengths_sorted[-1][1],
               np.median([o[0] for o in global_track_lengths[cat_name]])))
        print("\tlongest track gid %s, from %s: %s" %
              (longest_track[cat_name][1][0], longest_track[cat_name][1][1],
               longest_track[cat_name][2]))
        g_cams_sorted = sorted(global_track_cams[cat_name], key=lambda x: x[0])
        print(
            "\t %s: %s global track total local tracks %s. least cams %d, most %d (g%s), median %.1f"
            % (cat_name, len(global_track_cams[cat_name]),
               sum([o[0] for o in global_track_cams[cat_name]]),
               g_cams_sorted[0][0], g_cams_sorted[-1][0], g_cams_sorted[-1][1],
               np.median([o[0] for o in global_track_cams[cat_name]])))
        print("\tmost_camera track gid %s, length %.1f, from %s: %s" %
              (most_camera_track[cat_name][1][0],
               most_camera_track[cat_name][1][2],
Example #15
        for lr, hr, _ in pbar:

            # prediction
            pred = model(lr)

            # training
            loss = criterion(hr, pred)
            optim.zero_grad()
            loss.backward()
            optim.step()
            scheduler.step()

            # training history
            free_gpu = get_gpu_memory()[0]
            elapsed_time = time.time() - start_time
            elapsed = sec2time(elapsed_time)
            pfix['Step'] = f'{step+1}'
            pfix['Loss'] = f'{loss.item():.4f}'
            pfix['free GPU'] = f'{free_gpu}MiB'
            pfix['Elapsed'] = f'{elapsed}'
            hist['Iter'].append(step)
            hist['Loss'].append(loss.item())
            pbar.set_postfix(pfix)

            if step % save_image_every == 0:

                # pad the LR patch with zeros to match the HR height (assumes a
                # 2x scale factor), then tile LR | pred | HR side by side
                z = torch.zeros_like(lr[0])
                xz = torch.cat((lr[0], z), dim=-2)
                img = torch.cat((xz, pred[0], hr[0]), dim=-1)
                img = torch.clamp(img, 0, 1).cpu()
                img = t2img(img)
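The progress-bar bookkeeping in this and the later examples follows a single pattern: keep an ordered dict of display fields, update it each step, and push it through tqdm's set_postfix. Stripped to its essentials (a sketch with a dummy loop in place of training; the real snippets format the elapsed field via sec2time):

import time
from collections import OrderedDict
from tqdm import tqdm

pfix = OrderedDict()
start_time = time.time()
with tqdm(range(100)) as pbar:
    for step in pbar:
        time.sleep(0.01)  # dummy work standing in for a training step
        pfix['Step'] = f'{step + 1}'
        pfix['Elapsed'] = f'{time.time() - start_time:.1f}s'
        pbar.set_postfix(pfix)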
Example #16
0
def train(config):
    self_summary_strs = []  # summary string to print out for later

    # first, read both datasets and filter them to get the word2vec indices
    train_data = read_data(config, 'train', config.load)
    val_data = read_data(
        config, 'val', True
    )  # the dev set always loads shared data (word2idx etc.) from train

    # now that the dataset is loaded, get the max_word_size from it,
    # adjust the max based on the threshold, and get the vocab size
    config_vars = vars(config)
    str_ = "threshold setting--\n" + "\t" + " ,".join(
        ["%s:%s" % (key, config_vars[key]) for key in config.thresmeta])
    print str_
    self_summary_strs.append(str_)

    # cap the numbers
    # max sentence word count etc.
    update_config(config, [train_data, val_data],
                  showMeta=True)  # all word num is <= max_thres

    str_ = "renewed ----\n" + "\t" + " ,".join(
        ["%s:%s" % (key, config_vars[key]) for key in config.maxmeta])
    print str_
    self_summary_strs.append(str_)

    # now initialize the embedding matrix for words not in glove
    word2vec_dict = train_data.shared['word2vec']
    word2idx_dict = train_data.shared[
        'word2idx']  # these are the words not in word2vec

    # we are not fine-tuning, so this should be empty
    idx2vec_dict = {
        word2idx_dict[word]: vec
        for word, vec in word2vec_dict.items() if word in word2idx_dict
    }
    #print(len(idx2vec_dict)) # 0

    # config.word_vocab_size = len(train_data.shared['word2idx']) # the word not in word2vec
    # so the emb_mat should all be a random vector
    # np.random.multivariate_normal draws with mean zero and unit covariance
    # per dim, e.g.:
    #>>> np.random.multivariate_normal(np.zeros(5), np.eye(5))
    #array([-0.73663652, -1.16417783, -0.74083293, -0.80016731,  0.060182  ])

    # random initial embedding matrix for new words
    config.emb_mat = np.array([
        idx2vec_dict[idx]
        if idx in idx2vec_dict else np.random.multivariate_normal(
            np.zeros(config.word_emb_size), np.eye(config.word_emb_size))
        for idx in range(config.word_vocab_size)
    ], dtype="float32")

    model = get_model(config)  # construct model under gpu0

    trainer = Trainer(model, config)
    tester = Tester(model, config)
    saver = tf.train.Saver(max_to_keep=5)  # how many checkpoints to keep
    bestsaver = tf.train.Saver(max_to_keep=5)  # a separate saver for the best model

    save_period = config.save_period  # also the eval period

    # start training!
    # allow_soft_placement: tf will auto-select another device if the one in tf.device(*) is unavailable
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True  # only allocate as much GPU memory as necessary, rather than all of it
    # or you can set a hard limit:
    #tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.4
    with tf.Session(config=tfconfig) as sess:

        # calculate total parameters
        totalParam = cal_total_param()
        str_ = "total parameters: %s" % (totalParam)
        print(str_)
        self_summary_strs.append(str_)

        initialize(load=config.load,
                   load_best=config.load_best,
                   model=model,
                   config=config,
                   sess=sess)

        # the total number of steps (iterations) the model will run
        last_time = time.time()
        # total / batch_size * epochs
        num_steps = int(
            math.ceil(train_data.num_examples /
                      float(config.batch_size))) * config.num_epochs
        # get_batches is a generator, run on the fly
        # there will be num_steps batches
        str_ = " batch_size:%s, epochs:%s, total steps:%s, eval/save every %s steps" % (
            config.batch_size, config.num_epochs, num_steps,
            config.save_period)
        print(str_)
        self_summary_strs.append(str_)

        best = {
            "acc": 0.0,
            "step": -1
        }  # remember the best eval acc during training

        finalAcc = None
        isStart = True

        for batch in tqdm(train_data.get_batches(config.batch_size,
                                                 num_steps=num_steps),
                          total=num_steps):
            # each batch has (batch_idxs,Dataset(batch_data, full_shared))
            # batch_data has {"q":,"y":..."pidx2feat",.."photo_idxs"..}

            global_step = sess.run(model.global_step) + 1  # start from 0

            # if loading from an existing model, save it first
            if config.load and isStart:
                tqdm.write("saving original model...")
                tqdm.write("\tsaving model...")
                saver.save(sess,
                           config.save_dir_model,
                           global_step=global_step)
                tqdm.write("\tdone")
                isStart = False

                id2predanswers = {}
                id2realanswers = {}
                for evalbatch in val_data.get_batches(
                        config.batch_size,
                        num_steps=config.val_num_batches,
                        shuffle=False,
                        cap=True):
                    yp = tester.step(
                        sess, evalbatch
                    )  # [N,4] # id2realanswersprob for each answer
                    pred, gt = getAnswers(
                        yp, evalbatch)  # from here we get the qid:yindx,
                    id2predanswers.update(pred)
                    id2realanswers.update(gt)
                evalAcc = getEvalScore(id2predanswers, id2realanswers)

                tqdm.write(
                    "\teval on validation %s batches Acc:%s, (best:%s at step %s) "
                    % (config.val_num_batches, evalAcc, best['acc'],
                       best['step']))
                # remember the best acc
                if evalAcc > best['acc']:
                    best['acc'] = evalAcc
                    best['step'] = global_step
                    # save the best model
                    tqdm.write("\t saving best model...")
                    bestsaver.save(sess,
                                   config.save_dir_best_model,
                                   global_step=global_step)
                    tqdm.write("\t done.")

                finalAcc = evalAcc

            loss, train_op = trainer.step(sess, batch)

            if global_step % save_period == 0:  # time to save model

                duration = time.time() - last_time  # in seconds
                sec_per_step = duration / float(save_period)
                last_time = time.time()
                #use tqdm to print
                tqdm.write(
                    "step:%s/%s (epoch:%.3f), took %s, loss:%s, estimate remaining:%s"
                    % (global_step, num_steps,
                       (config.num_epochs * global_step / float(num_steps)),
                       sec2time(duration), loss,
                       sec2time((num_steps - global_step) * sec_per_step)))
                tqdm.write("\tsaving model...")
                saver.save(sess,
                           config.save_dir_model,
                           global_step=global_step)
                tqdm.write("\tdone")

                id2predanswers = {}
                id2realanswers = {}
                for evalbatch in val_data.get_batches(
                        config.batch_size,
                        num_steps=config.val_num_batches,
                        shuffle=False,
                        cap=True):
                    yp = tester.step(
                        sess, evalbatch
                    )  # [N,4] # id2realanswersprob for each answer
                    pred, gt = getAnswers(
                        yp, evalbatch)  # from here we get the qid:yindx,
                    id2predanswers.update(pred)
                    id2realanswers.update(gt)
                evalAcc = getEvalScore(id2predanswers, id2realanswers)

                tqdm.write(
                    "\teval on validation %s batches Acc:%s, (best:%s at step %s) "
                    % (config.val_num_batches, evalAcc, best['acc'],
                       best['step']))
                # remember the best acc
                if evalAcc > best['acc']:
                    best['acc'] = evalAcc
                    best['step'] = global_step
                    # save the best model
                    tqdm.write("\t saving best model...")
                    bestsaver.save(sess,
                                   config.save_dir_best_model,
                                   global_step=global_step)
                    tqdm.write("\t done.")

                finalAcc = evalAcc

        if global_step % save_period != 0:  # save the final model if we haven't just saved
            saver.save(sess, config.save_dir_model, global_step=global_step)
        str_ = "best eval on val Accurucy: %s at %s step, final step %s Acc is %s" % (
            best['acc'], best['step'], global_step, finalAcc)
        print str_
        self_summary_strs.append(str_)
        if config.write_self_sum:
            with open(config.self_summary_path, "w") as f:
                f.write("\n".join(self_summary_strs))
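The periodic logging in Example #16 derives its remaining-time estimate by averaging wall time over the last save_period steps and extrapolating to the steps still to run. The arithmetic in isolation (a sketch, not a helper from the repo):

import time

def eta_seconds(last_time, save_period, global_step, num_steps):
    """Estimate remaining seconds from the duration of the last save_period steps."""
    duration = time.time() - last_time        # wall time since the last checkpoint
    sec_per_step = duration / float(save_period)
    return (num_steps - global_step) * sec_per_step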
Example #17
0
def train(model, model_sr, train_loader, test_loader, mode='EDSR_Baseline', save_image_every=50, save_model_every=10, test_model_every=1, epoch_start=0, num_epochs=1000, device=None, refresh=True):

    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

    today = datetime.datetime.now().strftime('%Y.%m.%d')
    
    result_dir = f'./results/{today}/{mode}'
    weight_dir = f'./weights/{today}/{mode}'
    logger_dir = f'./logger/{today}_{mode}'
    csv = f'./hist_{today}_{mode}.csv'
    if refresh:
        try:
            shutil.rmtree(result_dir)
            shutil.rmtree(weight_dir)
            shutil.rmtree(logger_dir)
        except FileNotFoundError:
            pass
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(weight_dir, exist_ok=True)
    os.makedirs(logger_dir, exist_ok=True)
    logger = SummaryWriter(log_dir=logger_dir, flush_secs=2)
    model = model.to(device)
    model_sr = model_sr.to(device)

    params = list(model.parameters())
    optim = torch.optim.Adam(params, lr=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1000, gamma=0.99)
    criterion = torch.nn.L1Loss()
    GMSD = GMSD_quality().to(device)
    mshf = MSHF(3, 3).to(device)

    start_time = time.time()
    print(f'Training Start || Mode: {mode}')

    step = 0
    pfix = OrderedDict()
    pfix_test = OrderedDict()

    hist = dict()
    hist['mode'] = f'{today}_{mode}'
    for key in ['epoch', 'psnr', 'ssim', 'ms-ssim']:
        hist[key] = []

    for epoch in range(epoch_start, epoch_start+num_epochs):

        if epoch == 0:
            torch.save(model.state_dict(), f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            with torch.no_grad():
                with tqdm(test_loader, desc=f'{mode} || Warming Up || Test Epoch {epoch}/{num_epochs}', position=0, leave=True) as pbar_test:
                    psnrs = []
                    ssims = []
                    msssims = []
                    for lr, hr, fname in pbar_test:
                        lr = lr.to(device)
                        hr = hr.to(device)
                                                
                        sr, deep = model_sr(lr)
                        
                        fake = model(sr)
                        
                        sr = quantize(sr)
                        
                        psnr, ssim, msssim = evaluate(hr, sr)
                        
                        psnrs.append(psnr)
                        ssims.append(ssim)
                        msssims.append(msssim)
                        
                        psnr_mean = np.array(psnrs).mean()
                        ssim_mean = np.array(ssims).mean()
                        msssim_mean = np.array(msssims).mean()

                        pfix_test['psnr'] = f'{psnr:.4f}'
                        pfix_test['ssim'] = f'{ssim:.4f}'
                        pfix_test['msssim'] = f'{msssim:.4f}'
                        pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                        pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                        pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                        pbar_test.set_postfix(pfix_test)
                        if len(psnrs) > 1: break
                        

        with tqdm(train_loader, desc=f'{mode} || Epoch {epoch+1}/{num_epochs}', position=0, leave=True) as pbar:
            psnrs = []
            ssims = []
            msssims = []
            losses = []
            for lr, hr, _ in pbar:
                lr = lr.to(device)
                hr = hr.to(device)
                                
                # prediction
                sr, deep = model_sr(lr)
                
                fake = model(sr)
                loss_fake = criterion(fake, torch.zeros_like(fake, device=fake.device))
                
                real = model(hr)
                loss_real = criterion(real, torch.ones_like(real, device=real.device))
                
                # training
                loss_tot = loss_fake + loss_real
                optim.zero_grad()
                loss_tot.backward()
                optim.step()
                scheduler.step()
                
                # training history
                elapsed_time = time.time() - start_time
                elapsed = sec2time(elapsed_time)
                pfix['Step'] = f'{step+1}'
                pfix['Loss real'] = f'{loss_real.item():.4f}'
                pfix['Loss fake'] = f'{loss_fake.item():.4f}'

                free_gpu = get_gpu_memory()[0]
                pfix['free GPU'] = f'{free_gpu}MiB'
                pfix['Elapsed'] = f'{elapsed}'

                pbar.set_postfix(pfix)
                step += 1
                
            if (epoch+1) % save_model_every == 0:
                torch.save(model.state_dict(), f'{weight_dir}/epoch_{epoch+1:04d}.pth')
                
            if (epoch+1) % test_model_every == 0:
                
                with torch.no_grad():
                    with tqdm(test_loader, desc=f'{mode} || Test Epoch {epoch+1}/{num_epochs}', position=0, leave=True) as pbar_test:
                        psnrs = []
                        ssims = []
                        msssims = []
                        for lr, hr, fname in pbar_test:
                                        
                            lr = lr.to(device)
                            hr = hr.to(device)
                                            
                            # prediction
                            sr, deep = model_sr(lr)
                            
                            fake = model(sr)
                            loss_fake = criterion(fake, torch.zeros_like(fake, device=fake.device))
                            
                            real = model(hr)
                            loss_real = criterion(real, torch.ones_like(real, device=real.device))
                            
                            # training history 
                            elapsed_time = time.time() - start_time
                            elapsed = sec2time(elapsed_time)            
                            pfix_test['Step'] = f'{step+1}'
                            pfix_test['Loss real'] = f'{loss_real.item():.4f}'
                            pfix_test['Loss fake'] = f'{loss_fake.item():.4f}'
                            
                            pbar_test.set_postfix(pfix_test)
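Example #17 trains a discriminator-style network: outputs on super-resolved images are pushed toward 0 and outputs on real HR images toward 1, using an L1 criterion rather than the more common BCE. The update in isolation (a sketch; the detach is an addition for clarity, since the original optimizer only steps this model's parameters anyway):

import torch

def discriminator_step(model, sr, hr, criterion, optim):
    fake = model(sr.detach())  # detach: no gradient back into the SR generator
    real = model(hr)
    loss = (criterion(fake, torch.zeros_like(fake))
            + criterion(real, torch.ones_like(real)))
    optim.zero_grad()
    loss.backward()
    optim.step()
    return loss.item()

Here criterion would be something like torch.nn.L1Loss(), matching the snippet above.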
Example #18
0
def train(model, train_loader, test_loader, mode='EDSR_Baseline', save_image_every=50, save_model_every=10, test_model_every=1, epoch_start=0, num_epochs=1000, device=None, refresh=True, scale=2, today=None):

    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if today is None:
        today = datetime.datetime.now().strftime('%Y.%m.%d')
    
    result_dir = f'./results/{today}/{mode}'
    weight_dir = f'./weights/{today}/{mode}'
    logger_dir = f'./logger/{today}_{mode}'
    csv = f'./hist_{today}_{mode}.csv'
    if refresh:
        try:
            shutil.rmtree(result_dir)
            shutil.rmtree(weight_dir)
            shutil.rmtree(logger_dir)
        except FileNotFoundError:
            pass
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(weight_dir, exist_ok=True)
    os.makedirs(logger_dir, exist_ok=True)
    logger = SummaryWriter(log_dir=logger_dir, flush_secs=2)
    model = model.to(device)

    params = list(model.parameters())
    optim = torch.optim.Adam(params, lr=1e-4)
    scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=1000, gamma=0.99)
    criterion = torch.nn.L1Loss()
    
    start_time = time.time()
    print(f'Training Start || Mode: {mode}')

    step = 0
    pfix = OrderedDict()
    pfix_test = OrderedDict()

    hist = dict()
    hist['mode'] = f'{today}_{mode}'
    for key in ['epoch', 'psnr', 'ssim', 'ms-ssim']:
        hist[key] = []

    soft_mask = False
    
    sigmas = [10/255, 30/255, 50/255]
    
    # hf_kernel = get_hf_kernel(mode='high')

    for epoch in range(epoch_start, epoch_start+num_epochs):
        sigma = 0.0004 * (epoch+1)  # overwritten below by np.random.choice(sigmas)
        if epoch == 0:
            torch.save(model.state_dict(), f'{weight_dir}/epoch_{epoch+1:04d}.pth')

            with torch.no_grad():
                with tqdm(test_loader, desc=f'{mode} || Warming Up || Test Epoch {epoch}/{num_epochs}', position=0, leave=True) as pbar_test:
                    psnrs = []
                    ssims = []
                    msssims = []
                    for lr, hr, fname in pbar_test:
                        lr = lr.to(device)                        
                        hr = hr.to(device)
                        
                        sigma = np.random.choice(sigmas)
                        hr_input = hr + torch.randn_like(hr, device=hr.device)*sigma
                        hr_input = torch.clamp(hr_input, 0, 1)
                        
                        sr = model(hr_input)
                        
                        sr = quantize(sr)
                        
                        psnr, ssim, msssim = evaluate(hr, sr)
                        
                        psnrs.append(psnr)
                        ssims.append(ssim)
                        msssims.append(msssim)
                        
                        psnr_mean = np.array(psnrs).mean()
                        ssim_mean = np.array(ssims).mean()
                        msssim_mean = np.array(msssims).mean()

                        pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                        pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                        pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'

                        pbar_test.set_postfix(pfix_test)
                        if len(psnrs) > 1: break
                        

        with tqdm(train_loader, desc=f'{mode} || Epoch {epoch+1}/{num_epochs}', position=0, leave=True) as pbar:
            psnrs = []
            ssims = []
            msssims = []
            losses = []
            for lr, hr, _ in pbar:
                lr = lr.to(device)
                hr = hr.to(device)

                sigma = np.random.choice(sigmas)
                hr_input = hr + torch.randn_like(hr, device=hr.device)*sigma
                hr_input = torch.clamp(hr_input, 0, 1)
                
                sr = model(hr_input)
                
                sr_ = quantize(sr)      
                psnr, ssim, msssim = evaluate(hr, sr_)
                
                loss = criterion(sr, hr)
                # training
                loss_tot = loss
                optim.zero_grad()
                loss_tot.backward()
                optim.step()
                scheduler.step()
                
                # training history 
                elapsed_time = time.time() - start_time
                elapsed = sec2time(elapsed_time)            
                pfix['Step'] = f'{step+1}'
                pfix['Loss'] = f'{loss.item():.4f}'
                
                psnrs.append(psnr)
                ssims.append(ssim)
                msssims.append(msssim)

                psnr_mean = np.array(psnrs).mean()
                ssim_mean = np.array(ssims).mean()
                msssim_mean = np.array(msssims).mean()

                pfix['PSNR_mean'] = f'{psnr_mean:.2f}'
                pfix['SSIM_mean'] = f'{ssim_mean:.4f}'
                           
                free_gpu = get_gpu_memory()[0]
                
                pfix['free GPU'] = f'{free_gpu}MiB'
                pfix['Elapsed'] = f'{elapsed}'
                
                pbar.set_postfix(pfix)
                losses.append(loss.item())
                
                step += 1
                
            logger.add_scalar("Loss/train", np.array(losses).mean(), epoch+1)
            logger.add_scalar("PSNR/train", psnr_mean, epoch+1)
            logger.add_scalar("SSIM/train", ssim_mean, epoch+1)
            
            if (epoch+1) % save_model_every == 0:
                torch.save(model.state_dict(), f'{weight_dir}/epoch_{epoch+1:04d}.pth')
                
            if (epoch+1) % test_model_every == 0:
                
                with torch.no_grad():
                    with tqdm(test_loader, desc=f'{mode} || Test Epoch {epoch+1}/{num_epochs}', position=0, leave=True) as pbar_test:
                        psnrs = []
                        ssims = []
                        msssims = []
                        for lr, hr, fname in pbar_test:
                            
                            fname = fname[0].split('/')[-1].split('.pt')[0]
                            
                            # lr = lr.to(device)
                            hr = hr.to(device)
                            
                            sigma = np.random.choice(sigmas)
                            hr_input = hr + torch.randn_like(hr, device=hr.device)*sigma
                            hr_input = torch.clamp(hr_input, 0, 1)
                            
                            sr = model(hr_input)
                        
                            sr = quantize(sr)

                            psnr, ssim, msssim = evaluate(hr, sr)

                            psnrs.append(psnr)
                            ssims.append(ssim)
                            msssims.append(msssim)

                            psnr_mean = np.array(psnrs).mean()
                            ssim_mean = np.array(ssims).mean()
                            msssim_mean = np.array(msssims).mean()

                            pfix_test['psnr_mean'] = f'{psnr_mean:.4f}'
                            pfix_test['ssim_mean'] = f'{ssim_mean:.4f}'
                            pfix_test['msssim_mean'] = f'{msssim_mean:.4f}'
                            
                            pbar_test.set_postfix(pfix_test)
                            
                            
                            imsave([hr_input[0], sr[0], hr[0]], f'{result_dir}/{fname}.jpg')
                            
                        hist['epoch'].append(epoch+1)
                        hist['psnr'].append(psnr_mean)
                        hist['ssim'].append(ssim_mean)
                        hist['ms-ssim'].append(msssim_mean)
                        
                        logger.add_scalar("PSNR/test", psnr_mean, epoch+1)
                        logger.add_scalar("SSIM/test", ssim_mean, epoch+1)
                        logger.add_scalar("MS-SSIM/test", msssim_mean, epoch+1)
                        
                        df = pd.DataFrame(hist)
                        df.to_csv(csv)
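Example #18 turns the super-resolution loop into blind denoising: the clean HR image is corrupted with Gaussian noise at a sigma drawn per batch from {10, 30, 50}/255, clamped back to [0, 1], and the network learns to recover the original under an L1 loss. The corruption step in miniature (a sketch of the lines above, not a separate utility from the repo):

import numpy as np
import torch

sigmas = [10/255, 30/255, 50/255]  # noise levels in [0, 1] image range

def noisy_input(hr):
    sigma = float(np.random.choice(sigmas))
    return torch.clamp(hr + torch.randn_like(hr) * sigma, 0, 1)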