Example #1
def ShowImage(tensor1,tensor2,tensor3,showiter,max_num,writer,tag):
    show_imgs = []
    for i in range(max_num):
        show_imgs += [  data.tensor2im(tensor1,rgb2bgr = False,batch_index=i),
                        data.tensor2im(tensor2,rgb2bgr = False,batch_index=i),
                        data.tensor2im(tensor3,rgb2bgr = False,batch_index=i)]
    show_img = impro.splice(show_imgs,  (opt.showresult_num,3))
    writer.add_image(tag, show_img,showiter,dataformats='HWC')
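A standalone sketch of the tile-and-log step above, assuming impro.splice tiles a flat list of equally sized HxWx3 uint8 images into a rows x cols grid and writer is a torch.utils.tensorboard SummaryWriter; make_grid_hwc is a hypothetical stand-in, not the project's helper.

import numpy as np
from torch.utils.tensorboard import SummaryWriter

def make_grid_hwc(imgs, rows, cols):
    # tile a flat list of equally sized HxWx3 uint8 images into a rows x cols grid
    h, w, c = imgs[0].shape
    grid = np.zeros((rows * h, cols * w, c), dtype=imgs[0].dtype)
    for idx, img in enumerate(imgs[:rows * cols]):
        r, col = divmod(idx, cols)
        grid[r * h:(r + 1) * h, col * w:(col + 1) * w] = img
    return grid

writer = SummaryWriter('./runs/example')
imgs = [np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) for _ in range(6)]
writer.add_image('compare', make_grid_hwc(imgs, 2, 3), 0, dataformats='HWC')
writer.close()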
Example #2
def ImageQualityEvaluation(tensor1,tensor2,showiter,writer,tag):
    batch_len = len(tensor1)
    psnr,ssmi = 0,0
    for i in range(batch_len):
        img1,img2 = data.tensor2im(tensor1,rgb2bgr=False,batch_index=i), data.tensor2im(tensor2,rgb2bgr=False,batch_index=i)
        psnr += impro.psnr(img1,img2)
        ssmi += structural_similarity(img1,img2,multichannel=True)
    writer.add_scalars('quality/psnr', {tag:psnr/batch_len}, showiter)
    writer.add_scalars('quality/ssmi', {tag:ssmi/batch_len}, showiter)
    return psnr/batch_len,ssmi/batch_len
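The function above averages PSNR and SSIM over a batch via the project's data.tensor2im; a minimal standalone equivalent on plain uint8 arrays with skimage.metrics (hypothetical batch_quality helper, no TensorBoard logging) could look like this:

import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def batch_quality(imgs1, imgs2):
    # average PSNR/SSIM over paired lists of HxWx3 uint8 images
    psnr, ssim = 0.0, 0.0
    for a, b in zip(imgs1, imgs2):
        psnr += peak_signal_noise_ratio(a, b, data_range=255)
        # older skimage releases use multichannel=True instead of channel_axis
        ssim += structural_similarity(a, b, channel_axis=-1, data_range=255)
    n = len(imgs1)
    return psnr / n, ssim / n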
Example #3
def run_styletransfer(opt, net, img):

    if opt.output_size != 0:
        if 'resize' in opt.preprocess and 'resize_scale_width' not in opt.preprocess:
            img = impro.resize(img, opt.output_size)
        elif 'resize_scale_width' in opt.preprocess:
            img = cv2.resize(img, (opt.output_size, opt.output_size))
        img = img[0:4 * int(img.shape[0] / 4), 0:4 * int(img.shape[1] / 4), :]

    if 'edges' in opt.preprocess:
        if opt.canny > 100:
            canny_low = opt.canny - 50
            canny_high = np.clip(opt.canny + 50, 0, 255)
        elif opt.canny < 50:
            canny_low = np.clip(opt.canny - 25, 0, 255)
            canny_high = opt.canny + 25
        else:
            canny_low = opt.canny - int(opt.canny / 2)
            canny_high = opt.canny + int(opt.canny / 2)
        # use the thresholds computed above instead of a fixed +/-50 window
        img = cv2.Canny(img, canny_low, canny_high)
        if opt.only_edges:
            return img
        img = data.im2tensor(img,
                             use_gpu=opt.use_gpu,
                             gray=True,
                             use_transform=False,
                             is0_1=False)
    else:
        img = data.im2tensor(img, use_gpu=opt.use_gpu)
    img = net(img)
    img = data.tensor2im(img)
    return img
Example #4
def run_pix2pix(img, net, opt):
    if opt.netG == 'HD':
        img = impro.resize(img, 512)
    else:
        img = impro.resize(img, 128)
    img = data.im2tensor(img, use_gpu=opt.use_gpu)
    img_fake = net(img)
    img_fake = data.tensor2im(img_fake)
    return img_fake
Example #5
def run_segment(img, net, size=360, use_gpu=0):
    img = impro.resize(img, size)
    img = data.im2tensor(img,
                         use_gpu=use_gpu,
                         bgr2rgb=False,
                         use_transform=False,
                         is0_1=True)
    mask = net(img)
    mask = data.tensor2im(mask, gray=True, rgb2bgr=False, is0_1=True)
    return mask
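data.im2tensor / data.tensor2im are project helpers; a rough standalone equivalent of this wrapper with plain OpenCV and PyTorch is sketched below. The square resize, the 0-1 scaling and the single-channel output are assumptions based on the flags above, and run_segment_plain is a hypothetical name.

import cv2
import numpy as np
import torch

def run_segment_plain(img_bgr, net, size=360, device='cpu'):
    # resize, convert HWC uint8 -> 1xCxHxW float in [0,1], forward, return a uint8 mask
    img = cv2.resize(img_bgr, (size, size))
    x = torch.from_numpy(np.ascontiguousarray(img.transpose(2, 0, 1))).float() / 255.0
    x = x.unsqueeze(0).to(device)
    with torch.no_grad():
        pred = net(x)                                  # expected 1x1xHxW output in [0,1]
    return pred[0, 0].clamp(0, 1).mul(255).byte().cpu().numpy()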
Example #6
def cleanmosaic_video_fusion(opt,netG,netM):
    path = opt.media_path
    N = 25
    INPUT_SIZE = 128
    fps,imagepaths = video_init(opt,path)
    positions = []
    # get position
    for i,imagepath in enumerate(imagepaths,1):
        img_origin = impro.imread(os.path.join('./tmp/video2image',imagepath))
        # x,y,size = runmodel.get_mosaic_position(img_origin,net_mosaic_pos,opt)[:3]
        x,y,size,mask = runmodel.get_mosaic_position(img_origin,netM,opt)
        cv2.imwrite(os.path.join('./tmp/mosaic_mask',imagepath), mask)
        positions.append([x,y,size])
        print('\r','Find mosaic location:'+str(i)+'/'+str(len(imagepaths)),util.get_bar(100*i/len(imagepaths),num=40),end='')
    print('\nOptimize mosaic locations...')
    positions = np.array(positions)
    for i in range(3):
        positions[:,i] = filt.medfilt(positions[:,i], opt.medfilt_num)

    # clean mosaic
    for i,imagepath in enumerate(imagepaths,0):
        x,y,size = positions[i][0],positions[i][1],positions[i][2]
        img_origin = impro.imread(os.path.join('./tmp/video2image',imagepath))
        mask = cv2.imread(os.path.join('./tmp/mosaic_mask',imagepath),0)
        
        if size==0:
            cv2.imwrite(os.path.join('./tmp/replace_mosaic',imagepath),img_origin)
        else:
            mosaic_input = np.zeros((INPUT_SIZE,INPUT_SIZE,3*N+1), dtype='uint8')
            for j in range(0,N):
                img = impro.imread(os.path.join('./tmp/video2image',imagepaths[np.clip(i+j-12,0,len(imagepaths)-1)]))
                img = img[y-size:y+size,x-size:x+size]
                img = impro.resize(img,INPUT_SIZE)
                mosaic_input[:,:,j*3:(j+1)*3] = img
            mask = impro.resize(mask,np.min(img_origin.shape[:2]))
            mask = mask[y-size:y+size,x-size:x+size]
            mask = impro.resize(mask, INPUT_SIZE)
            mosaic_input[:,:,-1] = mask
            mosaic_input = data.im2tensor(mosaic_input,bgr2rgb=False,use_gpu=opt.use_gpu,use_transform = False,is0_1 = False)
            unmosaic_pred = netG(mosaic_input)
            
            #unmosaic_pred = (unmosaic_pred.cpu().detach().numpy()*255)[0]
            #img_fake = unmosaic_pred.transpose((1, 2, 0))
            img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = False ,is0_1 = False)
            img_result = impro.replace_mosaic(img_origin,img_fake,x,y,size,opt.no_feather)
            cv2.imwrite(os.path.join('./tmp/replace_mosaic',imagepath),img_result)
        print('\r','Clean Mosaic:'+str(i+1)+'/'+str(len(imagepaths)),util.get_bar(100*i/len(imagepaths),num=40),end='')
    print()
    ffmpeg.image2video( fps,
                './tmp/replace_mosaic/output_%05d.'+opt.tempimage_type,
                './tmp/voice_tmp.mp3',
                 os.path.join(opt.result_dir,os.path.splitext(os.path.basename(path))[0]+'_clean.mp4'))        
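The 'Optimize mosaic locations' step smooths the per-frame x/y/size track with a median filter (filt.medfilt is project code); the same smoothing with scipy.signal.medfilt on a made-up positions array:

import numpy as np
from scipy.signal import medfilt

# one [x, y, size] row per frame; the median filter removes single-frame
# detection glitches (the 500,400,5 row) while keeping the stable track
positions = np.array([[100, 80, 30],
                      [102, 81, 31],
                      [500, 400, 5],
                      [101, 82, 30],
                      [103, 80, 29]], dtype=np.float64)
for col in range(3):
    positions[:, col] = medfilt(positions[:, col], kernel_size=3)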
Example #7
def cleanmosaic_video_fusion(opt,netG,netM):
    path = opt.media_path
    N = 25
    if 'HD' in os.path.basename(opt.model_path):
        INPUT_SIZE = 256
    else:
        INPUT_SIZE = 128
    fps,imagepaths,height,width = video_init(opt,path)
    positions = get_mosaic_positions(opt,netM,imagepaths,savemask=True)
    
    # clean mosaic
    img_pool = np.zeros((height,width,3*N), dtype='uint8')
    for i,imagepath in enumerate(imagepaths,0):
        x,y,size = positions[i][0],positions[i][1],positions[i][2]
        
        # image read stream
        mask = cv2.imread(os.path.join('./tmp/mosaic_mask',imagepath),0)
        if i==0 :
            for j in range(0,N):
                img_pool[:,:,j*3:(j+1)*3] = impro.imread(os.path.join('./tmp/video2image',imagepaths[np.clip(i+j-12,0,len(imagepaths)-1)]))
        else:
            img_pool[:,:,0:(N-1)*3] = img_pool[:,:,3:N*3]
            img_pool[:,:,(N-1)*3:] = impro.imread(os.path.join('./tmp/video2image',imagepaths[np.clip(i+12,0,len(imagepaths)-1)]))
        img_origin = img_pool[:,:,int((N-1)/2)*3:(int((N-1)/2)+1)*3]
        
        if size==0: # no mosaic found in this frame
            cv2.imwrite(os.path.join('./tmp/replace_mosaic',imagepath),img_origin)
        else:

            mosaic_input = np.zeros((INPUT_SIZE,INPUT_SIZE,3*N+1), dtype='uint8')
            mosaic_input[:,:,0:N*3] = impro.resize(img_pool[y-size:y+size,x-size:x+size,:], INPUT_SIZE)
            mask_input = impro.resize(mask,np.min(img_origin.shape[:2]))[y-size:y+size,x-size:x+size]
            mosaic_input[:,:,-1] = impro.resize(mask_input, INPUT_SIZE)

            mosaic_input = data.im2tensor(mosaic_input,bgr2rgb=False,use_gpu=opt.use_gpu,use_transform = False,is0_1 = False)
            unmosaic_pred = netG(mosaic_input)
            img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = False ,is0_1 = False)
            img_result = impro.replace_mosaic(img_origin,img_fake,mask,x,y,size,opt.no_feather)
            cv2.imwrite(os.path.join('./tmp/replace_mosaic',imagepath),img_result)
        print('\r','Clean Mosaic:'+str(i+1)+'/'+str(len(imagepaths)),util.get_bar(100*i/len(imagepaths),num=35),end='')
    print()
    ffmpeg.image2video( fps,
                './tmp/replace_mosaic/output_%05d.'+opt.tempimage_type,
                './tmp/voice_tmp.mp3',
                 os.path.join(opt.result_dir,os.path.splitext(os.path.basename(path))[0]+'_clean.mp4'))        
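The channel-packed img_pool above is a sliding window over the video: drop the oldest three channels, append the newest frame. A small numpy sketch of that shift with dummy frames (shapes are arbitrary):

import numpy as np

N, H, W = 25, 4, 4                                   # 25-frame window, tiny dummy frames
img_pool = np.zeros((H, W, 3 * N), dtype=np.uint8)

def push_frame(pool, frame):
    # drop the oldest 3 channels, append the newest frame at the end
    pool[:, :, 0:(N - 1) * 3] = pool[:, :, 3:N * 3]
    pool[:, :, (N - 1) * 3:] = frame
    return pool

for t in range(30):
    img_pool = push_frame(img_pool, np.full((H, W, 3), t, dtype=np.uint8))

center_frame = img_pool[:, :, (N // 2) * 3:(N // 2 + 1) * 3]   # the frame being cleaned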
Example #8
def run_pix2pix(img, net, size=128, use_gpu=True):
    img = impro.resize(img, size)
    img = data.im2tensor(img, use_gpu=use_gpu)
    img_fake = net(img)
    img_fake = data.tensor2im(img_fake)
    return img_fake
Example #9
        #test
        if os.path.isdir('./test'):  
            netG.eval()
            
            test_names = os.listdir('./test')
            test_names.sort()
            result = np.zeros((opt.finesize*2,opt.finesize*len(test_names),3), dtype='uint8')

            for cnt,test_name in enumerate(test_names,0):
                img_names = os.listdir(os.path.join('./test',test_name,'image'))
                img_names.sort()
                inputdata = np.zeros((opt.finesize,opt.finesize,3*N+1), dtype='uint8')
                for i in range(0,N):
                    img = impro.imread(os.path.join('./test',test_name,'image',img_names[i]))
                    img = impro.resize(img,opt.finesize)
                    inputdata[:,:,i*3:(i+1)*3] = img

                mask = impro.imread(os.path.join('./test',test_name,'mask.png'),'gray')
                mask = impro.resize(mask,opt.finesize)
                mask = impro.mask_threshold(mask,15,128)
                inputdata[:,:,-1] = mask
                result[0:opt.finesize,opt.finesize*cnt:opt.finesize*(cnt+1),:] = inputdata[:,:,int((N-1)/2)*3:(int((N-1)/2)+1)*3]
                inputdata = data.im2tensor(inputdata,bgr2rgb=False,use_gpu=opt.use_gpu,use_transform = False,is0_1 = False)
                pred = netG(inputdata)
     
                pred = data.tensor2im(pred,rgb2bgr = False, is0_1 = False)
                result[opt.finesize:opt.finesize*2,opt.finesize*cnt:opt.finesize*(cnt+1),:] = pred

            cv2.imwrite(os.path.join(dir_checkpoint,str(iter+1)+'_test.jpg'), result)
            netG.train()
Example #10
def run_segment(img, net, size=360, gpu_id='-1'):
    img = impro.resize(img, size)
    img = data.im2tensor(img, gpu_id=gpu_id, bgr2rgb=False, is0_1=True)
    mask = net(img)
    mask = data.tensor2im(mask, gray=True, is0_1=True)
    return mask
Example #11
def cleanmosaic_video_fusion(opt, netG, netM):
    path = opt.media_path
    N, T, S = 2, 5, 3
    LEFT_FRAME = (N * S)
    POOL_NUM = LEFT_FRAME * 2 + 1
    INPUT_SIZE = 256
    FRAME_POS = np.linspace(0, (T - 1) * S, T, dtype=np.int64)
    img_pool = []
    previous_frame = None
    init_flag = True

    fps, imagepaths, height, width = video_init(opt, path)
    positions = get_mosaic_positions(opt, netM, imagepaths, savemask=True)
    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('clean', cv2.WINDOW_NORMAL)

    # clean mosaic
    print('Step:3/4 -- Clean Mosaic:')
    length = len(imagepaths)

    for i, imagepath in enumerate(imagepaths, 0):
        x, y, size = positions[i][0], positions[i][1], positions[i][2]
        input_stream = []
        # image read stream
        if i == 0:  # init
            for j in range(POOL_NUM):
                img_pool.append(
                    impro.imread(
                        os.path.join(
                            opt.temp_dir + '/video2image',
                            imagepaths[np.clip(i + j - LEFT_FRAME, 0,
                                               len(imagepaths) - 1)])))
        else:  # load next frame
            img_pool.pop(0)
            img_pool.append(
                impro.imread(
                    os.path.join(
                        opt.temp_dir + '/video2image',
                        imagepaths[np.clip(i + LEFT_FRAME, 0,
                                           len(imagepaths) - 1)])))
        img_origin = img_pool[LEFT_FRAME]
        img_result = img_origin.copy()

        if size > 50:
            try:  #Avoid unknown errors
                for pos in FRAME_POS:
                    input_stream.append(
                        impro.resize(
                            img_pool[pos][y - size:y + size,
                                          x - size:x + size],
                            INPUT_SIZE)[:, :, ::-1])
                if init_flag:
                    init_flag = False
                    previous_frame = input_stream[N]
                    previous_frame = data.im2tensor(previous_frame,
                                                    bgr2rgb=True,
                                                    gpu_id=opt.gpu_id)

                input_stream = np.array(input_stream).reshape(
                    1, T, INPUT_SIZE, INPUT_SIZE, 3).transpose((0, 4, 1, 2, 3))
                input_stream = data.to_tensor(data.normalize(input_stream),
                                              gpu_id=opt.gpu_id)
                with torch.no_grad():
                    unmosaic_pred = netG(input_stream, previous_frame)
                img_fake = data.tensor2im(unmosaic_pred, rgb2bgr=True)
                previous_frame = unmosaic_pred
                # previous_frame = data.tensor2im(unmosaic_pred,rgb2bgr = True)
                mask = cv2.imread(
                    os.path.join(opt.temp_dir + '/mosaic_mask', imagepath), 0)
                img_result = impro.replace_mosaic(img_origin, img_fake, mask,
                                                  x, y, size, opt.no_feather)
            except Exception as e:
                init_flag = True
                print('Error:', e)
        else:
            init_flag = True
        cv2.imwrite(os.path.join(opt.temp_dir + '/replace_mosaic', imagepath),
                    img_result)
        os.remove(os.path.join(opt.temp_dir + '/video2image', imagepath))

        #preview result and print
        if not opt.no_preview:
            cv2.imshow('clean', img_result)
            cv2.waitKey(1) & 0xFF
        t2 = time.time()
        print('\r',
              str(i + 1) + '/' + str(length),
              util.get_bar(100 * i / length, num=35),
              util.counttime(t1, t2, i + 1, len(imagepaths)),
              end='')
    print()
    if not opt.no_preview:
        cv2.destroyAllWindows()
    print('Step:4/4 -- Convert images to video')
    ffmpeg.image2video(
        fps,
        opt.temp_dir + '/replace_mosaic/output_%06d.' + opt.tempimage_type,
        opt.temp_dir + '/voice_tmp.mp3',
        os.path.join(
            opt.result_dir,
            os.path.splitext(os.path.basename(path))[0] + '_clean.mp4'))
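The FRAME_POS / transpose((0, 4, 1, 2, 3)) lines above pick T frames spaced S apart and pack them as a batch x channels x time x height x width tensor; a standalone sketch with dummy crops (sizes are assumptions):

import numpy as np
import torch

T, S, SIZE = 5, 3, 256
frame_pos = np.linspace(0, (T - 1) * S, T, dtype=np.int64)     # [0, 3, 6, 9, 12]

pool = [np.random.randint(0, 255, (SIZE, SIZE, 3), dtype=np.uint8) for _ in range(13)]
stream = np.array([pool[p] for p in frame_pos])                # T x H x W x 3

# (1, T, H, W, 3) -> (1, 3, T, H, W): batch, channels, time, height, width
stream = stream.reshape(1, T, SIZE, SIZE, 3).transpose((0, 4, 1, 2, 3))
stream = torch.from_numpy((stream.astype(np.float32) / 255.0 - 0.5) / 0.5)
print(stream.shape)                                            # torch.Size([1, 3, 5, 256, 256])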
Example #12
                loss_L2.item(),loss_VGG.item(),psnr,ssmi) )
            t_strat = time.time()

    '''
    --------------------------Test--------------------------
    '''
    if train_iter % opt.showresult_freq == 0 and os.path.isdir(opt.dataset_test):
        show_imgs = []
        videos = os.listdir(opt.dataset_test)
        videos.sort()
        for video in videos:
            frames = os.listdir(os.path.join(opt.dataset_test,video,'image'))
            frames.sort()
            for step in range(5):
                mosaic_stream = []
                for i in range(opt.T):
                    _mosaic = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[i*opt.S+step]),loadsize=opt.finesize,rgb=True)
                    mosaic_stream.append(_mosaic)
                if step == 0:
                    previous = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True)
                    previous = data.im2tensor(previous,bgr2rgb = False, gpu_id = opt.gpu_id, is0_1 = False)
                mosaic_stream = (np.array(mosaic_stream).astype(np.float32)/255.0-0.5)/0.5
                mosaic_stream = mosaic_stream.reshape(1,opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3))
                mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id)
                with torch.no_grad():
                    out = netG(mosaic_stream,previous)
                previous = out
            show_imgs+= [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False),data.tensor2im(out,rgb2bgr = False)]

        show_img = impro.splice(show_imgs, (len(videos),2))
        TBGlobalWriter.add_image('test', show_img,train_iter,dataformats='HWC')
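The (x/255.0-0.5)/0.5 line above maps uint8 images to [-1, 1], which appears to be what the is0_1=False flags elsewhere refer to; a minimal sketch of that mapping and its inverse (the inverse is an assumption about how tensor2im converts back):

import numpy as np

def to_signed_unit(img_uint8):
    # uint8 [0, 255] -> float32 [-1, 1]
    return (img_uint8.astype(np.float32) / 255.0 - 0.5) / 0.5

def from_signed_unit(img_float):
    # float [-1, 1] -> uint8 [0, 255], clipping out-of-range network outputs
    return np.clip((img_float * 0.5 + 0.5) * 255.0, 0, 255).astype(np.uint8)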
Example #13
def cleanmosaic_video_fusion(opt, netG, netM):
    path = opt.media_path
    N = 25
    if 'HD' in os.path.basename(opt.model_path):
        INPUT_SIZE = 256
    else:
        INPUT_SIZE = 128
    fps, imagepaths, height, width = video_init(opt, path)
    positions = get_mosaic_positions(opt, netM, imagepaths, savemask=True)
    t1 = time.time()
    if not opt.no_preview:
        cv2.namedWindow('clean', cv2.WINDOW_NORMAL)

    # clean mosaic
    print('Clean Mosaic:')
    length = len(imagepaths)
    img_pool = np.zeros((height, width, 3 * N), dtype='uint8')
    mosaic_input = np.zeros((INPUT_SIZE, INPUT_SIZE, 3 * N + 1), dtype='uint8')
    for i, imagepath in enumerate(imagepaths, 0):
        x, y, size = positions[i][0], positions[i][1], positions[i][2]

        # image read stream
        mask = cv2.imread(
            os.path.join(opt.temp_dir + '/mosaic_mask', imagepath), 0)
        if i == 0:
            for j in range(0, N):
                img_pool[:, :, j * 3:(j + 1) * 3] = impro.imread(
                    os.path.join(
                        opt.temp_dir + '/video2image',
                        imagepaths[np.clip(i + j - 12, 0,
                                           len(imagepaths) - 1)]))
        else:
            img_pool[:, :, 0:(N - 1) * 3] = img_pool[:, :, 3:N * 3]
            img_pool[:, :, (N - 1) * 3:] = impro.imread(
                os.path.join(
                    opt.temp_dir + '/video2image',
                    imagepaths[np.clip(i + 12, 0,
                                       len(imagepaths) - 1)]))
        img_origin = img_pool[:, :,
                              int((N - 1) / 2) * 3:(int((N - 1) / 2) + 1) * 3]
        img_result = img_origin.copy()

        if size > 100:
            try:  #Avoid unknown errors
                #reshape to network input shape

                mosaic_input[:, :, 0:N * 3] = impro.resize(
                    img_pool[y - size:y + size, x - size:x + size, :],
                    INPUT_SIZE)
                mask_input = impro.resize(mask, np.min(
                    img_origin.shape[:2]))[y - size:y + size,
                                           x - size:x + size]
                mosaic_input[:, :, -1] = impro.resize(mask_input, INPUT_SIZE)

                mosaic_input_tensor = data.im2tensor(mosaic_input,
                                                     bgr2rgb=False,
                                                     use_gpu=opt.use_gpu,
                                                     use_transform=False,
                                                     is0_1=False)
                unmosaic_pred = netG(mosaic_input_tensor)
                img_fake = data.tensor2im(unmosaic_pred,
                                          rgb2bgr=False,
                                          is0_1=False)
                img_result = impro.replace_mosaic(img_origin, img_fake, mask,
                                                  x, y, size, opt.no_feather)
            except Exception as e:
                print('Warning:', e)
        cv2.imwrite(os.path.join(opt.temp_dir + '/replace_mosaic', imagepath),
                    img_result)

        #preview result and print
        if not opt.no_preview:
            cv2.imshow('clean', img_result)
            cv2.waitKey(1) & 0xFF
        t2 = time.time()
        print('\r',
              str(i + 1) + '/' + str(length),
              util.get_bar(100 * i / length, num=35),
              util.counttime(t1, t2, i + 1, len(imagepaths)),
              end='')
    print()
    if not opt.no_preview:
        cv2.destroyAllWindows()
    ffmpeg.image2video(
        fps,
        opt.temp_dir + '/replace_mosaic/output_%06d.' + opt.tempimage_type,
        opt.temp_dir + '/voice_tmp.mp3',
        os.path.join(
            opt.result_dir,
            os.path.splitext(os.path.basename(path))[0] + '_clean.mp4'))