Example #1
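
Loads the trained super-resolution models onto the GPU, then sets up a renderer and screen-space shading for each dataset. The inference module, ScreenSpaceShading, and the upper-case constants (MODELS, DATASETS, MODEL_DIR, ...) are assumed to be defined by the surrounding volume super-resolution project.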
# Load models
device = torch.device("cuda")
models = [None] * len(MODELS)
for i, m in enumerate(MODELS):
    p = m['path']
    if p != MODEL_INPUT and p != MODEL_GROUND_TRUTH:
        models[i] = inference.LoadedModel(os.path.join(MODEL_DIR, p), device,
                                          UPSCALING)

# create output folder
os.makedirs(OUTPUT_FOLDER, exist_ok=True)

# Render each dataset
for i in range(len(DATASETS)):
    # create renderer
    camera = inference.Camera(RESOLUTION[0], RESOLUTION[1], CAMERA_ORIGIN)
    camera.currentDistance = DATASETS[i]['distance']
    camera.currentPitch = DATASETS[i]['pitch']
    camera.orientation = DATASETS[i]['orientation']
    material = inference.Material(DATASETS[i]['iso'])
    renderer_path = RENDERER_CPU if DATASETS[i]['file'].endswith(
        'vdb') else RENDERER_GPU
    data_dir = DATA_DIR_CPU if DATASETS[i]['file'].endswith(
        'vdb') else DATA_DIR_GPU
    datasetfile = os.path.join(data_dir, DATASETS[i]['file'])
    print('Open', datasetfile)
    renderer = inference.Renderer(renderer_path, datasetfile, material, camera)
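    # assumption: the renderer runs as an external process, so give it a moment
    # to start up before sending commands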
    time.sleep(5)
    renderer.send_command("aoradius=%5.3f\n" % float(AO_RADIUS))
    # create shading
    shading = ScreenSpaceShading(torch.device('cpu'))

Example #2

# Render each dataset
for i in range(len(DATASETS)):
    if i > 0:
        # write empty frames
        empty = np.zeros((RESOLUTION[1], RESOLUTION[0], 3), dtype=np.uint8)
        for j in range(EMPTY_FRAMES):
            writer.append_data(empty)

    background = createBackgroundImage(DATASETS[i]['name'])
    # create renderer
    camera = inference.Camera(CanvasWidth, CanvasHeight, CAMERA_ORIGIN)
    camera.currentDistance = DATASETS[i]['distance']
    material = inference.Material(DATASETS[i]['iso'])
    renderer_path = RENDERER_CPU if DATASETS[i]['file'].endswith(
        'vdb') else RENDERER_GPU
    data_dir = DATA_DIR_CPU if DATASETS[i]['file'].endswith(
        'vdb') else DATA_DIR_GPU
    datasetfile = os.path.join(data_dir, DATASETS[i]['file'])
    print('Open', datasetfile)
    renderer = inference.Renderer(renderer_path, datasetfile, material, camera)
    renderer.send_command("aoradius=%5.3f\n" % float(AO_RADIUS))
    # create shading
    shading = ScreenSpaceShading(torch.device('cpu'))
    shading.fov(30)
    shading.light_direction(np.array([0.1, 0.1, 1.0]))
    shading.ambient_light_color(np.array(DATASETS[i]['ambient']) / 255.0)

Example #3

MODEL_DIR = "D:/VolumeSuperResolution"

UPSCALING = 4

OUTPUT_FOLDER = 'D:/VolumeSuperResolution/comparisonVideo3' + (
    "_diff" if SHOW_DIFFERENCE else "")
FPS = 25
BACKGROUND = [1, 1, 1]
RESOLUTION = (1920, 1080)
RESOLUTION_LOW = (RESOLUTION[0] // UPSCALING, RESOLUTION[1] // UPSCALING)

########################################
# Material + Camera
########################################

camera = inference.Camera(RESOLUTION[0], RESOLUTION[1], [0, 0, -1])
camera.currentDistance = 2.3
camera.currentPitch = 0.38
camera.orientation = inference.Orientation.Yp


class Scene:
    file = None
    isovalue = 0.36
    light = "camera"
    temporalConsistency = False
    depthMin = None
    depthMax = None
    aoSamples = 4  # alternatively 256 for higher quality
    aoRadius = 0.05

Example #4

def benchmark(scene):
    DEBUG = False
    IMAGE_EXPORT = [(512, 512)]  # (screen, volume) resolution pairs to export; alternative: [(2**9, 1024)]

    # SETTINGS
    SCREEN_RESOLUTIONS = [2**i for i in range(6, 12)]
    print("Screen resolutions: ", SCREEN_RESOLUTIONS)
    MIN_VOLUME_RESOLUTION = 32
    NUM_SAMPLES = 50

    NETWORK_DIR = "D:/VolumeSuperResolution/adaptive-dvr-modeldir/"
    NETWORK = ("network", NETWORK_DIR + "adapDvr5-rgb-temp001-perc01-epoch500")
    VOLUME_FOLDER = "../../isosurface-super-resolution-data/volumes/cvol-filtered/"
    SAMPLING_FILE = "D:/VolumeSuperResolution-InputData/samplingPattern.hdf5"
    SETTINGS_FOLDER = "../network/video/"

    device_cpu = torch.device("cpu")
    device_gpu = torch.device("cuda")

    # load sampling pattern
    print("load sampling patterns")
    SAMPLING_PATTERNS = ['halton', 'plastic', 'random', 'regular']
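    # only the 'plastic' pattern is read below; the other names are presumably
    # alternative patterns stored in the same file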
    with h5py.File(SAMPLING_FILE, 'r') as f:
        SAMPLING_PATTERN = torch.from_numpy(
            f['plastic'][...]).to(device_gpu).unsqueeze(0)

    # load networks
    print("Load networks")
    importanceNetwork = ImportanceModel(NETWORK, device_gpu)
    reconNetwork = ReconstructionModel(NETWORK, device_gpu)

    # scenes
    if scene == 1:
        # EJECTA 512
        VOLUME = "snapshot_272_512.cvol"
        STEPSIZE = 0.125
        POSITION_SAMPLER = lambda: (1.1**0.3) * randomPointOnSphere()
        SETTINGS = "Dvr-Ejecta-settings.json"
        UPSCALING = 8
        POSTPROCESS = importance.PostProcess(0.001, 0.05, 1, 0, 'basic')
        IMAGE_PATH = "exportDvrEjecta512_%d_%d_%d.png"
        OUTPUT_FILE = "../result-stats/DvrBenchmarkEjecta512.tsv"
    elif scene == 2:
        # RM
        VOLUME = "ppmt273_1024.cvol"
        STEPSIZE = 0.25

        def rmPositionSampler():
            pos = (1.1**0.3) * randomPointOnSphere()
            pos[2] = -abs(pos[2])
            return pos

        POSITION_SAMPLER = rmPositionSampler
        SETTINGS = "Dvr-RM-settings.json"
        UPSCALING = 8
        POSTPROCESS = importance.PostProcess(0.001, 0.05, 1, 0, 'basic')
        IMAGE_PATH = "exportDvrRM1024_%d_%d_%d.png"
        OUTPUT_FILE = "../result-stats/DvrBenchmarkRM1024.tsv"
    elif scene == 3:
        # THORAX
        VOLUME = "cleveland70.cvol"
        STEPSIZE = 0.25
        POSITION_SAMPLER = lambda: (1.1**2.4) * randomPointOnSphere()
        SETTINGS = "Dvr-Thorax-settings.json"
        UPSCALING = 8
        POSTPROCESS = importance.PostProcess(0.001, 0.05, 1, 0, 'basic')
        IMAGE_PATH = "exportDvrThorax512_%d_%d_%d.png"
        OUTPUT_FILE = "../result-stats/DvrBenchmarkThorax512.tsv"

    ###################################################
    ######## RUN BENCHMARK ############################
    ###################################################

    # no gradients anywhere
    torch.set_grad_enabled(False)

    # load volume
    err = torch.ops.renderer.load_volume_from_binary(VOLUME_FOLDER + VOLUME)
    assert err == 1
    resX, resY, resZ = torch.ops.renderer.get_volume_resolution()
    print("volume resolution:", resX, resY, resZ)
    # count mipmap levels: halve the largest dimension until it falls below
    # MIN_VOLUME_RESOLUTION
    res = max(resX, resY, resZ)
    numMipmapLevels = 0
    while res >= MIN_VOLUME_RESOLUTION:
        numMipmapLevels += 1
        res = res // 2
    print("Num mipmap levels:", numMipmapLevels)

    # load settings
    settings = inference.RenderSettings()
    camera = inference.Camera(512, 512, [0, 0, -1])
    with open(SETTINGS_FOLDER + SETTINGS, "r") as f:
        o = json.load(f)
        settings.from_dict(o)
        camera.from_dict(o['Camera'])
    settings.update_camera(camera)
    settings.RENDER_MODE = 2

    # run scenes
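    # CUDA events record timestamps on the GPU stream; start.elapsed_time(end)
    # later yields the GPU-side duration in milliseconds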
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    with open(OUTPUT_FILE, "w") as stat_file:
        stat_file.write(
            "VolumeResolution\tScreenResolution\tRenderingLowMillis\tImportanceMillis\tRenderingSamplesMillis\tReconstructionMillis\tRenderingHighMillis\tSamplePercentage\n"
        )
        with std_out_err_redirect_tqdm() as orig_stdout:
            for mipmapLevel in range(numMipmapLevels):
                # load mipmap level
                if mipmapLevel > 0:
                    torch.ops.renderer.create_mipmap_level(
                        mipmapLevel, "average")
                settings.MIPMAP_LEVEL = mipmapLevel
                volumeResolution = max(resX, resY, resZ) // (2**mipmapLevel)

                for resolution in SCREEN_RESOLUTIONS:
                    settings.RESOLUTION = [resolution, resolution]
                    settings.VIEWPORT = [0, 0, resolution, resolution]

                    print("volume resolution: %d, screen resolution %d" %
                          (volumeResolution, resolution))

                    # loop over sample positions
                    renderingLowMillis = 0
                    importanceMillis = 0
                    renderingSamplesMillis = 0
                    reconstructionMillis = 0
                    renderingHighMillis = 0
                    samplePercentage = 0

                    for i in trange(NUM_SAMPLES,
                                    desc='Samples',
                                    file=orig_stdout,
                                    dynamic_ncols=True,
                                    leave=True):
                        pos = list(POSITION_SAMPLER())
                        settings.CAM_ORIGIN_START = pos
                        settings.CAM_ORIGIN_END = pos
                        settings.send()

                        render_settings = settings.clone()
                        render_settings.send()

                        # high
                        torch.cuda.synchronize()
                        start.record()
                        high_res = torch.ops.renderer.render()
                        end.record()
                        torch.cuda.synchronize()
                        renderingHighMillis += start.elapsed_time(end)

                        if (resolution, volumeResolution) in IMAGE_EXPORT:
                            filename = IMAGE_PATH % (resolution,
                                                     volumeResolution, i)
                            image = high_res[0:3, :, :]
                            image = image.detach().cpu().numpy().transpose(
                                (1, 2, 0))
                            imageio.imwrite(filename, image)
                            print("Image saved to %s" % filename)

                        # low
                        settingsTmp = settings.clone()
                        settingsTmp.downsampling = UPSCALING
                        settingsTmp.send()
                        torch.cuda.synchronize()
                        start.record()
                        low_res = torch.ops.renderer.render()
                        end.record()
                        torch.cuda.synchronize()
                        renderingLowMillis += start.elapsed_time(end)

                        # prepare for importance map
                        low_res = low_res.unsqueeze(0)
                        low_res_input = low_res[:, :-2, :, :]
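                        # no temporal history in this benchmark, so the
                        # previous-frame input is all zeros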
                        previous_input = torch.zeros(
                            1,
                            1,
                            low_res_input.shape[2] *
                            importanceNetwork.networkUpscaling(),
                            low_res_input.shape[3] *
                            importanceNetwork.networkUpscaling(),
                            dtype=low_res_input.dtype,
                            device=low_res_input.device)

                        # compute importance map
                        torch.cuda.synchronize()
                        start.record()
                        importance_map = importanceNetwork.call(
                            low_res_input[:, 0:5, :, :], previous_input)
                        end.record()
                        torch.cuda.synchronize()
                        importanceMillis += start.elapsed_time(end)
                        if len(importance_map.shape) == 3:
                            importance_map = importance_map.unsqueeze(1)
                        if DEBUG:
                            print("importance map min=%f, max=%f" %
                                  (torch.min(importance_map),
                                   torch.max(importance_map)))

                        # prepare sampling
                        settings.send()
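                        # crop the sampling pattern to the importance map's
                        # spatial size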
                        pattern = SAMPLING_PATTERN[:, :importance_map.shape[-2],
                                                   :importance_map.shape[-1]]
                        normalized_importance_map = POSTPROCESS(
                            importance_map)[0]
                        if DEBUG:
                            print("normalized importance map min=%f, max=%f" %
                                  (torch.min(normalized_importance_map),
                                   torch.max(normalized_importance_map)))
                            print("pattern min=%f, max=%f" %
                                  (torch.min(pattern), torch.max(pattern)))
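                        # keep exactly the pixels whose normalized importance
                        # exceeds the pattern threshold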
                        sampling_mask = normalized_importance_map > pattern
                        sample_positions = torch.nonzero(sampling_mask[0].t_())
                        sample_positions = sample_positions.to(
                            torch.float32).transpose(0, 1).contiguous()
                        samplePercentage += sample_positions.size(1) / (
                            importance_map.shape[-2] *
                            importance_map.shape[-1])
                        if DEBUG:
                            print("sample count: %d" %
                                  sample_positions.size(1))

                        # do the sampling
                        torch.cuda.synchronize()
                        start.record()
                        sample_data = torch.ops.renderer.render_samples(
                            sample_positions)
                        reconstruction_input = torch.ops.renderer.scatter_samples(
                            sample_positions, sample_data, resolution,
                            resolution, [0] * 10)
                        end.record()
                        torch.cuda.synchronize()
                        renderingSamplesMillis += start.elapsed_time(end)

                        # reconstruction
                        reconstruction_input = reconstruction_input[:9, :, :].unsqueeze(0)
                        previous_input = torch.zeros(
                            1,
                            8,
                            reconstruction_input.shape[2],
                            reconstruction_input.shape[3],
                            dtype=reconstruction_input.dtype,
                            device=reconstruction_input.device)
                        torch.cuda.synchronize()
                        start.record()
                        reconNetwork.call(reconstruction_input, sampling_mask,
                                          previous_input)
                        end.record()
                        torch.cuda.synchronize()
                        reconstructionMillis += start.elapsed_time(end)

                    # write stats
                    renderingLowMillis /= NUM_SAMPLES
                    importanceMillis /= NUM_SAMPLES
                    renderingSamplesMillis /= NUM_SAMPLES
                    reconstructionMillis /= NUM_SAMPLES
                    renderingHighMillis /= NUM_SAMPLES
                    samplePercentage /= NUM_SAMPLES
                    stat_file.write(
                        "%d\t%d\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\n" %
                        (volumeResolution, resolution, renderingLowMillis,
                         importanceMillis, renderingSamplesMillis,
                         reconstructionMillis, renderingHighMillis,
                         samplePercentage))
                    stat_file.flush()
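
The benchmark times every stage with the same CUDA-event pattern: synchronize, record start, run the work, record end, synchronize again, and accumulate start.elapsed_time(end) in milliseconds. Below is a minimal standalone sketch of that pattern as a reusable helper; the helper name time_gpu and the dummy matmul workload are illustrative assumptions, not part of the original scripts.

import torch

def time_gpu(fn, *args):
    """Return (result, GPU milliseconds) for fn(*args), timed with CUDA events."""
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()  # drain pending GPU work so only fn is timed
    start.record()
    result = fn(*args)
    end.record()
    torch.cuda.synchronize()  # wait until the 'end' event has actually executed
    return result, start.elapsed_time(end)

# usage sketch
if torch.cuda.is_available():
    x = torch.randn(1024, 1024, device="cuda")
    _, millis = time_gpu(torch.matmul, x, x)
    print("matmul: %.3f ms" % millis)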

Example #5

# Render each dataset
for i in range(len(DATASETS)):
    # output
    outputFolder = os.path.join(OUTPUT_FOLDER, DATASETS[i]['name'])
    os.makedirs(outputFolder, exist_ok=True)
    statistics = open(os.path.join(outputFolder, "AA_AllFrames.txt"), "w")
    statistics.write("Origin\tRotation\t" + 
                     "\t".join((m['name']+"-PSNR-normal\t"+m['name']+"-PSNR-color\t"+m['name']+"-SSIM-normal\t"+m['name']+"-SSIM-color") for m in MODELS) + 
                     "\n")
    # create renderer
    material = inference.Material(DATASETS[i]['iso'])
    renderer_path = RENDERER_CPU if DATASETS[i]['file'].endswith('vdb') else RENDERER_GPU
    data_dir = DATA_DIR_CPU if DATASETS[i]['file'].endswith('vdb') else DATA_DIR_GPU
    datasetfile = os.path.join(data_dir, DATASETS[i]['file'])
    print('Open', datasetfile)
    renderer = inference.Renderer(renderer_path, datasetfile, material, inference.Camera(RESOLUTION[0], RESOLUTION[1]))
    time.sleep(5)
    renderer.send_command("aoradius=%5.3f\n"%float(AO_RADIUS))
    # create shading
    shading = ScreenSpaceShading(torch.device('cpu'))
    shading.fov(30)
    shading.light_direction(np.array([0.1,0.1,1.0]))
    shading.ambient_light_color(np.array(DATASETS[i]['ambient'])/255.0)
    shading.diffuse_light_color(np.array(DATASETS[i]['diffuse'])/255.0)
    shading.specular_light_color(np.array(DATASETS[i]['specular'])/255.0)
    shading.specular_exponent(SPECULAR_EXPONENT)
    shading.material_color(np.array(DATASETS[i]['material'])/255.0)
    shading.ambient_occlusion(AO_STRENGTH)
    shading.background(np.array(BACKGROUND))

    # prepare running stats