Example #1
def draw_goal_cornelbox(output, device=-1):
    materials = {}
    materials["light"] = DiffuseMaterial([1.0, 1.0, 1.0])
    materials["white"] = DiffuseMaterial([0.5, 0.5, 0.5])
    materials["green"] = DiffuseMaterial([0.0, 1.0, 0.0])
    materials["red"] = DiffuseMaterial([1.0, 0.0, 0.0])
    #shape_light = create_light(materials)
    shape_floor = create_floor(materials)
    shape_shortblock = create_shortblock(materials)
    shape_tallblock = create_tallblock(materials)
    shape = CompositeShape([shape_floor, shape_shortblock, shape_tallblock])
    """
    light = np.array(GOAL_POS, dtype=np.float32)
    model = ArrayLink(light)
    light = PointLight(origin=model.data, color=[1, 1, 1])
    """

    # Place the light at the goal position and register its origin on a bare
    # Link (AP presumably wraps it as a trainable chainer.Parameter).
    light = PointLight(origin=GOAL_POS, color=[1, 1, 1])
    model = chainer.Link()
    AP(model, 'data', light.origin)

    # Camera field of view in degrees, derived from the 0.025 / 0.035 ratio.
    fov = math.atan2(0.025, 0.035) * 180.0 / math.pi
    camera = PerspectiveCamera(512, 512, fov, [278.0, 273.0, -800.0])

    func = RaytraceFunc(shape=shape, light=light, camera=camera)

    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu()
        func.to_gpu()

    # Render a single frame; the result is an (N, C, H, W) array.
    y_data = func(1)
    y_data = y_data.data

    if device >= 0:
        y_data = y_data.get()
        cuda.get_device_from_id(device).synchronize()

    # Take the first image, convert CHW -> HWC, and scale to 8-bit.
    img = y_data[0]
    img = np.transpose(img, (1, 2, 0))
    img = np.clip(img * 255, 0, 255).astype(np.uint8)

    # OpenCV writes images in BGR channel order.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    cv2.imwrite(output, img)
    return 0
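
GOAL_POS, START_POS, the create_* scene helpers, and AP all come from the surrounding project and are not shown in these examples. A minimal sketch of what an AP helper like the one used above could look like, assuming it simply registers an array as a trainable chainer.Parameter on a bare Link:

import chainer
import numpy as np

def AP(link, name, value):
    # Hypothetical helper: wrap `value` in a chainer.Parameter and register
    # it on `link` inside init_scope(), so optimizers can update it and
    # to_gpu()/to_cpu() move it together with the link.
    with link.init_scope():
        setattr(link, name,
                chainer.Parameter(np.asarray(value, dtype=np.float32)))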
Example #2
def draw_start_cornelbox(output, device=-1):
    materials = {}
    materials["light"] = DiffuseMaterial([1.0, 1.0, 1.0])
    materials["white"] = DiffuseMaterial([0.5, 0.5, 0.5])
    materials["green"] = DiffuseMaterial([0.0, 1.0, 0.0])
    materials["red"] = DiffuseMaterial([1.0, 0.0, 0.0])

    shape_floor = create_floor(materials)
    shape_shortblock = create_shortblock(materials)
    shape_tallblock = create_tallblock(materials)
    shape = CompositeShape([shape_floor, shape_shortblock, shape_tallblock])

    fov = math.atan2(0.025, 0.035) * 180.0 / math.pi
    camera = PerspectiveCamera(512,
                               512,
                               fov,
                               origin=[400.0, 300, -800.0],
                               direction=norm([-0.1, 0, 1]))
    light = PointLight(origin=START_POS, color=[0.1, 0.1, 0.1])

    func = RaytraceFunc(shape=shape, light=light, camera=camera)

    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        func.to_gpu()

    y_data = func(1)
    y_data = y_data.data

    if device >= 0:
        y_data = y_data.get()
        cuda.get_device_from_id(device).synchronize()

    img = y_data[0]
    img = np.transpose(img, (1, 2, 0))
    img = np.clip(img * 255, 0, 255).astype(np.uint8)

    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

    cv2.imwrite(output, img)
    return 0
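
The norm helper used for the camera direction is also not shown in these examples; it is presumably a plain unit-vector normalization along these lines:

import numpy as np

def norm(v):
    # Return v scaled to unit length.
    v = np.asarray(v, dtype=np.float32)
    return v / np.linalg.norm(v)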
Example #3
def calc_goal_cornelbox(output, device=-1):
    epoch = 50

    outdir = os.path.dirname(output)

    materials = {}
    materials["light"] = DiffuseMaterial([1.0, 1.0, 1.0])
    materials["white"] = DiffuseMaterial([0.5, 0.5, 0.5])
    materials["green"] = DiffuseMaterial([0.0, 1.0, 0.0])
    materials["red"] = DiffuseMaterial([1.0, 0.0, 0.0])
    #shape_light = create_light(materials)
    shape_floor = create_floor(materials)
    shape_shortblock = create_shortblock(materials)
    shape_tallblock = create_tallblock(materials)
    shape = CompositeShape([shape_floor, shape_shortblock, shape_tallblock])

    # The light starts at START_POS; AP exposes its origin as the parameter
    # that the optimizer will move toward the goal rendering.
    light = PointLight(origin=START_POS, color=[1, 1, 1])
    model = chainer.Link()
    AP(model, 'data', light.origin)

    fov = math.atan2(0.025, 0.035) * 180.0 / math.pi
    camera = PerspectiveCamera(512, 512, fov, [278.0, 273.0, -800.0])

    func = RaytraceFunc(shape=shape, light=light, camera=camera)

    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model.to_gpu()
        func.to_gpu()

    chainer.config.autotune = True
    chainer.config.cudnn_fast_batch_normalization = True

    #optimizer = optimizers.Adam(alpha=1e-1, beta1=0.9, beta2=0.999, eps=1e-08)
    optimizer = optimizers.SGD(lr=0.001)
    optimizer.setup(model)

    #dataset
    train_dataset = RaytraceDataset(output)
    train_iter = chainer.iterators.SerialIterator(train_dataset,
                                                  1,
                                                  shuffle=True)

    # updater
    updater = RaytraceUpdater(train_iter,
                              model,
                              func,
                              optimizer,
                              outdir,
                              device=device)

    #trainer
    trainer = training.Trainer(updater, (epoch, 'epoch'), outdir)

    log_interval = (1, 'iteration')

    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch',
        'iteration',
        'main/loss',
        'pos/light_x',
        'pos/light_y',
        'pos/light_z',
    ]),
                   trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=1))

    trainer.run()

    return 0
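
RaytraceDataset and RaytraceUpdater are project-specific classes that are not part of these snippets. A rough sketch of the single-optimizer updater used here, assuming the dataset yields the goal rendering and the loss is a pixel-wise MSE against the current render (the real class presumably also reports the pos/light_x, pos/light_y, pos/light_z values printed above):

import chainer.functions as F
from chainer import reporter, training

class RaytraceUpdater(training.StandardUpdater):
    # Hypothetical sketch, not the project's actual implementation.
    def __init__(self, iterator, model, func, optimizer, outdir, device=-1):
        super(RaytraceUpdater, self).__init__(iterator, optimizer,
                                              device=device)
        self.model = model
        self.func = func
        self.outdir = outdir

    def update_core(self):
        batch = self.get_iterator('main').next()
        t = self.converter(batch, self.device)   # goal image(s)
        y = self.func(len(batch))                # render with current light position
        loss = F.mean_squared_error(y, t)
        self.model.cleargrads()
        loss.backward()
        self.get_optimizer('main').update()
        reporter.report({'main/loss': loss})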
Example #4
def calc_goal_cornelbox(output, device=-1):
    epoch = 100
    outdir = os.path.dirname(output)

    materials = {}
    materials["light"] = DiffuseMaterial([1.0, 1.0, 1.0])
    materials["white"] = DiffuseMaterial([0.5, 0.5, 0.5])
    materials["green"] = DiffuseMaterial([0.0, 1.0, 0.0])
    materials["red"] = DiffuseMaterial([1.0, 0.01, 0.01])

    shape_floor = create_floor(materials)
    shape_shortblock = create_shortblock(materials)
    shape_tallblock = create_tallblock(materials)
    shape = CompositeShape([shape_floor, shape_shortblock, shape_tallblock])

    # origin=[278.0, 273.0, -800.0], direction=[0, 0, 1])
    # origin=[400.0, 300, -800.0], direction=norm([-0.1, 0, 1])
    fov = math.atan2(0.025, 0.035) * 180.0 / math.pi
    camera = PerspectiveCamera(512,
                               512,
                               fov,
                               origin=[270.0, 273.0, -800.0],
                               direction=norm([-0.1, 0, 0.8]))
    light = PointLight(origin=START_POS, color=[0.1, 0.1, 0.1])

    func = RaytraceFunc(shape=shape, light=light, camera=camera)

    # Expose the camera position and the camera basis vectors as two separate
    # Links so they can be optimized with different learning rates below.
    model1 = chainer.Link()
    AP(model1, 'camera_position', camera.origin)

    model2 = chainer.Link()
    AP(model2, 'camera_zaxis', camera.zaxis)
    AP(model2, 'camera_xaxis', camera.xaxis)
    AP(model2, 'camera_yaxis', camera.yaxis)

    if device >= 0:
        chainer.cuda.get_device_from_id(device).use()
        model1.to_gpu()
        model2.to_gpu()
        func.to_gpu()

    chainer.config.autotune = True
    chainer.config.cudnn_fast_batch_normalization = True

    optimizer1 = O.SGD(lr=1e-3)
    optimizer1.setup(model1)

    optimizer2 = O.SGD(lr=1e-7)
    optimizer2.setup(model2)

    #dataset
    train_dataset = RaytraceDataset(output)
    train_iter = chainer.iterators.SerialIterator(train_dataset,
                                                  1,
                                                  shuffle=True)

    dict_models = {'position': model1, 'direction': model2}
    dict_optimizers = {'position': optimizer1, 'direction': optimizer2}

    # updater
    updater = RaytraceUpdater(train_iter,
                              dict_models,
                              func,
                              dict_optimizers,
                              outdir,
                              device=device)

    #trainer
    trainer = training.Trainer(updater, (epoch, 'epoch'), outdir)

    log_interval = (1, 'iteration')

    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'camera_position/x',
        'camera_position/y', 'camera_position/z', 'camera_direction/x',
        'camera_direction/y', 'camera_direction/z'
    ]),
                   trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=1))

    trainer.run()

    return 0
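
A possible way to drive these entry points from the command line; the argparse flags and the single-module layout are assumptions for illustration, not part of the original examples:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('output', help='output image path / optimization target')
    parser.add_argument('--gpu', type=int, default=-1, help='GPU id, -1 for CPU')
    parser.add_argument('--mode', choices=['draw_goal', 'draw_start', 'calc'],
                        default='calc')
    args = parser.parse_args()

    if args.mode == 'draw_goal':
        draw_goal_cornelbox(args.output, device=args.gpu)
    elif args.mode == 'draw_start':
        draw_start_cornelbox(args.output, device=args.gpu)
    else:
        calc_goal_cornelbox(args.output, device=args.gpu)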