def record_render_commands():
    """Record the draw commands into each swapchain framebuffer's command buffer."""
    begin_info = hvk.command_buffer_begin_info()
    width, height = window.dimensions()

    clear_values = (
        hvk.clear_value(color=(0.0, 0.0, 0.0, 1.0)),
        hvk.clear_value(depth=1.0, stencil=0),
    )
    render_pass_begin = hvk.render_pass_begin_info(
        render_pass=render_pass,
        framebuffer=0,  # placeholder; patched per framebuffer in the loop below
        render_area=hvk.rect_2d(0, 0, width, height),
        clear_values=clear_values,
    )

    for fb, cmd in zip(framebuffers, cmd_draw):
        render_pass_begin.framebuffer = fb

        hvk.begin_command_buffer(api, cmd, begin_info)
        hvk.begin_render_pass(api, cmd, render_pass_begin, vk.SUBPASS_CONTENTS_INLINE)
        hvk.bind_pipeline(api, cmd, pipeline, vk.PIPELINE_BIND_POINT_GRAPHICS)
        hvk.bind_index_buffer(api, cmd, mesh_buffer, mesh_indices['offset'], vk.INDEX_TYPE_UINT16)
        hvk.bind_vertex_buffers(api, cmd, (mesh_buffer,), (mesh_positions['offset'],))
        hvk.draw_indexed(api, cmd, mesh_indices['count'])
        hvk.end_render_pass(api, cmd)
        hvk.end_command_buffer(api, cmd)
def compute_noise():
    """Run the noise compute shader, then transition the noise image for sampling.

    Records a compute dispatch over the 256x256 noise image, inserts a barrier
    moving it from GENERAL to SHADER_READ_ONLY_OPTIMAL, and submits the staging
    command buffer, blocking until the GPU is done.
    """
    hvk.begin_command_buffer(api, staging_cmd, hvk.command_buffer_begin_info())

    # Execute the compute shader (writes the noise image as a storage image)
    hvk.bind_pipeline(api, staging_cmd, compute_pipeline, vk.PIPELINE_BIND_POINT_COMPUTE)
    hvk.bind_descriptor_sets(api, staging_cmd, vk.PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_layout, (compute_descriptor_set, ))
    hvk.dispatch(api, staging_cmd, 256 // 16, 256 // 16, 1)  # 256x256 image / 16x16 workgroups

    # Move the image layout to shader read optimal for rendering.
    # BUGFIX: the write being made available comes from the compute shader
    # (storage image write), so the source access mask must be
    # ACCESS_SHADER_WRITE_BIT — the original's ACCESS_TRANSFER_WRITE_BIT does
    # not cover shader writes and leaves them potentially invisible to the
    # fragment shader read.
    barrier = hvk.image_memory_barrier(
        image=noise_image,
        old_layout=vk.IMAGE_LAYOUT_GENERAL,
        new_layout=vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        src_access_mask=vk.ACCESS_SHADER_WRITE_BIT,
        dst_access_mask=vk.ACCESS_SHADER_READ_BIT,
    )
    hvk.pipeline_barrier(api, staging_cmd, (barrier, ), dst_stage_mask=vk.PIPELINE_STAGE_FRAGMENT_SHADER_BIT)

    hvk.end_command_buffer(api, staging_cmd)

    # Submit the staging command buffer and wait for completion
    hvk.reset_fences(api, device, (staging_fence, ))
    submit_info = hvk.submit_info(command_buffers=(staging_cmd, ))
    hvk.queue_submit(api, render_queue.handle, (submit_info, ), fence=staging_fence)
    hvk.wait_for_fences(api, device, (staging_fence, ))
def _setup_meshes_resources(self, staging_alloc, staging_buffer, mesh_buffer_size):
    """Create the device-local mesh buffer and copy the staged mesh data into it.

    Returns a ``(mesh_alloc, mesh_buffer)`` pair for the final buffer.
    """
    engine, api, device = self.ctx
    mem = engine.memory_manager
    cmd = engine.setup_command_buffer

    # Final buffer allocation (index + vertex data, filled by transfer)
    usage_flags = (vk.BUFFER_USAGE_INDEX_BUFFER_BIT
                   | vk.BUFFER_USAGE_VERTEX_BUFFER_BIT
                   | vk.BUFFER_USAGE_TRANSFER_DST_BIT)
    mesh_buffer = hvk.create_buffer(
        api, device,
        hvk.buffer_create_info(size=mesh_buffer_size, usage=usage_flags))
    mesh_alloc = mem.alloc(
        mesh_buffer,
        vk.STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        (vk.MEMORY_PROPERTY_DEVICE_LOCAL_BIT, ))

    # Record the upload commands
    copy = vk.BufferCopy(src_offset=0, dst_offset=0, size=mesh_buffer_size)
    hvk.begin_command_buffer(api, cmd, hvk.command_buffer_begin_info())
    hvk.copy_buffer(api, cmd, staging_buffer, mesh_buffer, (copy, ))
    hvk.end_command_buffer(api, cmd)

    # Submit and block until the transfer completes
    engine.submit_setup_command(wait=True)

    return mesh_alloc, mesh_buffer
def mesh_to_device():
    """Copy the mesh from the staging buffer into a device-local buffer.

    Creates the device-local buffer/memory, records and submits the transfer,
    waits on the staging fence, then destroys the staging resources.
    """
    global mesh_buffer, mesh_memory, staging_mesh_buffer, staging_mesh_memory, mesh_data

    # Create mesh resources
    mesh_buffer = hvk.create_buffer(
        api, device,
        hvk.buffer_create_info(
            size=total_mesh_size,
            usage=vk.BUFFER_USAGE_TRANSFER_DST_BIT | vk.BUFFER_USAGE_INDEX_BUFFER_BIT | vk.BUFFER_USAGE_VERTEX_BUFFER_BIT))

    mesh_req = hvk.buffer_memory_requirements(api, device, mesh_buffer)
    mt_index = find_memory_type(vk.MEMORY_HEAP_DEVICE_LOCAL_BIT, vk.MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
    mesh_memory = hvk.allocate_memory(
        api, device,
        hvk.memory_allocate_info(allocation_size=mesh_req.size, memory_type_index=mt_index))
    hvk.bind_buffer_memory(api, device, mesh_buffer, mesh_memory, 0)

    # Upload mesh to device memory (recording)
    # BUGFIX: copy `total_mesh_size` bytes, not `mesh_req.size`. The memory
    # requirement of the destination may be larger than the buffer itself
    # (alignment padding), while the staging buffer only holds
    # `total_mesh_size` bytes — copying `mesh_req.size` could read past the
    # end of the staging buffer (invalid per vkCmdCopyBuffer valid usage).
    hvk.begin_command_buffer(api, staging_cmd, hvk.command_buffer_begin_info())
    region = vk.BufferCopy(src_offset=0, dst_offset=0, size=total_mesh_size)
    hvk.copy_buffer(api, staging_cmd, staging_mesh_buffer, mesh_buffer, (region, ))
    hvk.end_command_buffer(api, staging_cmd)

    # Upload mesh to device memory (submitting)
    submit_info = hvk.submit_info(command_buffers=(staging_cmd, ))
    hvk.queue_submit(api, render_queue.handle, (submit_info, ), fence=staging_fence)
    hvk.wait_for_fences(api, device, (staging_fence, ))

    # Free staging resources
    hvk.destroy_buffer(api, device, staging_mesh_buffer)
    hvk.free_memory(api, device, staging_mesh_memory)
    del mesh_data, staging_mesh_buffer, staging_mesh_memory
def run(self, data_scene, data_compute, group, sync, after, before, callback):
    """Record, submit and (when `sync` is set) wait on a compute dispatch.

    `before`/`after` are optional command collections executed on the device
    around the dispatch, then replayed app-side after the fence is signaled.
    Raises RuntimeError if `data_compute` is already running.
    """
    if data_compute in self.running:
        raise RuntimeError(f"Compute shader {data_compute.compute.name} is already running")

    engine, api, device = self.ctx

    queue = data_compute.queue
    cmd = data_scene.compute_commands[data_compute.command_index]
    pipeline = data_scene.compute_pipelines[data_compute.pipeline]
    group_x, group_y, group_z = group

    before = before if before is not None else ()
    after = after if after is not None else ()

    # Record the commands
    hvk.begin_command_buffer(api, cmd, hvk.command_buffer_begin_info())
    hvk.bind_pipeline(api, cmd, pipeline, vk.PIPELINE_BIND_POINT_COMPUTE)
    hvk.bind_descriptor_sets(api, cmd, vk.PIPELINE_BIND_POINT_COMPUTE, data_compute.pipeline_layout, data_compute.descriptor_sets)
    CommandsRunner.run_device(before, api, cmd, queue, data_scene)
    hvk.dispatch(api, cmd, group_x, group_y, group_z)
    CommandsRunner.run_device(after, api, cmd, queue, data_scene)
    hvk.end_command_buffer(api, cmd)

    # Execute the command buffer
    fence = data_compute.fence
    submit = hvk.submit_info(command_buffers=(cmd,))
    hvk.queue_submit(api, queue.handle, (submit,), fence)

    if sync:
        fences = (fence,)
        hvk.wait_for_fences(api, device, fences)
        hvk.reset_fences(api, device, fences)

        CommandsRunner.run_app(before, data_scene)
        CommandsRunner.run_app(after, data_scene)
        callback()
    else:
        self.running.add(data_compute)
        raise NotImplementedError("Compute without sync is not yet implemented")
def noise_layout_to_general():
    """Transition the noise image to GENERAL so the compute shader can write it."""
    # Images written as storage images must have the layout `IMAGE_LAYOUT_GENERAL`
    hvk.begin_command_buffer(api, staging_cmd, hvk.command_buffer_begin_info())

    # BUGFIX: the image is subsequently written by the compute shader as a
    # storage image (see compute_noise), not by a transfer operation, so the
    # destination side of the barrier must be ACCESS_SHADER_WRITE_BIT at the
    # compute-shader stage — the original used TRANSFER_WRITE / TRANSFER stage.
    barrier = hvk.image_memory_barrier(
        image=noise_image,
        new_layout=vk.IMAGE_LAYOUT_GENERAL,
        dst_access_mask=vk.ACCESS_SHADER_WRITE_BIT,
    )
    hvk.pipeline_barrier(api, staging_cmd, (barrier, ), dst_stage_mask=vk.PIPELINE_STAGE_COMPUTE_SHADER_BIT)

    hvk.end_command_buffer(api, staging_cmd)

    # Submit the staging command buffer and wait for completion
    hvk.reset_fences(api, device, (staging_fence, ))
    submit_info = hvk.submit_info(command_buffers=(staging_cmd, ))
    hvk.queue_submit(api, render_queue.handle, (submit_info, ), fence=staging_fence)
    hvk.wait_for_fences(api, device, (staging_fence, ))
def texture_to_device():
    """Upload the staged texture to a device-local image and create its view/sampler.

    Creates the image and its memory, copies every mipmap level from the
    staging buffer, transitions the image to SHADER_READ_ONLY_OPTIMAL, then
    frees the staging resources.
    """
    global texture_image, texture_image_memory, texture_image_layout, texture_view, texture_sampler
    global texture, staging_texture_buffer, staging_texture_memory

    # Create the vulkan image
    texture_image_layout = vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
    texture_image = hvk.create_image(
        api, device,
        hvk.image_create_info(
            format=texture.format,
            mip_levels=len(texture.mipmaps),
            extent=vk.Extent3D(texture.width, texture.height, texture.depth),
            usage=vk.IMAGE_USAGE_TRANSFER_DST_BIT | vk.IMAGE_USAGE_SAMPLED_BIT,
        ))

    img_req = hvk.image_memory_requirements(api, device, texture_image)
    # BUGFIX: the second argument is a memory *property* flag, not a heap flag.
    # The original passed MEMORY_HEAP_DEVICE_LOCAL_BIT twice; the correct pair
    # (heap flag, property flag) matches the call in mesh_to_device.
    mt_index = find_memory_type(vk.MEMORY_HEAP_DEVICE_LOCAL_BIT, vk.MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
    texture_image_memory = hvk.allocate_memory(
        api, device,
        hvk.memory_allocate_info(allocation_size=img_req.size, memory_type_index=mt_index))
    hvk.bind_image_memory(api, device, texture_image, texture_image_memory, 0)

    # Build the copy regions (1 for each mipmap)
    regions = []
    for i, m in enumerate(texture.mipmaps):
        region = hvk.buffer_image_copy(
            image_subresource=hvk.image_subresource_layers(mip_level=i),
            image_extent=vk.Extent3D(m.width, m.height, 1),
            buffer_offset=m.offset)
        regions.append(region)

    # Build the image barrier for the image layout transitions; the layout and
    # access fields are patched before each pipeline_barrier call below.
    barrier = hvk.image_memory_barrier(
        image=texture_image,
        new_layout=0,
        dst_access_mask=0,
        subresource_range=hvk.image_subresource_range(
            level_count=len(texture.mipmaps)))

    # Transfer the staging data to device
    hvk.begin_command_buffer(api, staging_cmd, hvk.command_buffer_begin_info())

    # UNDEFINED -> TRANSFER_DST_OPTIMAL before the copy
    barrier.new_layout = vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
    barrier.dst_access_mask = vk.ACCESS_TRANSFER_WRITE_BIT
    hvk.pipeline_barrier(api, staging_cmd, (barrier, ), dst_stage_mask=vk.PIPELINE_STAGE_TRANSFER_BIT)

    hvk.copy_buffer_to_image(api, staging_cmd, staging_texture_buffer, texture_image, vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions)

    # TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL for sampling
    barrier.old_layout = vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
    barrier.new_layout = vk.IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
    barrier.src_access_mask = vk.ACCESS_TRANSFER_WRITE_BIT
    barrier.dst_access_mask = vk.ACCESS_SHADER_READ_BIT
    hvk.pipeline_barrier(api, staging_cmd, (barrier, ), dst_stage_mask=vk.PIPELINE_STAGE_FRAGMENT_SHADER_BIT)

    hvk.end_command_buffer(api, staging_cmd)

    # Submit the staging command buffer and wait for completion
    hvk.reset_fences(api, device, (staging_fence, ))
    submit_info = hvk.submit_info(command_buffers=(staging_cmd, ))
    hvk.queue_submit(api, render_queue.handle, (submit_info, ), fence=staging_fence)
    hvk.wait_for_fences(api, device, (staging_fence, ))

    # Create the image view and the sampler
    texture_view = hvk.create_image_view(
        api, device,
        hvk.image_view_create_info(
            image=texture_image,
            format=texture.format,
            subresource_range=hvk.image_subresource_range(
                level_count=len(texture.mipmaps))))

    texture_sampler = hvk.create_sampler(
        api, device,
        hvk.sampler_create_info(
            mag_filter=vk.FILTER_LINEAR,
            min_filter=vk.FILTER_LINEAR,
            max_lod=len(texture.mipmaps),
        ))

    # Free staging resources
    hvk.destroy_buffer(api, device, staging_texture_buffer)
    hvk.free_memory(api, device, staging_texture_memory)
    del texture, staging_texture_buffer, staging_texture_memory
def _setup_image_layouts(self, staging_buffer, data_images):
    """Upload staged image data and move every image to its target layout.

    For each entry in `data_images`: transition the image to
    TRANSFER_DST_OPTIMAL, copy all mipmap levels/array layers from
    `staging_buffer`, then transition to the image's `target_layout`.
    Submits the engine's setup command buffer, waits for completion, and
    records the final layout/access mask back on each data image.
    """
    engine, api, device = self.ctx

    # Template barriers shared by all images; the image-specific fields
    # (image handle, level/layer counts, final layout) are patched inside
    # the loop before each pipeline_barrier call.
    to_transfer = hvk.image_memory_barrier(
        image=0,
        new_layout=vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        dst_access_mask=vk.ACCESS_TRANSFER_WRITE_BIT,
        subresource_range=hvk.image_subresource_range(level_count=0))

    to_final_layout = hvk.image_memory_barrier(
        image=0,
        old_layout=vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
        new_layout=0,
        src_access_mask=vk.ACCESS_TRANSFER_WRITE_BIT,
        dst_access_mask=0,
        subresource_range=hvk.image_subresource_range(level_count=0))

    cmd = engine.setup_command_buffer
    hvk.begin_command_buffer(api, cmd, hvk.command_buffer_begin_info())

    for data_image in data_images:
        image = data_image.image
        image_handle = data_image.image_handle

        # One copy region per mipmap level / array layer, offset into the
        # shared staging buffer by this image's base offset.
        regions = []
        for m in image.iter_mipmaps():
            r = hvk.buffer_image_copy(
                image_subresource=hvk.image_subresource_layers(
                    mip_level=m.level, base_array_layer=m.layer),
                image_extent=vk.Extent3D(m.width, m.height, 1),
                buffer_offset=data_image.base_staging_offset + m.offset)
            regions.append(r)

        # Patch the template barriers for this image
        to_transfer.image = image_handle
        to_transfer.subresource_range.level_count = image.mipmaps_levels
        to_transfer.subresource_range.layer_count = image.array_layers

        to_final_layout.image = image_handle
        to_final_layout.new_layout = data_image.target_layout
        to_final_layout.dst_access_mask = data_image.target_access_mask
        to_final_layout.subresource_range.level_count = image.mipmaps_levels
        to_final_layout.subresource_range.layer_count = image.array_layers

        # Transition -> copy -> transition to the final layout; the final
        # destination stage is derived from the target access mask.
        hvk.pipeline_barrier(api, cmd, (to_transfer, ), dst_stage_mask=vk.PIPELINE_STAGE_TRANSFER_BIT)
        hvk.copy_buffer_to_image(api, cmd, staging_buffer, image_handle, vk.IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, regions)
        hvk.pipeline_barrier(
            api, cmd, (to_final_layout, ),
            dst_stage_mask=hvk.dst_stage_mask_for_access_mask(
                to_final_layout.dst_access_mask))

    hvk.end_command_buffer(api, cmd)

    # Submit the commands, then update the layout values in the images
    engine.submit_setup_command(wait=True)
    for img in data_images:
        img.layout = img.target_layout
        img.access_mask = img.target_access_mask