Archived
1
Fork 0

Add more WebGPU draw commands

This commit is contained in:
Joshua Goins 2022-03-06 21:40:58 -05:00
parent 088fe9875c
commit e1767e9363
7 changed files with 482 additions and 17 deletions

View file

@ -4,6 +4,8 @@
#include "gfx.hpp"
class GFXWebGPUPipeline;
class GFXWebGPU : public GFX {
public:
bool initialize(const GFXCreateInfo& createInfo) override;
@ -45,7 +47,20 @@ private:
WGPUSwapChain create_swapchain();
WGPUShaderModule create_shader(const uint32_t* code, uint32_t size, std::string_view label);
uint64_t get_bind_group_hash(GFXWebGPUPipeline* pipeline);
void cache_bind_group_state(GFXWebGPUPipeline* pipeline, WGPUBindGroupLayout group_layout);
void reset_bind_state();
WGPUDevice device;
WGPUQueue queue;
WGPUSwapChain swapchain;
struct BoundShaderBuffer {
GFXBuffer* buffer = nullptr;
uint32_t size = 0, offset = 0;
};
std::array<BoundShaderBuffer, 25> boundShaderBuffers;
std::array<GFXTexture*, 25> boundTextures;
std::array<GFXSampler*, 25> boundSamplers;
};

View file

@ -338,6 +338,7 @@ GFXPipeline* GFXWebGPU::create_graphics_pipeline(const GFXGraphicsPipelineCreate
}
prism::log("building pipeline {}", info.label);
prism::log("--------");
// alright, webgpu in their infinite wisdom does not allow arbitrary binding locations
// so, to be consistent with what vulkan, metal and virtually every other API allows,
@ -359,8 +360,6 @@ GFXPipeline* GFXWebGPU::create_graphics_pipeline(const GFXGraphicsPipelineCreate
description.format = toVertFormat(attribute.format);
description.offset = attribute.offset;
prism::log("{} of {}", attribute.binding, info.vertex_input.inputs.size());
attributes[attribute.binding].push_back(description);
}
@ -369,13 +368,6 @@ GFXPipeline* GFXWebGPU::create_graphics_pipeline(const GFXGraphicsPipelineCreate
for (auto& binding : info.vertex_input.inputs) {
prism::log("binding loc {}", binding.location);
prism::log("debugging attributes at loc {}", binding.location);
for(auto attr : attributes[binding.location]) {
prism::log("- attrib {}", attr.shaderLocation);
prism::log("fmt: {}", utility::enum_to_string(attr.format));
}
WGPUVertexBufferLayout b;
b.attributes = attributes[binding.location].data();
b.attributeCount = attributes[binding.location].size();
@ -411,6 +403,52 @@ GFXPipeline* GFXWebGPU::create_graphics_pipeline(const GFXGraphicsPipelineCreate
descriptor.primitive.stripIndexFormat = WGPUIndexFormat_Uint16;
descriptor.primitive.topology = WGPUPrimitiveTopology_TriangleList;
// create bind group layout
std::vector<WGPUBindGroupLayoutEntry> group_entries = {};
for (auto& binding : info.shader_input.bindings) {
// ignore push constants
if (binding.type == GFXBindingType::PushConstant)
continue;
WGPUBindGroupLayoutEntry entry = {};
entry.binding = binding.binding;
entry.visibility = WGPUShaderStage_Vertex | WGPUShaderStage_Fragment;
switch (binding.type) {
case GFXBindingType::StorageBuffer:
{
entry.buffer.type = WGPUBufferBindingType_Uniform;
}
break;
case GFXBindingType::Texture: {
entry.texture.sampleType = WGPUTextureSampleType_Force32;
}
break;
case GFXBindingType::StorageImage:
{
entry.storageTexture.access = WGPUStorageTextureAccess_WriteOnly;
}
break;
case GFXBindingType::SampledImage:
{
entry.texture.sampleType = WGPUTextureSampleType_Force32;
}
break;
case GFXBindingType::Sampler: {
entry.sampler.type = WGPUSamplerBindingType_Comparison;
}
break;
}
group_entries.push_back(entry);
}
WGPUBindGroupLayoutDescriptor bind_group_layout_descriptor = {};
bind_group_layout_descriptor.entryCount = group_entries.size();
bind_group_layout_descriptor.entries = group_entries.data();
pipeline->bind_group_layout = wgpuDeviceCreateBindGroupLayout(device, &bind_group_layout_descriptor);
pipeline->render_handle = wgpuDeviceCreateRenderPipeline(device, &descriptor);
return pipeline;
@ -446,10 +484,415 @@ GFXPipeline* GFXWebGPU::create_compute_pipeline(const GFXComputePipelineCreateIn
// Translates a recorded GFXCommandBuffer into WebGPU passes and submits the
// resulting command buffer to the queue. Render/compute passes are opened
// lazily via need_encoder() and bind groups are created/cached on demand.
void GFXWebGPU::submit(GFXCommandBuffer* command_buffer, const platform::window_ptr window) {
    WGPUTextureView backBufView = wgpuSwapChainGetCurrentTextureView(swapchain);

    // NOTE(review): the original also created a second encoder here and
    // immediately finished it into a command buffer that was never submitted
    // or released — dead code removed.
    WGPUCommandEncoder command_encoder = wgpuDeviceCreateCommandEncoder(device, nullptr);

    GFXWebGPUPipeline* current_pipeline = nullptr;

    WGPURenderPassEncoder render_encoder = nullptr;
    WGPUComputePassEncoder compute_encoder = nullptr;

    GFXWebGPURenderPass* current_render_pass = nullptr;
    GFXWebGPUFramebuffer* current_framebuffer = nullptr;

    WGPUColor current_clear_color = {};
    Viewport current_viewport {}; // lol webgpu doesn't even have a viewport type??

    enum class CurrentEncoder {
        None,
        Render,
        Compute,
    } current_encoder = CurrentEncoder::None;

    // Backing storage for the depth attachment pointed to by the render pass
    // descriptor. Function scope keeps the pointer valid until
    // wgpuCommandEncoderBeginRenderPass; the original heap-allocated one per
    // pass and leaked it.
    WGPURenderPassDepthStencilAttachment depth_attachment = {};

    // Ensure a pass encoder of the requested kind is open, closing any other
    // open pass first. needs_reset forces a fresh pass of the same kind.
    const auto need_encoder = [&](CurrentEncoder encoder, bool needs_reset = false) {
        if(encoder != current_encoder || needs_reset) {
            if(render_encoder != nullptr)
                wgpuRenderPassEncoderEndPass(render_encoder);

            if(compute_encoder != nullptr)
                wgpuComputePassEncoderEndPass(compute_encoder);

            render_encoder = nullptr;
            compute_encoder = nullptr;
        }

        if(current_encoder == encoder && !needs_reset)
            return;

        switch(encoder) {
            case CurrentEncoder::None:
                break;
            case CurrentEncoder::Render:
            {
                WGPURenderPassDescriptor render_pass_descriptor = {};

                std::vector<WGPURenderPassColorAttachment> color_attachments;

                if(current_framebuffer != nullptr) {
                    for(const auto& attachment : current_framebuffer->attachments) {
                        // Depth attachments are distinguished purely by format.
                        if(attachment->format == WGPUTextureFormat_Depth32Float) {
                            depth_attachment = {};
                            depth_attachment.view = attachment->view;
                            depth_attachment.depthLoadOp = WGPULoadOp_Clear;
                            depth_attachment.depthStoreOp = WGPUStoreOp_Store;

                            render_pass_descriptor.depthStencilAttachment = &depth_attachment;
                        } else {
                            WGPURenderPassColorAttachment color_attachment = {};
                            color_attachment.view = attachment->view;
                            color_attachment.loadOp = WGPULoadOp_Clear;
                            color_attachment.storeOp = WGPUStoreOp_Store;
                            color_attachment.clearColor = current_clear_color;

                            color_attachments.push_back(color_attachment);
                        }
                    }
                } else {
                    // we are rendering to the screen
                    WGPURenderPassColorAttachment color_attachment = {};
                    color_attachment.view = backBufView;
                    color_attachment.loadOp = WGPULoadOp_Clear;
                    color_attachment.storeOp = WGPUStoreOp_Store;
                    color_attachment.clearColor = current_clear_color;

                    color_attachments.push_back(color_attachment);
                }

                render_pass_descriptor.colorAttachmentCount = color_attachments.size();
                render_pass_descriptor.colorAttachments = color_attachments.data();

                render_encoder = wgpuCommandEncoderBeginRenderPass(command_encoder, &render_pass_descriptor);

                // TODO: re-apply current_viewport here once SetViewport state
                // is meant to survive pass restarts
                //if(current_viewport.width != 0.0f && current_viewport.height != 0.0f)
                //    wgpuRenderPassEncoderSetViewport(...);
            }
                break;
            case CurrentEncoder::Compute:
            {
                WGPUComputePassDescriptor compute_pass_descriptor = {};
                compute_encoder = wgpuCommandEncoderBeginComputePass(command_encoder, &compute_pass_descriptor);
            }
                break;
        }

        current_encoder = encoder;
    };

    uint64_t last_bind_group_hash = 0;

    // Bind the bind group matching the currently bound resources, building and
    // caching it on first use. Returns false when drawing must be skipped.
    // bug fix: "[&] -> bool" (no parameter list) is only valid C++23; use "()".
    const auto try_bind_group = [&]() -> bool {
        if(current_pipeline == nullptr)
            return false;

        const uint64_t bind_group_hash = get_bind_group_hash(current_pipeline);
        if(last_bind_group_hash != bind_group_hash) {
            if(!current_pipeline->cached_bind_groups.count(bind_group_hash))
                cache_bind_group_state(current_pipeline, current_pipeline->bind_group_layout);

            auto& bind_group = current_pipeline->cached_bind_groups[bind_group_hash];
            if(bind_group == nullptr)
                return false;

            // bug fix: Dispatch also routes through here — bind on whichever
            // pass encoder is actually open instead of always the render one.
            if(current_encoder == CurrentEncoder::Compute)
                wgpuComputePassEncoderSetBindGroup(compute_encoder, 0, bind_group, 0, nullptr);
            else
                wgpuRenderPassEncoderSetBindGroup(render_encoder, 0, bind_group, 0, nullptr);

            last_bind_group_hash = bind_group_hash;
        }

        return true;
    };

    for(auto command : command_buffer->commands) {
        switch (command.type) {
            case GFXCommandType::Invalid:
                break;
            case GFXCommandType::SetRenderPass:
            {
                current_clear_color = {};
                current_clear_color.r = command.data.set_render_pass.clear_color.r;
                current_clear_color.g = command.data.set_render_pass.clear_color.g;
                current_clear_color.b = command.data.set_render_pass.clear_color.b;
                current_clear_color.a = command.data.set_render_pass.clear_color.a;

                current_framebuffer = (GFXWebGPUFramebuffer*)command.data.set_render_pass.framebuffer;
                current_render_pass = (GFXWebGPURenderPass*)command.data.set_render_pass.render_pass;
                current_viewport = {};

                need_encoder(CurrentEncoder::Render, true);
            }
                break;
            case GFXCommandType::EndRenderPass:
            {
                current_render_pass = nullptr;

                if(current_encoder == CurrentEncoder::Render) {
                    wgpuRenderPassEncoderEndPass(render_encoder);

                    // bug fix: mark the pass closed, otherwise the ended
                    // encoder would be ended again at function exit and
                    // reused by the next render command.
                    render_encoder = nullptr;
                    current_encoder = CurrentEncoder::None;
                }
            }
                break; // bug fix: previously fell through into SetGraphicsPipeline
            case GFXCommandType::SetGraphicsPipeline:
            {
                need_encoder(CurrentEncoder::Render);

                current_pipeline = (GFXWebGPUPipeline*)command.data.set_graphics_pipeline.pipeline;
                if(current_pipeline != nullptr) {
                    wgpuRenderPassEncoderSetPipeline(render_encoder, current_pipeline->render_handle);

                    reset_bind_state();
                    last_bind_group_hash = 0;
                }
            }
                break;
            case GFXCommandType::SetComputePipeline:
            {
                need_encoder(CurrentEncoder::Compute);

                current_pipeline = (GFXWebGPUPipeline*)command.data.set_compute_pipeline.pipeline;
                if(current_pipeline != nullptr) {
                    wgpuComputePassEncoderSetPipeline(compute_encoder, current_pipeline->compute_handle);

                    reset_bind_state();
                    last_bind_group_hash = 0;
                }
            }
                break;
            case GFXCommandType::SetVertexBuffer:
            {
                need_encoder(CurrentEncoder::Render);

                wgpuRenderPassEncoderSetVertexBuffer(render_encoder,
                                                     command.data.set_vertex_buffer.index,
                                                     ((GFXWebGPUBuffer*)command.data.set_vertex_buffer.buffer)->handle,
                                                     command.data.set_vertex_buffer.offset,
                                                     ((GFXWebGPUBuffer*)command.data.set_vertex_buffer.buffer)->size);
            }
                break;
            case GFXCommandType::SetIndexBuffer:
            {
                need_encoder(CurrentEncoder::Render);

                wgpuRenderPassEncoderSetIndexBuffer(render_encoder,
                                                    ((GFXWebGPUBuffer*)command.data.set_index_buffer.buffer)->handle,
                                                    command.data.set_index_buffer.index_type == IndexType::UINT32 ? WGPUIndexFormat_Uint32 : WGPUIndexFormat_Uint16,
                                                    0,
                                                    ((GFXWebGPUBuffer*)command.data.set_index_buffer.buffer)->size);
            }
                break;
            case GFXCommandType::BindShaderBuffer:
            {
                // Deferred: recorded into bound state, realized in try_bind_group().
                BoundShaderBuffer bsb;
                bsb.buffer = command.data.bind_shader_buffer.buffer;
                bsb.offset = command.data.bind_shader_buffer.offset;
                bsb.size = command.data.bind_shader_buffer.size;

                boundShaderBuffers[command.data.bind_shader_buffer.index] = bsb;
            }
                break;
            case GFXCommandType::BindTexture:
            {
                boundTextures[command.data.bind_texture.index] = command.data.bind_texture.texture;
            }
                break;
            case GFXCommandType::BindSampler:
            {
                boundSamplers[command.data.bind_sampler.index] = command.data.bind_sampler.sampler;
            }
                break;
            case GFXCommandType::Draw:
            {
                if(current_pipeline == nullptr)
                    continue;

                if(try_bind_group()) {
                    wgpuRenderPassEncoderDraw(render_encoder,
                                              command.data.draw.vertex_count,
                                              command.data.draw.instance_count,
                                              command.data.draw.vertex_offset,
                                              command.data.draw.base_instance);
                }
            }
                break;
            case GFXCommandType::DrawIndexed:
            {
                if(current_pipeline == nullptr)
                    continue;

                if(try_bind_group()) {
                    // NOTE(review): instanceCount is hard-coded to 1, and
                    // vertex_offset is passed as both firstIndex and
                    // baseVertex — looks suspicious; confirm against the
                    // draw_indexed command payload before changing.
                    wgpuRenderPassEncoderDrawIndexed(render_encoder,
                                                     command.data.draw_indexed.index_count,
                                                     1,
                                                     command.data.draw_indexed.vertex_offset,
                                                     command.data.draw_indexed.vertex_offset,
                                                     command.data.draw_indexed.base_instance);
                }
            }
                break;
            case GFXCommandType::MemoryBarrier:
                // not supported
                break;
            case GFXCommandType::CopyTexture:
            {
                // TODO: blit op
            }
                break;
            case GFXCommandType::SetViewport:
            {
                need_encoder(CurrentEncoder::Render);

                current_viewport = command.data.set_viewport.viewport;

                wgpuRenderPassEncoderSetViewport(render_encoder,
                                                 current_viewport.x,
                                                 current_viewport.y,
                                                 current_viewport.width,
                                                 current_viewport.height,
                                                 current_viewport.min_depth,
                                                 current_viewport.max_depth);
            }
                break;
            case GFXCommandType::SetScissor:
            {
                need_encoder(CurrentEncoder::Render);

                wgpuRenderPassEncoderSetScissorRect(render_encoder,
                                                    command.data.set_scissor.rect.offset.x,
                                                    command.data.set_scissor.rect.offset.y,
                                                    command.data.set_scissor.rect.extent.width,
                                                    command.data.set_scissor.rect.extent.height);
            }
                break;
            case GFXCommandType::GenerateMipmaps:
            {
                // TODO: not supported by webgpu?
            }
                break;
            case GFXCommandType::SetDepthBias: {
                need_encoder(CurrentEncoder::Render);

                // TODO: not supported by webgpu?
            }
                break;
            case GFXCommandType::PushGroup:
            {
                // TODO: stub
            }
                break;
            case GFXCommandType::PopGroup:
            {
                // TODO: stub
            }
                break;
            case GFXCommandType::InsertLabel:
            {
                // TODO: stub
            }
                break;
            case GFXCommandType::Dispatch:
            {
                need_encoder(CurrentEncoder::Compute);

                if(try_bind_group()) {
                    wgpuComputePassEncoderDispatch(compute_encoder,
                                                   command.data.dispatch.group_count_x,
                                                   command.data.dispatch.group_count_y,
                                                   command.data.dispatch.group_count_z);
                }
            }
                break;
            default:
                prism::log("Unhandled GFX command {}", utility::enum_to_string(command.type));
        }
    }

    // Close any pass that is still open before finishing the encoder.
    if(render_encoder != nullptr)
        wgpuRenderPassEncoderEndPass(render_encoder);

    if(compute_encoder != nullptr)
        wgpuComputePassEncoderEndPass(compute_encoder);

    WGPUCommandBuffer commands = wgpuCommandEncoderFinish(command_encoder, nullptr);
    wgpuQueueSubmit(queue, 1, &commands);

    // NOTE(review): command_encoder itself is never released — presumably
    // should call wgpuCommandEncoderRelease; confirm the release semantics of
    // the webgpu.h revision in use.
    wgpuCommandBufferRelease(commands);
    wgpuTextureViewRelease(backBufView);
}
}
// Computes a hash of (pipeline, currently bound buffers/textures/samplers)
// used as the key into the pipeline's cached_bind_groups map. The slot index
// weights each resource so the same resource bound in a different slot hashes
// differently.
uint64_t GFXWebGPU::get_bind_group_hash(GFXWebGPUPipeline *pipeline) {
    uint64_t hash = reinterpret_cast<uint64_t>(pipeline);

    // bug fix: the slot counter was previously never incremented, so every
    // slot was weighted identically (multiplier always 1).
    uint64_t i = 0;
    for (auto& buffer : boundShaderBuffers) {
        if (buffer.buffer != nullptr)
            hash += reinterpret_cast<uint64_t>(buffer.buffer) * (i + 1);
        ++i;
    }

    i = 0;
    for (auto& texture : boundTextures) {
        if (texture != nullptr)
            hash += reinterpret_cast<uint64_t>(texture) * (i + 1);
        ++i;
    }

    // bug fix: samplers were omitted from the hash even though
    // cache_bind_group_state() includes them in the bind group — two states
    // differing only in a bound sampler would reuse a stale cached group.
    i = 0;
    for (auto& sampler : boundSamplers) {
        if (sampler != nullptr)
            hash += reinterpret_cast<uint64_t>(sampler) * (i + 1);
        ++i;
    }

    return hash;
}
// Builds a WGPUBindGroup from the currently bound buffers/textures/samplers
// and stores it in the pipeline's cache under the current bind-state hash
// (see get_bind_group_hash).
void GFXWebGPU::cache_bind_group_state(GFXWebGPUPipeline* pipeline, WGPUBindGroupLayout group_layout) {
uint64_t hash = get_bind_group_hash(pipeline);
std::vector<WGPUBindGroupEntry> group_entries;
// One entry per occupied shader-buffer slot; the slot index becomes the
// binding number.
// NOTE(review): buffer, texture and sampler slots all map straight to
// entry.binding, so slot 0 of each array would collide on binding 0 —
// presumably the pipeline layout assigns disjoint slot numbers; verify
// against how bindings are allocated in create_graphics_pipeline.
for (auto [i, buffer] : utility::enumerate(boundShaderBuffers)) {
if (buffer.buffer != nullptr) {
auto wgpu_buffer = (GFXWebGPUBuffer*)buffer.buffer;
WGPUBindGroupEntry entry = {};
entry.buffer = wgpu_buffer->handle;
entry.size = buffer.size;
entry.offset = buffer.offset;
entry.binding = i;
group_entries.push_back(entry);
}
}
for (auto [i, texture] : utility::enumerate(boundTextures)) {
if (texture != nullptr) {
auto wgpu_texture = (GFXWebGPUTexture*) texture;
// NOTE(review): this entry carries both a textureView and a sampler; a
// WGPUBindGroupEntry normally references a single resource — confirm the
// webgpu.h revision in use accepts combined entries.
WGPUBindGroupEntry entry = {};
entry.textureView = wgpu_texture->view;
entry.sampler = wgpu_texture->sampler;
entry.binding = i;
group_entries.push_back(entry);
}
}
for (auto [i, sampler] : utility::enumerate(boundSamplers)) {
if (sampler != nullptr) {
auto wgpu_sampler = (GFXWebGPUSampler*) sampler;
WGPUBindGroupEntry entry = {};
entry.sampler = wgpu_sampler->handle;
entry.binding = i;
group_entries.push_back(entry);
}
}
// Create the bind group against the pipeline's layout and cache it; the
// cached handle is looked up again by hash in try_bind_group().
WGPUBindGroupDescriptor group_descriptor = {};
group_descriptor.layout = group_layout;
group_descriptor.entryCount = group_entries.size();
group_descriptor.entries = group_entries.data();
pipeline->cached_bind_groups[hash] = wgpuDeviceCreateBindGroup(device, &group_descriptor);
}
// Clears all tracked resource bindings; called whenever a new pipeline is
// bound so stale resources from the previous pipeline are not reused.
void GFXWebGPU::reset_bind_state() {
    // Only the buffer pointer marks a shader-buffer slot as occupied, so
    // clearing it alone is sufficient.
    for (auto& bsb : boundShaderBuffers)
        bsb.buffer = nullptr;

    boundTextures.fill(nullptr);
    boundSamplers.fill(nullptr);
}

View file

@ -2,7 +2,9 @@
#include "gfx_framebuffer.hpp"
class GFXWebGPUTexture;
// WebGPU framebuffer: the set of texture attachments a render pass draws into.
class GFXWebGPUFramebuffer : public GFXFramebuffer {
public:
// Attachments in declaration order; depth attachments are distinguished by
// their texture format when the render pass descriptor is built.
std::vector<GFXWebGPUTexture*> attachments;
};

View file

@ -1,5 +1,7 @@
#pragma once
#include <map>
#include "gfx_pipeline.hpp"
class GFXWebGPUPipeline : public GFXPipeline {
@ -7,5 +9,7 @@ public:
WGPURenderPipeline render_handle = nullptr;
WGPUComputePipeline compute_handle = nullptr;
WGPUBindGroup bind_group = nullptr;
WGPUBindGroupLayout bind_group_layout = nullptr;
std::map<uint64_t, WGPUBindGroup> cached_bind_groups;
};

View file

@ -4,5 +4,5 @@
// WebGPU sampler wrapper holding the native WGPUSampler handle.
class GFXWebGPUSampler : public GFXSampler {
public:
WGPUSampler handle = nullptr;
};

View file

@ -5,4 +5,7 @@
// WebGPU texture wrapper: the native texture plus a view and sampler used when
// the texture is bound or attached.
class GFXWebGPUTexture : public GFXTexture {
public:
WGPUTexture handle = nullptr;
WGPUTextureFormat format = WGPUTextureFormat_Undefined;
// View used for bind group entries and framebuffer attachments.
WGPUTextureView view = nullptr;
// NOTE(review): sampler bundled with the texture — consumed by
// cache_bind_group_state for combined texture entries; confirm ownership.
WGPUSampler sampler = nullptr;
};

View file

@ -12,8 +12,6 @@
GFX* gfx_interface = nullptr;
EM_BOOL draw(double time, void *userData) {
printf("Draw\n");
engine->update(time);
engine->begin_frame(time);