Merge ../wgpu-rs into reunion

This commit is contained in:
Dzmitry Malyshau 2021-06-02 21:31:02 -04:00
commit 4fc478513a
100 changed files with 23141 additions and 10 deletions

2
.gitattributes vendored Normal file
View File

@ -0,0 +1,2 @@
*.mtl binary
*.obj binary

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View File

@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Issue or enhancement in wgpu-core
url: https://github.com/gfx-rs/wgpu/issues/new/choose
about: Issues with or enhancements for the core logic, validation, or the backends should go here.
- name: Question about wgpu
url: https://github.com/gfx-rs/wgpu-rs/discussions/new
about: Any questions about how to use wgpu should go here.

View File

@ -4,7 +4,4 @@ about: Strange things you want to tell us
title: '' title: ''
labels: question labels: question
assignees: '' assignees: ''
--- ---

View File

@ -34,13 +34,18 @@ jobs:
- name: Additional core features - name: Additional core features
run: cargo check --manifest-path wgpu-core/Cargo.toml --features trace --target ${{ env.TARGET }} run: cargo check --manifest-path wgpu-core/Cargo.toml --features trace --target ${{ env.TARGET }}
webgl_build: wasm:
name: Web Assembly name: Web Assembly
runs-on: ubuntu-18.04 runs-on: ubuntu-18.04
env:
RUSTFLAGS: --cfg=web_sys_unstable_apis
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- run: rustup target add wasm32-unknown-unknown - run: rustup target add wasm32-unknown-unknown
- run: cargo build --manifest-path wgpu-core/Cargo.toml --target wasm32-unknown-unknown - name: Check WebGPU
run: cargo check --all-targets --target=wasm32-unknown-unknown
- name: Check WebGL
run: cargo check --all-targets --target=wasm32-unknown-unknown --features webgl
build: build:
name: ${{ matrix.name }} name: ${{ matrix.name }}
@ -117,6 +122,20 @@ jobs:
- if: matrix.channel == 'nightly' - if: matrix.channel == 'nightly'
run: cargo test -- --nocapture run: cargo test -- --nocapture
docs:
runs-on: [ubuntu-18.04]
steps:
- uses: actions/checkout@v2
- name: Install latest nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
continue-on-error: true
- name: cargo doc
run: cargo --version; cargo doc --no-deps
continue-on-error: true
lint: lint:
name: Clippy name: Clippy
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -132,3 +151,7 @@ jobs:
with: with:
command: clippy command: clippy
args: -- -D warnings args: -- -D warnings
- uses: actions-rs/cargo@v1
with:
command: fmt
args: -- --check

45
.github/workflows/docs.yml vendored Normal file
View File

@ -0,0 +1,45 @@
name: Documentation
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout the code
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Install latest nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
continue-on-error: true
- name: Add EGL for OpenGL
run: |
sudo apt-get update -y -qq
sudo apt-get install -y -qq libegl1-mesa-dev
- name: Build the docs (nightly)
run: |
cargo +nightly doc --lib --all-features
- name: Build the docs (stable)
run: cargo +stable doc --lib --all-features
if: ${{ failure() }}
- name: Deploy the docs
uses: JamesIves/github-pages-deploy-action@releases/v3
with:
ACCESS_TOKEN: ${{ secrets.WEB_DEPLOY }}
FOLDER: target/doc
REPOSITORY_NAME: gfx-rs/wgpu-rs.github.io
BRANCH: master
TARGET_FOLDER: doc

47
.github/workflows/publish.yml vendored Normal file
View File

@ -0,0 +1,47 @@
name: Publish
on:
push:
branches:
- gecko
env:
RUSTFLAGS: --cfg=web_sys_unstable_apis
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout the code
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Install Rust WASM toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
target: wasm32-unknown-unknown
- name: Build the examples
run: cargo build --release --target wasm32-unknown-unknown --examples
- name: Install wasm-bindgen-cli
run: cargo install wasm-bindgen-cli
- name: Generate JS bindings for the examples
run: |
for i in target/wasm32-unknown-unknown/release/examples/*.wasm;
do
wasm-bindgen --no-typescript --out-dir target/generated --web "$i";
done
- name: Deploy the examples
uses: JamesIves/github-pages-deploy-action@releases/v3
with:
ACCESS_TOKEN: ${{ secrets.WEB_DEPLOY }}
FOLDER: target/generated
REPOSITORY_NAME: gfx-rs/wgpu-rs.github.io
BRANCH: master
TARGET_FOLDER: examples/wasm

15
.gitignore vendored
View File

@ -1,8 +1,17 @@
/target # Generated by Cargo
# will have compiled files and executables
/target/
# These are backup files generated by rustfmt
**/*.rs.bk **/*.rs.bk
#Cargo.lock
# Other
.fuse_hidden* .fuse_hidden*
.DS_Store .DS_Store
# IDE/Editor configuration files
.vscode .vscode
.vs
.idea .idea
# Output from capture example
wgpu/red.png

19
wgpu/CHANGELOG.md Normal file
View File

@ -0,0 +1,19 @@
# Change Log
### v0.8 (2021-04-29)
- See https://github.com/gfx-rs/wgpu/blob/v0.8/CHANGELOG.md#v08-2021-04-29
- Naga is the default shader conversion path on Metal, Vulkan, and OpenGL
- SPIRV-Cross is optionally enabled with "cross" feature
- All of the examples (except "texture-array") now use WGSL
### v0.7 (2021-01-31)
- See https://github.com/gfx-rs/wgpu/blob/v0.7/CHANGELOG.md#v07-2020-08-30
- Features:
- (beta) WGSL support
- better error messages
- API changes:
- new `ShaderModuleDescriptor`
- new `RenderEncoder`
### v0.6.2 (2020-11-24)
- don't panic in the staging belt if the channel is dropped

247
wgpu/Cargo.toml Normal file
View File

@ -0,0 +1,247 @@
[package]
name = "wgpu"
version = "0.8.0"
authors = ["wgpu developers"]
edition = "2018"
description = "Rusty WebGPU API wrapper"
homepage = "https://github.com/gfx-rs/wgpu-rs"
repository = "https://github.com/gfx-rs/wgpu-rs"
keywords = ["graphics"]
license = "MPL-2.0"
exclude = ["etc/**/*", "examples/**/*", "tests/**/*", "Cargo.lock", "target/**/*"]
[package.metadata.docs.rs]
all-features = true
[lib]
[features]
default = []
trace = ["serde", "wgc/trace"]
replay = ["serde", "wgc/replay"]
webgl = ["wgc"]
# Enable SPIRV-Cross
cross = ["wgc/cross"]
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "eadaa1b7d8f585761e28445904fe619b180aca0d"
features = ["raw-window-handle"]
[target.'cfg(target_arch = "wasm32")'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "eadaa1b7d8f585761e28445904fe619b180aca0d"
features = ["raw-window-handle"]
optional = true
[dependencies.wgt]
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "eadaa1b7d8f585761e28445904fe619b180aca0d"
[dependencies]
arrayvec = "0.5"
log = "0.4"
parking_lot = "0.11"
raw-window-handle = "0.3"
serde = { version = "1", features = ["derive"], optional = true }
smallvec = "1"
[dev-dependencies]
bytemuck = { version = "1.4", features = ["derive"] }
cgmath = "0.18"
ddsfile = "0.4"
log = "0.4"
noise = "0.7"
obj = "0.10"
png = "0.16"
rand = { version = "0.7.2", features = ["wasm-bindgen"] }
winit = { version = "0.24", features = ["web-sys"] }
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies]
async-executor = "1.0"
pollster = "0.2"
env_logger = "0.8"
# used to test all the example shaders
[dev-dependencies.naga]
git = "https://github.com/gfx-rs/naga"
tag = "gfx-25"
features = ["wgsl-in"]
# used to generate SPIR-V for the Web target
[target.'cfg(target_arch = "wasm32")'.dependencies.naga]
git = "https://github.com/gfx-rs/naga"
tag = "gfx-25"
features = ["wgsl-in", "spv-out"]
[[example]]
name="hello-compute"
path="examples/hello-compute/main.rs"
test = true
[patch."https://github.com/gfx-rs/wgpu"]
#wgpu-types = { path = "../wgpu/wgpu-types" }
#wgpu-core = { path = "../wgpu/wgpu-core" }
[patch."https://github.com/gfx-rs/subscriber"]
#wgpu-subscriber = { version = "0.1", path = "../subscriber" }
[patch."https://github.com/gfx-rs/naga"]
#naga = { path = "../naga" }
[patch."https://github.com/zakarumych/gpu-descriptor"]
#gpu-descriptor = { path = "../gpu-descriptor/gpu-descriptor" }
[patch."https://github.com/zakarumych/gpu-alloc"]
#gpu-alloc = { path = "../gpu-alloc/gpu-alloc" }
[patch."https://github.com/gfx-rs/gfx"]
#gfx-hal = { path = "../gfx/src/hal" }
#gfx-backend-empty = { path = "../gfx/src/backend/empty" }
#gfx-backend-vulkan = { path = "../gfx/src/backend/vulkan" }
#gfx-backend-gl = { path = "../gfx/src/backend/gl" }
#gfx-backend-dx12 = { path = "../gfx/src/backend/dx12" }
#gfx-backend-dx11 = { path = "../gfx/src/backend/dx11" }
#gfx-backend-metal = { path = "../gfx/src/backend/metal" }
[patch.crates-io]
#web-sys = { path = "../wasm-bindgen/crates/web-sys" }
#js-sys = { path = "../wasm-bindgen/crates/js-sys" }
#wasm-bindgen = { path = "../wasm-bindgen" }
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen = "0.2.73" # remember to change version in wiki as well
web-sys = { version = "=0.3.50", features = [
"Document",
"Navigator",
"Node",
"NodeList",
"Gpu",
"GpuAdapter",
"GpuAdapterFeatures",
"GpuAdapterLimits",
"GpuAddressMode",
"GpuBindGroup",
"GpuBindGroupDescriptor",
"GpuBindGroupEntry",
"GpuBindGroupLayout",
"GpuBindGroupLayoutDescriptor",
"GpuBindGroupLayoutEntry",
"GpuBlendComponent",
"GpuBlendFactor",
"GpuBlendOperation",
"GpuBlendState",
"GpuBuffer",
"GpuBufferBinding",
"GpuBufferBindingLayout",
"GpuBufferBindingType",
"GpuBufferDescriptor",
"GpuBufferUsage",
"GpuCanvasContext",
"GpuColorDict",
"GpuColorTargetState",
"GpuColorWrite",
"GpuCommandBuffer",
"GpuCommandBufferDescriptor",
"GpuCommandEncoder",
"GpuCommandEncoderDescriptor",
"GpuCompareFunction",
"GpuCompilationInfo",
"GpuCompilationMessage",
"GpuCompilationMessageType",
"GpuComputePassDescriptor",
"GpuComputePassEncoder",
"GpuComputePipeline",
"GpuComputePipelineDescriptor",
"GpuCullMode",
"GpuDepthStencilState",
"GpuDevice",
"GpuDeviceDescriptor",
"GpuDeviceLostInfo",
"GpuDeviceLostReason",
"GpuErrorFilter",
"GpuExtent3dDict",
"GpuFeatureName",
"GpuFilterMode",
"GpuFragmentState",
"GpuFrontFace",
"GpuImageCopyBuffer",
"GpuImageCopyImageBitmap",
"GpuImageCopyTexture",
"GpuImageDataLayout",
"GpuIndexFormat",
"GpuInputStepMode",
"GpuLoadOp",
"GpuMapMode",
"GpuMultisampleState",
"GpuObjectDescriptorBase",
"GpuOrigin2dDict",
"GpuOrigin3dDict",
"GpuOutOfMemoryError",
"GpuPipelineDescriptorBase",
"GpuPipelineLayout",
"GpuPipelineLayoutDescriptor",
"GpuPipelineStatisticName",
"GpuPowerPreference",
"GpuPrimitiveState",
"GpuPrimitiveTopology",
"GpuProgrammableStage",
"GpuQuerySet",
"GpuQuerySetDescriptor",
"GpuQueryType",
"GpuQueue",
"GpuRenderBundle",
"GpuRenderBundleDescriptor",
"GpuRenderBundleEncoder",
"GpuRenderBundleEncoderDescriptor",
"GpuRenderPassColorAttachment",
"GpuRenderPassDepthStencilAttachment",
"GpuRenderPassDescriptor",
"GpuRenderPassEncoder",
"GpuRenderPipeline",
"GpuRenderPipelineDescriptor",
"GpuRequestAdapterOptions",
"GpuSampler",
"GpuSamplerBindingLayout",
"GpuSamplerBindingType",
"GpuSamplerDescriptor",
"GpuShaderModule",
"GpuShaderModuleDescriptor",
"GpuShaderStage",
"GpuStencilFaceState",
"GpuStencilOperation",
"GpuStorageTextureAccess",
"GpuStorageTextureBindingLayout",
"GpuStoreOp",
"GpuSwapChain",
"GpuSwapChainDescriptor",
"GpuTexture",
"GpuTextureAspect",
"GpuTextureBindingLayout",
"GpuTextureDescriptor",
"GpuTextureDimension",
"GpuTextureFormat",
"GpuTextureSampleType",
"GpuTextureUsage",
"GpuTextureView",
"GpuTextureViewDescriptor",
"GpuTextureViewDimension",
"GpuUncapturedErrorEvent",
"GpuUncapturedErrorEventInit",
"GpuValidationError",
"GpuVertexAttribute",
"GpuVertexBufferLayout",
"GpuVertexFormat",
"GpuVertexState",
"HtmlCanvasElement",
"Window",
]}
js-sys = "0.3.50"
wasm-bindgen-futures = "0.4.23"
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
console_error_panic_hook = "0.1.6"
console_log = "0.1.2"

373
wgpu/LICENSE Normal file
View File

@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

20
wgpu/Makefile Normal file
View File

@ -0,0 +1,20 @@
# This Makefile generates SPIR-V shaders from GLSL shaders in the examples.
shader_compiler = glslangValidator
# All input shaders.
glsls = $(wildcard examples/*/*.vert examples/*/*.frag examples/*/*.comp)
# All SPIR-V targets.
spirvs = $(addsuffix .spv,$(glsls))
.PHONY: default
default: $(spirvs)
# Rule for making a SPIR-V target.
$(spirvs): %.spv: %
$(shader_compiler) -V $< -o $@
.PHONY: clean
clean:
rm -f $(spirvs)

101
wgpu/README.md Normal file
View File

@ -0,0 +1,101 @@
<img align="right" width="25%" src="logo.png">
# wgpu-rs
[![Build Status](https://github.com/gfx-rs/wgpu-rs/workflows/CI/badge.svg?branch=master)](https://github.com/gfx-rs/wgpu-rs/actions)
[![Crates.io](https://img.shields.io/crates/v/wgpu.svg)](https://crates.io/crates/wgpu)
[![Docs.rs](https://docs.rs/wgpu/badge.svg)](https://docs.rs/wgpu)
[![Matrix](https://img.shields.io/badge/Dev_Matrix-%23wgpu%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu:matrix.org)
[![Matrix](https://img.shields.io/badge/User_Matrix-%23wgpu--users%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu-users:matrix.org)
wgpu-rs is an idiomatic Rust wrapper over [wgpu-core](https://github.com/gfx-rs/wgpu). It's designed to be suitable for general purpose graphics and computation needs of Rust community.
wgpu-rs can target both the natively supported backends and WASM directly.
See our [gallery](https://wgpu.rs/#showcase) and the [wiki page](https://github.com/gfx-rs/wgpu-rs/wiki/Applications-and-Libraries) for the list of libraries and applications using `wgpu-rs`.
## Usage
### How to Run Examples
All examples are located under the [examples](examples) directory.
These examples use the default syntax for running examples, as found in the [Cargo](https://doc.rust-lang.org/cargo/reference/manifest.html#examples) documentation. For example, to run the `cube` example:
```bash
cargo run --example cube
```
The `hello*` examples show bare-bones setup without any helper code. For `hello-compute`, pass 4 numbers separated by spaces as arguments:
```bash
cargo run --example hello-compute 1 2 3 4
```
The following environment variables can be used to configure how the framework examples run:
- `WGPU_BACKEND`
Options: `vulkan`, `metal`, `dx11`, `dx12`, `gl`, `webgpu`
If unset a default backend is chosen based on what is supported
by your system.
- `WGPU_POWER_PREF`
Options: `low`, `high`
If unset a low power adapter is preferred.
#### Run Examples on the Web (`wasm32-unknown-unknown`)
See [wiki article](https://github.com/gfx-rs/wgpu-rs/wiki/Running-on-the-Web-with-WebGPU-and-WebGL).
## Logging
`wgpu-core` uses `tracing` for logging and `wgpu-rs` uses `log` for logging.
### Simple Setup
If you just want log messages to show up and to use the chrome tracing infrastructure,
take a dependency on the `wgpu-subscriber` crate then call `initialize_default_subscriber`. It will
set up logging to stdout/stderr based on the `RUST_LOG` environment variable.
### Manual Conversion
`tracing` also has tools available to convert all `tracing` events into `log` events and vise versa.
#### `log` events -> `tracing` events
The `tracing_log` crate has a `log` logger to translate all events into `tracing` events. Call:
```rust
tracing_log::LogTracer::init().unwrap()
```
#### `tracing` events -> `log` events
The `tracing` crate has a `log` feature which will automatically use `log` if no subscriber is added:
```toml
tracing = { version = "0.1", features = ["log"] }
```
If you want events to be handled both by `tracing` and `log`, enable the `log-always` feature of `tracing`:
```toml
tracing = { version = "0.1", features = ["log-always"] }
```
## Development
If you need to test local fixes to gfx or other dependencies, the simplest way is to add a Cargo patch. For example, when working on DX12 backend on Windows, you can check out the latest release branch in the [gfx-hal repository](https://github.com/gfx-rs/gfx) (e.g. currently `hal-0.8`) and add this patch to the end of `Cargo.toml`:
```toml
[patch."https://github.com/gfx-rs/gfx"]
gfx-backend-dx12 = { path = "../gfx/src/backend/dx12" }
gfx-hal = { path = "../gfx/src/hal" }
```
If a version needs to be changed, you need to do `cargo update -p gfx-backend-dx12`.

7
wgpu/bors.toml Normal file
View File

@ -0,0 +1,7 @@
status = [
"build (macos-10.15)",
"build (ubuntu-18.04)",
"build (windows-2019)",
"wasm",
"docs",
]

56
wgpu/examples/README.md Normal file
View File

@ -0,0 +1,56 @@
## Structure
For the simplest examples without using any helping code (see `framework.rs` here), check out:
- `hello` for printing adapter information
- `hello-triangle` for graphics and presentation
- `hello-compute` for pure computing
Notably, `capture` example shows rendering without a surface/window. It reads back the contents and saves them to a file.
All the examples use [WGSL](https://gpuweb.github.io/gpuweb/wgsl.html) shaders unless specified otherwise.
All framework-based examples render to the window.
## Feature matrix
| Feature | boids | bunnymark | cube | mipmap | msaa-line | shadow | skybox | texture-arrays | water | conservative-raster |
| ---------------------------- | ------ | --------- | ------ | ------ | --------- | ------ | ------ | -------------- | ------ | ------------------- |
| vertex attributes | :star: | | :star: | | :star: | :star: | :star: | :star: | :star: | |
| instancing | :star: | | | | | | | | | |
| lines and points | | | | | :star: | | | | | :star: |
| dynamic buffer offsets | | :star: | | | | :star: | | | | |
| implicit layout | | | | :star: | | | | | | |
| sampled color textures | :star: | :star: | :star: | :star: | | | :star: | :star: | :star: | :star: |
| storage textures | :star: | | | | | | | | | |
| binding array | | | | | | | | :star: | | |
| comparison samplers | | | | | | :star: | | | | |
| subresource views | | | | :star: | | :star: | | | | |
| cubemaps | | | | | | | :star: | | | |
| multisampling | | | | | :star: | | | | | |
| off-screen rendering | | | | | | :star: | | | :star: | :star: |
| stencil testing | | | | | | | | | | |
| depth testing | | | | | | :star: | :star: | | :star: | |
| depth biasing | | | | | | :star: | | | | |
| read-only depth | | | | | | | | | :star: | |
| blending | | :star: | :star: | | | | | | :star: | |
| render bundles | | | | | :star: | | | | :star: | |
| compute passes | :star: | | | | | | | | | |
| *optional extensions* | | | | | | | | :star: | | |
| - SPIR-V shaders | | | | | | | | :star: | | |
| - binding indexing | | | | | | | | :star: | | |
| - push constants | | | | | | | | :star: | | |
| - depth clamping | | | | | | :star: | | | | |
| - compressed textures | | | | | | | :star: | | | |
| - polygon mode | | | :star: | | | | | | | |
| - queries | | | | :star: | | | | | | |
| - conservative rasterization | | | | | | | | | | :star: |
| *integrations* | | | | | | | | | | |
| - staging belt | | | | | | | | | | |
| - typed arena | | | | | | | | | | |
| - obj loading | | | | | | | :star: | | | |
## Hacking
You can record an API trace of any of the framework-based examples by starting them as:
```sh
mkdir -p trace && WGPU_TRACE=trace cargo run --features trace --example <example-name>
```

View File

@ -0,0 +1,13 @@
# boids
Flocking boids example with gpu compute update pass
## To Run
```
cargo run --example boids
```
## Screenshots
![Boids example](./screenshot.png)

View File

@ -0,0 +1,108 @@
// Boids flocking compute shader (legacy pre-1.0 WGSL syntax: [[block]],
// [[stride]], [[access(...)]] — matches the naga version this example targets).
// Reads last frame's particle state from `particlesSrc` and writes the updated
// state into `particlesDst` (double-buffered by the host code).
struct Particle {
  pos : vec2<f32>;
  vel : vec2<f32>;
};
// Simulation tuning parameters, uploaded once as a uniform buffer.
[[block]]
struct SimParams {
  deltaT : f32;
  rule1Distance : f32;
  rule2Distance : f32;
  rule3Distance : f32;
  rule1Scale : f32;
  rule2Scale : f32;
  rule3Scale : f32;
};
[[block]]
struct Particles {
  particles : [[stride(16)]] array<Particle>;
};
[[group(0), binding(0)]] var<uniform> params : SimParams;
[[group(0), binding(1)]] var<storage> particlesSrc : [[access(read)]] Particles;
[[group(0), binding(2)]] var<storage> particlesDst : [[access(read_write)]] Particles;
// https://github.com/austinEng/Project6-Vulkan-Flocking/blob/master/data/shaders/computeparticles/particle.comp
[[stage(compute), workgroup_size(64)]]
fn main([[builtin(global_invocation_id)]] global_invocation_id: vec3<u32>) {
  let total = arrayLength(&particlesSrc.particles);
  let index = global_invocation_id.x;
  // Guard against the padding invocations of the final workgroup.
  if (index >= total) {
    return;
  }
  var vPos : vec2<f32> = particlesSrc.particles[index].pos;
  var vVel : vec2<f32> = particlesSrc.particles[index].vel;
  // Accumulators for the three classic boids rules:
  // cohesion (cMass), separation (colVel) and alignment (cVel).
  var cMass : vec2<f32> = vec2<f32>(0.0, 0.0);
  var cVel : vec2<f32> = vec2<f32>(0.0, 0.0);
  var colVel : vec2<f32> = vec2<f32>(0.0, 0.0);
  var cMassCount : i32 = 0;
  var cVelCount : i32 = 0;
  var pos : vec2<f32>;
  var vel : vec2<f32>;
  var i : u32 = 0u;
  // O(n) scan over all other particles for every invocation (O(n^2) overall).
  loop {
    if (i >= total) {
      break;
    }
    if (i == index) {
      // Skip self.
      continue;
    }
    pos = particlesSrc.particles[i].pos;
    vel = particlesSrc.particles[i].vel;
    // Rule 1: cohesion — steer towards the local centre of mass.
    if (distance(pos, vPos) < params.rule1Distance) {
      cMass = cMass + pos;
      cMassCount = cMassCount + 1;
    }
    // Rule 2: separation — steer away from very close neighbours.
    if (distance(pos, vPos) < params.rule2Distance) {
      colVel = colVel - (pos - vPos);
    }
    // Rule 3: alignment — match the average velocity of neighbours.
    if (distance(pos, vPos) < params.rule3Distance) {
      cVel = cVel + vel;
      cVelCount = cVelCount + 1;
    }
    continuing {
      i = i + 1u;
    }
  }
  if (cMassCount > 0) {
    // Average neighbour position, expressed relative to this particle.
    cMass = cMass * (1.0 / f32(cMassCount)) - vPos;
  }
  if (cVelCount > 0) {
    cVel = cVel * (1.0 / f32(cVelCount));
  }
  vVel = vVel + (cMass * params.rule1Scale) +
      (colVel * params.rule2Scale) +
      (cVel * params.rule3Scale);
  // clamp velocity for a more pleasing simulation
  vVel = normalize(vVel) * clamp(length(vVel), 0.0, 0.1);
  // kinematic update
  vPos = vPos + (vVel * params.deltaT);
  // Wrap around boundary (NDC-style [-1, 1] space)
  if (vPos.x < -1.0) {
    vPos.x = 1.0;
  }
  if (vPos.x > 1.0) {
    vPos.x = -1.0;
  }
  if (vPos.y < -1.0) {
    vPos.y = 1.0;
  }
  if (vPos.y > 1.0) {
    vPos.y = -1.0;
  }
  // Write back
  particlesDst.particles[index].pos = vPos;
  particlesDst.particles[index].vel = vVel;
}

View File

@ -0,0 +1,18 @@
// Per-instance boid rendering: rotates the small triangle so it points along
// the particle's velocity, then translates it to the particle position.
// NOTE(review): both entry points are named `main`, distinguished by stage —
// accepted by the naga version this example targets.
[[stage(vertex)]]
fn main(
    [[location(0)]] particle_pos: vec2<f32>,
    [[location(1)]] particle_vel: vec2<f32>,
    [[location(2)]] position: vec2<f32>,
) -> [[builtin(position)]] vec4<f32> {
    // Heading angle derived from the velocity vector.
    let angle = -atan2(particle_vel.x, particle_vel.y);
    // Standard 2D rotation of the local vertex position by `angle`.
    let pos = vec2<f32>(
        position.x * cos(angle) - position.y * sin(angle),
        position.x * sin(angle) + position.y * cos(angle)
    );
    return vec4<f32>(pos + particle_pos, 0.0, 1.0);
}
// Flat white fill for every boid.
[[stage(fragment)]]
fn main() -> [[location(0)]] vec4<f32> {
    return vec4<f32>(1.0, 1.0, 1.0, 1.0);
}

326
wgpu/examples/boids/main.rs Normal file
View File

@ -0,0 +1,326 @@
// Flocking boids example with gpu compute update pass
// adapted from https://github.com/austinEng/webgpu-samples/blob/master/src/examples/computeBoids.ts
use rand::distributions::{Distribution, Uniform};
use std::{borrow::Cow, mem};
use wgpu::util::DeviceExt;
#[path = "../framework.rs"]
mod framework;
// number of boid particles to simulate
const NUM_PARTICLES: u32 = 1500;
// number of single-particle calculations (invocations) in each gpu work group
const PARTICLES_PER_GROUP: u32 = 64;
/// Example struct holds references to wgpu resources and frame persistent data
struct Example {
    particle_bind_groups: Vec<wgpu::BindGroup>, // one per ping-pong direction (src <-> dst)
    particle_buffers: Vec<wgpu::Buffer>, // two buffers of particle state, alternated each frame
    vertices_buffer: wgpu::Buffer, // the three local vertices of the boid triangle
    compute_pipeline: wgpu::ComputePipeline,
    render_pipeline: wgpu::RenderPipeline,
    work_group_count: u32, // dispatch size: ceil(NUM_PARTICLES / PARTICLES_PER_GROUP)
    frame_num: usize, // frame counter; parity selects the ping-pong buffer
}
impl framework::Example for Example {
    /// constructs initial instance of Example struct
    ///
    /// Builds the compute and render pipelines, uploads the simulation
    /// parameters and triangle vertices, and creates the two ping-pong
    /// particle buffers together with the bind groups that swap their
    /// src/dst roles each frame.
    fn init(
        sc_desc: &wgpu::SwapChainDescriptor,
        adapter: &wgpu::Adapter,
        device: &wgpu::Device,
        _queue: &wgpu::Queue,
    ) -> Self {
        // load and compile the shader
        let mut flags = wgpu::ShaderFlags::VALIDATION;
        match adapter.get_info().backend {
            // Extra shader translation checks are only enabled on these
            // backends; the rest fall through (see TODO).
            wgt::Backend::Vulkan | wgt::Backend::Metal | wgt::Backend::Gl => {
                flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION;
            }
            _ => {} //TODO
        }
        let compute_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
            label: None,
            source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("compute.wgsl"))),
            flags,
        });
        let draw_shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
            label: None,
            source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("draw.wgsl"))),
            flags,
        });
        // buffer for simulation parameters uniform
        // Field order must match the SimParams struct in compute.wgsl.
        let sim_param_data = [
            0.04f32, // deltaT
            0.1,     // rule1Distance
            0.025,   // rule2Distance
            0.025,   // rule3Distance
            0.02,    // rule1Scale
            0.05,    // rule2Scale
            0.005,   // rule3Scale
        ]
        .to_vec();
        let sim_param_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Simulation Parameter Buffer"),
            contents: bytemuck::cast_slice(&sim_param_data),
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        });
        // create compute bind layout group and compute pipeline layout
        // Binding 0: uniform params, binding 1: read-only src particles,
        // binding 2: read-write dst particles (matches compute.wgsl).
        let compute_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStage::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: wgpu::BufferSize::new(
                                (sim_param_data.len() * mem::size_of::<f32>()) as _,
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStage::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: true },
                            has_dynamic_offset: false,
                            // 16 bytes per particle: vec2 pos + vec2 vel.
                            min_binding_size: wgpu::BufferSize::new((NUM_PARTICLES * 16) as _),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStage::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: false },
                            has_dynamic_offset: false,
                            min_binding_size: wgpu::BufferSize::new((NUM_PARTICLES * 16) as _),
                        },
                        count: None,
                    },
                ],
                label: None,
            });
        let compute_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("compute"),
                bind_group_layouts: &[&compute_bind_group_layout],
                push_constant_ranges: &[],
            });
        // create render pipeline with empty bind group layout
        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("render"),
                bind_group_layouts: &[],
                push_constant_ranges: &[],
            });
        let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: None,
            layout: Some(&render_pipeline_layout),
            vertex: wgpu::VertexState {
                module: &draw_shader,
                entry_point: "main",
                buffers: &[
                    // Slot 0: per-instance particle state (pos, vel).
                    wgpu::VertexBufferLayout {
                        array_stride: 4 * 4,
                        step_mode: wgpu::InputStepMode::Instance,
                        attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x2],
                    },
                    // Slot 1: per-vertex triangle corner position.
                    wgpu::VertexBufferLayout {
                        array_stride: 2 * 4,
                        step_mode: wgpu::InputStepMode::Vertex,
                        attributes: &wgpu::vertex_attr_array![2 => Float32x2],
                    },
                ],
            },
            fragment: Some(wgpu::FragmentState {
                module: &draw_shader,
                entry_point: "main",
                targets: &[sc_desc.format.into()],
            }),
            primitive: wgpu::PrimitiveState::default(),
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
        });
        // create compute pipeline
        let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Compute pipeline"),
            layout: Some(&compute_pipeline_layout),
            module: &compute_shader,
            entry_point: "main",
        });
        // buffer for the three 2d triangle vertices of each instance
        let vertex_buffer_data = [-0.01f32, -0.02, 0.01, -0.02, 0.00, 0.02];
        let vertices_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: bytemuck::bytes_of(&vertex_buffer_data),
            usage: wgpu::BufferUsage::VERTEX | wgpu::BufferUsage::COPY_DST,
        });
        // buffer for all particles data of type [(posx,posy,velx,vely),...]
        // Positions and velocities are randomized; velocities are scaled down.
        let mut initial_particle_data = vec![0.0f32; (4 * NUM_PARTICLES) as usize];
        let mut rng = rand::thread_rng();
        let unif = Uniform::new_inclusive(-1.0, 1.0);
        for particle_instance_chunk in initial_particle_data.chunks_mut(4) {
            particle_instance_chunk[0] = unif.sample(&mut rng); // posx
            particle_instance_chunk[1] = unif.sample(&mut rng); // posy
            particle_instance_chunk[2] = unif.sample(&mut rng) * 0.1; // velx
            particle_instance_chunk[3] = unif.sample(&mut rng) * 0.1; // vely
        }
        // creates two buffers of particle data each of size NUM_PARTICLES
        // the two buffers alternate as dst and src for each frame
        let mut particle_buffers = Vec::<wgpu::Buffer>::new();
        let mut particle_bind_groups = Vec::<wgpu::BindGroup>::new();
        for i in 0..2 {
            particle_buffers.push(
                device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: Some(&format!("Particle Buffer {}", i)),
                    contents: bytemuck::cast_slice(&initial_particle_data),
                    // VERTEX: read as instance data, STORAGE: written by compute.
                    usage: wgpu::BufferUsage::VERTEX
                        | wgpu::BufferUsage::STORAGE
                        | wgpu::BufferUsage::COPY_DST,
                }),
            );
        }
        // create two bind groups, one for each buffer as the src
        // where the alternate buffer is used as the dst
        for i in 0..2 {
            particle_bind_groups.push(device.create_bind_group(&wgpu::BindGroupDescriptor {
                layout: &compute_bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: sim_param_buffer.as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: particle_buffers[i].as_entire_binding(),
                    },
                    wgpu::BindGroupEntry {
                        binding: 2,
                        resource: particle_buffers[(i + 1) % 2].as_entire_binding(), // bind to opposite buffer
                    },
                ],
                label: None,
            }));
        }
        // calculates number of work groups from PARTICLES_PER_GROUP constant
        let work_group_count =
            ((NUM_PARTICLES as f32) / (PARTICLES_PER_GROUP as f32)).ceil() as u32;
        // returns Example struct and No encoder commands
        Example {
            particle_bind_groups,
            particle_buffers,
            vertices_buffer,
            compute_pipeline,
            render_pipeline,
            work_group_count,
            frame_num: 0,
        }
    }
    /// update is called for any WindowEvent not handled by the framework
    fn update(&mut self, _event: winit::event::WindowEvent) {
        //empty
    }
    /// resize is called on WindowEvent::Resized events
    fn resize(
        &mut self,
        _sc_desc: &wgpu::SwapChainDescriptor,
        _device: &wgpu::Device,
        _queue: &wgpu::Queue,
    ) {
        //empty
    }
    /// render is called each frame, dispatching compute groups proportional
    /// a TriangleList draw call for all NUM_PARTICLES at 3 vertices each
    ///
    /// The compute pass updates the particles in the "opposite" buffer,
    /// which the render pass then reads as instance data.
    fn render(
        &mut self,
        frame: &wgpu::SwapChainTexture,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        _spawner: &framework::Spawner,
    ) {
        // create render pass descriptor and its color attachments
        let color_attachments = [wgpu::RenderPassColorAttachment {
            view: &frame.view,
            resolve_target: None,
            ops: wgpu::Operations {
                load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                store: true,
            },
        }];
        let render_pass_descriptor = wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &color_attachments,
            depth_stencil_attachment: None,
        };
        // get command encoder
        let mut command_encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        command_encoder.push_debug_group("compute boid movement");
        {
            // compute pass
            let mut cpass =
                command_encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
            cpass.set_pipeline(&self.compute_pipeline);
            // frame_num parity picks which buffer is src and which is dst.
            cpass.set_bind_group(0, &self.particle_bind_groups[self.frame_num % 2], &[]);
            cpass.dispatch(self.work_group_count, 1, 1);
        }
        command_encoder.pop_debug_group();
        command_encoder.push_debug_group("render boids");
        {
            // render pass
            let mut rpass = command_encoder.begin_render_pass(&render_pass_descriptor);
            rpass.set_pipeline(&self.render_pipeline);
            // render dst particles
            rpass.set_vertex_buffer(0, self.particle_buffers[(self.frame_num + 1) % 2].slice(..));
            // the three instance-local vertices
            rpass.set_vertex_buffer(1, self.vertices_buffer.slice(..));
            rpass.draw(0..3, 0..NUM_PARTICLES);
        }
        command_encoder.pop_debug_group();
        // update frame count
        self.frame_num += 1;
        // done
        queue.submit(Some(command_encoder.finish()));
    }
}
/// run example
fn main() {
    // Hand control to the shared example framework, which creates the window
    // and device and then drives the `framework::Example` impl above.
    framework::run::<Example>("boids");
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 158 KiB

View File

@ -0,0 +1,354 @@
use bytemuck::{Pod, Zeroable};
use std::{borrow::Cow, mem};
use wgpu::util::DeviceExt;
#[path = "../framework.rs"]
mod framework;
// Capacity of the preallocated per-bunny uniform buffer (one slot per bunny).
const MAX_BUNNIES: usize = 1 << 20;
// Rendered quad size, in the same units as the ortho projection (pixels).
const BUNNY_SIZE: f32 = 0.15 * 256.0;
// Downward acceleration applied each simulated tick.
const GRAVITY: f32 = -9.8 * 100.0;
// Bound for the random horizontal spawn speed.
const MAX_VELOCITY: f32 = 750.0;
/// Uniform data shared by all bunnies; uploaded once at init.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct Globals {
    mvp: [[f32; 4]; 4], // orthographic projection covering the window
    size: [f32; 2],     // quad size consumed by the vertex shader
    pad: [f32; 2],      // explicit padding for uniform-buffer layout
}
/// Per-bunny uniform data, bound with a dynamic offset.
///
/// `align(256)` pads each element to one dynamic-offset slot; `render()`
/// relies on this matching `wgpu::BIND_BUFFER_ALIGNMENT` (assumed == 256 —
/// see the unsafe cast there). The tail padding is why `Pod` is not derived.
#[repr(C, align(256))]
#[derive(Clone, Copy, Zeroable)]
struct Locals {
    position: [f32; 2],
    velocity: [f32; 2],
    color: u32,  // packed 8-bit channels, unpacked in the shader
    _pad: u32,
}
/// Example struct holds references to wgpu resources and frame persistent data
struct Example {
    global_group: wgpu::BindGroup, // globals uniform + texture + sampler
    local_group: wgpu::BindGroup,  // dynamic-offset view into `local_buffer`
    pipeline: wgpu::RenderPipeline,
    bunnies: Vec<Locals>,          // CPU-side simulation state
    local_buffer: wgpu::Buffer,    // GPU mirror of `bunnies`, one aligned slot each
    extent: [u32; 2],              // current surface size in pixels
}
impl framework::Example for Example {
    /// Builds the render pipeline, decodes and uploads the logo texture,
    /// and creates the global/local uniform buffers and bind groups.
    fn init(
        sc_desc: &wgpu::SwapChainDescriptor,
        _adapter: &wgpu::Adapter,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
    ) -> Self {
        let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
            label: None,
            source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
            flags: wgpu::ShaderFlags::all(),
        });
        // Group 0: per-frame globals uniform, the bunny texture, its sampler.
        let global_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStage::VERTEX,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: wgpu::BufferSize::new(mem::size_of::<Globals>() as _),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                            view_dimension: wgpu::TextureViewDimension::D2,
                            multisampled: false,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Sampler {
                            filtering: true,
                            comparison: false,
                        },
                        count: None,
                    },
                ],
                label: None,
            });
        // Group 1: per-bunny locals, selected via dynamic offset at draw time.
        let local_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: true,
                        min_binding_size: wgpu::BufferSize::new(mem::size_of::<Locals>() as _),
                    },
                    count: None,
                }],
                label: None,
            });
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: None,
            bind_group_layouts: &[&global_bind_group_layout, &local_bind_group_layout],
            push_constant_ranges: &[],
        });
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: None,
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "vs_main",
                // No vertex buffers: quad corners come from `vertex_index`.
                buffers: &[],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: "fs_main",
                targets: &[wgpu::ColorTargetState {
                    format: sc_desc.format,
                    blend: Some(wgpu::BlendState::ALPHA_BLENDING),
                    write_mask: wgpu::ColorWrite::default(),
                }],
            }),
            primitive: wgpu::PrimitiveState {
                // 4-vertex strip forms the bunny quad (see `draw(0..4, ..)`).
                topology: wgpu::PrimitiveTopology::TriangleStrip,
                ..wgpu::PrimitiveState::default()
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
        });
        // Decode the bundled PNG and upload it through the queue.
        let texture = {
            let img_data = include_bytes!("../../logo.png");
            let decoder = png::Decoder::new(std::io::Cursor::new(img_data));
            let (info, mut reader) = decoder.read_info().unwrap();
            let mut buf = vec![0; info.buffer_size()];
            reader.next_frame(&mut buf).unwrap();
            let size = wgpu::Extent3d {
                width: info.width,
                height: info.height,
                depth_or_array_layers: 1,
            };
            let texture = device.create_texture(&wgpu::TextureDescriptor {
                label: None,
                size,
                mip_level_count: 1,
                sample_count: 1,
                dimension: wgpu::TextureDimension::D2,
                format: wgpu::TextureFormat::Rgba8UnormSrgb,
                usage: wgpu::TextureUsage::COPY_DST | wgpu::TextureUsage::SAMPLED,
            });
            queue.write_texture(
                wgpu::ImageCopyTexture {
                    texture: &texture,
                    mip_level: 0,
                    origin: wgpu::Origin3d::ZERO,
                },
                &buf,
                wgpu::ImageDataLayout {
                    offset: 0,
                    // 4 bytes per RGBA8 pixel.
                    bytes_per_row: std::num::NonZeroU32::new(info.width * 4),
                    rows_per_image: None,
                },
                size,
            );
            texture
        };
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: None,
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });
        // Pixel-space orthographic projection: origin bottom-left.
        let globals = Globals {
            mvp: cgmath::ortho(
                0.0,
                sc_desc.width as f32,
                0.0,
                sc_desc.height as f32,
                -1.0,
                1.0,
            )
            .into(),
            size: [BUNNY_SIZE; 2],
            pad: [0.0; 2],
        };
        let global_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("global"),
            contents: bytemuck::bytes_of(&globals),
            usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
        });
        // One aligned slot per potential bunny; refilled every frame.
        let local_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("local"),
            size: (MAX_BUNNIES as wgpu::BufferAddress) * wgpu::BIND_BUFFER_ALIGNMENT,
            usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::UNIFORM,
            mapped_at_creation: false,
        });
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let global_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &global_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: global_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&view),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
            label: None,
        });
        // Binds a single `Locals`-sized window; the dynamic offset moves it.
        let local_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &local_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                    buffer: &local_buffer,
                    offset: 0,
                    size: wgpu::BufferSize::new(mem::size_of::<Locals>() as _),
                }),
            }],
            label: None,
        });
        Example {
            pipeline,
            global_group,
            local_group,
            bunnies: Vec::new(),
            local_buffer,
            extent: [sc_desc.width, sc_desc.height],
        }
    }
    /// Spawns a batch of bunnies (64 plus half the current population)
    /// whenever the space bar is pressed.
    fn update(&mut self, event: winit::event::WindowEvent) {
        if let winit::event::WindowEvent::KeyboardInput {
            input:
                winit::event::KeyboardInput {
                    virtual_keycode: Some(winit::event::VirtualKeyCode::Space),
                    state: winit::event::ElementState::Pressed,
                    ..
                },
            ..
        } = event
        {
            let spawn_count = 64 + self.bunnies.len() / 2;
            // The whole batch shares one random color.
            let color = rand::random::<u32>();
            println!(
                "Spawning {} bunnies, total at {}",
                spawn_count,
                self.bunnies.len() + spawn_count
            );
            for _ in 0..spawn_count {
                // Random horizontal speed in [-MAX_VELOCITY/2, MAX_VELOCITY/2).
                let speed = rand::random::<f32>() * MAX_VELOCITY - (MAX_VELOCITY * 0.5);
                self.bunnies.push(Locals {
                    position: [0.0, 0.5 * (self.extent[1] as f32)],
                    velocity: [speed, 0.0],
                    color,
                    _pad: 0,
                });
            }
        }
    }
    fn resize(
        &mut self,
        _sc_desc: &wgpu::SwapChainDescriptor,
        _device: &wgpu::Device,
        _queue: &wgpu::Queue,
    ) {
        //empty
    }
    /// Steps the bounce simulation on the CPU (fixed 10 ms timestep),
    /// uploads all bunny slots, then issues one draw per bunny with the
    /// matching dynamic offset.
    fn render(
        &mut self,
        frame: &wgpu::SwapChainTexture,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        _spawner: &framework::Spawner,
    ) {
        let delta = 0.01;
        for bunny in self.bunnies.iter_mut() {
            bunny.position[0] += bunny.velocity[0] * delta;
            bunny.position[1] += bunny.velocity[1] * delta;
            bunny.velocity[1] += GRAVITY * delta;
            // Reflect off the left/right window edges.
            if (bunny.velocity[0] > 0.0
                && bunny.position[0] + 0.5 * BUNNY_SIZE > self.extent[0] as f32)
                || (bunny.velocity[0] < 0.0 && bunny.position[0] - 0.5 * BUNNY_SIZE < 0.0)
            {
                bunny.velocity[0] *= -1.0;
            }
            // Bounce off the floor.
            if bunny.velocity[1] < 0.0 && bunny.position[1] < 0.5 * BUNNY_SIZE {
                bunny.velocity[1] *= -1.0;
            }
        }
        // Reinterpret the Vec<Locals> as raw bytes. This relies on
        // size_of::<Locals>() (256 via repr align) equaling
        // wgpu::BIND_BUFFER_ALIGNMENT — assumed 256; verify if either changes.
        queue.write_buffer(&self.local_buffer, 0, unsafe {
            std::slice::from_raw_parts(
                self.bunnies.as_ptr() as *const u8,
                self.bunnies.len() * wgpu::BIND_BUFFER_ALIGNMENT as usize,
            )
        });
        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
        {
            let clear_color = wgpu::Color {
                r: 0.1,
                g: 0.2,
                b: 0.3,
                a: 1.0,
            };
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[wgpu::RenderPassColorAttachment {
                    view: &frame.view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(clear_color),
                        store: true,
                    },
                }],
                depth_stencil_attachment: None,
            });
            rpass.set_pipeline(&self.pipeline);
            rpass.set_bind_group(0, &self.global_group, &[]);
            // One quad per bunny, selecting its uniform slot by dynamic offset.
            for i in 0..self.bunnies.len() {
                let offset = (i as wgpu::DynamicOffset)
                    * (wgpu::BIND_BUFFER_ALIGNMENT as wgpu::DynamicOffset);
                rpass.set_bind_group(1, &self.local_group, &[offset]);
                rpass.draw(0..4, 0..1);
            }
        }
        queue.submit(Some(encoder.finish()));
    }
}
/// Entry point: run the bunnymark example through the shared framework.
fn main() {
    framework::run::<Example>("bunnymark");
}

View File

@ -0,0 +1,43 @@
// Bunnymark shader: one textured, tinted quad per bunny.
// `Locals` is bound at group(1) with a dynamic offset — one slot per bunny.
[[block]]
struct Globals {
    mvp: mat4x4<f32>;
    size: vec2<f32>;
};
[[block]]
struct Locals {
    position: vec2<f32>;
    velocity: vec2<f32>;
    color: u32;
};
[[group(0), binding(0)]]
var<uniform> globals: Globals;
[[group(1), binding(0)]]
var<uniform> locals: Locals;
struct VertexOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(0)]] tex_coords: vec2<f32>;
    [[location(1)]] color: vec4<f32>;
};
[[stage(vertex)]]
fn vs_main([[builtin(vertex_index)]] vi: u32) -> VertexOutput {
    // Map strip vertex indices 0..3 to quad corners (0,0),(1,0),(0,1),(1,1).
    let tc = vec2<f32>(f32(vi & 1u), 0.5 * f32(vi & 2u));
    let offset = vec2<f32>(tc.x * globals.size.x, tc.y * globals.size.y);
    let pos = globals.mvp * vec4<f32>(locals.position + offset, 0.0, 1.0);
    // Unpack the four 8-bit channels (r in the low byte) into normalized f32s.
    let color = vec4<f32>((vec4<u32>(locals.color) >> vec4<u32>(0u, 8u, 16u, 24u)) & vec4<u32>(255u)) / 255.0;
    return VertexOutput(pos, tc, color);
}
[[group(0), binding(1)]]
var texture: texture_2d<f32>;
[[group(0), binding(2)]]
var sampler: sampler;
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    // Tint the texture sample (mip level 0) with the per-bunny color.
    return in.color * textureSampleLevel(texture, sampler, in.tex_coords, 0.0);
}

View File

@ -0,0 +1,18 @@
# capture
This example shows how to capture an image by rendering it to a texture, copying the texture to
a buffer, and retrieving it from the buffer.
This could be used for "taking a screenshot," with the added benefit that this method doesn't
require a window to be created.
## To Run
```
cargo run --example capture
open examples/capture/red.png
```
## Screenshots
![Capture example](./screenshot.png)

View File

@ -0,0 +1,248 @@
use std::env;
/// This example shows how to capture an image by rendering it to a texture, copying the texture to
/// a buffer, and retrieving it from the buffer. This could be used for "taking a screenshot," with
/// the added benefit that this method doesn't require a window to be created.
use std::fs::File;
use std::io::Write;
use std::mem::size_of;
use wgpu::{Buffer, Device};
/// Entry point for the capture example.
///
/// Parses an optional `width height` pair from the command line (falling back
/// to 100x200), renders a red image of that size into a buffer, and writes it
/// out as a PNG at `png_output_path`.
async fn run(png_output_path: &str) {
    let args: Vec<_> = env::args().collect();
    // args.len() is 0 on wasm and 1 on desktop when no size was supplied.
    let (width, height): (usize, usize) = if args.len() <= 1 {
        (100, 200)
    } else if args.len() == 3 {
        (args[1].parse().unwrap(), args[2].parse().unwrap())
    } else {
        println!("Incorrect number of arguments, possible usages:");
        println!("* 0 arguments - uses default width and height of (100, 200)");
        println!("* 2 arguments - uses specified width and height values");
        return;
    };
    let (device, buffer, buffer_dimensions) = create_red_image_with_dimensions(width, height).await;
    create_png(png_output_path, device, buffer, &buffer_dimensions).await;
}
/// Renders a solid-red `width` x `height` RGBA8 image on the GPU and copies it
/// into a CPU-mappable buffer.
///
/// Returns the device (needed later for polling), the readback buffer, and the
/// row-padding bookkeeping required to interpret the buffer contents.
async fn create_red_image_with_dimensions(
    width: usize,
    height: usize,
) -> (Device, Buffer, BufferDimensions) {
    let adapter = wgpu::Instance::new(wgpu::BackendBit::PRIMARY)
        .request_adapter(&wgpu::RequestAdapterOptions::default())
        .await
        .unwrap();
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                features: wgpu::Features::empty(),
                limits: wgpu::Limits::default(),
            },
            None,
        )
        .await
        .unwrap();
    // It is a WebGPU requirement that ImageCopyBuffer.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0
    // So we calculate padded_bytes_per_row by rounding unpadded_bytes_per_row
    // up to the next multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT.
    // https://en.wikipedia.org/wiki/Data_structure_alignment#Computing_padding
    let buffer_dimensions = BufferDimensions::new(width, height);
    // The output buffer lets us retrieve the data as an array
    let output_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: None,
        size: (buffer_dimensions.padded_bytes_per_row * buffer_dimensions.height) as u64,
        usage: wgpu::BufferUsage::MAP_READ | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    let texture_extent = wgpu::Extent3d {
        width: buffer_dimensions.width as u32,
        height: buffer_dimensions.height as u32,
        depth_or_array_layers: 1,
    };
    // The render pipeline renders data into this texture
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        size: texture_extent,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb,
        usage: wgpu::TextureUsage::RENDER_ATTACHMENT | wgpu::TextureUsage::COPY_SRC,
        label: None,
    });
    // Set the background to be red
    let command_buffer = {
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        // The pass does no drawing — the clear-on-load alone paints the texture.
        encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[wgpu::RenderPassColorAttachment {
                view: &texture.create_view(&wgpu::TextureViewDescriptor::default()),
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color::RED),
                    store: true,
                },
            }],
            depth_stencil_attachment: None,
        });
        // Copy the data from the texture to the buffer
        encoder.copy_texture_to_buffer(
            wgpu::ImageCopyTexture {
                texture: &texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            wgpu::ImageCopyBuffer {
                buffer: &output_buffer,
                layout: wgpu::ImageDataLayout {
                    offset: 0,
                    bytes_per_row: Some(
                        std::num::NonZeroU32::new(buffer_dimensions.padded_bytes_per_row as u32)
                            .unwrap(),
                    ),
                    rows_per_image: None,
                },
            },
            texture_extent,
        );
        encoder.finish()
    };
    queue.submit(Some(command_buffer));
    (device, output_buffer, buffer_dimensions)
}
/// Maps `output_buffer`, strips the per-row padding, and encodes the pixels as
/// a PNG at `png_output_path`. On wasm (no filesystem) it returns early
/// without writing anything.
async fn create_png(
    png_output_path: &str,
    device: Device,
    output_buffer: Buffer,
    buffer_dimensions: &BufferDimensions,
) {
    // Note that we're not calling `.await` here.
    let buffer_slice = output_buffer.slice(..);
    let buffer_future = buffer_slice.map_async(wgpu::MapMode::Read);
    // Poll the device in a blocking manner so that our future resolves.
    // In an actual application, `device.poll(...)` should
    // be called in an event loop or on another thread.
    device.poll(wgpu::Maintain::Wait);
    // If a file system is available, write the buffer as a PNG
    let has_file_system_available = cfg!(not(target_arch = "wasm32"));
    if !has_file_system_available {
        return;
    }
    if let Ok(()) = buffer_future.await {
        let padded_buffer = buffer_slice.get_mapped_range();
        let mut png_encoder = png::Encoder::new(
            File::create(png_output_path).unwrap(),
            buffer_dimensions.width as u32,
            buffer_dimensions.height as u32,
        );
        png_encoder.set_depth(png::BitDepth::Eight);
        png_encoder.set_color(png::ColorType::RGBA);
        // Stream writer sized to one unpadded row so rows can be written as-is.
        let mut png_writer = png_encoder
            .write_header()
            .unwrap()
            .into_stream_writer_with_size(buffer_dimensions.unpadded_bytes_per_row);
        // from the padded_buffer we write just the unpadded bytes into the image
        for chunk in padded_buffer.chunks(buffer_dimensions.padded_bytes_per_row) {
            png_writer
                .write_all(&chunk[..buffer_dimensions.unpadded_bytes_per_row])
                .unwrap();
        }
        png_writer.finish().unwrap();
        // With the current interface, we have to make sure all mapped views are
        // dropped before we unmap the buffer.
        drop(padded_buffer);
        output_buffer.unmap();
    }
}
/// Row-layout bookkeeping for copying a texture into a mappable buffer.
struct BufferDimensions {
    width: usize,
    height: usize,
    unpadded_bytes_per_row: usize, // tight row size: width * 4 bytes (RGBA8)
    padded_bytes_per_row: usize,   // row size rounded up to COPY_BYTES_PER_ROW_ALIGNMENT
}
impl BufferDimensions {
    /// Computes both the tight (`unpadded_bytes_per_row`) and the
    /// WebGPU-aligned (`padded_bytes_per_row`) row sizes for an RGBA8
    /// image of the given dimensions.
    fn new(width: usize, height: usize) -> Self {
        // Four bytes per RGBA8 pixel.
        let bytes_per_pixel = size_of::<u32>();
        let unpadded_bytes_per_row = width * bytes_per_pixel;
        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;
        // Round the row size up to the next multiple of the copy alignment.
        let padded_bytes_per_row = (unpadded_bytes_per_row + align - 1) / align * align;
        Self {
            width,
            height,
            unpadded_bytes_per_row,
            padded_bytes_per_row,
        }
    }
}
/// Platform-specific entry point for the capture example.
fn main() {
    // Native: set up env_logger and block on the async run.
    #[cfg(not(target_arch = "wasm32"))]
    {
        env_logger::init();
        pollster::block_on(run("red.png"));
    }
    // Web: route panics/logs to the browser console and spawn the future on
    // the browser event loop (create_png exits early there — no filesystem).
    #[cfg(target_arch = "wasm32")]
    {
        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
        console_log::init().expect("could not initialize logger");
        wasm_bindgen_futures::spawn_local(run("red.png"));
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use wgpu::BufferView;
    /// End-to-end check: the generated readback buffer is entirely red.
    #[test]
    fn ensure_generated_data_matches_expected() {
        pollster::block_on(assert_generated_data_matches_expected());
    }
    async fn assert_generated_data_matches_expected() {
        let (device, output_buffer, dimensions) =
            create_red_image_with_dimensions(100usize, 200usize).await;
        let buffer_slice = output_buffer.slice(..);
        let buffer_future = buffer_slice.map_async(wgpu::MapMode::Read);
        // Blocking poll so the map future can resolve in this test context.
        device.poll(wgpu::Maintain::Wait);
        buffer_future
            .await
            .expect("failed to map buffer slice for capture test");
        let padded_buffer = buffer_slice.get_mapped_range();
        let expected_buffer_size = dimensions.padded_bytes_per_row * dimensions.height;
        assert_eq!(padded_buffer.len(), expected_buffer_size);
        assert_that_content_is_all_red(&dimensions, padded_buffer);
    }
    /// Asserts every unpadded pixel in the mapped buffer is opaque red,
    /// ignoring the per-row alignment padding.
    fn assert_that_content_is_all_red(dimensions: &BufferDimensions, padded_buffer: BufferView) {
        let red = [0xFFu8, 0, 0, 0xFFu8];
        let single_rgba = 4;
        padded_buffer
            .chunks(dimensions.padded_bytes_per_row)
            .map(|padded_buffer_row| &padded_buffer_row[..dimensions.unpadded_bytes_per_row])
            .for_each(|unpadded_row| {
                unpadded_row
                    .chunks(single_rgba)
                    .for_each(|chunk| assert_eq!(chunk, &red))
            });
    }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 565 B

View File

@ -0,0 +1,20 @@
# conservative-raster
This example shows how to render with conservative rasterization (native extension with limited support).
When enabled, any pixel touched by a triangle primitive is rasterized.
This is useful for various advanced techniques, most prominently for implementing realtime voxelization.
The demonstration here is implemented by rendering a triangle to a low-resolution target and then upscaling it with nearest-neighbor filtering.
The outlines of the triangle are then rendered at the original resolution, using the same vertex shader as the triangle.
Pixels that are only drawn when conservative rasterization is enabled are depicted in red.
## To Run
```
cargo run --example conservative-raster
```
## Screenshots
![Conservative-raster window](./screenshot.png)

View File

@ -0,0 +1,316 @@
#[path = "../framework.rs"]
mod framework;
use std::borrow::Cow;
/// Color format of the low-resolution offscreen render target.
const RENDER_TARGET_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
/// Per-example state: the low-resolution offscreen target and the pipelines
/// that draw into it and upscale it.
struct Example {
    // View of the low-resolution texture the triangle passes render into.
    low_res_target: wgpu::TextureView,
    // Binds `low_res_target` plus a nearest-neighbor sampler for upscaling.
    bind_group_upscale: wgpu::BindGroup,
    // Draws the triangle with conservative rasterization (red).
    pipeline_triangle_conservative: wgpu::RenderPipeline,
    // Draws the same triangle with regular rasterization (blue) on top.
    pipeline_triangle_regular: wgpu::RenderPipeline,
    // Blits the low-res target to the swap chain.
    pipeline_upscale: wgpu::RenderPipeline,
    // Outline pass; `None` when NON_FILL_POLYGON_MODE is unsupported.
    pipeline_lines: Option<wgpu::RenderPipeline>,
    // Kept so `resize` can rebuild the bind group for a recreated target.
    bind_group_layout_upscale: wgpu::BindGroupLayout,
}
impl Example {
    /// Creates the low-resolution render target (1/16th of the swap chain
    /// size on each axis) together with the bind group that samples it with
    /// nearest-neighbor filtering during the upscale pass.
    fn create_low_res_target(
        sc_desc: &wgpu::SwapChainDescriptor,
        device: &wgpu::Device,
        bind_group_layout_upscale: &wgpu::BindGroupLayout,
    ) -> (wgpu::TextureView, wgpu::BindGroup) {
        let texture_view = device
            .create_texture(&wgpu::TextureDescriptor {
                label: Some("Low Resolution Target"),
                size: wgpu::Extent3d {
                    // Downscale both axes. Previously the height was derived
                    // from `sc_desc.width`, which distorted the target on
                    // non-square windows.
                    width: sc_desc.width / 16,
                    height: sc_desc.height / 16,
                    depth_or_array_layers: 1,
                },
                mip_level_count: 1,
                sample_count: 1,
                dimension: wgpu::TextureDimension::D2,
                format: RENDER_TARGET_FORMAT,
                usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::RENDER_ATTACHMENT,
            })
            .create_view(&Default::default());
        // Nearest-neighbor sampling keeps the low-res pixels sharp when upscaled.
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("Nearest Neighbor Sampler"),
            mag_filter: wgpu::FilterMode::Nearest,
            min_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("upscale bind group"),
            layout: bind_group_layout_upscale,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&texture_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
        });
        (texture_view, bind_group)
    }
}
impl framework::Example for Example {
    /// Conservative rasterization is what this example demonstrates, so it
    /// is required rather than optional.
    fn required_features() -> wgpu::Features {
        wgpu::Features::CONSERVATIVE_RASTERIZATION
    }
    /// Line polygon mode is only needed for the optional outline pass.
    fn optional_features() -> wgpu::Features {
        wgpu::Features::NON_FILL_POLYGON_MODE
    }
    /// Builds all pipelines: conservative and regular triangle pipelines
    /// rendering into the low-res target, an upscale pipeline sampling that
    /// target into the swap chain, and (if the adapter supports line polygon
    /// mode) a pipeline that re-draws the triangle outline at full resolution.
    fn init(
        sc_desc: &wgpu::SwapChainDescriptor,
        adapter: &wgpu::Adapter,
        device: &wgpu::Device,
        _queue: &wgpu::Queue,
    ) -> Self {
        // The triangle passes bind no resources, so they share an empty layout.
        let pipeline_layout_empty =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: None,
                bind_group_layouts: &[],
                push_constant_ranges: &[],
            });
        let shader_triangle_and_lines =
            device.create_shader_module(&wgpu::ShaderModuleDescriptor {
                label: None,
                source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!(
                    "triangle_and_lines.wgsl"
                ))),
                flags: wgpu::ShaderFlags::all(),
            });
        let pipeline_triangle_conservative =
            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some("Conservative Rasterization"),
                layout: Some(&pipeline_layout_empty),
                vertex: wgpu::VertexState {
                    module: &shader_triangle_and_lines,
                    entry_point: "vs_main",
                    buffers: &[],
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_triangle_and_lines,
                    entry_point: "fs_main_red",
                    targets: &[RENDER_TARGET_FORMAT.into()],
                }),
                // `conservative: true` rasterizes every pixel the triangle
                // touches, not only pixels whose center is covered.
                primitive: wgpu::PrimitiveState {
                    conservative: true,
                    ..Default::default()
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState::default(),
            });
        let pipeline_triangle_regular =
            device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: Some("Regular Rasterization"),
                layout: Some(&pipeline_layout_empty),
                vertex: wgpu::VertexState {
                    module: &shader_triangle_and_lines,
                    entry_point: "vs_main",
                    buffers: &[],
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader_triangle_and_lines,
                    entry_point: "fs_main_blue",
                    targets: &[RENDER_TARGET_FORMAT.into()],
                }),
                primitive: wgpu::PrimitiveState::default(),
                depth_stencil: None,
                multisample: wgpu::MultisampleState::default(),
            });
        // The outline pipeline needs line polygon mode, which is optional;
        // skip it gracefully when the adapter lacks the feature.
        let pipeline_lines = if adapter
            .features()
            .contains(wgpu::Features::NON_FILL_POLYGON_MODE)
        {
            Some(
                device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                    label: Some("Lines"),
                    layout: Some(&pipeline_layout_empty),
                    vertex: wgpu::VertexState {
                        module: &shader_triangle_and_lines,
                        entry_point: "vs_main",
                        buffers: &[],
                    },
                    fragment: Some(wgpu::FragmentState {
                        module: &shader_triangle_and_lines,
                        entry_point: "fs_main_white",
                        targets: &[sc_desc.format.into()],
                    }),
                    primitive: wgpu::PrimitiveState {
                        polygon_mode: wgpu::PolygonMode::Line,
                        topology: wgpu::PrimitiveTopology::LineStrip,
                        ..Default::default()
                    },
                    depth_stencil: None,
                    multisample: wgpu::MultisampleState::default(),
                }),
            )
        } else {
            None
        };
        let (pipeline_upscale, bind_group_layout_upscale) = {
            // The upscale pass samples the low-res target with a
            // non-filtering sampler, hence `filterable: false` and
            // `filtering: false` below.
            let bind_group_layout =
                device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    label: Some("upscale bindgroup"),
                    entries: &[
                        wgpu::BindGroupLayoutEntry {
                            binding: 0,
                            visibility: wgpu::ShaderStage::FRAGMENT,
                            ty: wgpu::BindingType::Texture {
                                sample_type: wgpu::TextureSampleType::Float { filterable: false },
                                view_dimension: wgpu::TextureViewDimension::D2,
                                multisampled: false,
                            },
                            count: None,
                        },
                        wgpu::BindGroupLayoutEntry {
                            binding: 1,
                            visibility: wgpu::ShaderStage::FRAGMENT,
                            ty: wgpu::BindingType::Sampler {
                                filtering: false,
                                comparison: false,
                            },
                            count: None,
                        },
                    ],
                });
            let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: None,
                bind_group_layouts: &[&bind_group_layout],
                push_constant_ranges: &[],
            });
            let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
                label: None,
                source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("upscale.wgsl"))),
                flags: wgpu::ShaderFlags::all(),
            });
            (
                device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                    label: Some("Upscale"),
                    layout: Some(&pipeline_layout),
                    vertex: wgpu::VertexState {
                        module: &shader,
                        entry_point: "vs_main",
                        buffers: &[],
                    },
                    fragment: Some(wgpu::FragmentState {
                        module: &shader,
                        entry_point: "fs_main",
                        targets: &[sc_desc.format.into()],
                    }),
                    primitive: wgpu::PrimitiveState::default(),
                    depth_stencil: None,
                    multisample: wgpu::MultisampleState::default(),
                }),
                bind_group_layout,
            )
        };
        let (low_res_target, bind_group_upscale) =
            Self::create_low_res_target(sc_desc, device, &bind_group_layout_upscale);
        Self {
            low_res_target,
            bind_group_upscale,
            pipeline_triangle_conservative,
            pipeline_triangle_regular,
            pipeline_upscale,
            pipeline_lines,
            bind_group_layout_upscale,
        }
    }
    /// Recreates the low-resolution target (and its bind group) to track the
    /// new swap chain size.
    fn resize(
        &mut self,
        sc_desc: &wgpu::SwapChainDescriptor,
        device: &wgpu::Device,
        _queue: &wgpu::Queue,
    ) {
        let (low_res_target, bind_group_upscale) =
            Self::create_low_res_target(sc_desc, device, &self.bind_group_layout_upscale);
        self.low_res_target = low_res_target;
        self.bind_group_upscale = bind_group_upscale;
    }
    /// This example has no interactive state; window events are ignored.
    fn update(&mut self, _event: winit::event::WindowEvent) {}
    /// Draws the two triangles into the low-res target, then upscales it to
    /// the frame and optionally overlays the triangle outline.
    fn render(
        &mut self,
        frame: &wgpu::SwapChainTexture,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        _spawner: &framework::Spawner,
    ) {
        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            label: Some("primary"),
        });
        {
            // Pass 1: conservative (red) triangle, then the regular (blue)
            // one over it. Pixels left red were only covered because of
            // conservative rasterization.
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("low resolution"),
                color_attachments: &[wgpu::RenderPassColorAttachment {
                    view: &self.low_res_target,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                        store: true,
                    },
                }],
                depth_stencil_attachment: None,
            });
            rpass.set_pipeline(&self.pipeline_triangle_conservative);
            rpass.draw(0..3, 0..1);
            rpass.set_pipeline(&self.pipeline_triangle_regular);
            rpass.draw(0..3, 0..1);
        }
        {
            // Pass 2: upscale the low-res target to the swap chain, then
            // draw the outline as a 4-vertex line strip (the shader wraps
            // the 4th index via `% 3`, closing the triangle).
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("full resolution"),
                color_attachments: &[wgpu::RenderPassColorAttachment {
                    view: &frame.view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                        store: true,
                    },
                }],
                depth_stencil_attachment: None,
            });
            rpass.set_pipeline(&self.pipeline_upscale);
            rpass.set_bind_group(0, &self.bind_group_upscale, &[]);
            rpass.draw(0..3, 0..1);
            if let Some(pipeline_lines) = &self.pipeline_lines {
                rpass.set_pipeline(pipeline_lines);
                rpass.draw(0..4, 0..1);
            }
        }
        queue.submit(Some(encoder.finish()));
    }
}
/// Entry point: hands control to the shared example framework.
fn main() {
    framework::run::<Example>("conservative-raster");
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

View File

@ -0,0 +1,22 @@
// Produces the triangle's three corners from the vertex index alone.
// Indices beyond 2 wrap via `% 3`, so a 4-vertex line strip using this
// shader closes the triangle outline.
[[stage(vertex)]]
fn vs_main([[builtin(vertex_index)]] vertex_index: u32) -> [[builtin(position)]] vec4<f32> {
    let i: i32 = i32(vertex_index % 3u);
    let x: f32 = f32(i - 1) * 0.75;
    let y: f32 = f32((i & 1) * 2 - 1) * 0.75 + x * 0.2 + 0.1;
    return vec4<f32>(x, y, 0.0, 1.0);
}
// Solid red: used by the conservative-rasterization pipeline.
[[stage(fragment)]]
fn fs_main_red() -> [[location(0)]] vec4<f32> {
    return vec4<f32>(1.0, 0.0, 0.0, 1.0);
}
// Blue: used by the regular pipeline drawn over the red pass.
[[stage(fragment)]]
fn fs_main_blue() -> [[location(0)]] vec4<f32> {
    return vec4<f32>(0.13, 0.31, 0.85, 1.0); // cornflower blue in linear space
}
// White: used by the optional line-strip outline pipeline.
[[stage(fragment)]]
fn fs_main_white() -> [[location(0)]] vec4<f32> {
    return vec4<f32>(1.0, 1.0, 1.0, 1.0);
}

View File

@ -0,0 +1,24 @@
// Data passed from the fullscreen vertex shader to the sampling fragment.
struct VertexOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(0)]] tex_coords: vec2<f32>;
};
// Generates one oversized triangle covering the whole screen from the
// vertex index alone (no vertex buffer), with matching texture coordinates.
[[stage(vertex)]]
fn vs_main([[builtin(vertex_index)]] vertex_index: u32) -> VertexOutput {
    let x: f32 = f32(i32(vertex_index & 1u) << 2u) - 1.0;
    let y: f32 = f32(i32(vertex_index & 2u) << 1u) - 1.0;
    var output: VertexOutput;
    output.position = vec4<f32>(x, -y, 0.0, 1.0);
    output.tex_coords = vec2<f32>(x + 1.0, y + 1.0) * 0.5;
    return output;
}
// The low-resolution render target and the sampler bound by the upscale
// bind group.
[[group(0), binding(0)]]
var r_color: texture_2d<f32>;
[[group(0), binding(1)]]
var r_sampler: sampler;
// Samples the low-res texture at the interpolated coordinates.
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    return textureSample(r_color, r_sampler, in.tex_coords);
}

View File

@ -0,0 +1,13 @@
# cube
This example renders a textured cube.
## To Run
```
cargo run --example cube
```
## Screenshots
![Cube example](./screenshot.png)

393
wgpu/examples/cube/main.rs Normal file
View File

@ -0,0 +1,393 @@
#[path = "../framework.rs"]
mod framework;
use bytemuck::{Pod, Zeroable};
use std::{borrow::Cow, mem};
use wgpu::util::DeviceExt;
/// A single cube vertex: homogeneous position plus texture coordinate.
/// `Pod`/`Zeroable` let `bytemuck::cast_slice` view these as raw bytes for
/// the GPU vertex buffer; `repr(C)` fixes the field layout.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct Vertex {
    // xyzw position; w is always 1.0 (see `vertex`).
    _pos: [f32; 4],
    // uv texture coordinate.
    _tex_coord: [f32; 2],
}
/// Builds a `Vertex` from small-integer position/uv data, promoting the
/// position to homogeneous f32 form (w = 1.0).
fn vertex(pos: [i8; 3], tc: [i8; 2]) -> Vertex {
    let [x, y, z] = pos;
    let [u, v] = tc;
    Vertex {
        _pos: [x as f32, y as f32, z as f32, 1.0],
        _tex_coord: [u as f32, v as f32],
    }
}
/// Returns the cube mesh: 24 vertices (4 per face, so each face gets its own
/// uv mapping) and 36 indices (two triangles per face).
fn create_vertices() -> (Vec<Vertex>, Vec<u16>) {
    let vertex_data = [
        // top (0, 0, 1)
        vertex([-1, -1, 1], [0, 0]),
        vertex([1, -1, 1], [1, 0]),
        vertex([1, 1, 1], [1, 1]),
        vertex([-1, 1, 1], [0, 1]),
        // bottom (0, 0, -1)
        vertex([-1, 1, -1], [1, 0]),
        vertex([1, 1, -1], [0, 0]),
        vertex([1, -1, -1], [0, 1]),
        vertex([-1, -1, -1], [1, 1]),
        // right (1, 0, 0)
        vertex([1, -1, -1], [0, 0]),
        vertex([1, 1, -1], [1, 0]),
        vertex([1, 1, 1], [1, 1]),
        vertex([1, -1, 1], [0, 1]),
        // left (-1, 0, 0)
        vertex([-1, -1, 1], [1, 0]),
        vertex([-1, 1, 1], [0, 0]),
        vertex([-1, 1, -1], [0, 1]),
        vertex([-1, -1, -1], [1, 1]),
        // front (0, 1, 0)
        vertex([1, 1, -1], [1, 0]),
        vertex([-1, 1, -1], [0, 0]),
        vertex([-1, 1, 1], [0, 1]),
        vertex([1, 1, 1], [1, 1]),
        // back (0, -1, 0)
        vertex([1, -1, 1], [0, 0]),
        vertex([-1, -1, 1], [1, 0]),
        vertex([-1, -1, -1], [1, 1]),
        vertex([1, -1, -1], [0, 1]),
    ];
    let index_data: &[u16] = &[
        0, 1, 2, 2, 3, 0, // top
        4, 5, 6, 6, 7, 4, // bottom
        8, 9, 10, 10, 11, 8, // right
        12, 13, 14, 14, 15, 12, // left
        16, 17, 18, 18, 19, 16, // front
        20, 21, 22, 22, 23, 20, // back
    ];
    (vertex_data.to_vec(), index_data.to_vec())
}
/// Generates a `size` x `size` texture of Mandelbrot escape-iteration counts
/// (one byte per texel, capped at 255) over the region x in [-2, 1],
/// y in [-1, 1].
fn create_texels(size: usize) -> Vec<u8> {
    let mut texels = Vec::with_capacity(size * size);
    for id in 0..size * size {
        // get high five for recognizing this ;)
        let cx = 3.0 * (id % size) as f32 / (size - 1) as f32 - 2.0;
        let cy = 2.0 * (id / size) as f32 / (size - 1) as f32 - 1.0;
        let mut x = cx;
        let mut y = cy;
        let mut count: u8 = 0;
        // Iterate z -> z^2 + c until escape (|z|^2 >= 4) or the cap.
        while count < 0xFF && x * x + y * y < 4.0 {
            let new_x = x * x - y * y + cx;
            let new_y = 2.0 * x * y + cy;
            x = new_x;
            y = new_y;
            count += 1;
        }
        texels.push(count);
    }
    texels
}
/// GPU state for the cube example.
struct Example {
    // Cube vertex data (24 vertices from `create_vertices`).
    vertex_buf: wgpu::Buffer,
    // Triangle-list indices for the cube faces.
    index_buf: wgpu::Buffer,
    // Number of indices to draw each frame.
    index_count: usize,
    // Binds the uniform transform and the Mandelbrot texture.
    bind_group: wgpu::BindGroup,
    // Holds the projection*view matrix; rewritten on resize.
    uniform_buf: wgpu::Buffer,
    // Solid-fill pipeline.
    pipeline: wgpu::RenderPipeline,
    // Wireframe overlay; `None` when NON_FILL_POLYGON_MODE is unsupported.
    pipeline_wire: Option<wgpu::RenderPipeline>,
}
impl Example {
    /// Builds the combined projection * view matrix for the given aspect
    /// ratio, corrected into wgpu's clip-space conventions.
    fn generate_matrix(aspect_ratio: f32) -> cgmath::Matrix4<f32> {
        let projection = cgmath::perspective(cgmath::Deg(45f32), aspect_ratio, 1.0, 10.0);
        let view = cgmath::Matrix4::look_at_rh(
            cgmath::Point3::new(1.5f32, -5.0, 3.0),
            cgmath::Point3::new(0f32, 0.0, 0.0),
            cgmath::Vector3::unit_z(),
        );
        framework::OPENGL_TO_WGPU_MATRIX * projection * view
    }
}
impl framework::Example for Example {
    /// Wireframe overlay needs line polygon mode; the example works without it.
    fn optional_features() -> wgt::Features {
        wgt::Features::NON_FILL_POLYGON_MODE
    }
    /// Creates the cube mesh buffers, the Mandelbrot texture, the uniform
    /// transform buffer, and the solid (plus optional wireframe) pipelines.
    fn init(
        sc_desc: &wgpu::SwapChainDescriptor,
        adapter: &wgpu::Adapter,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
    ) -> Self {
        // Create the vertex and index buffers
        let vertex_size = mem::size_of::<Vertex>();
        let (vertex_data, index_data) = create_vertices();
        let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: bytemuck::cast_slice(&vertex_data),
            usage: wgpu::BufferUsage::VERTEX,
        });
        let index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Index Buffer"),
            contents: bytemuck::cast_slice(&index_data),
            usage: wgpu::BufferUsage::INDEX,
        });
        // Create pipeline layout: binding 0 is the uniform matrix (vertex
        // stage), binding 1 is the unsigned-int texture (fragment stage).
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: None,
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        // A 4x4 f32 matrix is 64 bytes.
                        min_binding_size: wgpu::BufferSize::new(64),
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        sample_type: wgpu::TextureSampleType::Uint,
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                },
            ],
        });
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: None,
            bind_group_layouts: &[&bind_group_layout],
            push_constant_ranges: &[],
        });
        // Create the texture: one byte per texel of Mandelbrot data.
        let size = 256u32;
        let texels = create_texels(size as usize);
        let texture_extent = wgpu::Extent3d {
            width: size,
            height: size,
            depth_or_array_layers: 1,
        };
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: None,
            size: texture_extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::R8Uint,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
        });
        let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        queue.write_texture(
            wgpu::ImageCopyTexture {
                texture: &texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            &texels,
            wgpu::ImageDataLayout {
                offset: 0,
                // Rows are tightly packed: `size` texels * 1 byte each.
                bytes_per_row: Some(std::num::NonZeroU32::new(size).unwrap()),
                rows_per_image: None,
            },
            texture_extent,
        );
        // Create other resources
        let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
        let mx_ref: &[f32; 16] = mx_total.as_ref();
        let uniform_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Uniform Buffer"),
            contents: bytemuck::cast_slice(mx_ref),
            // COPY_DST so `resize` can overwrite it via `write_buffer`.
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        });
        // Create bind group
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: uniform_buf.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&texture_view),
                },
            ],
            label: None,
        });
        // Enable experimental naga translation only on backends it handles.
        let mut flags = wgpu::ShaderFlags::VALIDATION;
        match adapter.get_info().backend {
            wgpu::Backend::Metal | wgpu::Backend::Vulkan | wgpu::Backend::Gl => {
                flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION
            }
            _ => (), //TODO
        }
        let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
            label: None,
            source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
            flags,
        });
        // Matches `Vertex`: a vec4 position at offset 0, vec2 uv at offset 16.
        let vertex_buffers = [wgpu::VertexBufferLayout {
            array_stride: vertex_size as wgpu::BufferAddress,
            step_mode: wgpu::InputStepMode::Vertex,
            attributes: &[
                wgpu::VertexAttribute {
                    format: wgpu::VertexFormat::Float32x4,
                    offset: 0,
                    shader_location: 0,
                },
                wgpu::VertexAttribute {
                    format: wgpu::VertexFormat::Float32x2,
                    offset: 4 * 4,
                    shader_location: 1,
                },
            ],
        }];
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: None,
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "vs_main",
                buffers: &vertex_buffers,
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: "fs_main",
                targets: &[sc_desc.format.into()],
            }),
            primitive: wgpu::PrimitiveState {
                cull_mode: Some(wgpu::Face::Back),
                ..Default::default()
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
        });
        // Wireframe pipeline only when line polygon mode is available.
        let pipeline_wire = if device
            .features()
            .contains(wgt::Features::NON_FILL_POLYGON_MODE)
        {
            let pipeline_wire = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
                label: None,
                layout: Some(&pipeline_layout),
                vertex: wgpu::VertexState {
                    module: &shader,
                    entry_point: "vs_main",
                    buffers: &vertex_buffers,
                },
                fragment: Some(wgpu::FragmentState {
                    module: &shader,
                    entry_point: "fs_wire",
                    targets: &[wgpu::ColorTargetState {
                        format: sc_desc.format,
                        // Alpha-blend the wire color over the solid cube.
                        blend: Some(wgpu::BlendState {
                            color: wgpu::BlendComponent {
                                operation: wgpu::BlendOperation::Add,
                                src_factor: wgpu::BlendFactor::SrcAlpha,
                                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                            },
                            alpha: wgpu::BlendComponent::REPLACE,
                        }),
                        write_mask: wgpu::ColorWrite::ALL,
                    }],
                }),
                primitive: wgpu::PrimitiveState {
                    front_face: wgpu::FrontFace::Ccw,
                    cull_mode: Some(wgpu::Face::Back),
                    polygon_mode: wgpu::PolygonMode::Line,
                    ..Default::default()
                },
                depth_stencil: None,
                multisample: wgpu::MultisampleState::default(),
            });
            Some(pipeline_wire)
        } else {
            None
        };
        // Done
        Example {
            vertex_buf,
            index_buf,
            index_count: index_data.len(),
            bind_group,
            uniform_buf,
            pipeline,
            pipeline_wire,
        }
    }
    /// The cube is static; window events are ignored.
    fn update(&mut self, _event: winit::event::WindowEvent) {
        //empty
    }
    /// Recomputes the transform for the new aspect ratio and uploads it into
    /// the existing uniform buffer.
    fn resize(
        &mut self,
        sc_desc: &wgpu::SwapChainDescriptor,
        _device: &wgpu::Device,
        queue: &wgpu::Queue,
    ) {
        let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
        let mx_ref: &[f32; 16] = mx_total.as_ref();
        queue.write_buffer(&self.uniform_buf, 0, bytemuck::cast_slice(mx_ref));
    }
    /// Clears the frame and draws the cube (solid pass, then the optional
    /// wireframe pass over it).
    fn render(
        &mut self,
        frame: &wgpu::SwapChainTexture,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        _spawner: &framework::Spawner,
    ) {
        let mut encoder =
            device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        {
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[wgpu::RenderPassColorAttachment {
                    view: &frame.view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color {
                            r: 0.1,
                            g: 0.2,
                            b: 0.3,
                            a: 1.0,
                        }),
                        store: true,
                    },
                }],
                depth_stencil_attachment: None,
            });
            rpass.push_debug_group("Prepare data for draw.");
            rpass.set_pipeline(&self.pipeline);
            rpass.set_bind_group(0, &self.bind_group, &[]);
            rpass.set_index_buffer(self.index_buf.slice(..), wgpu::IndexFormat::Uint16);
            rpass.set_vertex_buffer(0, self.vertex_buf.slice(..));
            rpass.pop_debug_group();
            rpass.insert_debug_marker("Draw!");
            rpass.draw_indexed(0..self.index_count as u32, 0, 0..1);
            if let Some(ref pipe) = self.pipeline_wire {
                rpass.set_pipeline(pipe);
                rpass.draw_indexed(0..self.index_count as u32, 0, 0..1);
            }
        }
        queue.submit(Some(encoder.finish()));
    }
}
/// Entry point: hands control to the shared example framework.
fn main() {
    framework::run::<Example>("cube");
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 393 KiB

View File

@ -0,0 +1,37 @@
// Vertex-to-fragment interface for the cube.
struct VertexOutput {
    [[location(0)]] tex_coord: vec2<f32>;
    [[builtin(position)]] position: vec4<f32>;
};
// Uniforms: the combined projection * view transform uploaded by the example.
[[block]]
struct Locals {
    transform: mat4x4<f32>;
};
[[group(0), binding(0)]]
var r_locals: Locals;
// Transforms each vertex by the uniform matrix and forwards its uv.
[[stage(vertex)]]
fn vs_main(
    [[location(0)]] position: vec4<f32>,
    [[location(1)]] tex_coord: vec2<f32>,
) -> VertexOutput {
    var out: VertexOutput;
    out.tex_coord = tex_coord;
    out.position = r_locals.transform * position;
    return out;
}
// Unsigned-integer texture holding the Mandelbrot iteration counts.
[[group(0), binding(1)]]
var r_color: texture_2d<u32>;
// Loads the iteration count (0..255) for this texel and maps it onto a
// color ramp (higher counts darken each channel at different rates).
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    let tex = textureLoad(r_color, vec2<i32>(in.tex_coord * 256.0), 0);
    let v = f32(tex.x) / 255.0;
    return vec4<f32>(1.0 - (v * 5.0), 1.0 - (v * 15.0), 1.0 - (v * 50.0), 1.0);
}
// Translucent green used by the optional wireframe overlay pipeline.
[[stage(fragment)]]
fn fs_wire() -> [[location(0)]] vec4<f32> {
    return vec4<f32>(0.0, 0.5, 0.0, 0.5);
}

370
wgpu/examples/framework.rs Normal file
View File

@ -0,0 +1,370 @@
use std::future::Future;
#[cfg(not(target_arch = "wasm32"))]
use std::time::{Duration, Instant};
use winit::{
event::{self, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
/// Converts OpenGL-style clip space (z in [-1, 1]) to wgpu's clip space
/// (z in [0, 1]) by scaling z by 0.5 and translating it by 0.5.
#[rustfmt::skip]
#[allow(unused)]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
    1.0, 0.0, 0.0, 0.0,
    0.0, 1.0, 0.0, 0.0,
    0.0, 0.0, 0.5, 0.0,
    0.0, 0.0, 0.5, 1.0,
);
/// Reinterprets a slice of any sized type as its raw little-endian bytes,
/// e.g. for uploading vertex data to the GPU.
#[allow(dead_code)]
pub fn cast_slice<T>(data: &[T]) -> &[u8] {
    let byte_len = std::mem::size_of_val(data);
    // SAFETY: the pointer comes from a valid slice, `size_of_val` is its
    // exact byte length, and any initialized memory may be viewed as bytes.
    unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, byte_len) }
}
/// Identifies a pipeline shader stage; kept for examples that load shaders
/// per stage (hence the `allow(dead_code)`).
#[allow(dead_code)]
pub enum ShaderStage {
    Vertex,
    Fragment,
    Compute,
}
/// Interface each example implements; `run` drives the window, swap chain,
/// and event loop, and calls into these hooks.
pub trait Example: 'static + Sized {
    /// Features the example can take advantage of, but does not require.
    fn optional_features() -> wgpu::Features {
        wgpu::Features::empty()
    }
    /// Features without which the example cannot run (asserted in `setup`).
    fn required_features() -> wgpu::Features {
        wgpu::Features::empty()
    }
    /// Resource limits to request from the device.
    fn required_limits() -> wgpu::Limits {
        wgpu::Limits::default()
    }
    /// Called once after device creation to build all GPU resources.
    fn init(
        sc_desc: &wgpu::SwapChainDescriptor,
        adapter: &wgpu::Adapter,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
    ) -> Self;
    /// Called when the window (and thus the swap chain) changes size.
    fn resize(
        &mut self,
        sc_desc: &wgpu::SwapChainDescriptor,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
    );
    /// Called for every window event the framework does not handle itself.
    fn update(&mut self, event: WindowEvent);
    /// Called to record and submit the commands for one frame.
    fn render(
        &mut self,
        frame: &wgpu::SwapChainTexture,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        spawner: &Spawner,
    );
}
/// Everything produced by the async `setup` phase, handed to `start`.
struct Setup {
    window: winit::window::Window,
    event_loop: EventLoop<()>,
    instance: wgpu::Instance,
    // Initial inner size of the window, used for the first swap chain.
    size: winit::dpi::PhysicalSize<u32>,
    surface: wgpu::Surface,
    adapter: wgpu::Adapter,
    device: wgpu::Device,
    queue: wgpu::Queue,
}
/// Opens the window, selects a backend/adapter (honoring the `WGPU_BACKEND`
/// and `WGPU_POWER_PREF` environment variables), and requests a device with
/// the example's required features and limits. Panics if no suitable
/// adapter/device is found or a required feature is missing.
async fn setup<E: Example>(title: &str) -> Setup {
    #[cfg(not(target_arch = "wasm32"))]
    {
        env_logger::init();
    };
    let event_loop = EventLoop::new();
    let mut builder = winit::window::WindowBuilder::new();
    builder = builder.with_title(title);
    #[cfg(windows_OFF)] // TODO
    {
        use winit::platform::windows::WindowBuilderExtWindows;
        builder = builder.with_no_redirection_bitmap(true);
    }
    let window = builder.build(&event_loop).unwrap();
    #[cfg(target_arch = "wasm32")]
    {
        use winit::platform::web::WindowExtWebSys;
        console_log::init().expect("could not initialize logger");
        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
        // On wasm, append the canvas to the document body
        web_sys::window()
            .and_then(|win| win.document())
            .and_then(|doc| doc.body())
            .and_then(|body| {
                body.append_child(&web_sys::Element::from(window.canvas()))
                    .ok()
            })
            .expect("couldn't append canvas to document body");
    }
    log::info!("Initializing the surface...");
    // Backend can be forced via WGPU_BACKEND; default to the primary set.
    let backend = if let Ok(backend) = std::env::var("WGPU_BACKEND") {
        match backend.to_lowercase().as_str() {
            "vulkan" => wgpu::BackendBit::VULKAN,
            "metal" => wgpu::BackendBit::METAL,
            "dx12" => wgpu::BackendBit::DX12,
            "dx11" => wgpu::BackendBit::DX11,
            "gl" => wgpu::BackendBit::GL,
            "webgpu" => wgpu::BackendBit::BROWSER_WEBGPU,
            other => panic!("Unknown backend: {}", other),
        }
    } else {
        wgpu::BackendBit::PRIMARY
    };
    let power_preference = if let Ok(power_preference) = std::env::var("WGPU_POWER_PREF") {
        match power_preference.to_lowercase().as_str() {
            "low" => wgpu::PowerPreference::LowPower,
            "high" => wgpu::PowerPreference::HighPerformance,
            other => panic!("Unknown power preference: {}", other),
        }
    } else {
        wgpu::PowerPreference::default()
    };
    let instance = wgpu::Instance::new(backend);
    // Surface creation is unsafe because it consumes a raw window handle;
    // the window is kept alive in `Setup` alongside the surface.
    let (size, surface) = unsafe {
        let size = window.inner_size();
        let surface = instance.create_surface(&window);
        (size, surface)
    };
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference,
            compatible_surface: Some(&surface),
        })
        .await
        .expect("No suitable GPU adapters found on the system!");
    #[cfg(not(target_arch = "wasm32"))]
    {
        let adapter_info = adapter.get_info();
        println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
    }
    let optional_features = E::optional_features();
    let required_features = E::required_features();
    let adapter_features = adapter.features();
    assert!(
        adapter_features.contains(required_features),
        "Adapter does not support required features for this example: {:?}",
        required_features - adapter_features
    );
    let needed_limits = E::required_limits();
    // WGPU_TRACE points at a directory for recording an API trace.
    let trace_dir = std::env::var("WGPU_TRACE");
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                // Request all supported optional features plus the required ones.
                features: (optional_features & adapter_features) | required_features,
                limits: needed_limits,
            },
            trace_dir.ok().as_ref().map(std::path::Path::new),
        )
        .await
        .expect("Unable to find a suitable GPU adapter!");
    Setup {
        window,
        event_loop,
        instance,
        size,
        surface,
        adapter,
        device,
        queue,
    }
}
/// Creates the swap chain, initializes the example, and runs the winit event
/// loop: redraws (frame-capped on native), resizes, and forwards other
/// window events to `Example::update`. Never returns (`event_loop.run`).
fn start<E: Example>(
    Setup {
        window,
        event_loop,
        instance,
        size,
        surface,
        adapter,
        device,
        queue,
    }: Setup,
) {
    let spawner = Spawner::new();
    let mut sc_desc = wgpu::SwapChainDescriptor {
        usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
        format: adapter.get_swap_chain_preferred_format(&surface).unwrap(),
        width: size.width,
        height: size.height,
        present_mode: wgpu::PresentMode::Mailbox,
    };
    let mut swap_chain = device.create_swap_chain(&surface, &sc_desc);
    log::info!("Initializing the example...");
    let mut example = E::init(&sc_desc, &adapter, &device, &queue);
    #[cfg(not(target_arch = "wasm32"))]
    let mut last_update_inst = Instant::now();
    #[cfg(not(target_arch = "wasm32"))]
    let mut last_frame_inst = Instant::now();
    #[cfg(not(target_arch = "wasm32"))]
    let (mut frame_count, mut accum_time) = (0, 0.0);
    log::info!("Entering render loop...");
    event_loop.run(move |event, _, control_flow| {
        let _ = (&instance, &adapter); // force ownership by the closure
        *control_flow = if cfg!(feature = "metal-auto-capture") {
            ControlFlow::Exit
        } else {
            ControlFlow::Poll
        };
        match event {
            event::Event::RedrawEventsCleared => {
                #[cfg(not(target_arch = "wasm32"))]
                {
                    // Clamp to some max framerate to avoid busy-looping too much
                    // (we might be in wgpu::PresentMode::Mailbox, thus discarding superfluous frames)
                    //
                    // winit has window.current_monitor().video_modes() but that is a list of all full screen video modes.
                    // So without extra dependencies it's a bit tricky to get the max refresh rate we can run the window on.
                    // Therefore we just go with 60fps - sorry 120hz+ folks!
                    let target_frametime = Duration::from_secs_f64(1.0 / 60.0);
                    let time_since_last_frame = last_update_inst.elapsed();
                    if time_since_last_frame >= target_frametime {
                        window.request_redraw();
                        last_update_inst = Instant::now();
                    } else {
                        *control_flow = ControlFlow::WaitUntil(
                            Instant::now() + target_frametime - time_since_last_frame,
                        );
                    }
                    // Give queued async tasks a chance to make progress.
                    spawner.run_until_stalled();
                }
                #[cfg(target_arch = "wasm32")]
                window.request_redraw();
            }
            event::Event::WindowEvent {
                event: WindowEvent::Resized(size),
                ..
            } => {
                log::info!("Resizing to {:?}", size);
                // Clamp to 1x1: a zero-sized swap chain is invalid.
                sc_desc.width = size.width.max(1);
                sc_desc.height = size.height.max(1);
                example.resize(&sc_desc, &device, &queue);
                swap_chain = device.create_swap_chain(&surface, &sc_desc);
            }
            event::Event::WindowEvent { event, .. } => match event {
                WindowEvent::KeyboardInput {
                    input:
                        event::KeyboardInput {
                            virtual_keycode: Some(event::VirtualKeyCode::Escape),
                            state: event::ElementState::Pressed,
                            ..
                        },
                    ..
                }
                | WindowEvent::CloseRequested => {
                    *control_flow = ControlFlow::Exit;
                }
                _ => {
                    example.update(event);
                }
            },
            event::Event::RedrawRequested(_) => {
                #[cfg(not(target_arch = "wasm32"))]
                {
                    // Print a simple average frame time every 100 frames.
                    accum_time += last_frame_inst.elapsed().as_secs_f32();
                    last_frame_inst = Instant::now();
                    frame_count += 1;
                    if frame_count == 100 {
                        println!(
                            "Avg frame time {}ms",
                            accum_time * 1000.0 / frame_count as f32
                        );
                        accum_time = 0.0;
                        frame_count = 0;
                    }
                }
                // On failure (e.g. outdated swap chain), recreate and retry once.
                let frame = match swap_chain.get_current_frame() {
                    Ok(frame) => frame,
                    Err(_) => {
                        swap_chain = device.create_swap_chain(&surface, &sc_desc);
                        swap_chain
                            .get_current_frame()
                            .expect("Failed to acquire next swap chain texture!")
                    }
                };
                example.render(&frame.output, &device, &queue, &spawner);
            }
            _ => {}
        }
    });
}
/// Single-threaded task spawner used to run async work between frames on
/// native targets.
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
    executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
    fn new() -> Self {
        Self {
            executor: async_executor::LocalExecutor::new(),
        }
    }
    /// Queues a future on the local executor; it is polled from the event
    /// loop via `run_until_stalled`.
    #[allow(dead_code)]
    pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
        self.executor.spawn(future).detach();
    }
    /// Polls queued tasks until none can make further progress.
    fn run_until_stalled(&self) {
        while self.executor.try_tick() {}
    }
}
/// Wasm counterpart of `Spawner`: the browser's event loop polls the tasks,
/// so no executor state is needed.
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
    fn new() -> Self {
        Self {}
    }
    /// Hands the future to the browser via `wasm_bindgen_futures`.
    #[allow(dead_code)]
    pub fn spawn_local(&self, future: impl Future<Output = ()> + 'static) {
        wasm_bindgen_futures::spawn_local(future);
    }
}
/// Native entry helper: blocks on async setup, then runs the event loop.
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
    let setup = pollster::block_on(setup::<E>(title));
    start::<E>(setup);
}
/// Wasm entry helper: the browser cannot block, so setup is spawned as a
/// detached async task that then starts the event loop.
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
    let title = title.to_owned();
    wasm_bindgen_futures::spawn_local(async move {
        let setup = setup::<E>(&title).await;
        start::<E>(setup);
    });
}
// This allows treating the framework as a standalone example,
// thus avoiding listing the example names in `Cargo.toml`.
// (Examples include this file via `#[path = "../framework.rs"]`.)
#[allow(dead_code)]
fn main() {}

View File

@ -0,0 +1,22 @@
# hello-compute
Runs a compute shader to determine the number of iterations of the rules from
the Collatz Conjecture
- If n is even, n = n/2
- If n is odd, n = 3n+1
that it will take to finish and reach the number `1`.
## To Run
```
# Pass in any 4 numbers as arguments
RUST_LOG=hello_compute cargo run --example hello-compute 1 4 3 295
```
## Example Output
```
[2020-04-25T11:15:33Z INFO hello_compute] Times: [0, 2, 7, 55]
```

View File

@ -0,0 +1,243 @@
use std::{borrow::Cow, convert::TryInto, str::FromStr};
use wgpu::util::DeviceExt;
// Indicates a u32 overflow in an intermediate Collatz value; `run` prints
// "OVERFLOW" instead of a step count when it sees this sentinel.
const OVERFLOW: u32 = 0xffffffff;
async fn run() {
let numbers = if std::env::args().len() <= 1 {
let default = vec![1, 2, 3, 4];
println!("No numbers were provided, defaulting to {:?}", default);
default
} else {
std::env::args()
.skip(1)
.map(|s| u32::from_str(&s).expect("You must pass a list of positive integers!"))
.collect()
};
let steps = execute_gpu(numbers).await;
let disp_steps: Vec<String> = steps
.iter()
.map(|&n| match n {
OVERFLOW => "OVERFLOW".to_string(),
_ => n.to_string(),
})
.collect();
println!("Steps: [{}]", disp_steps.join(", "));
#[cfg(target_arch = "wasm32")]
log::info!("Steps: [{}]", disp_steps.join(", "));
}
/// Creates a wgpu device, dispatches the Collatz compute shader once per
/// element of `numbers`, and reads the resulting step counts back to the CPU.
/// Panics if no adapter/device is available or the readback fails.
async fn execute_gpu(numbers: Vec<u32>) -> Vec<u32> {
    // Instantiates instance of WebGPU
    let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
    // `request_adapter` instantiates the general connection to the GPU
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions::default())
        .await
        .unwrap();
    // `request_device` instantiates the feature specific connection to the GPU, defining some parameters,
    // `features` being the available features.
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                features: wgpu::Features::empty(),
                limits: wgpu::Limits::default(),
            },
            None,
        )
        .await
        .unwrap();
    // Loads the WGSL compute shader from the adjacent `shader.wgsl` file.
    // Enable the experimental Naga translation path on the backends this
    // example opts into.
    let mut flags = wgpu::ShaderFlags::VALIDATION;
    match adapter.get_info().backend {
        wgpu::Backend::Vulkan | wgpu::Backend::Metal | wgpu::Backend::Gl => {
            flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION;
        }
        _ => {}
    }
    let cs_module = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
        label: None,
        source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
        flags,
    });
    // Gets the size in bytes of the buffer.
    let slice_size = numbers.len() * std::mem::size_of::<u32>();
    let size = slice_size as wgpu::BufferAddress;
    // Instantiates buffer without data.
    // `usage` of buffer specifies how it can be used:
    //   `BufferUsage::MAP_READ` allows it to be read (outside the shader).
    //   `BufferUsage::COPY_DST` allows it to be the destination of the copy.
    let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: None,
        size,
        usage: wgpu::BufferUsage::MAP_READ | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    // Instantiates buffer with data (`numbers`).
    // Usage allowing the buffer to be:
    //   A storage buffer (can be bound within a bind group and thus available to a shader).
    //   The destination of a copy.
    //   The source of a copy.
    let storage_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Storage Buffer"),
        contents: bytemuck::cast_slice(&numbers),
        usage: wgpu::BufferUsage::STORAGE
            | wgpu::BufferUsage::COPY_DST
            | wgpu::BufferUsage::COPY_SRC,
    });
    // A bind group defines how buffers are accessed by shaders.
    // It is to WebGPU what a descriptor set is to Vulkan.
    // `binding` here refers to the `binding` of a buffer in the shader (`layout(set = 0, binding = 0) buffer`).
    // A pipeline specifies the operation of a shader.
    // Instantiates the pipeline. With `layout: None`, the bind group layout is
    // derived from the shader module itself.
    let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
        label: None,
        layout: None,
        module: &cs_module,
        entry_point: "main",
    });
    // Instantiates the bind group, once again specifying the binding of buffers.
    let bind_group_layout = compute_pipeline.get_bind_group_layout(0);
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: None,
        layout: &bind_group_layout,
        entries: &[wgpu::BindGroupEntry {
            binding: 0,
            resource: storage_buffer.as_entire_binding(),
        }],
    });
    // A command encoder executes one or many pipelines.
    // It is to WebGPU what a command buffer is to Vulkan.
    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    {
        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
        cpass.set_pipeline(&compute_pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        cpass.insert_debug_marker("compute collatz iterations");
        cpass.dispatch(numbers.len() as u32, 1, 1); // Number of cells to run, the (x,y,z) size of item being processed
    }
    // Adds a copy operation to the command encoder.
    // Will copy data from storage buffer on GPU to staging buffer on CPU.
    encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size);
    // Submits command encoder for processing
    queue.submit(Some(encoder.finish()));
    // Note that we're not calling `.await` here.
    let buffer_slice = staging_buffer.slice(..);
    // Gets the future representing when `staging_buffer` can be read from
    let buffer_future = buffer_slice.map_async(wgpu::MapMode::Read);
    // Poll the device in a blocking manner so that our future resolves.
    // In an actual application, `device.poll(...)` should
    // be called in an event loop or on another thread.
    device.poll(wgpu::Maintain::Wait);
    // Awaits until `buffer_future` can be read from
    if let Ok(()) = buffer_future.await {
        // Gets contents of buffer
        let data = buffer_slice.get_mapped_range();
        // Since contents are in raw bytes, this converts them back to u32
        let result = data
            .chunks_exact(4)
            .map(|b| u32::from_ne_bytes(b.try_into().unwrap()))
            .collect();
        // With the current interface, we have to make sure all mapped views are
        // dropped before we unmap the buffer.
        drop(data);
        staging_buffer.unmap(); // Unmaps buffer from memory
                                // If you are familiar with C++ these 2 lines can be thought of similarly to:
                                //   delete myPointer;
                                //   myPointer = NULL;
                                // It effectively frees the memory
        // Returns data from buffer
        result
    } else {
        panic!("failed to run compute on gpu!")
    }
}
/// Synchronous entry point; dispatches to the platform-appropriate runner.
fn main() {
    #[cfg(not(target_arch = "wasm32"))]
    {
        // Native: plain env_logger plus a blocking executor.
        env_logger::init();
        let future = run();
        pollster::block_on(future);
    }
    #[cfg(target_arch = "wasm32")]
    {
        // Web: surface panics and log records in the browser console, then
        // spawn the future on the JS task queue.
        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
        console_log::init().expect("could not initialize logger");
        let future = run();
        wasm_bindgen_futures::spawn_local(future);
    }
}
// GPU-backed integration tests; skipped on wasm where there is no test harness.
#[cfg(all(test, not(target_arch = "wasm32")))]
mod tests {
    use super::*;
    // Known Collatz step counts for small inputs.
    #[test]
    fn test_compute_1() {
        let input = vec![1, 2, 3, 4];
        pollster::block_on(assert_execute_gpu(input, vec![0, 1, 7, 2]));
    }
    #[test]
    fn test_compute_2() {
        let input = vec![5, 23, 10, 9];
        pollster::block_on(assert_execute_gpu(input, vec![5, 15, 6, 19]));
    }
    // Inputs whose trajectories exceed u32 range must report the sentinel.
    #[test]
    fn test_compute_overflow() {
        let input = vec![77031, 837799, 8400511, 63728127];
        pollster::block_on(assert_execute_gpu(
            input,
            vec![350, 524, OVERFLOW, OVERFLOW],
        ));
    }
    // Exercises device creation and dispatch from several threads at once;
    // every thread must complete within the timeout.
    #[test]
    fn test_multithreaded_compute() {
        use std::{sync::mpsc, thread, time::Duration};
        let thread_count = 8;
        let (tx, rx) = mpsc::channel();
        for _ in 0..thread_count {
            let tx = tx.clone();
            thread::spawn(move || {
                let input = vec![100, 100, 100];
                pollster::block_on(assert_execute_gpu(input, vec![25, 25, 25]));
                tx.send(true).unwrap();
            });
        }
        for _ in 0..thread_count {
            rx.recv_timeout(Duration::from_secs(10))
                .expect("A thread never completed.");
        }
    }
    // Helper: runs the shader and compares against the expected step counts.
    async fn assert_execute_gpu(input: Vec<u32>, expected: Vec<u32>) {
        assert_eq!(execute_gpu(input).await, expected);
    }
}

View File

@ -0,0 +1,41 @@
// Storage buffer bound from the host; the same buffer serves as both the
// input numbers and the output step counts.
[[block]]
struct PrimeIndices {
    data: [[stride(4)]] array<u32>;
}; // this is used as both input and output for convenience
[[group(0), binding(0)]]
var<storage> v_indices: [[access(read_write)]] PrimeIndices;
// The Collatz Conjecture states that for any integer n:
// If n is even, n = n/2
// If n is odd, n = 3n+1
// And repeat this process for each new n, you will always eventually reach 1.
// Though the conjecture has not been proven, no counterexample has ever been found.
// This function returns how many times this recurrence needs to be applied to reach 1.
fn collatz_iterations(n_base: u32) -> u32{
    var n: u32 = n_base;
    var i: u32 = 0u;
    loop {
        if (n <= 1u) {
            break;
        }
        if (n % 2u == 0u) {
            n = n / 2u;
        }
        else {
            // Overflow? (i.e. 3*n + 1 > 0xffffffffu?)
            // 1431655765 == ceil((0xffffffff - 1) / 3): beyond this 3n+1 wraps.
            if (n >= 1431655765u) { // 0x55555555u
                return 4294967295u; // 0xffffffffu
            }
            n = 3u * n + 1u;
        }
        i = i + 1u;
    }
    return i;
}
// One invocation per input element; each thread rewrites its own slot in place.
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] global_id: vec3<u32>) {
    v_indices.data[global_id.x] = collatz_iterations(v_indices.data[global_id.x]);
}

View File

@ -0,0 +1,13 @@
# hello-triangle
This example renders a triangle to a window.
## To Run
```
cargo run --example hello-triangle
```
## Screenshots
![Triangle window](./screenshot.png)

View File

@ -0,0 +1,154 @@
use std::borrow::Cow;
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::Window,
};
/// Sets up a device, surface, and render pipeline for `window`, then runs the
/// winit event loop forever, redrawing a single green-clear + red triangle and
/// rebuilding the swap chain on resize. Never returns.
async fn run(event_loop: EventLoop<()>, window: Window) {
    let size = window.inner_size();
    let instance = wgpu::Instance::new(wgpu::BackendBit::all());
    // Safety contract of `create_surface`: the window must outlive the
    // surface; both live for the whole program here.
    let surface = unsafe { instance.create_surface(&window) };
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::default(),
            // Request an adapter which can render to our surface
            compatible_surface: Some(&surface),
        })
        .await
        .expect("Failed to find an appropriate adapter");
    // Create the logical device and command queue
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                features: wgpu::Features::empty(),
                limits: wgpu::Limits::default(),
            },
            None,
        )
        .await
        .expect("Failed to create device");
    // Load the shaders from disk
    let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
        label: None,
        source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
        flags: wgpu::ShaderFlags::all(),
    });
    // No bindings: the vertex shader synthesizes positions from the index.
    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: None,
        bind_group_layouts: &[],
        push_constant_ranges: &[],
    });
    let swapchain_format = adapter.get_swap_chain_preferred_format(&surface).unwrap();
    let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: None,
        layout: Some(&pipeline_layout),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: "vs_main",
            buffers: &[],
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: "fs_main",
            targets: &[swapchain_format.into()],
        }),
        primitive: wgpu::PrimitiveState::default(),
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
    });
    // Kept mutable: resizing rewrites width/height and rebuilds the chain.
    let mut sc_desc = wgpu::SwapChainDescriptor {
        usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
        format: swapchain_format,
        width: size.width,
        height: size.height,
        present_mode: wgpu::PresentMode::Mailbox,
    };
    let mut swap_chain = device.create_swap_chain(&surface, &sc_desc);
    event_loop.run(move |event, _, control_flow| {
        // Have the closure take ownership of the resources.
        // `event_loop.run` never returns, therefore we must do this to ensure
        // the resources are properly cleaned up.
        let _ = (&instance, &adapter, &shader, &pipeline_layout);
        *control_flow = ControlFlow::Wait;
        match event {
            Event::WindowEvent {
                event: WindowEvent::Resized(size),
                ..
            } => {
                // Recreate the swap chain with the new size
                sc_desc.width = size.width;
                sc_desc.height = size.height;
                swap_chain = device.create_swap_chain(&surface, &sc_desc);
            }
            Event::RedrawRequested(_) => {
                let frame = swap_chain
                    .get_current_frame()
                    .expect("Failed to acquire next swap chain texture")
                    .output;
                let mut encoder =
                    device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
                {
                    // Single pass: clear to green, then draw the 3 vertices.
                    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: None,
                        color_attachments: &[wgpu::RenderPassColorAttachment {
                            view: &frame.view,
                            resolve_target: None,
                            ops: wgpu::Operations {
                                load: wgpu::LoadOp::Clear(wgpu::Color::GREEN),
                                store: true,
                            },
                        }],
                        depth_stencil_attachment: None,
                    });
                    rpass.set_pipeline(&render_pipeline);
                    rpass.draw(0..3, 0..1);
                }
                queue.submit(Some(encoder.finish()));
            }
            Event::WindowEvent {
                event: WindowEvent::CloseRequested,
                ..
            } => *control_flow = ControlFlow::Exit,
            _ => {}
        }
    });
}
/// Creates the window and event loop, then hands off to the async `run`.
fn main() {
    let event_loop = EventLoop::new();
    let window = winit::window::Window::new(&event_loop).unwrap();
    #[cfg(not(target_arch = "wasm32"))]
    {
        env_logger::init();
        // Block on setup; `run`'s event loop takes over from there.
        pollster::block_on(run(event_loop, window));
    }
    #[cfg(target_arch = "wasm32")]
    {
        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
        console_log::init().expect("could not initialize logger");
        use winit::platform::web::WindowExtWebSys;
        // On wasm the canvas must be attached to the DOM by hand.
        let canvas = web_sys::Element::from(window.canvas());
        web_sys::window()
            .and_then(|win| win.document())
            .and_then(|doc| doc.body())
            .and_then(|body| body.append_child(&canvas).ok())
            .expect("couldn't append canvas to document body");
        wasm_bindgen_futures::spawn_local(run(event_loop, window));
    }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

View File

@ -0,0 +1,11 @@
// Vertex stage: no vertex buffer; the index alone selects one of three
// hard-coded positions: (-1,-1), (0,1), (1,-1).
[[stage(vertex)]]
fn vs_main([[builtin(vertex_index)]] in_vertex_index: u32) -> [[builtin(position)]] vec4<f32> {
    let x = f32(i32(in_vertex_index) - 1);
    let y = f32(i32(in_vertex_index & 1u) * 2 - 1);
    return vec4<f32>(x, y, 0.0, 1.0);
}
// Fragment stage: flat red fill.
[[stage(fragment)]]
fn fs_main() -> [[location(0)]] vec4<f32> {
    return vec4<f32>(1.0, 0.0, 0.0, 1.0);
}

View File

@ -0,0 +1,13 @@
# hello-windows
This example renders a set of 16 windows, each with a differently colored background.
## To Run
```
cargo run --example hello-windows
```
## Screenshots
![16 windows](./screenshot.png)

View File

@ -0,0 +1,203 @@
use std::collections::HashMap;
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowId},
};
/// A window paired with the color it is cleared to and the GPU surface
/// created from it.
struct ViewportDesc {
    window: Window,
    background: wgpu::Color,
    surface: wgpu::Surface,
}
/// A `ViewportDesc` with its swap chain attached; `sc_desc` is kept so the
/// chain can be rebuilt on resize.
struct Viewport {
    desc: ViewportDesc,
    sc_desc: wgpu::SwapChainDescriptor,
    swap_chain: wgpu::SwapChain,
}
impl ViewportDesc {
fn new(window: Window, background: wgpu::Color, instance: &wgpu::Instance) -> Self {
let surface = unsafe { instance.create_surface(&window) };
Self {
window,
background,
surface,
}
}
fn build(self, adapter: &wgpu::Adapter, device: &wgpu::Device) -> Viewport {
let size = self.window.inner_size();
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
format: adapter
.get_swap_chain_preferred_format(&self.surface)
.unwrap(),
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&self.surface, &sc_desc);
Viewport {
desc: self,
sc_desc,
swap_chain,
}
}
}
impl Viewport {
    /// Rebuilds the swap chain after the window has been resized.
    fn resize(&mut self, device: &wgpu::Device, size: winit::dpi::PhysicalSize<u32>) {
        let winit::dpi::PhysicalSize { width, height } = size;
        self.sc_desc.width = width;
        self.sc_desc.height = height;
        self.swap_chain = device.create_swap_chain(&self.desc.surface, &self.sc_desc);
    }
    /// Acquires the next texture to render into, panicking if the swap chain
    /// cannot provide one.
    fn get_current_frame(&mut self) -> wgpu::SwapChainTexture {
        let frame = self
            .swap_chain
            .get_current_frame()
            .expect("Failed to acquire next swap chain texture");
        frame.output
    }
}
/// Builds one surface + swap chain per window, then runs the event loop,
/// clearing each window to its own background color on redraw. Windows are
/// dropped as they close; the loop exits when none remain.
async fn run(event_loop: EventLoop<()>, viewports: Vec<(Window, wgpu::Color)>) {
    let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
    let viewports: Vec<_> = viewports
        .into_iter()
        .map(|(window, color)| ViewportDesc::new(window, color, &instance))
        .collect();
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::default(),
            // Request an adapter which can render to our surface
            compatible_surface: viewports.first().map(|desc| &desc.surface),
        })
        .await
        .expect("Failed to find an appropriate adapter");
    // Create the logical device and command queue
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                features: wgpu::Features::empty(),
                limits: wgpu::Limits::default(),
            },
            None,
        )
        .await
        .expect("Failed to create device");
    // Keyed by window id so events can be routed to the right viewport.
    let mut viewports: HashMap<WindowId, Viewport> = viewports
        .into_iter()
        .map(|desc| (desc.window.id(), desc.build(&adapter, &device)))
        .collect();
    event_loop.run(move |event, _, control_flow| {
        // Have the closure take ownership of the resources.
        // `event_loop.run` never returns, therefore we must do this to ensure
        // the resources are properly cleaned up.
        let _ = (&instance, &adapter);
        *control_flow = ControlFlow::Wait;
        match event {
            Event::WindowEvent {
                window_id,
                event: WindowEvent::Resized(size),
                ..
            } => {
                // Recreate the swap chain with the new size
                if let Some(viewport) = viewports.get_mut(&window_id) {
                    viewport.resize(&device, size);
                }
            }
            Event::RedrawRequested(window_id) => {
                if let Some(viewport) = viewports.get_mut(&window_id) {
                    let frame = viewport.get_current_frame();
                    let mut encoder = device
                        .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
                    {
                        // The pass only clears; no draw calls are recorded.
                        let _rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                            label: None,
                            color_attachments: &[wgpu::RenderPassColorAttachment {
                                view: &frame.view,
                                resolve_target: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Clear(viewport.desc.background),
                                    store: true,
                                },
                            }],
                            depth_stencil_attachment: None,
                        });
                    }
                    queue.submit(Some(encoder.finish()));
                }
            }
            Event::WindowEvent {
                window_id,
                event: WindowEvent::CloseRequested,
                ..
            } => {
                // Dropping the viewport releases its window and GPU resources.
                viewports.remove(&window_id);
                if viewports.is_empty() {
                    *control_flow = ControlFlow::Exit
                }
            }
            _ => {}
        }
    });
}
/// Lays out a ROWS x COLUMNS grid of windows, assigns each a color derived
/// from its grid position, and hands the set to the async `run`.
fn main() {
    #[cfg(not(target_arch = "wasm32"))]
    {
        const WINDOW_SIZE: u32 = 128;
        const WINDOW_PADDING: u32 = 16;
        const WINDOW_TITLEBAR: u32 = 32;
        const WINDOW_OFFSET: u32 = WINDOW_SIZE + WINDOW_PADDING;
        const ROWS: u32 = 4;
        const COLUMNS: u32 = 4;
        // Fraction of the way through a range; used to pick channel values.
        fn frac(index: u32, max: u32) -> f64 {
            index as f64 / max as f64
        }
        let event_loop = EventLoop::new();
        let mut viewports = Vec::with_capacity((ROWS * COLUMNS) as usize);
        for row in 0..ROWS {
            for column in 0..COLUMNS {
                let window = winit::window::WindowBuilder::new()
                    .with_title(format!("x{}y{}", column, row))
                    .with_inner_size(winit::dpi::PhysicalSize::new(WINDOW_SIZE, WINDOW_SIZE))
                    .build(&event_loop)
                    .unwrap();
                let position = winit::dpi::PhysicalPosition::new(
                    WINDOW_PADDING + column * WINDOW_OFFSET,
                    WINDOW_PADDING + row * (WINDOW_OFFSET + WINDOW_TITLEBAR),
                );
                window.set_outer_position(position);
                let background = wgpu::Color {
                    r: frac(row, ROWS),
                    g: 0.5 - frac(row * column, ROWS * COLUMNS) * 0.5,
                    b: frac(column, COLUMNS),
                    a: 1.0,
                };
                viewports.push((window, background))
            }
        }
        env_logger::init();
        pollster::block_on(run(event_loop, viewports));
    }
    #[cfg(target_arch = "wasm32")]
    {
        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
        panic!("wasm32 is not supported")
    }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

View File

@ -0,0 +1,16 @@
# hello
This example prints output describing the adapter in use.
## To Run
```
cargo run --example hello
```
## Example output
```
# You might see different output as it depends on your graphics card
AdapterInfo { name: "Intel(R) UHD Graphics 630", vendor: 0, device: 0, device_type: IntegratedGpu, backend: Metal }
```

View File

@ -0,0 +1,25 @@
/// Requests the default adapter and, on native targets, prints its
/// `AdapterInfo` description.
async fn run() {
    let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
    // On the web there is nowhere to print, so the binding may go unused.
    #[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions::default())
        .await
        .unwrap();
    #[cfg(not(target_arch = "wasm32"))]
    println!("{:?}", adapter.get_info())
}
/// Platform-dispatching entry point for the async `run` routine.
fn main() {
    #[cfg(not(target_arch = "wasm32"))]
    {
        env_logger::init();
        // Drive the future to completion on this thread.
        let future = run();
        pollster::block_on(future);
    }
    #[cfg(target_arch = "wasm32")]
    {
        // Route panics and log records to the browser console, then spawn.
        let hook = Box::new(console_error_panic_hook::hook);
        std::panic::set_hook(hook);
        console_log::init().expect("could not initialize logger");
        let future = run();
        wasm_bindgen_futures::spawn_local(future);
    }
}

View File

@ -0,0 +1,13 @@
# mipmap
This example shows how to generate and make use of mipmaps.
## To Run
```
cargo run --example mipmap
```
## Screenshots
![Mip maps](./screenshot.png)

View File

@ -0,0 +1,32 @@
struct VertexOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(0)]] tex_coords: vec2<f32>;
};
// Vertex stage for the mip blit: drawn as a 4-vertex triangle strip with no
// vertex buffer; position and texture coordinate come from the index alone.
[[stage(vertex)]]
fn vs_main([[builtin(vertex_index)]] vertex_index: u32) -> VertexOutput {
    var out: VertexOutput;
    // Index -> (x, y) in {0,1}^2; tc then spans 0..2 so the oversized quad
    // fully covers the target after clipping.
    let x = i32(vertex_index) / 2;
    let y = i32(vertex_index) & 1;
    let tc = vec2<f32>(
        f32(x) * 2.0,
        f32(y) * 2.0
    );
    out.position = vec4<f32>(
        tc.x * 2.0 - 1.0,
        1.0 - tc.y * 2.0,
        0.0, 1.0
    );
    out.tex_coords = tc;
    return out;
}
[[group(0), binding(0)]]
var r_color: texture_2d<f32>;
[[group(0), binding(1)]]
var r_sampler: sampler;
// Fragment stage: sample the bound (previous-mip) view at the interpolated
// coordinates.
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    return textureSample(r_color, r_sampler, in.tex_coords);
}

View File

@ -0,0 +1,33 @@
struct VertexOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(0)]] tex_coords: vec2<f32>;
};
// Uniforms: the combined projection * view transform supplied by the host.
[[block]]
struct Locals {
    transform: mat4x4<f32>;
};
[[group(0), binding(0)]]
var r_data: Locals;
// Vertex stage: builds a large textured plane (4-vertex strip, z = 0) from
// the vertex index alone and projects it with the uniform transform.
[[stage(vertex)]]
fn vs_main([[builtin(vertex_index)]] vertex_index: u32) -> VertexOutput {
    // x is +/-100 depending on bit 1; y is 0 or 1000 depending on bit 0.
    let pos = vec2<f32>(
        100.0 * (1.0 - f32(vertex_index & 2u)),
        1000.0 * f32(vertex_index & 1u)
    );
    var out: VertexOutput;
    out.tex_coords = 0.05 * pos + vec2<f32>(0.5, 0.5);
    out.position = r_data.transform * vec4<f32>(pos, 0.0, 1.0);
    return out;
}
[[group(0), binding(1)]]
var r_color: texture_2d<f32>;
[[group(0), binding(2)]]
var r_sampler: sampler;
// Fragment stage: sample the mipmapped texture with the trilinear sampler.
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    return textureSample(r_color, r_sampler, in.tex_coords);
}

View File

@ -0,0 +1,489 @@
#[path = "../framework.rs"]
mod framework;
use bytemuck::{Pod, Zeroable};
use std::{borrow::Cow, mem, num::NonZeroU32};
use wgpu::util::DeviceExt;
const TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
// Mip levels for the (1 << MIP_LEVEL_COUNT) = 512-pixel-square base texture.
const MIP_LEVEL_COUNT: u32 = 9;
// The base level already has data, so one blit pass per remaining level.
const MIP_PASS_COUNT: u32 = MIP_LEVEL_COUNT - 1;
/// Generates RGBA8 texels for a `size` x `size` Julia-set image.
///
/// `cx`/`cy` are the real/imaginary parts of the Julia constant. Each pixel's
/// escape count (capped at 0xFF) is folded into the three color channels;
/// alpha is fully opaque.
fn create_texels(size: usize, cx: f32, cy: f32) -> Vec<u8> {
    let mut texels = Vec::with_capacity(size * size * 4);
    for id in 0..size * size {
        // get high five for recognizing this ;)
        // Map the pixel index onto the plane: x in [-2, 2], y in [-1, 1].
        let mut x = 4.0 * (id % size) as f32 / (size - 1) as f32 - 2.0;
        let mut y = 2.0 * (id / size) as f32 / (size - 1) as f32 - 1.0;
        let mut count = 0;
        while count < 0xFF && x * x + y * y < 4.0 {
            let old_x = x;
            x = x * x - y * y + cx;
            y = 2.0 * old_x * y + cy;
            count += 1;
        }
        // Pack the iteration count into RGB (with wrapping casts), opaque alpha.
        texels.push(0xFF - (count * 2) as u8);
        texels.push(0xFF - (count * 5) as u8);
        texels.push(0xFF - (count * 13) as u8);
        texels.push(std::u8::MAX);
    }
    texels
}
/// GPU query objects used to time the mip passes, plus the buffer the
/// resolved query results are copied into.
struct QuerySets {
    timestamp: wgpu::QuerySet,
    timestamp_period: f32,
    pipeline_statistics: wgpu::QuerySet,
    data_buffer: wgpu::Buffer,
}
// Start/end timestamp pair for one blit pass; #[repr(C)] so the layout
// matches the raw bytes resolved from the query set.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct TimestampData {
    start: u64,
    end: u64,
}
// Layout of the whole query readback buffer: all timestamp pairs first,
// then one pipeline-statistics value per pass.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct QueryData {
    timestamps: [TimestampData; MIP_PASS_COUNT as usize],
    pipeline_queries: [u64; MIP_PASS_COUNT as usize],
}
/// Per-example state kept between frames: the draw pipeline, its bind group,
/// and the uniform buffer holding the view-projection matrix.
struct Example {
    bind_group: wgpu::BindGroup,
    uniform_buf: wgpu::Buffer,
    draw_pipeline: wgpu::RenderPipeline,
}
impl Example {
    /// Builds the combined projection * view matrix (with the GL-to-wgpu
    /// depth correction) for the given aspect ratio.
    fn generate_matrix(aspect_ratio: f32) -> cgmath::Matrix4<f32> {
        let mx_projection = cgmath::perspective(cgmath::Deg(45f32), aspect_ratio, 1.0, 1000.0);
        let mx_view = cgmath::Matrix4::look_at_rh(
            cgmath::Point3::new(0f32, 0.0, 10.0),
            cgmath::Point3::new(0f32, 50.0, 0.0),
            cgmath::Vector3::unit_z(),
        );
        let mx_correction = framework::OPENGL_TO_WGPU_MATRIX;
        mx_correction * mx_projection * mx_view
    }
    /// Fills mip levels 1..`mip_count` of `texture` by blitting each level
    /// into the next with the `blit.wgsl` pipeline, optionally wrapping every
    /// pass in timestamp and pipeline-statistics queries, and finally
    /// resolving those queries into the query data buffer.
    fn generate_mipmaps(
        encoder: &mut wgpu::CommandEncoder,
        device: &wgpu::Device,
        texture: &wgpu::Texture,
        query_sets: &Option<QuerySets>,
        mip_count: u32,
        shader_flags: wgpu::ShaderFlags,
    ) {
        let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
            label: None,
            source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("blit.wgsl"))),
            flags: shader_flags,
        });
        // `layout: None`: the bind group layout is derived from the shader.
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("blit"),
            layout: None,
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "vs_main",
                buffers: &[],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: "fs_main",
                targets: &[TEXTURE_FORMAT.into()],
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleStrip,
                ..Default::default()
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
        });
        let bind_group_layout = pipeline.get_bind_group_layout(0);
        // The linear mag filter does the actual downsampling between levels.
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("mip"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });
        // One single-level view per mip, so each pass can read level N-1
        // while rendering into level N.
        let views = (0..mip_count)
            .map(|mip| {
                texture.create_view(&wgpu::TextureViewDescriptor {
                    label: Some("mip"),
                    format: None,
                    dimension: None,
                    aspect: wgpu::TextureAspect::All,
                    base_mip_level: mip,
                    mip_level_count: NonZeroU32::new(1),
                    base_array_layer: 0,
                    array_layer_count: None,
                })
            })
            .collect::<Vec<_>>();
        for target_mip in 1..mip_count as usize {
            let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                layout: &bind_group_layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&views[target_mip - 1]),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::Sampler(&sampler),
                    },
                ],
                label: None,
            });
            // One statistics slot and two timestamp slots (begin/end) per pass.
            let pipeline_query_index_base = target_mip as u32 - 1;
            let timestamp_query_index_base = (target_mip as u32 - 1) * 2;
            let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[wgpu::RenderPassColorAttachment {
                    view: &views[target_mip],
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color::WHITE),
                        store: true,
                    },
                }],
                depth_stencil_attachment: None,
            });
            if let Some(ref query_sets) = query_sets {
                rpass.write_timestamp(&query_sets.timestamp, timestamp_query_index_base);
                rpass.begin_pipeline_statistics_query(
                    &query_sets.pipeline_statistics,
                    pipeline_query_index_base,
                );
            }
            rpass.set_pipeline(&pipeline);
            rpass.set_bind_group(0, &bind_group, &[]);
            rpass.draw(0..4, 0..1);
            if let Some(ref query_sets) = query_sets {
                rpass.write_timestamp(&query_sets.timestamp, timestamp_query_index_base + 1);
                rpass.end_pipeline_statistics_query();
            }
        }
        if let Some(ref query_sets) = query_sets {
            // Resolve timestamps first, then the statistics values directly
            // after them in the same buffer (matching `QueryData`'s layout).
            let timestamp_query_count = MIP_PASS_COUNT * 2;
            encoder.resolve_query_set(
                &query_sets.timestamp,
                0..timestamp_query_count,
                &query_sets.data_buffer,
                0,
            );
            encoder.resolve_query_set(
                &query_sets.pipeline_statistics,
                0..MIP_PASS_COUNT,
                &query_sets.data_buffer,
                (timestamp_query_count * mem::size_of::<u64>() as u32) as wgpu::BufferAddress,
            );
        }
    }
}
impl framework::Example for Example {
fn optional_features() -> wgpu::Features {
wgpu::Features::TIMESTAMP_QUERY | wgpu::Features::PIPELINE_STATISTICS_QUERY
}
fn init(
sc_desc: &wgpu::SwapChainDescriptor,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self {
let mut init_encoder =
device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
// Create the texture
let size = 1 << MIP_LEVEL_COUNT;
let texels = create_texels(size as usize, -0.8, 0.156);
let texture_extent = wgpu::Extent3d {
width: size,
height: size,
depth_or_array_layers: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
size: texture_extent,
mip_level_count: MIP_LEVEL_COUNT,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: TEXTURE_FORMAT,
usage: wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::RENDER_ATTACHMENT
| wgpu::TextureUsage::COPY_DST,
label: None,
});
let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default());
//Note: we could use queue.write_texture instead, and this is what other
// examples do, but here we want to show another way to do this.
let temp_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Temporary Buffer"),
contents: texels.as_slice(),
usage: wgpu::BufferUsage::COPY_SRC,
});
init_encoder.copy_buffer_to_texture(
wgpu::ImageCopyBuffer {
buffer: &temp_buf,
layout: wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(NonZeroU32::new(4 * size).unwrap()),
rows_per_image: None,
},
},
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
texture_extent,
);
// Create other resources
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label: None,
address_mode_u: wgpu::AddressMode::Repeat,
address_mode_v: wgpu::AddressMode::Repeat,
address_mode_w: wgpu::AddressMode::Repeat,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
..Default::default()
});
let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
let mx_ref: &[f32; 16] = mx_total.as_ref();
let uniform_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Uniform Buffer"),
contents: bytemuck::cast_slice(mx_ref),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
// Create the render pipeline
let mut flags = wgpu::ShaderFlags::VALIDATION;
match adapter.get_info().backend {
wgpu::Backend::Metal | wgpu::Backend::Vulkan => {
flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION
}
_ => (), //TODO
}
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: None,
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("draw.wgsl"))),
flags,
});
let draw_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("draw"),
layout: None,
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[sc_desc.format.into()],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
..Default::default()
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
});
// Create bind group
let bind_group_layout = draw_pipeline.get_bind_group_layout(0);
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: uniform_buf.as_entire_binding(),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&texture_view),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::Sampler(&sampler),
},
],
label: None,
});
// If both kinds of query are supported, use queries
let query_sets = if device
.features()
.contains(wgpu::Features::TIMESTAMP_QUERY | wgpu::Features::PIPELINE_STATISTICS_QUERY)
{
// For N total mips, it takes N - 1 passes to generate them, and we're measuring those.
let mip_passes = MIP_LEVEL_COUNT - 1;
// Create the timestamp query set. We need twice as many queries as we have passes,
// as we need a query at the beginning and at the end of the operation.
let timestamp = device.create_query_set(&wgpu::QuerySetDescriptor {
count: mip_passes * 2,
ty: wgpu::QueryType::Timestamp,
});
// Timestamp queries use an device-specific timestamp unit. We need to figure out how many
// nanoseconds go by for the timestamp to be incremented by one. The period is this value.
let timestamp_period = queue.get_timestamp_period();
// We only need one pipeline statistics query per pass.
let pipeline_statistics = device.create_query_set(&wgpu::QuerySetDescriptor {
count: mip_passes,
ty: wgpu::QueryType::PipelineStatistics(
wgpu::PipelineStatisticsTypes::FRAGMENT_SHADER_INVOCATIONS,
),
});
// This databuffer has to store all of the query results, 2 * passes timestamp queries
// and 1 * passes statistics queries. Each query returns a u64 value.
let data_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("query buffer"),
size: mip_passes as wgpu::BufferAddress
* 3
* mem::size_of::<u64>() as wgpu::BufferAddress,
usage: wgpu::BufferUsage::COPY_DST | wgpu::BufferUsage::MAP_READ,
mapped_at_creation: false,
});
Some(QuerySets {
timestamp,
timestamp_period,
pipeline_statistics,
data_buffer,
})
} else {
None
};
Self::generate_mipmaps(
&mut init_encoder,
&device,
&texture,
&query_sets,
MIP_LEVEL_COUNT,
flags,
);
queue.submit(Some(init_encoder.finish()));
if let Some(ref query_sets) = query_sets {
// We can ignore the future as we're about to wait for the device.
let _ = query_sets
.data_buffer
.slice(..)
.map_async(wgpu::MapMode::Read);
// Wait for device to be done rendering mipmaps
device.poll(wgpu::Maintain::Wait);
// This is guaranteed to be ready.
let view = query_sets.data_buffer.slice(..).get_mapped_range();
// Convert the raw data into a useful structure
let data: &QueryData = bytemuck::from_bytes(&*view);
// Iterate over the data
for (idx, (timestamp, pipeline)) in data
.timestamps
.iter()
.zip(data.pipeline_queries.iter())
.enumerate()
{
// Figure out the timestamp differences and multiply by the period to get nanoseconds
let nanoseconds =
(timestamp.end - timestamp.start) as f32 * query_sets.timestamp_period;
// Nanoseconds is a bit small, so lets use microseconds.
let microseconds = nanoseconds / 1000.0;
// Print the data!
println!(
"Generating mip level {} took {:.3} μs and called the fragment shader {} times",
idx + 1,
microseconds,
pipeline
);
}
}
Example {
bind_group,
uniform_buf,
draw_pipeline,
}
}
fn update(&mut self, _event: winit::event::WindowEvent) {
//empty
}
/// Recomputes the view-projection matrix for the new surface dimensions
/// and uploads it into the uniform buffer used by the draw pipeline.
fn resize(
    &mut self,
    sc_desc: &wgpu::SwapChainDescriptor,
    _device: &wgpu::Device,
    queue: &wgpu::Queue,
) {
    let aspect_ratio = sc_desc.width as f32 / sc_desc.height as f32;
    let matrix = Self::generate_matrix(aspect_ratio);
    let matrix_ref: &[f32; 16] = matrix.as_ref();
    queue.write_buffer(&self.uniform_buf, 0, bytemuck::cast_slice(matrix_ref));
}
/// Records and submits a single render pass that clears the frame and
/// draws the textured quad (4-vertex strip) with the mipmapped texture.
fn render(
    &mut self,
    frame: &wgpu::SwapChainTexture,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    _spawner: &framework::Spawner,
) {
    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    {
        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[wgpu::RenderPassColorAttachment {
                view: &frame.view,
                resolve_target: None,
                ops: wgpu::Operations {
                    // Clear to the same dark blue as before drawing.
                    load: wgpu::LoadOp::Clear(wgpu::Color {
                        r: 0.1,
                        g: 0.2,
                        b: 0.3,
                        a: 1.0,
                    }),
                    store: true,
                },
            }],
            depth_stencil_attachment: None,
        });
        pass.set_pipeline(&self.draw_pipeline);
        pass.set_bind_group(0, &self.bind_group, &[]);
        pass.draw(0..4, 0..1);
    }
    queue.submit(Some(encoder.finish()));
}
}
/// Entry point: runs the mipmap example through the shared example framework.
fn main() {
    framework::run::<Example>("mipmap");
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 MiB

View File

@ -0,0 +1,13 @@
# msaa-line
This example shows how to render lines using MSAA.
## To Run
```
cargo run --example msaa-line
```
## Screenshots
![MSAA line](./screenshot.png)

View File

@ -0,0 +1,292 @@
//! The parts of this example enabling MSAA are:
//! * The render pipeline is created with a sample_count > 1.
//! * A new texture with a sample_count > 1 is created and set as the color_attachment instead of the swapchain.
//! * The swapchain is now specified as a resolve_target.
//!
//! The parts of this example enabling LineList are:
//! * Set the primitive_topology to PrimitiveTopology::LineList.
//! * Vertices and Indices describe the two points that make up a line.
#[path = "../framework.rs"]
mod framework;
use std::{borrow::Cow, iter};
use bytemuck::{Pod, Zeroable};
use wgpu::util::DeviceExt;
/// One line vertex: a 2D clip-space position plus an RGBA color.
/// `#[repr(C)]` together with `Pod`/`Zeroable` lets a `&[Vertex]` be cast
/// directly to bytes when filling the GPU vertex buffer.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct Vertex {
    _pos: [f32; 2],   // clip-space XY position
    _color: [f32; 4], // RGBA color
}
/// State for the MSAA line example. The pipeline and render bundle bake in
/// the sample count, so both are rebuilt when the user changes it.
struct Example {
    bundle: wgpu::RenderBundle,          // pre-recorded draw commands for the lines
    shader: wgpu::ShaderModule,          // kept so the bundle can be rebuilt
    pipeline_layout: wgpu::PipelineLayout, // kept so the pipeline can be rebuilt
    multisampled_framebuffer: wgpu::TextureView, // MSAA color target (resolved into the swap chain)
    vertex_buffer: wgpu::Buffer,
    vertex_count: u32,
    sample_count: u32,   // current MSAA sample count (1 or 4)
    rebuild_bundle: bool, // set when sample_count changed; consumed in render()
    sc_desc: wgpu::SwapChainDescriptor, // cached so resources can be rebuilt outside resize()
}
impl Example {
fn create_bundle(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
shader: &wgpu::ShaderModule,
pipeline_layout: &wgpu::PipelineLayout,
sample_count: u32,
vertex_buffer: &wgpu::Buffer,
vertex_count: u32,
) -> wgpu::RenderBundle {
log::info!("sample_count: {}", sample_count);
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: None,
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: shader,
entry_point: "vs_main",
buffers: &[wgpu::VertexBufferLayout {
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::InputStepMode::Vertex,
attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x4],
}],
},
fragment: Some(wgpu::FragmentState {
module: shader,
entry_point: "fs_main",
targets: &[sc_desc.format.into()],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::LineList,
front_face: wgpu::FrontFace::Ccw,
..Default::default()
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: sample_count,
..Default::default()
},
});
let mut encoder =
device.create_render_bundle_encoder(&wgpu::RenderBundleEncoderDescriptor {
label: None,
color_formats: &[sc_desc.format],
depth_stencil_format: None,
sample_count,
});
encoder.set_pipeline(&pipeline);
encoder.set_vertex_buffer(0, vertex_buffer.slice(..));
encoder.draw(0..vertex_count, 0..1);
encoder.finish(&wgpu::RenderBundleDescriptor {
label: Some("main"),
})
}
fn create_multisampled_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> wgpu::TextureView {
let multisampled_texture_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth_or_array_layers: 1,
};
let multisampled_frame_descriptor = &wgpu::TextureDescriptor {
size: multisampled_texture_extent,
mip_level_count: 1,
sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
label: None,
};
device
.create_texture(multisampled_frame_descriptor)
.create_view(&wgpu::TextureViewDescriptor::default())
}
}
impl framework::Example for Example {
/// Builds all GPU resources for the MSAA-line example: shader, pipeline
/// layout, the multisampled color target, the line vertex buffer, and the
/// initial render bundle (recorded at 4x MSAA).
fn init(
    sc_desc: &wgpu::SwapChainDescriptor,
    adapter: &wgpu::Adapter,
    device: &wgpu::Device,
    _queue: &wgpu::Queue,
) -> Self {
    log::info!("Press left/right arrow keys to change sample_count.");
    let sample_count = 4;
    // Enable extra naga translation only on backends known to handle it.
    let mut flags = wgpu::ShaderFlags::VALIDATION;
    match adapter.get_info().backend {
        wgpu::Backend::Metal | wgpu::Backend::Vulkan => {
            flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION
        }
        _ => (), //TODO
    }
    let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
        label: None,
        source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
        flags,
    });
    // No bind groups: the shader only consumes vertex attributes.
    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: None,
        bind_group_layouts: &[],
        push_constant_ranges: &[],
    });
    let multisampled_framebuffer =
        Example::create_multisampled_framebuffer(device, sc_desc, sample_count);
    // Generate `max` line segments fanning out from the origin around a circle,
    // colored by angle.
    let mut vertex_data = vec![];
    let max = 50;
    for i in 0..max {
        let percent = i as f32 / max as f32;
        let (sin, cos) = (percent * 2.0 * std::f32::consts::PI).sin_cos();
        vertex_data.push(Vertex {
            _pos: [0.0, 0.0],
            _color: [1.0, -sin, cos, 1.0],
        });
        vertex_data.push(Vertex {
            _pos: [1.0 * cos, 1.0 * sin],
            _color: [sin, -cos, 1.0, 1.0],
        });
    }
    let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Vertex Buffer"),
        contents: bytemuck::cast_slice(&vertex_data),
        usage: wgpu::BufferUsage::VERTEX,
    });
    let vertex_count = vertex_data.len() as u32;
    let bundle = Example::create_bundle(
        device,
        &sc_desc,
        &shader,
        &pipeline_layout,
        sample_count,
        &vertex_buffer,
        vertex_count,
    );
    Example {
        bundle,
        shader,
        pipeline_layout,
        multisampled_framebuffer,
        vertex_buffer,
        vertex_count,
        sample_count,
        rebuild_bundle: false,
        // Cached so render() can rebuild resources when sample_count changes.
        sc_desc: sc_desc.clone(),
    }
}
/// Toggles the MSAA sample count between 1 and 4 via the left/right arrow
/// keys; the actual resource rebuild is deferred to `render()`.
fn update(&mut self, event: winit::event::WindowEvent) {
    if let winit::event::WindowEvent::KeyboardInput { input, .. } = event {
        if let winit::event::ElementState::Pressed = input.state {
            // TODO: Switch back to full scans of possible options when we expose
            // supported sample counts to the user.
            match input.virtual_keycode {
                Some(winit::event::VirtualKeyCode::Left) if self.sample_count == 4 => {
                    self.sample_count = 1;
                    self.rebuild_bundle = true;
                }
                Some(winit::event::VirtualKeyCode::Right) if self.sample_count == 1 => {
                    self.sample_count = 4;
                    self.rebuild_bundle = true;
                }
                _ => {}
            }
        }
    }
}
/// Remembers the new swap-chain description and recreates the MSAA color
/// target to match the new dimensions.
fn resize(
    &mut self,
    sc_desc: &wgpu::SwapChainDescriptor,
    device: &wgpu::Device,
    _queue: &wgpu::Queue,
) {
    let framebuffer =
        Example::create_multisampled_framebuffer(device, sc_desc, self.sample_count);
    self.multisampled_framebuffer = framebuffer;
    self.sc_desc = sc_desc.clone();
}
/// Rebuilds sample-count-dependent resources if needed, then replays the
/// render bundle into a pass that either draws directly to the frame
/// (sample_count == 1) or draws into the MSAA target and resolves it.
fn render(
    &mut self,
    frame: &wgpu::SwapChainTexture,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    _spawner: &framework::Spawner,
) {
    if self.rebuild_bundle {
        self.multisampled_framebuffer =
            Example::create_multisampled_framebuffer(device, &self.sc_desc, self.sample_count);
        self.bundle = Example::create_bundle(
            device,
            &self.sc_desc,
            &self.shader,
            &self.pipeline_layout,
            self.sample_count,
            &self.vertex_buffer,
            self.vertex_count,
        );
        self.rebuild_bundle = false;
    }
    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    {
        let ops = wgpu::Operations {
            load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
            store: true,
        };
        let color_attachment = match self.sample_count {
            // No MSAA: render straight into the swap-chain view.
            1 => wgpu::RenderPassColorAttachment {
                view: &frame.view,
                resolve_target: None,
                ops,
            },
            // MSAA: render into the multisampled target, resolve into the frame.
            _ => wgpu::RenderPassColorAttachment {
                view: &self.multisampled_framebuffer,
                resolve_target: Some(&frame.view),
                ops,
            },
        };
        encoder
            .begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[color_attachment],
                depth_stencil_attachment: None,
            })
            .execute_bundles(iter::once(&self.bundle));
    }
    queue.submit(iter::once(encoder.finish()));
}
}
/// Entry point: runs the MSAA line example through the shared framework.
fn main() {
    framework::run::<Example>("msaa-line");
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 179 KiB

View File

@ -0,0 +1,20 @@
// Data passed from the vertex stage to the fragment stage.
struct VertexOutput {
    [[location(0)]] color: vec4<f32>;
    [[builtin(position)]] position: vec4<f32>;
};

// Pass-through vertex shader: lifts the 2D position into clip space and
// forwards the per-vertex color unchanged.
[[stage(vertex)]]
fn vs_main(
    [[location(0)]] position: vec2<f32>,
    [[location(1)]] color: vec4<f32>,
) -> VertexOutput {
    var out: VertexOutput;
    out.position = vec4<f32>(position, 0.0, 1.0);
    out.color = color;
    return out;
}

// Fragment shader: emits the interpolated vertex color.
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    return in.color;
}

View File

@ -0,0 +1,13 @@
# shadow
This animated example demonstrates shadow mapping.
## To Run
```
cargo run --example shadow
```
## Screenshots
![Shadow mapping](./screenshot.png)

View File

@ -0,0 +1,831 @@
use std::{borrow::Cow, iter, mem, num::NonZeroU32, ops::Range, rc::Rc};
#[path = "../framework.rs"]
mod framework;
use bytemuck::{Pod, Zeroable};
use wgpu::util::DeviceExt;
/// Cube/plane vertex: homogeneous position and normal packed as signed
/// bytes (the shader receives them as `vec4<i32>`). `#[repr(C)]` plus
/// Pod/Zeroable allow direct byte-casting into the vertex buffer.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct Vertex {
    _pos: [i8; 4],    // xyz position, w = 1
    _normal: [i8; 4], // xyz normal, w = 0
}
/// Helper: builds a `Vertex` from a 3-component position and normal,
/// appending the homogeneous coordinate (1 for positions, 0 for directions).
fn vertex(pos: [i8; 3], nor: [i8; 3]) -> Vertex {
    let [px, py, pz] = pos;
    let [nx, ny, nz] = nor;
    Vertex {
        _pos: [px, py, pz, 1],
        _normal: [nx, ny, nz, 0],
    }
}
/// Builds the unit-ish cube mesh (side length 2, centered at the origin):
/// 4 vertices per face with per-face normals, plus two CCW triangles per
/// face in the index list.
fn create_cube() -> (Vec<Vertex>, Vec<u16>) {
    let vertex_data = [
        // top (0, 0, 1)
        vertex([-1, -1, 1], [0, 0, 1]),
        vertex([1, -1, 1], [0, 0, 1]),
        vertex([1, 1, 1], [0, 0, 1]),
        vertex([-1, 1, 1], [0, 0, 1]),
        // bottom (0, 0, -1)
        vertex([-1, 1, -1], [0, 0, -1]),
        vertex([1, 1, -1], [0, 0, -1]),
        vertex([1, -1, -1], [0, 0, -1]),
        vertex([-1, -1, -1], [0, 0, -1]),
        // right (1, 0, 0)
        vertex([1, -1, -1], [1, 0, 0]),
        vertex([1, 1, -1], [1, 0, 0]),
        vertex([1, 1, 1], [1, 0, 0]),
        vertex([1, -1, 1], [1, 0, 0]),
        // left (-1, 0, 0)
        vertex([-1, -1, 1], [-1, 0, 0]),
        vertex([-1, 1, 1], [-1, 0, 0]),
        vertex([-1, 1, -1], [-1, 0, 0]),
        vertex([-1, -1, -1], [-1, 0, 0]),
        // front (0, 1, 0)
        vertex([1, 1, -1], [0, 1, 0]),
        vertex([-1, 1, -1], [0, 1, 0]),
        vertex([-1, 1, 1], [0, 1, 0]),
        vertex([1, 1, 1], [0, 1, 0]),
        // back (0, -1, 0)
        vertex([1, -1, 1], [0, -1, 0]),
        vertex([-1, -1, 1], [0, -1, 0]),
        vertex([-1, -1, -1], [0, -1, 0]),
        vertex([1, -1, -1], [0, -1, 0]),
    ];
    // Two triangles per face, indexing into the 4 vertices of that face.
    let index_data: &[u16] = &[
        0, 1, 2, 2, 3, 0, // top
        4, 5, 6, 6, 7, 4, // bottom
        8, 9, 10, 10, 11, 8, // right
        12, 13, 14, 14, 15, 12, // left
        16, 17, 18, 18, 19, 16, // front
        20, 21, 22, 22, 23, 20, // back
    ];
    (vertex_data.to_vec(), index_data.to_vec())
}
/// Builds a square ground plane of half-extent `size` in the XY plane
/// (z = 0) with normals pointing up (+Z), as two triangles.
fn create_plane(size: i8) -> (Vec<Vertex>, Vec<u16>) {
    let s = size;
    let up = [0, 0, 1];
    let vertices = vec![
        vertex([s, -s, 0], up),
        vertex([s, s, 0], up),
        vertex([-s, -s, 0], up),
        vertex([-s, s, 0], up),
    ];
    let indices: Vec<u16> = vec![0, 1, 2, 2, 1, 3];
    (vertices, indices)
}
/// One drawable object in the scene (the plane or a cube).
struct Entity {
    mx_world: cgmath::Matrix4<f32>, // model-to-world transform, animated per frame
    rotation_speed: f32,            // degrees per frame around X; 0 = static
    color: wgpu::Color,
    // Buffers are shared between the cubes via Rc.
    vertex_buf: Rc<wgpu::Buffer>,
    index_buf: Rc<wgpu::Buffer>,
    index_format: wgpu::IndexFormat,
    index_count: usize,
    // Dynamic offset into the shared per-entity uniform buffer.
    uniform_offset: wgpu::DynamicOffset,
}
/// A spot-style shadow-casting light.
struct Light {
    pos: cgmath::Point3<f32>,
    color: wgpu::Color,
    fov: f32,          // vertical field of view of the light's projection, degrees
    depth: Range<f32>, // near..far planes of the light's projection
    // View into this light's layer of the shadow texture array.
    target_view: wgpu::TextureView,
}
/// GPU-side layout of one light as stored in the lights storage buffer;
/// must match the `Light` struct in shader.wgsl (stride 96 bytes).
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct LightRaw {
    proj: [[f32; 4]; 4], // view-projection matrix from the light's viewpoint
    pos: [f32; 4],
    color: [f32; 4],
}
impl Light {
    /// Converts the light into its GPU representation: a perspective
    /// view-projection from the light's position looking at the origin,
    /// plus position and color as vec4s.
    fn to_raw(&self) -> LightRaw {
        use cgmath::{Deg, EuclideanSpace, Matrix4, PerspectiveFov, Point3, Vector3};
        let mx_view = Matrix4::look_at_rh(self.pos, Point3::origin(), Vector3::unit_z());
        // Square aspect: the shadow map layers are 512x512.
        let projection = PerspectiveFov {
            fovy: Deg(self.fov).into(),
            aspect: 1.0,
            near: self.depth.start,
            far: self.depth.end,
        };
        // Remap OpenGL-style clip space to wgpu's (0..1 depth).
        let mx_correction = framework::OPENGL_TO_WGPU_MATRIX;
        let mx_view_proj =
            mx_correction * cgmath::Matrix4::from(projection.to_perspective()) * mx_view;
        LightRaw {
            proj: *mx_view_proj.as_ref(),
            pos: [self.pos.x, self.pos.y, self.pos.z, 1.0],
            color: [
                self.color.r as f32,
                self.color.g as f32,
                self.color.b as f32,
                1.0,
            ],
        }
    }
}
/// Per-pass globals; matches the `Globals` uniform block in shader.wgsl.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct GlobalUniforms {
    proj: [[f32; 4]; 4],  // view-projection matrix of the current pass
    num_lights: [u32; 4], // only .x is used; padded to 16 bytes for uniform layout
}
/// Per-entity uniforms; matches the `Entity` uniform block in shader.wgsl.
/// Written at dynamic offsets into one shared buffer.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct EntityUniforms {
    model: [[f32; 4]; 4], // model-to-world matrix
    color: [f32; 4],
}
/// Resources for one render pass variant (shadow baking or forward shading).
struct Pass {
    pipeline: wgpu::RenderPipeline,
    bind_group: wgpu::BindGroup,
    uniform_buf: wgpu::Buffer, // holds this pass's GlobalUniforms
}
/// State for the shadow-mapping example.
struct Example {
    entities: Vec<Entity>,
    lights: Vec<Light>,
    lights_are_dirty: bool, // when set, light data is re-uploaded on the next frame
    shadow_pass: Pass,      // depth-only pass rendered once per light
    forward_pass: Pass,     // shaded pass sampling the shadow maps
    forward_depth: wgpu::TextureView, // depth buffer for the forward pass
    entity_bind_group: wgpu::BindGroup,
    light_storage_buf: wgpu::Buffer,  // storage buffer of LightRaw entries
    entity_uniform_buf: wgpu::Buffer, // dynamic-offset buffer of EntityUniforms
}
impl Example {
    // Capacity of the light storage buffer and of the shadow texture array.
    const MAX_LIGHTS: usize = 10;
    const SHADOW_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
    // One 512x512 depth layer per potential light.
    const SHADOW_SIZE: wgpu::Extent3d = wgpu::Extent3d {
        width: 512,
        height: 512,
        depth_or_array_layers: Self::MAX_LIGHTS as u32,
    };
    const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;

    /// Builds the camera view-projection matrix for the forward pass,
    /// corrected from OpenGL clip space to wgpu's.
    fn generate_matrix(aspect_ratio: f32) -> cgmath::Matrix4<f32> {
        let mx_projection = cgmath::perspective(cgmath::Deg(45f32), aspect_ratio, 1.0, 20.0);
        let mx_view = cgmath::Matrix4::look_at_rh(
            cgmath::Point3::new(3.0f32, -10.0, 6.0),
            cgmath::Point3::new(0f32, 0.0, 0.0),
            cgmath::Vector3::unit_z(),
        );
        let mx_correction = framework::OPENGL_TO_WGPU_MATRIX;
        mx_correction * mx_projection * mx_view
    }

    /// Creates the swap-chain-sized depth buffer used by the forward pass.
    fn create_depth_texture(
        sc_desc: &wgpu::SwapChainDescriptor,
        device: &wgpu::Device,
    ) -> wgpu::TextureView {
        let depth_texture = device.create_texture(&wgpu::TextureDescriptor {
            size: wgpu::Extent3d {
                width: sc_desc.width,
                height: sc_desc.height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: Self::DEPTH_FORMAT,
            usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
            label: None,
        });
        depth_texture.create_view(&wgpu::TextureViewDescriptor::default())
    }
}
impl framework::Example for Example {
/// Depth clamping improves shadow quality but is not required; the shadow
/// pipeline checks at init time whether the device actually has it.
fn optional_features() -> wgpu::Features {
    wgpu::Features::DEPTH_CLAMPING
}
/// Builds the whole shadow-example scene: cube/plane meshes, per-entity
/// uniforms, the shadow texture array with one view per light, the light
/// storage buffer, and the two render passes (depth-only shadow bake and
/// the forward shading pass).
fn init(
    sc_desc: &wgpu::SwapChainDescriptor,
    adapter: &wgpu::Adapter,
    device: &wgpu::Device,
    _queue: &wgpu::Queue,
) -> Self {
    // Create the vertex and index buffers
    let vertex_size = mem::size_of::<Vertex>();
    let (cube_vertex_data, cube_index_data) = create_cube();
    // Cube buffers are wrapped in Rc so all four cube entities can share them.
    let cube_vertex_buf = Rc::new(device.create_buffer_init(
        &wgpu::util::BufferInitDescriptor {
            label: Some("Cubes Vertex Buffer"),
            contents: bytemuck::cast_slice(&cube_vertex_data),
            usage: wgpu::BufferUsage::VERTEX,
        },
    ));
    let cube_index_buf = Rc::new(device.create_buffer_init(
        &wgpu::util::BufferInitDescriptor {
            label: Some("Cubes Index Buffer"),
            contents: bytemuck::cast_slice(&cube_index_data),
            usage: wgpu::BufferUsage::INDEX,
        },
    ));
    let (plane_vertex_data, plane_index_data) = create_plane(7);
    let plane_vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Plane Vertex Buffer"),
        contents: bytemuck::cast_slice(&plane_vertex_data),
        usage: wgpu::BufferUsage::VERTEX,
    });
    let plane_index_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Plane Index Buffer"),
        contents: bytemuck::cast_slice(&plane_index_data),
        usage: wgpu::BufferUsage::INDEX,
    });
    // Placement/animation parameters for the four cubes.
    struct CubeDesc {
        offset: cgmath::Vector3<f32>,
        angle: f32,
        scale: f32,
        rotation: f32,
    }
    let cube_descs = [
        CubeDesc {
            offset: cgmath::vec3(-2.0, -2.0, 2.0),
            angle: 10.0,
            scale: 0.7,
            rotation: 0.1,
        },
        CubeDesc {
            offset: cgmath::vec3(2.0, -2.0, 2.0),
            angle: 50.0,
            scale: 1.3,
            rotation: 0.2,
        },
        CubeDesc {
            offset: cgmath::vec3(-2.0, 2.0, 2.0),
            angle: 140.0,
            scale: 1.1,
            rotation: 0.3,
        },
        CubeDesc {
            offset: cgmath::vec3(2.0, 2.0, 2.0),
            angle: 210.0,
            scale: 0.9,
            rotation: 0.4,
        },
    ];
    let entity_uniform_size = mem::size_of::<EntityUniforms>() as wgpu::BufferAddress;
    // One uniform slot for the plane plus one per cube.
    let num_entities = 1 + cube_descs.len() as wgpu::BufferAddress;
    // Each entity gets one aligned slot, so EntityUniforms must fit in it.
    assert!(entity_uniform_size <= wgpu::BIND_BUFFER_ALIGNMENT);
    //Note: dynamic offsets also have to be aligned to `BIND_BUFFER_ALIGNMENT`.
    let entity_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
        label: None,
        size: num_entities * wgpu::BIND_BUFFER_ALIGNMENT,
        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    let index_format = wgpu::IndexFormat::Uint16;
    // Entity 0 is the static ground plane at the origin.
    let mut entities = vec![{
        use cgmath::SquareMatrix;
        Entity {
            mx_world: cgmath::Matrix4::identity(),
            rotation_speed: 0.0,
            color: wgpu::Color::WHITE,
            vertex_buf: Rc::new(plane_vertex_buf),
            index_buf: Rc::new(plane_index_buf),
            index_format,
            index_count: plane_index_data.len(),
            uniform_offset: 0,
        }
    }];
    for (i, cube) in cube_descs.iter().enumerate() {
        use cgmath::{Decomposed, Deg, InnerSpace, Quaternion, Rotation3};
        let transform = Decomposed {
            disp: cube.offset,
            rot: Quaternion::from_axis_angle(cube.offset.normalize(), Deg(cube.angle)),
            scale: cube.scale,
        };
        entities.push(Entity {
            mx_world: cgmath::Matrix4::from(transform),
            rotation_speed: cube.rotation,
            color: wgpu::Color::GREEN,
            vertex_buf: Rc::clone(&cube_vertex_buf),
            index_buf: Rc::clone(&cube_index_buf),
            index_format,
            index_count: cube_index_data.len(),
            // Slot i+1 of the entity uniform buffer (slot 0 is the plane).
            uniform_offset: ((i + 1) * wgpu::BIND_BUFFER_ALIGNMENT as usize) as _,
        });
    }
    // Bind group layout shared by both passes: per-entity uniforms bound at
    // a dynamic offset.
    let local_bind_group_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: true,
                    min_binding_size: wgpu::BufferSize::new(entity_uniform_size),
                },
                count: None,
            }],
            label: None,
        });
    let entity_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout: &local_bind_group_layout,
        entries: &[wgpu::BindGroupEntry {
            binding: 0,
            resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                buffer: &entity_uniform_buf,
                offset: 0,
                size: wgpu::BufferSize::new(entity_uniform_size),
            }),
        }],
        label: None,
    });
    // Create other resources
    // Comparison sampler for hardware PCF shadow lookups.
    let shadow_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        label: Some("shadow"),
        address_mode_u: wgpu::AddressMode::ClampToEdge,
        address_mode_v: wgpu::AddressMode::ClampToEdge,
        address_mode_w: wgpu::AddressMode::ClampToEdge,
        mag_filter: wgpu::FilterMode::Linear,
        min_filter: wgpu::FilterMode::Linear,
        mipmap_filter: wgpu::FilterMode::Nearest,
        compare: Some(wgpu::CompareFunction::LessEqual),
        ..Default::default()
    });
    // Depth texture array with one layer per potential light.
    let shadow_texture = device.create_texture(&wgpu::TextureDescriptor {
        size: Self::SHADOW_SIZE,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: Self::SHADOW_FORMAT,
        usage: wgpu::TextureUsage::RENDER_ATTACHMENT | wgpu::TextureUsage::SAMPLED,
        label: None,
    });
    let shadow_view = shadow_texture.create_view(&wgpu::TextureViewDescriptor::default());
    // Only two views because the scene below creates exactly two lights;
    // each view targets a single array layer for rendering.
    let mut shadow_target_views = (0..2)
        .map(|i| {
            Some(shadow_texture.create_view(&wgpu::TextureViewDescriptor {
                label: Some("shadow"),
                format: None,
                dimension: Some(wgpu::TextureViewDimension::D2),
                aspect: wgpu::TextureAspect::All,
                base_mip_level: 0,
                mip_level_count: None,
                base_array_layer: i as u32,
                array_layer_count: NonZeroU32::new(1),
            }))
        })
        .collect::<Vec<_>>();
    let lights = vec![
        Light {
            pos: cgmath::Point3::new(7.0, -5.0, 10.0),
            color: wgpu::Color {
                r: 0.5,
                g: 1.0,
                b: 0.5,
                a: 1.0,
            },
            fov: 60.0,
            depth: 1.0..20.0,
            target_view: shadow_target_views[0].take().unwrap(),
        },
        Light {
            pos: cgmath::Point3::new(-5.0, 7.0, 10.0),
            color: wgpu::Color {
                r: 1.0,
                g: 0.5,
                b: 0.5,
                a: 1.0,
            },
            fov: 45.0,
            depth: 1.0..20.0,
            target_view: shadow_target_views[1].take().unwrap(),
        },
    ];
    // Sized for MAX_LIGHTS even though only two are populated.
    let light_uniform_size =
        (Self::MAX_LIGHTS * mem::size_of::<LightRaw>()) as wgpu::BufferAddress;
    let light_storage_buf = device.create_buffer(&wgpu::BufferDescriptor {
        label: None,
        size: light_uniform_size,
        usage: wgpu::BufferUsage::STORAGE
            | wgpu::BufferUsage::COPY_SRC
            | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    let vertex_attr = wgpu::vertex_attr_array![0 => Sint8x4, 1 => Sint8x4];
    let vb_desc = wgpu::VertexBufferLayout {
        array_stride: vertex_size as wgpu::BufferAddress,
        step_mode: wgpu::InputStepMode::Vertex,
        attributes: &vertex_attr,
    };
    // Enable extra naga translation only on backends known to handle it.
    let mut flags = wgpu::ShaderFlags::VALIDATION;
    match adapter.get_info().backend {
        wgpu::Backend::Metal | wgpu::Backend::Vulkan => {
            flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION;
        }
        _ => (), //TODO
    }
    let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
        label: None,
        source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
        flags,
    });
    // Depth-only pass that bakes one shadow map layer per light.
    let shadow_pass = {
        let uniform_size = mem::size_of::<GlobalUniforms>() as wgpu::BufferAddress;
        // Create pipeline layout
        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: None,
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0, // global
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: wgpu::BufferSize::new(uniform_size),
                    },
                    count: None,
                }],
            });
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("shadow"),
            bind_group_layouts: &[&bind_group_layout, &local_bind_group_layout],
            push_constant_ranges: &[],
        });
        let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
            label: None,
            size: uniform_size,
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
            mapped_at_creation: false,
        });
        // Create bind group
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: uniform_buf.as_entire_binding(),
            }],
            label: None,
        });
        // Create the render pipeline
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("shadow"),
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "vs_bake",
                buffers: &[vb_desc.clone()],
            },
            // No fragment stage: only depth output is needed.
            fragment: None,
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: Some(wgpu::Face::Back),
                // Use depth clamping only if the device supports the
                // optional feature requested above.
                clamp_depth: device.features().contains(wgpu::Features::DEPTH_CLAMPING),
                ..Default::default()
            },
            depth_stencil: Some(wgpu::DepthStencilState {
                format: Self::SHADOW_FORMAT,
                depth_write_enabled: true,
                depth_compare: wgpu::CompareFunction::LessEqual,
                stencil: wgpu::StencilState::default(),
                bias: wgpu::DepthBiasState {
                    constant: 2, // corresponds to bilinear filtering
                    slope_scale: 2.0,
                    clamp: 0.0,
                },
            }),
            multisample: wgpu::MultisampleState::default(),
        });
        Pass {
            pipeline,
            bind_group,
            uniform_buf,
        }
    };
    // Forward pass: shades the scene sampling the baked shadow maps.
    let forward_pass = {
        // Create pipeline layout
        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0, // global
                        visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: wgpu::BufferSize::new(
                                mem::size_of::<GlobalUniforms>() as _,
                            ),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1, // lights
                        visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: true },
                            has_dynamic_offset: false,
                            min_binding_size: wgpu::BufferSize::new(light_uniform_size),
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            multisampled: false,
                            sample_type: wgpu::TextureSampleType::Depth,
                            view_dimension: wgpu::TextureViewDimension::D2Array,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 3,
                        visibility: wgpu::ShaderStage::FRAGMENT,
                        ty: wgpu::BindingType::Sampler {
                            comparison: true,
                            filtering: true,
                        },
                        count: None,
                    },
                ],
                label: None,
            });
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("main"),
            bind_group_layouts: &[&bind_group_layout, &local_bind_group_layout],
            push_constant_ranges: &[],
        });
        let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
        let forward_uniforms = GlobalUniforms {
            proj: *mx_total.as_ref(),
            num_lights: [lights.len() as u32, 0, 0, 0],
        };
        let uniform_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Uniform Buffer"),
            contents: bytemuck::bytes_of(&forward_uniforms),
            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        });
        // Create bind group
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: uniform_buf.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: light_storage_buf.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::TextureView(&shadow_view),
                },
                wgpu::BindGroupEntry {
                    binding: 3,
                    resource: wgpu::BindingResource::Sampler(&shadow_sampler),
                },
            ],
            label: None,
        });
        // Create the render pipeline
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("main"),
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "vs_main",
                buffers: &[vb_desc],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: "fs_main",
                targets: &[sc_desc.format.into()],
            }),
            primitive: wgpu::PrimitiveState {
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: Some(wgpu::Face::Back),
                ..Default::default()
            },
            depth_stencil: Some(wgpu::DepthStencilState {
                format: Self::DEPTH_FORMAT,
                depth_write_enabled: true,
                depth_compare: wgpu::CompareFunction::Less,
                stencil: wgpu::StencilState::default(),
                bias: wgpu::DepthBiasState::default(),
            }),
            multisample: wgpu::MultisampleState::default(),
        });
        Pass {
            pipeline,
            bind_group,
            uniform_buf,
        }
    };
    let forward_depth = Self::create_depth_texture(sc_desc, device);
    Example {
        entities,
        lights,
        // Force the first frame to upload the light data.
        lights_are_dirty: true,
        shadow_pass,
        forward_pass,
        forward_depth,
        light_storage_buf,
        entity_uniform_buf,
        entity_bind_group,
    }
}
/// Handles window events. The shadow example has no interactive input,
/// so this is intentionally a no-op.
fn update(&mut self, _event: winit::event::WindowEvent) {
    //empty
}
/// Rebuilds the size-dependent resources: a new camera projection (written
/// into the forward pass's uniform buffer) and a matching depth buffer.
fn resize(
    &mut self,
    sc_desc: &wgpu::SwapChainDescriptor,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) {
    self.forward_depth = Self::create_depth_texture(sc_desc, device);
    // update view-projection matrix
    let aspect_ratio = sc_desc.width as f32 / sc_desc.height as f32;
    let matrix = Self::generate_matrix(aspect_ratio);
    let matrix_ref: &[f32; 16] = matrix.as_ref();
    queue.write_buffer(
        &self.forward_pass.uniform_buf,
        0,
        bytemuck::cast_slice(matrix_ref),
    );
}
/// Per-frame work: animates the entities, re-uploads dirty uniforms, bakes
/// one shadow map per light, then draws the shaded forward pass.
fn render(
    &mut self,
    frame: &wgpu::SwapChainTexture,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    _spawner: &framework::Spawner,
) {
    // update uniforms
    for entity in self.entities.iter_mut() {
        // Spin each cube around the X axis; the plane has speed 0.
        if entity.rotation_speed != 0.0 {
            let rotation = cgmath::Matrix4::from_angle_x(cgmath::Deg(entity.rotation_speed));
            entity.mx_world = entity.mx_world * rotation;
        }
        let data = EntityUniforms {
            model: entity.mx_world.into(),
            color: [
                entity.color.r as f32,
                entity.color.g as f32,
                entity.color.b as f32,
                entity.color.a as f32,
            ],
        };
        // Write into this entity's dynamic-offset slot.
        queue.write_buffer(
            &self.entity_uniform_buf,
            entity.uniform_offset as wgpu::BufferAddress,
            bytemuck::bytes_of(&data),
        );
    }
    if self.lights_are_dirty {
        self.lights_are_dirty = false;
        for (i, light) in self.lights.iter().enumerate() {
            queue.write_buffer(
                &self.light_storage_buf,
                (i * mem::size_of::<LightRaw>()) as wgpu::BufferAddress,
                bytemuck::bytes_of(&light.to_raw()),
            );
        }
    }
    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    encoder.push_debug_group("shadow passes");
    for (i, light) in self.lights.iter().enumerate() {
        encoder.push_debug_group(&format!(
            "shadow pass {} (light at position {:?})",
            i, light.pos
        ));
        // The light uniform buffer already has the projection,
        // let's just copy it over to the shadow uniform buffer.
        // (64 bytes = the leading mat4x4<f32> of LightRaw / GlobalUniforms.)
        encoder.copy_buffer_to_buffer(
            &self.light_storage_buf,
            (i * mem::size_of::<LightRaw>()) as wgpu::BufferAddress,
            &self.shadow_pass.uniform_buf,
            0,
            64,
        );
        encoder.insert_debug_marker("render entities");
        {
            // Depth-only pass into this light's shadow map layer.
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: None,
                color_attachments: &[],
                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                    view: &light.target_view,
                    depth_ops: Some(wgpu::Operations {
                        load: wgpu::LoadOp::Clear(1.0),
                        store: true,
                    }),
                    stencil_ops: None,
                }),
            });
            pass.set_pipeline(&self.shadow_pass.pipeline);
            pass.set_bind_group(0, &self.shadow_pass.bind_group, &[]);
            for entity in &self.entities {
                pass.set_bind_group(1, &self.entity_bind_group, &[entity.uniform_offset]);
                pass.set_index_buffer(entity.index_buf.slice(..), entity.index_format);
                pass.set_vertex_buffer(0, entity.vertex_buf.slice(..));
                pass.draw_indexed(0..entity.index_count as u32, 0, 0..1);
            }
        }
        encoder.pop_debug_group();
    }
    encoder.pop_debug_group();
    // forward pass
    encoder.push_debug_group("forward rendering pass");
    {
        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[wgpu::RenderPassColorAttachment {
                view: &frame.view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color {
                        r: 0.1,
                        g: 0.2,
                        b: 0.3,
                        a: 1.0,
                    }),
                    store: true,
                },
            }],
            depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                view: &self.forward_depth,
                depth_ops: Some(wgpu::Operations {
                    load: wgpu::LoadOp::Clear(1.0),
                    // Depth is not needed after the pass; skip storing it.
                    store: false,
                }),
                stencil_ops: None,
            }),
        });
        pass.set_pipeline(&self.forward_pass.pipeline);
        pass.set_bind_group(0, &self.forward_pass.bind_group, &[]);
        for entity in &self.entities {
            pass.set_bind_group(1, &self.entity_bind_group, &[entity.uniform_offset]);
            pass.set_index_buffer(entity.index_buf.slice(..), entity.index_format);
            pass.set_vertex_buffer(0, entity.vertex_buf.slice(..));
            pass.draw_indexed(0..entity.index_count as u32, 0, 0..1);
        }
    }
    encoder.pop_debug_group();
    queue.submit(iter::once(encoder.finish()));
}
}
/// Entry point: runs the shadow-mapping example through the shared framework.
fn main() {
    framework::run::<Example>("shadow");
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 277 KiB

View File

@ -0,0 +1,104 @@
// Per-pass globals; must match `GlobalUniforms` on the Rust side.
[[block]]
struct Globals {
    view_proj: mat4x4<f32>;
    num_lights: vec4<u32>; // only .x is used
};
[[group(0), binding(0)]]
var<uniform> u_globals: Globals;

// Per-entity data; must match `EntityUniforms` on the Rust side,
// bound at a dynamic offset.
[[block]]
struct Entity {
    world: mat4x4<f32>;
    color: vec4<f32>;
};
[[group(1), binding(0)]]
var<uniform> u_entity: Entity;

// Shadow-bake vertex stage: position only; the pass has no fragment stage.
[[stage(vertex)]]
fn vs_bake([[location(0)]] position: vec4<i32>) -> [[builtin(position)]] vec4<f32> {
    return u_globals.view_proj * u_entity.world * vec4<f32>(position);
}

struct VertexOutput {
    [[builtin(position)]] proj_position: vec4<f32>;
    [[location(0)]] world_normal: vec3<f32>;
    [[location(1)]] world_position: vec4<f32>;
};

// Forward-pass vertex stage: transforms position and normal to world space.
[[stage(vertex)]]
fn vs_main(
    [[location(0)]] position: vec4<i32>,
    [[location(1)]] normal: vec4<i32>,
) -> VertexOutput {
    let w = u_entity.world;
    let world_pos = u_entity.world * vec4<f32>(position);
    var out: VertexOutput;
    // Rotate the normal by the upper-left 3x3 of the world matrix.
    out.world_normal = mat3x3<f32>(w.x.xyz, w.y.xyz, w.z.xyz) * vec3<f32>(normal.xyz);
    out.world_position = world_pos;
    out.proj_position = u_globals.view_proj * world_pos;
    return out;
}

// fragment shader

// Must match `LightRaw` on the Rust side (stride 96 bytes).
struct Light {
    proj: mat4x4<f32>;
    pos: vec4<f32>;
    color: vec4<f32>;
};
[[block]]
struct Lights {
    data: [[stride(96)]] array<Light>;
};
[[group(0), binding(1)]]
var<storage> s_lights: [[access(read)]] Lights;
[[group(0), binding(2)]]
var t_shadow: texture_depth_2d_array;
[[group(0), binding(3)]]
var sampler_shadow: sampler_comparison;

// Returns 0..1 shadow visibility for the fragment as seen from `light_id`.
fn fetch_shadow(light_id: u32, homogeneous_coords: vec4<f32>) -> f32 {
    // Behind the light's projection: treat as fully lit.
    if (homogeneous_coords.w <= 0.0) {
        return 1.0;
    }
    // compensate for the Y-flip difference between the NDC and texture coordinates
    let flip_correction = vec2<f32>(0.5, -0.5);
    // compute texture coordinates for shadow lookup
    let proj_correction = 1.0 / homogeneous_coords.w;
    let light_local = homogeneous_coords.xy * flip_correction * proj_correction + vec2<f32>(0.5, 0.5);
    // do the lookup, using HW PCF and comparison
    return textureSampleCompare(t_shadow, sampler_shadow, light_local, i32(light_id), homogeneous_coords.z * proj_correction);
}

let c_ambient: vec3<f32> = vec3<f32>(0.05, 0.05, 0.05);
let c_max_lights: u32 = 10u;

// Forward-pass fragment stage: ambient plus shadowed Lambertian diffuse
// accumulated over all active lights.
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    let normal = normalize(in.world_normal);
    // accumulate color
    var color: vec3<f32> = c_ambient;
    var i: u32 = 0u;
    loop {
        if (i >= min(u_globals.num_lights.x, c_max_lights)) {
            break;
        }
        let light = s_lights.data[i];
        // project into the light space
        let shadow = fetch_shadow(i, light.proj * in.world_position);
        // compute Lambertian diffuse term
        let light_dir = normalize(light.pos.xyz - in.world_position.xyz);
        let diffuse = max(0.0, dot(normal, light_dir));
        // add light contribution
        color = color + shadow * diffuse * light.color.xyz;
        continuing {
            i = i + 1u;
        }
    }
    // multiply the light by material color
    return vec4<f32>(color, 1.0) * u_entity.color;
}

View File

@ -0,0 +1,14 @@
# skybox
This animated example demonstrates loading a Wavefront OBJ model and rendering it with a skybox and simple reflections.
It hooks up `winit` mouse controls for camera rotation around the model at the center.
## To Run
```
cargo run --example skybox
```
## Screenshots
![Skybox](./screenshot.png)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,469 @@
#[path = "../framework.rs"]
mod framework;
use bytemuck::{Pod, Zeroable};
use cgmath::SquareMatrix;
use std::borrow::Cow;
use wgpu::util::DeviceExt;
const IMAGE_SIZE: u32 = 128;
/// GPU vertex: object-space position and normal.
/// `repr(C)` fixes the field layout and `Pod`/`Zeroable` allow the whole
/// vertex slice to be byte-cast into the vertex buffer — do not reorder fields.
#[derive(Clone, Copy, Pod, Zeroable)]
#[repr(C)]
struct Vertex {
    pos: [f32; 3],    // position, location(0)
    normal: [f32; 3], // normal, location(1)
}
/// One drawable mesh: a vertex buffer and how many vertices to draw from it.
struct Entity {
    vertex_count: u32,        // number of vertices in `vertex_buf`
    vertex_buf: wgpu::Buffer, // non-indexed triangle list
}
// Note: we use the Y=up coordinate space in this example.

/// Orbit camera state, driven by cursor position in `update`.
struct Camera {
    screen_size: (u32, u32), // current swap-chain size, for aspect ratio
    angle_y: f32,            // azimuth around the Y axis (radians)
    angle_xz: f32,           // elevation above the XZ plane (radians)
    dist: f32,               // distance from the orbit target
}

// Height of the orbit target; the model sits above the origin.
const MODEL_CENTER_Y: f32 = 2.0;
impl Camera {
    /// Packs projection, inverse projection, view matrix and camera position
    /// into one flat array matching the `Data` uniform block in shader.wgsl:
    /// [0..16) proj, [16..32) proj_inv, [32..48) view, [48..52) cam_pos (w=1).
    fn to_uniform_data(&self) -> [f32; 16 * 3 + 4] {
        let aspect = self.screen_size.0 as f32 / self.screen_size.1 as f32;
        let mx_projection = cgmath::perspective(cgmath::Deg(45f32), aspect, 1.0, 50.0);
        // Spherical-coordinate orbit position around the model center.
        let cam_pos = cgmath::Point3::new(
            self.angle_xz.cos() * self.angle_y.sin() * self.dist,
            self.angle_xz.sin() * self.dist + MODEL_CENTER_Y,
            self.angle_xz.cos() * self.angle_y.cos() * self.dist,
        );
        let mx_view = cgmath::Matrix4::look_at_rh(
            cam_pos,
            cgmath::Point3::new(0f32, MODEL_CENTER_Y, 0.0),
            cgmath::Vector3::unit_y(),
        );
        // Correct cgmath's OpenGL clip space (z in [-1,1]) to wgpu's ([0,1]).
        let proj = framework::OPENGL_TO_WGPU_MATRIX * mx_projection;
        let proj_inv = proj.invert().unwrap();
        let view = framework::OPENGL_TO_WGPU_MATRIX * mx_view;
        let mut raw = [0f32; 16 * 3 + 4];
        raw[..16].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj)[..]);
        raw[16..32].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&proj_inv)[..]);
        raw[32..48].copy_from_slice(&AsRef::<[f32; 16]>::as_ref(&view)[..]);
        raw[48..51].copy_from_slice(AsRef::<[f32; 3]>::as_ref(&cam_pos));
        raw[51] = 1.0; // homogeneous w for cam_pos
        raw
    }
}
/// All per-example GPU state for the skybox demo.
pub struct Skybox {
    camera: Camera,
    sky_pipeline: wgpu::RenderPipeline,    // full-screen triangle sampling the cubemap
    entity_pipeline: wgpu::RenderPipeline, // reflective model rendering
    bind_group: wgpu::BindGroup,           // uniforms + cubemap + sampler
    uniform_buf: wgpu::Buffer,             // destination of Camera::to_uniform_data
    entities: Vec<Entity>,
    depth_view: wgpu::TextureView,
    staging_belt: wgpu::util::StagingBelt, // reusable staging memory for uniform updates
}
impl Skybox {
    /// Depth buffer format shared by both render pipelines.
    const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth24Plus;

    /// Allocates a depth texture matching the swap-chain dimensions and
    /// returns a default view of it. Called on init and on every resize.
    fn create_depth_texture(
        sc_desc: &wgpu::SwapChainDescriptor,
        device: &wgpu::Device,
    ) -> wgpu::TextureView {
        let extent = wgpu::Extent3d {
            width: sc_desc.width,
            height: sc_desc.height,
            depth_or_array_layers: 1,
        };
        let descriptor = wgpu::TextureDescriptor {
            label: None,
            size: extent,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: Self::DEPTH_FORMAT,
            usage: wgpu::TextureUsage::RENDER_ATTACHMENT,
        };
        device
            .create_texture(&descriptor)
            .create_view(&wgpu::TextureViewDescriptor::default())
    }
}
impl framework::Example for Skybox {
/// Compressed-texture features we can take advantage of if the adapter
/// offers them; the skybox picks its format from whichever is available.
fn optional_features() -> wgpu::Features {
    wgpu::Features::TEXTURE_COMPRESSION_ASTC_LDR
        | wgpu::Features::TEXTURE_COMPRESSION_ETC2
        | wgpu::Features::TEXTURE_COMPRESSION_BC
}
/// Builds all GPU resources: loads and triangulates the OBJ model, creates
/// both pipelines, picks a compressed cubemap format supported by the
/// device, uploads the skybox texture, and assembles the bind group.
fn init(
    sc_desc: &wgpu::SwapChainDescriptor,
    adapter: &wgpu::Adapter,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) -> Self {
    let mut entities = Vec::new();
    {
        // Load the OBJ and fan-triangulate each polygon; one Entity (and one
        // vertex buffer) per OBJ group.
        let source = include_bytes!("models/teslacyberv3.0.obj");
        let data = obj::ObjData::load_buf(&source[..]).unwrap();
        let mut vertices = Vec::new();
        for object in data.objects {
            for group in object.groups {
                vertices.clear();
                for poly in group.polys {
                    // Triangle fan: (0, k-1, k) for every k >= 2.
                    for end_index in 2..poly.0.len() {
                        for &index in &[0, end_index - 1, end_index] {
                            let obj::IndexTuple(position_id, _texture_id, normal_id) =
                                poly.0[index];
                            vertices.push(Vertex {
                                pos: data.position[position_id],
                                normal: data.normal[normal_id.unwrap()],
                            })
                        }
                    }
                }
                let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: Some("Vertex"),
                    contents: bytemuck::cast_slice(&vertices),
                    usage: wgpu::BufferUsage::VERTEX,
                });
                entities.push(Entity {
                    vertex_count: vertices.len() as u32,
                    vertex_buf,
                });
            }
        }
    }
    // Layout: binding 0 = camera uniforms, 1 = cubemap, 2 = sampler.
    let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: None,
        entries: &[
            wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Uniform,
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            },
            wgpu::BindGroupLayoutEntry {
                binding: 1,
                visibility: wgpu::ShaderStage::FRAGMENT,
                ty: wgpu::BindingType::Texture {
                    sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    multisampled: false,
                    view_dimension: wgpu::TextureViewDimension::Cube,
                },
                count: None,
            },
            wgpu::BindGroupLayoutEntry {
                binding: 2,
                visibility: wgpu::ShaderStage::FRAGMENT,
                ty: wgpu::BindingType::Sampler {
                    comparison: false,
                    filtering: true,
                },
                count: None,
            },
        ],
    });
    // Create the render pipeline
    // Only some backends get the experimental naga translation path here.
    let mut flags = wgpu::ShaderFlags::VALIDATION;
    match adapter.get_info().backend {
        wgpu::Backend::Metal | wgpu::Backend::Vulkan => {
            flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION
        }
        _ => (), //TODO
    }
    let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
        label: None,
        source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
        flags,
    });
    let camera = Camera {
        screen_size: (sc_desc.width, sc_desc.height),
        angle_xz: 0.2,
        angle_y: 0.2,
        dist: 30.0,
    };
    let raw_uniforms = camera.to_uniform_data();
    let uniform_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Buffer"),
        contents: bytemuck::cast_slice(&raw_uniforms),
        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
    });
    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: None,
        bind_group_layouts: &[&bind_group_layout],
        push_constant_ranges: &[],
    });
    // Create the render pipelines
    // Sky pipeline: full-screen triangle, no vertex buffers, depth test only
    // (no depth writes) so it fills wherever no entity was drawn.
    let sky_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Sky"),
        layout: Some(&pipeline_layout),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: "vs_sky",
            buffers: &[],
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: "fs_sky",
            targets: &[sc_desc.format.into()],
        }),
        primitive: wgpu::PrimitiveState {
            front_face: wgpu::FrontFace::Cw,
            ..Default::default()
        },
        depth_stencil: Some(wgpu::DepthStencilState {
            format: Self::DEPTH_FORMAT,
            depth_write_enabled: false,
            depth_compare: wgpu::CompareFunction::LessEqual,
            stencil: wgpu::StencilState::default(),
            bias: wgpu::DepthBiasState::default(),
        }),
        multisample: wgpu::MultisampleState::default(),
    });
    // Entity pipeline: regular depth-tested, depth-writing model rendering.
    let entity_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("Entity"),
        layout: Some(&pipeline_layout),
        vertex: wgpu::VertexState {
            module: &shader,
            entry_point: "vs_entity",
            buffers: &[wgpu::VertexBufferLayout {
                array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
                step_mode: wgpu::InputStepMode::Vertex,
                attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3],
            }],
        },
        fragment: Some(wgpu::FragmentState {
            module: &shader,
            entry_point: "fs_entity",
            targets: &[sc_desc.format.into()],
        }),
        primitive: wgpu::PrimitiveState {
            front_face: wgpu::FrontFace::Cw,
            ..Default::default()
        },
        depth_stencil: Some(wgpu::DepthStencilState {
            format: Self::DEPTH_FORMAT,
            depth_write_enabled: true,
            depth_compare: wgpu::CompareFunction::LessEqual,
            stencil: wgpu::StencilState::default(),
            bias: wgpu::DepthBiasState::default(),
        }),
        multisample: wgpu::MultisampleState::default(),
    });
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        label: None,
        address_mode_u: wgpu::AddressMode::ClampToEdge,
        address_mode_v: wgpu::AddressMode::ClampToEdge,
        address_mode_w: wgpu::AddressMode::ClampToEdge,
        mag_filter: wgpu::FilterMode::Linear,
        min_filter: wgpu::FilterMode::Linear,
        mipmap_filter: wgpu::FilterMode::Linear,
        ..Default::default()
    });
    // Prefer a compressed cubemap format the device supports; fall back to
    // uncompressed BGRA8. The matching .dds file is embedded below.
    let device_features = device.features();
    let skybox_format =
        if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ASTC_LDR) {
            wgpu::TextureFormat::Astc4x4RgbaUnormSrgb
        } else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_ETC2) {
            wgpu::TextureFormat::Etc2RgbUnormSrgb
        } else if device_features.contains(wgpu::Features::TEXTURE_COMPRESSION_BC) {
            wgpu::TextureFormat::Bc1RgbaUnormSrgb
        } else {
            wgpu::TextureFormat::Bgra8UnormSrgb
        };
    let size = wgpu::Extent3d {
        width: IMAGE_SIZE,
        height: IMAGE_SIZE,
        depth_or_array_layers: 6, // six cubemap faces
    };
    // Mip count is computed from a single face, not the 6-layer extent.
    let layer_size = wgpu::Extent3d {
        depth_or_array_layers: 1,
        ..size
    };
    let max_mips = layer_size.max_mips();
    log::debug!(
        "Copying {:?} skybox images of size {}, {}, 6 with {} mips to gpu",
        skybox_format,
        IMAGE_SIZE,
        IMAGE_SIZE,
        max_mips,
    );
    let bytes = match skybox_format {
        wgpu::TextureFormat::Astc4x4RgbaUnormSrgb => &include_bytes!("images/astc.dds")[..],
        wgpu::TextureFormat::Etc2RgbUnormSrgb => &include_bytes!("images/etc2.dds")[..],
        wgpu::TextureFormat::Bc1RgbaUnormSrgb => &include_bytes!("images/bc1.dds")[..],
        wgpu::TextureFormat::Bgra8UnormSrgb => &include_bytes!("images/bgra.dds")[..],
        _ => unreachable!(),
    };
    let image = ddsfile::Dds::read(&mut std::io::Cursor::new(&bytes)).unwrap();
    let texture = device.create_texture_with_data(
        &queue,
        &wgpu::TextureDescriptor {
            size,
            mip_level_count: max_mips as u32,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: skybox_format,
            usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
            label: None,
        },
        &image.data,
    );
    // View the 6-layer 2D texture as a cube.
    let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
        label: None,
        dimension: Some(wgpu::TextureViewDimension::Cube),
        ..wgpu::TextureViewDescriptor::default()
    });
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout: &bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: uniform_buf.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::TextureView(&texture_view),
            },
            wgpu::BindGroupEntry {
                binding: 2,
                resource: wgpu::BindingResource::Sampler(&sampler),
            },
        ],
        label: None,
    });
    let depth_view = Self::create_depth_texture(sc_desc, device);
    Skybox {
        camera,
        sky_pipeline,
        entity_pipeline,
        bind_group,
        uniform_buf,
        entities,
        depth_view,
        staging_belt: wgpu::util::StagingBelt::new(0x100),
    }
}
/// Drives the orbit camera from the cursor: horizontal motion sweeps the
/// azimuth (±2.5 rad across the window), vertical motion sets the elevation.
fn update(&mut self, event: winit::event::WindowEvent) {
    if let winit::event::WindowEvent::CursorMoved { position, .. } = event {
        let (width, height) = self.camera.screen_size;
        // Normalize the cursor into [-0.5, 0.5] in each axis.
        let norm_x = position.x as f32 / width as f32 - 0.5;
        let norm_y = position.y as f32 / height as f32 - 0.5;
        self.camera.angle_y = norm_x * 5.0;
        self.camera.angle_xz = norm_y;
    }
}
/// Swap-chain was recreated: rebuild the depth buffer to match and update
/// the camera's cached screen size (used for aspect ratio and cursor math).
fn resize(
    &mut self,
    sc_desc: &wgpu::SwapChainDescriptor,
    device: &wgpu::Device,
    _queue: &wgpu::Queue,
) {
    self.depth_view = Self::create_depth_texture(sc_desc, device);
    self.camera.screen_size = (sc_desc.width, sc_desc.height);
}
/// Per-frame work: upload fresh camera uniforms via the staging belt, then
/// draw all entities followed by the sky (depth-tested, so it only fills the
/// background), and finally recall the staging memory once the GPU is done.
fn render(
    &mut self,
    frame: &wgpu::SwapChainTexture,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    spawner: &framework::Spawner,
) {
    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    // update rotation
    let raw_uniforms = self.camera.to_uniform_data();
    // Belt hands back a mapped slice; the copy is flushed on `finish()`.
    self.staging_belt
        .write_buffer(
            &mut encoder,
            &self.uniform_buf,
            0,
            wgpu::BufferSize::new((raw_uniforms.len() * 4) as wgpu::BufferAddress).unwrap(),
            device,
        )
        .copy_from_slice(bytemuck::cast_slice(&raw_uniforms));
    self.staging_belt.finish();
    {
        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[wgpu::RenderPassColorAttachment {
                view: &frame.view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(wgpu::Color {
                        r: 0.1,
                        g: 0.2,
                        b: 0.3,
                        a: 1.0,
                    }),
                    store: true,
                },
            }],
            depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                view: &self.depth_view,
                depth_ops: Some(wgpu::Operations {
                    load: wgpu::LoadOp::Clear(1.0),
                    store: false, // depth is not read after this pass
                }),
                stencil_ops: None,
            }),
        });
        rpass.set_bind_group(0, &self.bind_group, &[]);
        // Entities first (they write depth), then the sky fills the rest.
        rpass.set_pipeline(&self.entity_pipeline);
        for entity in self.entities.iter() {
            rpass.set_vertex_buffer(0, entity.vertex_buf.slice(..));
            rpass.draw(0..entity.vertex_count, 0..1);
        }
        rpass.set_pipeline(&self.sky_pipeline);
        rpass.draw(0..3, 0..1); // single full-screen triangle
    }
    queue.submit(std::iter::once(encoder.finish()));
    // Reclaim staging chunks once the submitted work completes.
    let belt_future = self.staging_belt.recall();
    spawner.spawn_local(belt_future);
}
}
/// Entry point: hands control to the shared example framework.
fn main() {
    framework::run::<Skybox>("skybox");
}

View File

@ -0,0 +1,62 @@
# Blender MTL File: 'teslacyberv3.0.blend'
# Material Count: 6
newmtl Material
Ns 65.476285
Ka 1.000000 1.000000 1.000000
Kd 0.411568 0.411568 0.411568
Ks 0.614679 0.614679 0.614679
Ke 0.000000 0.000000 0.000000
Ni 36.750000
d 1.000000
illum 3
newmtl Материал
Ns 323.999994
Ka 1.000000 1.000000 1.000000
Kd 0.800000 0.800000 0.800000
Ks 0.500000 0.500000 0.500000
Ke 0.000000 0.000000 0.000000
Ni 1.000000
d 1.000000
illum 2
newmtl Материал.001
Ns 900.000000
Ka 1.000000 1.000000 1.000000
Kd 0.026240 0.026240 0.026240
Ks 0.000000 0.000000 0.000000
Ke 0.000000 0.000000 0.000000
Ni 1.450000
d 1.000000
illum 1
newmtl Материал.002
Ns 0.000000
Ka 1.000000 1.000000 1.000000
Kd 0.031837 0.032429 0.029425
Ks 0.169725 0.169725 0.169725
Ke 0.000000 0.000000 0.000000
Ni 0.000000
d 1.000000
illum 2
newmtl Материал.003
Ns 900.000000
Ka 1.000000 1.000000 1.000000
Kd 0.023585 0.083235 0.095923
Ks 1.000000 1.000000 1.000000
Ke 0.000000 0.000000 0.000000
Ni 45.049999
d 1.000000
illum 3
newmtl Материал.004
Ns 323.999994
Ka 1.000000 1.000000 1.000000
Kd 0.800000 0.800000 0.800000
Ks 0.500000 0.500000 0.500000
Ke 0.000000 0.000000 0.000000
Ni 1.000000
d 1.000000
illum 2

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 484 KiB

View File

@ -0,0 +1,78 @@
// Varyings for the sky pass: clip position plus the cubemap lookup direction.
struct SkyOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(0)]] uv: vec3<f32>;
};

// Camera uniforms; layout must match Camera::to_uniform_data on the CPU side.
[[block]]
struct Data {
    // from camera to screen
    proj: mat4x4<f32>;
    // from screen to camera
    proj_inv: mat4x4<f32>;
    // from world to camera
    view: mat4x4<f32>;
    // camera position
    cam_pos: vec4<f32>;
};
// Camera uniform buffer (binding 0 of the only bind group).
// Fix: declare the `uniform` address space explicitly — buffer-backed
// module-scope variables require one, and the sibling shadow shader already
// writes `var<uniform>` for its uniform bindings.
[[group(0), binding(0)]]
var<uniform> r_data: Data;
// Sky vertex stage: emits one oversized triangle covering the screen and,
// per vertex, the world-space view ray used to sample the cubemap.
[[stage(vertex)]]
fn vs_sky([[builtin(vertex_index)]] vertex_index: u32) -> SkyOutput {
    // hacky way to draw a large triangle
    // indices 0,1,2 -> NDC (-1,-1), (-1,3), (3,-1); z=1 puts it at far depth.
    let tmp1 = i32(vertex_index) / 2;
    let tmp2 = i32(vertex_index) & 1;
    let pos = vec4<f32>(
        f32(tmp1) * 4.0 - 1.0,
        f32(tmp2) * 4.0 - 1.0,
        1.0,
        1.0
    );
    // transposition = inversion for this orthonormal matrix
    let inv_model_view = transpose(mat3x3<f32>(r_data.view.x.xyz, r_data.view.y.xyz, r_data.view.z.xyz));
    // Unproject the clip-space corner back into camera space, then rotate
    // into world space to get the view direction.
    let unprojected = r_data.proj_inv * pos;
    var out: SkyOutput;
    out.uv = inv_model_view * unprojected.xyz;
    out.position = pos;
    return out;
}
// Varyings for the reflective entity pass.
struct EntityOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(1)]] normal: vec3<f32>;
    [[location(3)]] view: vec3<f32>;  // camera-to-surface vector
};

// Entity vertex stage: passes the normal through and computes the incident
// view vector for the reflection in fs_entity.
[[stage(vertex)]]
fn vs_entity(
    [[location(0)]] pos: vec3<f32>,
    [[location(1)]] normal: vec3<f32>,
) -> EntityOutput {
    var out: EntityOutput;
    out.normal = normal;
    out.view = pos - r_data.cam_pos.xyz;
    out.position = r_data.proj * r_data.view * vec4<f32>(pos, 1.0);
    return out;
}
// Skybox cubemap and its sampler.
[[group(0), binding(1)]]
var r_texture: texture_cube<f32>;
[[group(0), binding(2)]]
var r_sampler: sampler;

// Sky fragment stage: sample the cubemap along the interpolated view ray.
[[stage(fragment)]]
fn fs_sky(in: SkyOutput) -> [[location(0)]] vec4<f32> {
    return textureSample(r_texture, r_sampler, in.uv);
}

// Entity fragment stage: mirror reflection of the skybox plus a small
// constant base term.
[[stage(fragment)]]
fn fs_entity(in: EntityOutput) -> [[location(0)]] vec4<f32> {
    let incident = normalize(in.view);
    let normal = normalize(in.normal);
    // Standard reflection formula: r = i - 2(n.i)n.
    let reflected = incident - 2.0 * dot(normal, incident) * normal;
    let reflected_color = textureSample(r_texture, r_sampler, reflected);
    return vec4<f32>(0.1, 0.1, 0.1, 0.1) + 0.5 * reflected_color;
}

View File

@ -0,0 +1,19 @@
#version 450
// Constant-index variant: the texture array is only ever indexed with
// compile-time constants, so no indexing extension is needed.

layout(location = 0) in vec2 v_TexCoord;
layout(location = 1) flat in int v_Index; // dynamically non-uniform
layout(location = 0) out vec4 o_Color;
layout(set = 0, binding = 0) uniform texture2D u_Textures[2];
layout(set = 0, binding = 1) uniform sampler u_Sampler;
void main() {
    if (v_Index == 0) {
        o_Color = vec4(texture(sampler2D(u_Textures[0], u_Sampler), v_TexCoord).rgb, 1.0);
    } else if (v_Index == 1) {
        o_Color = vec4(texture(sampler2D(u_Textures[1], u_Sampler), v_TexCoord).rgb, 1.0);
    } else {
        // We need to write something to output color
        o_Color = vec4(0.0, 0.0, 1.0, 0.0);
    }
}

Binary file not shown.

View File

@ -0,0 +1,332 @@
#[path = "../framework.rs"]
mod framework;
use bytemuck::{Pod, Zeroable};
use std::num::NonZeroU32;
use wgpu::util::DeviceExt;
/// GPU vertex for the texture-arrays demo: 2D position, texture coordinate,
/// and the texture index the fragment shader selects with.
/// `repr(C)` + `Pod` make the vertex slice byte-castable — do not reorder fields.
#[repr(C)]
#[derive(Clone, Copy, Pod, Zeroable)]
struct Vertex {
    _pos: [f32; 2],       // location(0)
    _tex_coord: [f32; 2], // location(1)
    _index: u32,          // location(2), which texture to sample
}
/// Convenience constructor: widens the small integer literals used by
/// `create_vertices` into the `f32`/`u32` fields the GPU expects.
fn vertex(pos: [i8; 2], tc: [i8; 2], index: i8) -> Vertex {
    let [px, py] = pos;
    let [tu, tv] = tc;
    Vertex {
        _pos: [f32::from(px), f32::from(py)],
        _tex_coord: [f32::from(tu), f32::from(tv)],
        _index: index as u32,
    }
}
/// Two side-by-side rectangles covering the screen; the last component picks
/// which texture each quad samples (0 = left/red, 1 = right/green).
fn create_vertices() -> Vec<Vertex> {
    let corners: [([i8; 2], [i8; 2], i8); 8] = [
        // left rectangle
        ([-1, -1], [0, 1], 0),
        ([-1, 1], [0, 0], 0),
        ([0, 1], [1, 0], 0),
        ([0, -1], [1, 1], 0),
        // right rectangle
        ([0, -1], [0, 1], 1),
        ([0, 1], [0, 0], 1),
        ([1, 1], [1, 0], 1),
        ([1, -1], [1, 1], 1),
    ];
    corners
        .iter()
        .map(|&(pos, tc, index)| vertex(pos, tc, index))
        .collect()
}
/// Index list: two triangles per rectangle, in the same winding as the
/// original hand-written list (0,1,2 / 2,0,3 offset by 4 for the second quad).
fn create_indices() -> Vec<u16> {
    const QUAD_PATTERN: [u16; 6] = [0, 1, 2, 2, 0, 3];
    let mut indices = Vec::with_capacity(QUAD_PATTERN.len() * 2);
    for quad in 0..2u16 {
        let base = quad * 4; // 4 vertices per rectangle
        indices.extend(QUAD_PATTERN.iter().map(|&i| base + i));
    }
    indices
}
/// Solid fill colors for the two 1x1 test textures.
#[derive(Copy, Clone)]
enum Color {
    RED,
    GREEN,
}

/// Returns a single fully-opaque RGBA8 texel of the requested color.
fn create_texture_data(color: Color) -> [u8; 4] {
    let (r, g) = match color {
        Color::RED => (255, 0),
        Color::GREEN => (0, 255),
    };
    [r, g, 0, 255]
}
/// GPU state for the texture-arrays demo.
struct Example {
    pipeline: wgpu::RenderPipeline,
    bind_group: wgpu::BindGroup,      // texture array + sampler
    vertex_buffer: wgpu::Buffer,
    index_buffer: wgpu::Buffer,
    index_format: wgpu::IndexFormat,
    // True when only dynamic-uniform indexing is available: the quads are
    // then drawn in two calls with the index supplied via push constants.
    uniform_workaround: bool,
}
impl framework::Example for Example {
/// Indexing capabilities we can exploit if present; `init` picks the most
/// capable fragment shader variant the device supports.
fn optional_features() -> wgpu::Features {
    wgpu::Features::UNSIZED_BINDING_ARRAY
        | wgpu::Features::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING
        | wgpu::Features::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING
        | wgpu::Features::PUSH_CONSTANTS
}
/// The demo cannot run at all without binding arrays of sampled textures.
fn required_features() -> wgpu::Features {
    wgpu::Features::SAMPLED_TEXTURE_BINDING_ARRAY
}
/// Request room for the single 4-byte push constant used by the
/// uniform-indexing workaround path.
fn required_limits() -> wgpu::Limits {
    wgpu::Limits {
        max_push_constant_size: 4,
        ..wgpu::Limits::default()
    }
}
/// Builds all GPU resources: selects the most capable fragment shader the
/// device's indexing features allow, creates the two 1x1 textures, uploads
/// them, and assembles the bind group and pipeline.
fn init(
    sc_desc: &wgpu::SwapChainDescriptor,
    _adapter: &wgpu::Adapter,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) -> Self {
    let mut uniform_workaround = false;
    let vs_module = device.create_shader_module(&wgpu::include_spirv!("shader.vert.spv"));
    // Best-to-worst: unsized non-uniform, sized non-uniform, dynamic-uniform
    // (needs the push-constant workaround), constant indexing.
    let fs_source = match device.features() {
        f if f.contains(wgpu::Features::UNSIZED_BINDING_ARRAY) => {
            wgpu::include_spirv!("unsized-non-uniform.frag.spv")
        }
        f if f.contains(wgpu::Features::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING) => {
            wgpu::include_spirv!("non-uniform.frag.spv")
        }
        f if f.contains(wgpu::Features::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING) => {
            uniform_workaround = true;
            wgpu::include_spirv!("uniform.frag.spv")
        }
        f if f.contains(wgpu::Features::SAMPLED_TEXTURE_BINDING_ARRAY) => {
            wgpu::include_spirv!("constant.frag.spv")
        }
        // SAMPLED_TEXTURE_BINDING_ARRAY is in required_features().
        _ => unreachable!(),
    };
    let fs_module = device.create_shader_module(&fs_source);
    let vertex_size = std::mem::size_of::<Vertex>();
    let vertex_data = create_vertices();
    let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Vertex Buffer"),
        contents: bytemuck::cast_slice(&vertex_data),
        usage: wgpu::BufferUsage::VERTEX,
    });
    let index_data = create_indices();
    let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Index Buffer"),
        contents: bytemuck::cast_slice(&index_data),
        usage: wgpu::BufferUsage::INDEX,
    });
    // Two 1x1 textures (Extent3d::default() is 1x1x1), one red, one green.
    let red_texture_data = create_texture_data(Color::RED);
    let green_texture_data = create_texture_data(Color::GREEN);
    let texture_descriptor = wgpu::TextureDescriptor {
        size: wgpu::Extent3d::default(),
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8UnormSrgb,
        usage: wgpu::TextureUsage::SAMPLED | wgpu::TextureUsage::COPY_DST,
        label: None,
    };
    let red_texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("red"),
        ..texture_descriptor
    });
    let green_texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("green"),
        ..texture_descriptor
    });
    let red_texture_view = red_texture.create_view(&wgpu::TextureViewDescriptor::default());
    let green_texture_view = green_texture.create_view(&wgpu::TextureViewDescriptor::default());
    queue.write_texture(
        wgpu::ImageCopyTexture {
            mip_level: 0,
            origin: wgpu::Origin3d::ZERO,
            texture: &red_texture,
        },
        &red_texture_data,
        wgpu::ImageDataLayout {
            offset: 0,
            bytes_per_row: Some(NonZeroU32::new(4).unwrap()), // one RGBA8 texel
            rows_per_image: None,
        },
        wgpu::Extent3d::default(),
    );
    queue.write_texture(
        wgpu::ImageCopyTexture {
            mip_level: 0,
            origin: wgpu::Origin3d::ZERO,
            texture: &green_texture,
        },
        &green_texture_data,
        wgpu::ImageDataLayout {
            offset: 0,
            bytes_per_row: Some(NonZeroU32::new(4).unwrap()),
            rows_per_image: None,
        },
        wgpu::Extent3d::default(),
    );
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor::default());
    // Binding 0 is an array of 2 textures (count = Some(2)), binding 1 the sampler.
    let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some("bind group layout"),
        entries: &[
            wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStage::FRAGMENT,
                ty: wgpu::BindingType::Texture {
                    sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    view_dimension: wgpu::TextureViewDimension::D2,
                    multisampled: false,
                },
                count: NonZeroU32::new(2),
            },
            wgpu::BindGroupLayoutEntry {
                binding: 1,
                visibility: wgpu::ShaderStage::FRAGMENT,
                ty: wgpu::BindingType::Sampler {
                    comparison: false,
                    filtering: true,
                },
                count: None,
            },
        ],
    });
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::TextureViewArray(&[
                    &red_texture_view,
                    &green_texture_view,
                ]),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::Sampler(&sampler),
            },
        ],
        layout: &bind_group_layout,
        label: Some("bind group"),
    });
    // Only the workaround path needs a push-constant range.
    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: Some("main"),
        bind_group_layouts: &[&bind_group_layout],
        push_constant_ranges: if uniform_workaround {
            &[wgpu::PushConstantRange {
                stages: wgpu::ShaderStage::FRAGMENT,
                range: 0..4,
            }]
        } else {
            &[]
        },
    });
    let index_format = wgpu::IndexFormat::Uint16;
    let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: None,
        layout: Some(&pipeline_layout),
        vertex: wgpu::VertexState {
            module: &vs_module,
            entry_point: "main",
            buffers: &[wgpu::VertexBufferLayout {
                array_stride: vertex_size as wgpu::BufferAddress,
                step_mode: wgpu::InputStepMode::Vertex,
                attributes: &wgpu::vertex_attr_array![0 => Float32x2, 1 => Float32x2, 2 => Sint32],
            }],
        },
        fragment: Some(wgpu::FragmentState {
            module: &fs_module,
            entry_point: "main",
            targets: &[sc_desc.format.into()],
        }),
        primitive: wgpu::PrimitiveState {
            front_face: wgpu::FrontFace::Ccw,
            ..Default::default()
        },
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
    });
    Self {
        vertex_buffer,
        index_buffer,
        index_format,
        bind_group,
        pipeline,
        uniform_workaround,
    }
}
/// Nothing is resolution-dependent in this demo, so resizing is a no-op.
fn resize(
    &mut self,
    _sc_desc: &wgpu::SwapChainDescriptor,
    _device: &wgpu::Device,
    _queue: &wgpu::Queue,
) {
    // noop
}
/// No interactive input in this demo.
fn update(&mut self, _event: winit::event::WindowEvent) {
    // noop
}
/// Draws both rectangles. On the uniform-indexing workaround path the quads
/// are drawn in two calls, each with its texture index pushed as a constant;
/// otherwise one indexed draw covers all 12 indices.
fn render(
    &mut self,
    frame: &wgpu::SwapChainTexture,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    _spawner: &framework::Spawner,
) {
    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
        label: Some("primary"),
    });
    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
        label: None,
        color_attachments: &[wgpu::RenderPassColorAttachment {
            view: &frame.view,
            resolve_target: None,
            ops: wgpu::Operations {
                load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                store: true,
            },
        }],
        depth_stencil_attachment: None,
    });
    rpass.set_pipeline(&self.pipeline);
    rpass.set_bind_group(0, &self.bind_group, &[]);
    rpass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
    rpass.set_index_buffer(self.index_buffer.slice(..), self.index_format);
    if self.uniform_workaround {
        // Indices 0..6 = left quad (texture 0), 6..12 = right quad (texture 1).
        rpass.set_push_constants(wgpu::ShaderStage::FRAGMENT, 0, bytemuck::cast_slice(&[0]));
        rpass.draw_indexed(0..6, 0, 0..1);
        rpass.set_push_constants(wgpu::ShaderStage::FRAGMENT, 0, bytemuck::cast_slice(&[1]));
        rpass.draw_indexed(6..12, 0, 0..1);
    } else {
        rpass.draw_indexed(0..12, 0, 0..1);
    }
    // End the render pass before finishing the encoder.
    drop(rpass);
    queue.submit(Some(encoder.finish()));
}
}
/// Entry point: hands control to the shared example framework.
fn main() {
    framework::run::<Example>("texture-arrays");
}

View File

@ -0,0 +1,14 @@
#version 450
// Non-uniform indexing variant: a sized texture array indexed by a
// per-fragment value, which requires GL_EXT_nonuniform_qualifier.
#extension GL_EXT_nonuniform_qualifier : require
layout(location = 0) in vec2 v_TexCoord;
layout(location = 1) nonuniformEXT flat in int v_Index; // dynamically non-uniform
layout(location = 0) out vec4 o_Color;
layout(set = 0, binding = 0) uniform texture2D u_Textures[2];
layout(set = 0, binding = 1) uniform sampler u_Sampler;
void main() {
    o_Color = vec4(texture(sampler2D(u_Textures[v_Index], u_Sampler), v_TexCoord).rgb, 1.0);
}

Binary file not shown.

View File

@ -0,0 +1,13 @@
#version 450
// Shared vertex stage: forwards texcoord and the flat texture index, and
// lifts the 2D position to clip space.
layout(location = 0) in vec2 a_Pos;
layout(location = 1) in vec2 a_TexCoord;
layout(location = 2) in int a_Index;
layout(location = 0) out vec2 v_TexCoord;
layout(location = 1) flat out int v_Index;
void main() {
    v_TexCoord = a_TexCoord;
    v_Index = a_Index;
    gl_Position = vec4(a_Pos, 0.0, 1.0);
}

Binary file not shown.

View File

@ -0,0 +1,15 @@
#version 450
// Dynamic-uniform indexing variant: the index comes from a push constant so
// it is uniform across the draw call (the Rust side issues one draw per quad).
layout(location = 0) in vec2 v_TexCoord;
layout(location = 1) flat in int v_Index; // dynamically non-uniform
layout(location = 0) out vec4 o_Color;
layout(set = 0, binding = 0) uniform texture2D u_Textures[2];
layout(set = 0, binding = 1) uniform sampler u_Sampler;
layout(push_constant) uniform Uniforms {
    int u_Index; // dynamically uniform
};
void main() {
    o_Color = vec4(texture(sampler2D(u_Textures[u_Index], u_Sampler), v_TexCoord).rgb, 1.0);
}

Binary file not shown.

View File

@ -0,0 +1,14 @@
#version 450
// Unsized non-uniform variant: the texture array has no declared size
// (runtime-sized binding array) and is indexed non-uniformly.
#extension GL_EXT_nonuniform_qualifier : require
layout(location = 0) in vec2 v_TexCoord;
layout(location = 1) nonuniformEXT flat in int v_Index; // dynamically non-uniform
layout(location = 0) out vec4 o_Color;
layout(set = 0, binding = 0) uniform texture2D u_Textures[];
layout(set = 0, binding = 1) uniform sampler u_Sampler;
void main() {
    o_Color = vec4(texture(sampler2D(u_Textures[v_Index], u_Sampler), v_TexCoord).rgb, 1.0);
}

View File

@ -0,0 +1,24 @@
# Water example
This example renders animated water.
It demonstrates read-only depth/stencil (abbreviated RODS), where a depth/stencil buffer is bound as an attachment that is read-only. Here it is sampled in the shaders to calculate reflections and depth.
## Files:
```
water
├── main.rs ------------------ Main program
├── point_gen.rs ------------- Hexagon point generation
├── README.md ---------------- This readme
├── screenshot.png ----------- Screenshot
├── terrain.wgsl ------------- WGSL Shader for terrain
└── water.wgsl --------------- WGSL Shader for water
```
## To run
```
cargo run --example water
```
## Screenshot
![Water example](./screenshot.png)

799
wgpu/examples/water/main.rs Normal file
View File

@ -0,0 +1,799 @@
#[path = "../framework.rs"]
mod framework;
mod point_gen;
use bytemuck::{Pod, Zeroable};
use cgmath::Point3;
use std::{borrow::Cow, iter, mem};
use wgpu::util::DeviceExt;
///
/// Radius of the terrain.
///
/// Changing this value will change the size of the
/// water and terrain. Note however, that changes to
/// this value will require modification of the time
/// scale in the `render` method below.
///
const SIZE: f32 = 10.0;

///
/// Location of the camera.
/// Location of light is in terrain/water shaders.
///
const CAMERA: Point3<f32> = Point3 {
    x: -100.0,
    y: 50.0,
    z: 100.0,
};
/// The three matrices shared by the terrain and water passes.
struct Matrices {
    view: cgmath::Matrix4<f32>,         // camera above the water
    flipped_view: cgmath::Matrix4<f32>, // camera mirrored below the water plane
    projection: cgmath::Matrix4<f32>,   // already corrected for wgpu clip space
}
/// Terrain uniform block; `repr(C)` + `Pod` make it byte-castable for upload.
/// Layout mirrors the terrain shader — do not reorder fields.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Pod, Zeroable)]
struct TerrainUniforms {
    view_projection: [f32; 16],
    clipping_plane: [f32; 4], // plane used to clip geometry for the reflection pass
}

/// Water uniform block; layout mirrors the water shader — do not reorder fields.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Pod, Zeroable)]
struct WaterUniforms {
    view: [f32; 16],
    projection: [f32; 16],
    time_size_width: [f32; 4], // packed: time, (unnamed), water size, screen width
    height: [f32; 4],          // .x = screen height; rest padding
}
/// All uniform blocks the example writes each time resources are rebuilt.
struct Uniforms {
    terrain_normal: TerrainUniforms,  // regular camera
    terrain_flipped: TerrainUniforms, // underwater camera, for the reflection pass
    water: WaterUniforms,
}
/// GPU state for the water demo: separate pipelines/bind groups for the
/// water surface and for the terrain (drawn twice: normal and mirrored).
struct Example {
    water_vertex_buf: wgpu::Buffer,
    water_vertex_count: usize,
    water_bind_group_layout: wgpu::BindGroupLayout,
    water_bind_group: wgpu::BindGroup,
    water_uniform_buf: wgpu::Buffer,
    water_pipeline: wgpu::RenderPipeline,
    terrain_vertex_buf: wgpu::Buffer,
    terrain_vertex_count: usize,
    terrain_normal_bind_group: wgpu::BindGroup,
    ///
    /// Binds to the uniform buffer where the
    /// camera has been placed underwater.
    ///
    terrain_flipped_bind_group: wgpu::BindGroup,
    terrain_normal_uniform_buf: wgpu::Buffer,
    ///
    /// Contains uniform variables where the camera
    /// has been placed underwater.
    ///
    terrain_flipped_uniform_buf: wgpu::Buffer,
    terrain_pipeline: wgpu::RenderPipeline,
    reflect_view: wgpu::TextureView, // render target for the mirrored terrain pass
    depth_buffer: wgpu::TextureView,
    current_frame: usize,
    ///
    /// Used to prevent issues when rendering after
    /// minimizing the window.
    ///
    active: Option<usize>,
}
impl Example {
///
/// Creates the view matrices, and the corrected projection matrix.
///
/// The flipped view mirrors the camera below the water plane (y -> -y);
/// rendering the terrain with it produces the reflection texture.
///
fn generate_matrices(aspect_ratio: f32) -> Matrices {
    let projection = cgmath::perspective(cgmath::Deg(45f32), aspect_ratio, 10.0, 400.0);
    let reg_view = cgmath::Matrix4::look_at_rh(
        CAMERA,
        cgmath::Point3::new(0f32, 0.0, 0.0),
        cgmath::Vector3::unit_y(), //Note that y is up. Differs from other examples.
    );
    // Stretch the scene; applied to both views so they stay consistent.
    let scale = cgmath::Matrix4::from_nonuniform_scale(8.0, 1.5, 8.0);
    let reg_view = reg_view * scale;
    let flipped_view = cgmath::Matrix4::look_at_rh(
        cgmath::Point3::new(CAMERA.x, -CAMERA.y, CAMERA.z),
        cgmath::Point3::new(0f32, 0.0, 0.0),
        cgmath::Vector3::unit_y(),
    );
    // Map cgmath's OpenGL clip space (z in [-1,1]) to wgpu's ([0,1]).
    let correction = framework::OPENGL_TO_WGPU_MATRIX;
    let flipped_view = flipped_view * scale;
    Matrices {
        view: reg_view,
        flipped_view,
        projection: correction * projection,
    }
}
/// Builds the CPU-side uniform values for the given framebuffer size.
/// The flipped terrain pass clips everything below y = 0 (the water plane).
fn generate_uniforms(width: u32, height: u32) -> Uniforms {
    let Matrices {
        view,
        flipped_view,
        projection,
    } = Self::generate_matrices(width as f32 / height as f32);
    Uniforms {
        terrain_normal: TerrainUniforms {
            view_projection: *(projection * view).as_ref(),
            clipping_plane: [0.0; 4], // no clipping for the normal pass
        },
        terrain_flipped: TerrainUniforms {
            view_projection: *(projection * flipped_view).as_ref(),
            clipping_plane: [0., 1., 0., 0.], // clip against the y = 0 plane
        },
        water: WaterUniforms {
            view: *view.as_ref(),
            projection: *projection.as_ref(),
            // time starts at 0; width/height let the shader compute texel coords
            time_size_width: [0.0, 1.0, SIZE * 2.0, width as f32],
            height: [height as f32, 0.0, 0.0, 0.0],
        },
    }
}
///
/// Initializes Uniforms and textures.
///
/// Writes the freshly generated uniform values into the given buffers and
/// (re)creates the size-dependent reflection and depth textures plus the
/// water bind group that samples them. Called on init and on every resize.
///
fn initialize_resources(
    sc_desc: &wgpu::SwapChainDescriptor,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    water_uniforms: &wgpu::Buffer,
    terrain_normal_uniforms: &wgpu::Buffer,
    terrain_flipped_uniforms: &wgpu::Buffer,
    water_bind_group_layout: &wgpu::BindGroupLayout,
) -> (wgpu::TextureView, wgpu::TextureView, wgpu::BindGroup) {
    // Matrices for our projection and view.
    // flipped_view is the view from under the water.
    let Uniforms {
        terrain_normal,
        terrain_flipped,
        water,
    } = Self::generate_uniforms(sc_desc.width, sc_desc.height);
    // Put the uniforms into buffers on the GPU
    queue.write_buffer(
        terrain_normal_uniforms,
        0,
        bytemuck::cast_slice(&[terrain_normal]),
    );
    queue.write_buffer(
        terrain_flipped_uniforms,
        0,
        bytemuck::cast_slice(&[terrain_flipped]),
    );
    queue.write_buffer(water_uniforms, 0, bytemuck::cast_slice(&[water]));
    let texture_extent = wgpu::Extent3d {
        width: sc_desc.width,
        height: sc_desc.height,
        depth_or_array_layers: 1,
    };
    // Render target for the mirrored terrain pass; sampled by the water shader.
    let reflection_texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Reflection Render Texture"),
        size: texture_extent,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: sc_desc.format,
        usage: wgpu::TextureUsage::SAMPLED
            | wgpu::TextureUsage::COPY_DST
            | wgpu::TextureUsage::RENDER_ATTACHMENT,
    });
    // Depth buffer; SAMPLED because the water shader reads scene depth.
    let draw_depth_buffer = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Depth Buffer"),
        size: texture_extent,
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Depth32Float,
        usage: wgpu::TextureUsage::SAMPLED
            | wgpu::TextureUsage::COPY_DST
            | wgpu::TextureUsage::RENDER_ATTACHMENT,
    });
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        label: Some("Texture Sampler"),
        address_mode_u: wgpu::AddressMode::ClampToEdge,
        address_mode_v: wgpu::AddressMode::ClampToEdge,
        address_mode_w: wgpu::AddressMode::ClampToEdge,
        mag_filter: wgpu::FilterMode::Nearest,
        min_filter: wgpu::FilterMode::Linear,
        mipmap_filter: wgpu::FilterMode::Nearest,
        ..Default::default()
    });
    let depth_view = draw_depth_buffer.create_view(&wgpu::TextureViewDescriptor::default());
    // Water bind group: uniforms + reflection color + scene depth + sampler.
    let water_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout: water_bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: water_uniforms.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::TextureView(
                    &reflection_texture.create_view(&wgpu::TextureViewDescriptor::default()),
                ),
            },
            wgpu::BindGroupEntry {
                binding: 2,
                resource: wgpu::BindingResource::TextureView(&depth_view),
            },
            wgpu::BindGroupEntry {
                binding: 3,
                resource: wgpu::BindingResource::Sampler(&sampler),
            },
        ],
        label: Some("Water Bind Group"),
    });
    (
        reflection_texture.create_view(&wgpu::TextureViewDescriptor::default()),
        depth_view,
        water_bind_group,
    )
}
}
impl framework::Example for Example {
/// Builds the whole example: generates the terrain and water meshes,
/// uploads them to vertex buffers, creates the bind group layouts,
/// uniform buffers, size-dependent textures, shader modules, and the two
/// render pipelines.
fn init(
    sc_desc: &wgpu::SwapChainDescriptor,
    adapter: &wgpu::Adapter,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) -> Self {
    // Size of one water vertex
    let water_vertex_size = mem::size_of::<point_gen::WaterVertexAttributes>();
    let water_vertices = point_gen::HexWaterMesh::generate(SIZE).generate_points();
    // Size of one terrain vertex
    let terrain_vertex_size = mem::size_of::<point_gen::TerrainVertexAttributes>();
    // Noise generation
    let terrain_noise = noise::OpenSimplex::new();
    // Random colouration
    let mut terrain_random = rand::thread_rng();
    // Generate terrain. The closure determines what each hexagon will look like.
    let terrain =
        point_gen::HexTerrainMesh::generate(SIZE, |point| -> point_gen::TerrainVertex {
            use noise::NoiseFn;
            use rand::Rng;
            // Sample smooth noise for the height; +0.1 biases land above water.
            let noise = terrain_noise.get([point[0] as f64 / 5.0, point[1] as f64 / 5.0]) + 0.1;
            let y = noise as f32 * 8.0;
            // Multiplies a colour by some random amount.
            fn mul_arr(mut arr: [u8; 4], by: f32) -> [u8; 4] {
                arr[0] = (arr[0] as f32 * by).min(255.0) as u8;
                arr[1] = (arr[1] as f32 * by).min(255.0) as u8;
                arr[2] = (arr[2] as f32 * by).min(255.0) as u8;
                arr
            }
            // Under water
            const DARK_SAND: [u8; 4] = [235, 175, 71, 255];
            // Coast
            const SAND: [u8; 4] = [217, 191, 76, 255];
            // Normal
            const GRASS: [u8; 4] = [122, 170, 19, 255];
            // Mountain
            const SNOW: [u8; 4] = [175, 224, 237, 255];
            // Random colouration.
            let random = terrain_random.gen::<f32>() * 0.2 + 0.9;
            // Choose colour by height band.
            let colour = if y <= 0.0 {
                DARK_SAND
            } else if y <= 0.8 {
                SAND
            } else if y <= 3.0 {
                GRASS
            } else {
                SNOW
            };
            point_gen::TerrainVertex {
                position: Point3 {
                    x: point[0],
                    y,
                    z: point[1],
                },
                colour: mul_arr(colour, random),
            }
        });
    // Generate the buffer data.
    let terrain_vertices = terrain.make_buffer_data();
    // Create the buffers on the GPU to hold the data.
    let water_vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Water vertices"),
        contents: bytemuck::cast_slice(&water_vertices),
        usage: wgpu::BufferUsage::VERTEX,
    });
    let terrain_vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Terrain vertices"),
        contents: bytemuck::cast_slice(&terrain_vertices),
        usage: wgpu::BufferUsage::VERTEX,
    });
    // Create the bind group layout. This is what our uniforms will look like.
    // Bindings 0-3 are filled in `initialize_resources`.
    let water_bind_group_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Water Bind Group Layout"),
            entries: &[
                // Uniform variables such as projection/view.
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: wgpu::BufferSize::new(
                            mem::size_of::<WaterUniforms>() as _,
                        ),
                    },
                    count: None,
                },
                // Reflection texture.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                },
                // Depth texture for terrain.
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                },
                // Sampler to be able to sample the textures.
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStage::FRAGMENT,
                    ty: wgpu::BindingType::Sampler {
                        comparison: false,
                        filtering: true,
                    },
                    count: None,
                },
            ],
        });
    let terrain_bind_group_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Terrain Bind Group Layout"),
            entries: &[
                // Regular uniform variables like view/projection.
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStage::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: wgpu::BufferSize::new(
                            mem::size_of::<TerrainUniforms>() as _,
                        ),
                    },
                    count: None,
                },
            ],
        });
    // Create our pipeline layouts.
    let water_pipeline_layout =
        device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("water"),
            bind_group_layouts: &[&water_bind_group_layout],
            push_constant_ranges: &[],
        });
    let terrain_pipeline_layout =
        device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("terrain"),
            bind_group_layouts: &[&terrain_bind_group_layout],
            push_constant_ranges: &[],
        });
    // Uniform buffers; COPY_DST so `queue.write_buffer` can update them.
    let water_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Water Uniforms"),
        size: mem::size_of::<WaterUniforms>() as _,
        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    let terrain_normal_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Normal Terrain Uniforms"),
        size: mem::size_of::<TerrainUniforms>() as _,
        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    let terrain_flipped_uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Flipped Terrain Uniforms"),
        size: mem::size_of::<TerrainUniforms>() as _,
        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
        mapped_at_creation: false,
    });
    // Create bind group.
    // This puts values behind what was laid out in the bind group layout.
    let (reflect_view, depth_buffer, water_bind_group) = Self::initialize_resources(
        sc_desc,
        device,
        queue,
        &water_uniform_buf,
        &terrain_normal_uniform_buf,
        &terrain_flipped_uniform_buf,
        &water_bind_group_layout,
    );
    let terrain_normal_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout: &terrain_bind_group_layout,
        entries: &[wgpu::BindGroupEntry {
            binding: 0,
            resource: terrain_normal_uniform_buf.as_entire_binding(),
        }],
        label: Some("Terrain Normal Bind Group"),
    });
    let terrain_flipped_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        layout: &terrain_bind_group_layout,
        entries: &[wgpu::BindGroupEntry {
            binding: 0,
            resource: terrain_flipped_uniform_buf.as_entire_binding(),
        }],
        label: Some("Terrain Flipped Bind Group"),
    });
    // Upload/compile them to GPU code.
    let mut flags = wgpu::ShaderFlags::VALIDATION;
    match adapter.get_info().backend {
        wgpu::Backend::Metal | wgpu::Backend::Vulkan => {
            flags |= wgpu::ShaderFlags::EXPERIMENTAL_TRANSLATION
        }
        _ => (), //TODO
    }
    let terrain_module = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
        label: Some("terrain"),
        source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("terrain.wgsl"))),
        flags,
    });
    let water_module = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
        label: Some("water"),
        source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("water.wgsl"))),
        flags,
    });
    // Create the render pipelines. These describe how the data will flow through the GPU, and what
    // constraints and modifiers it will have.
    let water_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("water"),
        // The "layout" is what uniforms will be needed.
        layout: Some(&water_pipeline_layout),
        // Vertex shader and input buffers
        vertex: wgpu::VertexState {
            module: &water_module,
            entry_point: "vs_main",
            // Layout of our vertices. This should match the structs
            // which are uploaded to the GPU. This should also be
            // ensured by tagging on either a `#[repr(C)]` onto a
            // struct, or a `#[repr(transparent)]` if it only contains
            // one item, which is itself `repr(C)`.
            buffers: &[wgpu::VertexBufferLayout {
                array_stride: water_vertex_size as wgpu::BufferAddress,
                step_mode: wgpu::InputStepMode::Vertex,
                // Matches WaterVertexAttributes: [i16; 2] + [i8; 4].
                attributes: &wgpu::vertex_attr_array![0 => Sint16x2, 1 => Sint8x4],
            }],
        },
        // Fragment shader and output targets
        fragment: Some(wgpu::FragmentState {
            module: &water_module,
            entry_point: "fs_main",
            // Describes how the colour will be interpolated
            // and assigned to the output attachment.
            targets: &[wgpu::ColorTargetState {
                format: sc_desc.format,
                blend: Some(wgpu::BlendState {
                    color: wgpu::BlendComponent {
                        src_factor: wgpu::BlendFactor::SrcAlpha,
                        dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                        operation: wgpu::BlendOperation::Add,
                    },
                    alpha: wgpu::BlendComponent {
                        src_factor: wgpu::BlendFactor::One,
                        dst_factor: wgpu::BlendFactor::One,
                        operation: wgpu::BlendOperation::Max,
                    },
                }),
                write_mask: wgpu::ColorWrite::ALL,
            }],
        }),
        // How the triangles will be rasterized. This is more important
        // for the terrain because of the beneath-the water shot.
        // This is also dependent on how the triangles are being generated.
        primitive: wgpu::PrimitiveState {
            // What kind of data are we passing in?
            topology: wgpu::PrimitiveTopology::TriangleList,
            front_face: wgpu::FrontFace::Cw,
            ..Default::default()
        },
        // Describes how us writing to the depth/stencil buffer
        // will work. Since this is water, we need to read from the
        // depth buffer both as a texture in the shader, and as an
        // input attachment to do depth-testing. We don't write, so
        // depth_write_enabled is set to false. This is called
        // RODS or read-only depth stencil.
        depth_stencil: Some(wgpu::DepthStencilState {
            // We don't use stencil.
            format: wgpu::TextureFormat::Depth32Float,
            depth_write_enabled: false,
            depth_compare: wgpu::CompareFunction::Less,
            stencil: wgpu::StencilState::default(),
            bias: wgpu::DepthBiasState::default(),
        }),
        // No multisampling is used.
        multisample: wgpu::MultisampleState::default(),
    });
    // Same idea as the water pipeline.
    let terrain_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("terrain"),
        layout: Some(&terrain_pipeline_layout),
        vertex: wgpu::VertexState {
            module: &terrain_module,
            entry_point: "vs_main",
            buffers: &[wgpu::VertexBufferLayout {
                array_stride: terrain_vertex_size as wgpu::BufferAddress,
                step_mode: wgpu::InputStepMode::Vertex,
                // Matches TerrainVertexAttributes: position, normal, colour.
                attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x3, 2 => Unorm8x4],
            }],
        },
        fragment: Some(wgpu::FragmentState {
            module: &terrain_module,
            entry_point: "fs_main",
            targets: &[sc_desc.format.into()],
        }),
        primitive: wgpu::PrimitiveState {
            front_face: wgpu::FrontFace::Ccw,
            cull_mode: Some(wgpu::Face::Front),
            ..Default::default()
        },
        depth_stencil: Some(wgpu::DepthStencilState {
            format: wgpu::TextureFormat::Depth32Float,
            depth_write_enabled: true,
            depth_compare: wgpu::CompareFunction::Less,
            stencil: wgpu::StencilState::default(),
            bias: wgpu::DepthBiasState::default(),
        }),
        multisample: wgpu::MultisampleState::default(),
    });
    // Done
    Example {
        water_vertex_buf,
        water_vertex_count: water_vertices.len(),
        water_bind_group_layout,
        water_bind_group,
        water_uniform_buf,
        water_pipeline,
        terrain_vertex_buf,
        terrain_vertex_count: terrain_vertices.len(),
        terrain_normal_bind_group,
        terrain_flipped_bind_group,
        terrain_normal_uniform_buf,
        terrain_flipped_uniform_buf,
        terrain_pipeline,
        reflect_view,
        depth_buffer,
        current_frame: 0,
        active: Some(0),
    }
}
/// Handles window events. This example does not respond to any input, so
/// the event is intentionally ignored.
fn update(&mut self, _event: winit::event::WindowEvent) {
    //empty
}
/// Reacts to a swap chain resize: suspends rendering for a zero-sized
/// surface, otherwise regenerates every size-dependent GPU resource.
fn resize(
    &mut self,
    sc_desc: &wgpu::SwapChainDescriptor,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) {
    // A surface with ANY zero dimension (e.g. a minimized window) cannot
    // back a render target, and creating zero-sized textures is invalid.
    // Check each dimension independently rather than requiring both to be
    // zero, so a degenerate 0xN surface also stops rendering.
    if sc_desc.width == 0 || sc_desc.height == 0 {
        // Stop rendering altogether.
        self.active = None;
        return;
    }
    // Resume rendering from the current frame onward.
    self.active = Some(self.current_frame);
    // Regenerate all of the buffers and textures, since they depend on
    // the surface dimensions.
    let (reflect_view, depth_buffer, water_bind_group) = Self::initialize_resources(
        sc_desc,
        device,
        queue,
        &self.water_uniform_buf,
        &self.terrain_normal_uniform_buf,
        &self.terrain_flipped_uniform_buf,
        &self.water_bind_group_layout,
    );
    self.water_bind_group = water_bind_group;
    self.depth_buffer = depth_buffer;
    self.reflect_view = reflect_view;
}
// `eq_op` is allowed for the deliberately explicit `255.0 / 255.0` below.
#[allow(clippy::eq_op)]
/// Renders one frame in three passes:
/// 1. the flipped terrain into the reflection texture,
/// 2. the normal terrain into the frame (writing depth),
/// 3. the water on top, reading (but not writing) the depth buffer.
fn render(
    &mut self,
    frame: &wgpu::SwapChainTexture,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    _spawner: &framework::Spawner,
) {
    // Increment frame count regardless of if we draw.
    self.current_frame += 1;
    let back_color = wgpu::Color {
        r: 161.0 / 255.0,
        g: 246.0 / 255.0,
        b: 255.0 / 255.0,
        a: 1.0,
    };
    // Write the sin/cos values to the uniform buffer for the water.
    // The offset (2 matrices = 128 bytes) lands on WaterUniforms'
    // time_size_width.x/.y, which the shader reads for animation.
    let (water_sin, water_cos) = ((self.current_frame as f32) / 600.0).sin_cos();
    queue.write_buffer(
        &self.water_uniform_buf,
        mem::size_of::<[f32; 16]>() as wgpu::BufferAddress * 2,
        bytemuck::cast_slice(&[water_sin, water_cos]),
    );
    // Only render valid frames. See resize method.
    // `active` holds the frame index at the last resize; frames at or
    // before it are skipped.
    if let Some(active) = self.active {
        if active >= self.current_frame {
            return;
        }
    } else {
        return;
    }
    // The encoder provides a way to turn our instructions here, into
    // a command buffer the GPU can understand.
    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
        label: Some("Main Command Encoder"),
    });
    // First pass: render the reflection.
    {
        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[wgpu::RenderPassColorAttachment {
                view: &self.reflect_view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(back_color),
                    store: true,
                },
            }],
            // We still need to use the depth buffer here
            // since the pipeline requires it.
            depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                view: &self.depth_buffer,
                depth_ops: Some(wgpu::Operations {
                    load: wgpu::LoadOp::Clear(1.0),
                    store: true,
                }),
                stencil_ops: None,
            }),
        });
        rpass.set_pipeline(&self.terrain_pipeline);
        rpass.set_bind_group(0, &self.terrain_flipped_bind_group, &[]);
        rpass.set_vertex_buffer(0, self.terrain_vertex_buf.slice(..));
        rpass.draw(0..self.terrain_vertex_count as u32, 0..1);
    }
    // Terrain right side up. This time we need to use the
    // depth values, so we must use StoreOp::Store.
    {
        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[wgpu::RenderPassColorAttachment {
                view: &frame.view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(back_color),
                    store: true,
                },
            }],
            depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                view: &self.depth_buffer,
                depth_ops: Some(wgpu::Operations {
                    load: wgpu::LoadOp::Clear(1.0),
                    store: true,
                }),
                stencil_ops: None,
            }),
        });
        rpass.set_pipeline(&self.terrain_pipeline);
        rpass.set_bind_group(0, &self.terrain_normal_bind_group, &[]);
        rpass.set_vertex_buffer(0, self.terrain_vertex_buf.slice(..));
        rpass.draw(0..self.terrain_vertex_count as u32, 0..1);
    }
    // Render the water. This reads from the depth buffer, but does not write
    // to it, so it cannot be in the same render pass.
    {
        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[wgpu::RenderPassColorAttachment {
                view: &frame.view,
                resolve_target: None,
                ops: wgpu::Operations {
                    // Load: keep the terrain already rendered to the frame.
                    load: wgpu::LoadOp::Load,
                    store: true,
                },
            }],
            depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                view: &self.depth_buffer,
                // depth_ops: None => read-only depth attachment.
                depth_ops: None,
                stencil_ops: None,
            }),
        });
        rpass.set_pipeline(&self.water_pipeline);
        rpass.set_bind_group(0, &self.water_bind_group, &[]);
        rpass.set_vertex_buffer(0, self.water_vertex_buf.slice(..));
        rpass.draw(0..self.water_vertex_count as u32, 0..1);
    }
    queue.submit(iter::once(encoder.finish()));
}
}
/// Entry point: hands control to the shared example framework, which owns
/// the event loop and drives `Example`'s init/resize/update/render.
fn main() {
    framework::run::<Example>("water");
}

View File

@ -0,0 +1,286 @@
//!
//! This module covers generating points in a hexagonal fashion.
//!
use bytemuck::{Pod, Zeroable};
use cgmath::{InnerSpace, Point3, Vector3};
use std::collections::HashMap;
// The following constants are used in calculations.
// A and B are multiplication factors for x and y.
///
/// X multiplication factor.
/// 1.0 / sqrt(2)
///
const A: f32 = std::f32::consts::FRAC_1_SQRT_2;
///
/// Y multiplication factor.
/// sqrt(3) / sqrt(2) == sqrt(1.5)
///
const B: f32 = SQRT_3 * A;
///
/// `sin(45deg)` is used to rotate the points.
///
const S45: f32 = std::f32::consts::FRAC_1_SQRT_2;
///
/// `cos(45deg)` is used to rotate the points.
/// Equal to `S45` because sin(45deg) == cos(45deg).
///
const C45: f32 = S45;
///
/// sqrt(3) to f32 precision.
///
const SQRT_3: f32 = 1.7320508;
/// Raw terrain vertex as uploaded to the GPU vertex buffer.
/// `#[repr(C)]` plus `Pod`/`Zeroable` allow casting slices of this struct
/// to bytes with bytemuck. Field order matches the pipeline's vertex
/// attributes (Float32x3, Float32x3, Unorm8x4).
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Pod, Zeroable)]
pub struct TerrainVertexAttributes {
    position: [f32; 3],
    normal: [f32; 3],
    colour: [u8; 4],
}
/// Raw water vertex as uploaded to the GPU vertex buffer.
/// Coordinates are quantized to i16 and neighbour offsets to i8 to keep
/// the buffer small; matches the pipeline's Sint16x2/Sint8x4 attributes.
#[repr(C)]
#[derive(Copy, Clone, Debug, PartialEq, Pod, Zeroable)]
pub struct WaterVertexAttributes {
    position: [i16; 2],
    offsets: [i8; 4],
}
///
/// Represents the center of a single hexagon.
///
#[derive(Copy, Clone, Debug)]
pub struct TerrainVertex {
    // World-space position of the hexagon center.
    pub position: Point3<f32>,
    // RGBA colour applied to every triangle fanned out from this center.
    pub colour: [u8; 4],
}
///
/// Gets the surrounding hexagonal points from a point.
///
/// +---0---1
/// | / |   |
/// 5---p---2
/// |   | / |
/// 4---3---+
///
/// The six neighbours are returned in the fixed order shown above.
///
fn surrounding_hexagonal_points(x: isize, y: isize) -> [(isize, isize); 6] {
    // Constant offsets from the center to each neighbour, in diagram order.
    const NEIGHBOUR_OFFSETS: [(isize, isize); 6] =
        [(0, -1), (1, -1), (1, 0), (0, 1), (-1, 1), (-1, 0)];
    let mut neighbours = [(0, 0); 6];
    for (slot, &(dx, dy)) in neighbours.iter_mut().zip(NEIGHBOUR_OFFSETS.iter()) {
        *slot = (x + dx, y + dy);
    }
    neighbours
}
/// Visits every adjacent pair of neighbour values around `(x, y)`.
///
/// Walks the six neighbours in ring order (wrapping back to the first) and
/// calls `for_each` for each consecutive pair where BOTH neighbours exist
/// in `hashmap`; pairs with a missing member are skipped.
fn surrounding_point_values_iter<T>(
    hashmap: &HashMap<(isize, isize), T>,
    x: isize,
    y: isize,
    for_each: impl FnMut((&T, &T)),
) {
    let ring = surrounding_hexagonal_points(x, y);
    // Append the first neighbour again so the ring closes.
    let closed = [
        ring[0], ring[1], ring[2], ring[3], ring[4], ring[5], ring[0],
    ];
    closed
        .windows(2)
        .filter_map(|pair| Some((hashmap.get(&pair[0])?, hashmap.get(&pair[1])?)))
        .for_each(for_each);
}
///
/// Used in calculating terrain normals.
///
/// Returns the unit normal of the triangle `(a, b, c)`, computed from the
/// normalized edge vectors out of `a`.
pub fn calculate_normal(a: Point3<f32>, b: Point3<f32>, c: Point3<f32>) -> Vector3<f32> {
    let edge_ab = (b - a).normalize();
    let edge_ac = (c - a).normalize();
    edge_ab.cross(edge_ac).normalize()
}
///
/// Given the radius, how large of a square do we need to make a unit hexagon grid?
///
/// The result is always odd, so the grid has a center column.
///
fn q_given_r(radius: f32) -> usize {
    // Columns needed to span the diameter on the hexagonal lattice.
    let span = (4.0 * radius / SQRT_3 + 1.0).floor();
    // Round down to an even count, then add the center column.
    let even = (span / 2.0).floor() * 2.0;
    (even + 1.0) as usize
}
///
/// Represents terrain, however it contains the vertices only once.
///
#[derive(Clone)]
pub struct HexTerrainMesh {
    // Hexagon centers keyed by their integer grid coordinate.
    pub vertices: HashMap<(isize, isize), TerrainVertex>,
    // Half the grid width; iteration runs -half_size..=half_size.
    half_size: isize,
}
impl HexTerrainMesh {
///
/// Generates the vertices (or the centers of the hexagons). The colour and height is determined by
/// a function passed in by the user.
///
/// `gen_vertex` receives the rotated/scaled `[x, z]` lattice position and
/// returns the full vertex (height and colour included).
pub fn generate(radius: f32, mut gen_vertex: impl FnMut([f32; 2]) -> TerrainVertex) -> Self {
    let width = q_given_r(radius);
    let half_width = (width / 2) as isize;
    let mut map = HashMap::new();
    for i in -half_width..=half_width {
        let x_o = i as f32;
        for j in -half_width..=half_width {
            let y_o = j as f32;
            // Rotate the square grid by 45 degrees and scale each axis to
            // land on a hexagonal lattice.
            let x = A * (x_o * C45 - y_o * S45);
            let z = B * (x_o * S45 + y_o * C45);
            // Keep only lattice points inside the requested radius.
            // (The original also tracked the maximum vertex height here,
            // but that value was never read, so it has been removed.)
            if x.hypot(z) < radius {
                map.insert((i, j), gen_vertex([x, z]));
            }
        }
    }
    Self {
        vertices: map,
        half_size: half_width,
    }
}
///
/// Creates the points required to render the mesh.
///
/// Each hexagon center is fanned into triangles with its neighbours; every
/// triangle is emitted as flat-shaded `TerrainVertexAttributes`.
///
pub fn make_buffer_data(&self) -> Vec<TerrainVertexAttributes> {
    let mut vertices = Vec::new();
    // Centroid of the triangle (p1, p2, p).
    fn middle(p1: &TerrainVertex, p2: &TerrainVertex, p: &TerrainVertex) -> Point3<f32> {
        Point3 {
            x: (p1.position.x + p2.position.x + p.position.x) / 3.0,
            y: (p1.position.y + p2.position.y + p.position.y) / 3.0,
            z: (p1.position.z + p2.position.z + p.position.z) / 3.0,
        }
    }
    // Midpoint of the segment (p1, p2).
    fn half(p1: &TerrainVertex, p2: &TerrainVertex) -> Point3<f32> {
        Point3 {
            x: (p1.position.x + p2.position.x) / 2.0,
            y: (p1.position.y + p2.position.y) / 2.0,
            z: (p1.position.z + p2.position.z) / 2.0,
        }
    }
    let mut push_triangle = |p1: &TerrainVertex,
                             p2: &TerrainVertex,
                             p: &TerrainVertex,
                             c: [u8; 4]| {
        let m = middle(p1, p2, p);
        let ap = half(p1, p);
        let bp = half(p2, p);
        let p = p.position;
        let n1 = calculate_normal(ap, m, p);
        let n2 = calculate_normal(m, bp, p);
        // The six positions form two triangles: (ap, m, p) with normal n1
        // and (m, bp, p) with normal n2. `repeat` yields forever, so the
        // first repeater must be capped with `take(3)` — without it the
        // chained `n2` repeater is never reached and both triangles would
        // (incorrectly) receive n1.
        vertices.extend(
            [ap, m, p, m, bp, p]
                .iter()
                .zip(
                    std::iter::repeat::<[f32; 3]>(n1.into())
                        .take(3)
                        .chain(std::iter::repeat::<[f32; 3]>(n2.into())),
                )
                .zip(std::iter::repeat(c))
                .map(|((pos, normal), colour)| TerrainVertexAttributes {
                    position: *pos.as_ref(),
                    normal,
                    colour,
                }),
        );
    };
    for i in -self.half_size..=self.half_size {
        for j in -self.half_size..=self.half_size {
            if let Some(p) = self.vertices.get(&(i, j)) {
                surrounding_point_values_iter(&self.vertices, i, j, |(a, b)| {
                    push_triangle(a, b, p, p.colour)
                });
            }
        }
    }
    vertices
}
}
///
/// Water mesh which contains vertex data for the water mesh.
///
/// It stores the values multiplied and rounded to the
/// nearest whole number to be more efficient with space when
/// sending large meshes to the GPU.
///
pub struct HexWaterMesh {
    // Quantized [x, z] vertex positions keyed by grid coordinate.
    pub vertices: HashMap<(isize, isize), [i16; 2]>,
    // Half the grid width; iteration runs -half_size..=half_size.
    half_size: isize,
}
impl HexWaterMesh {
/// Generates the quantized water lattice for the given radius.
///
/// Uses the same rotated hexagonal lattice as the terrain, but stores each
/// position as rounded `i16` pairs to keep the GPU buffer compact.
pub fn generate(radius: f32) -> Self {
    let width = q_given_r(radius);
    let half_width = (width / 2) as isize;
    let mut vertices = HashMap::new();
    for i in -half_width..=half_width {
        for j in -half_width..=half_width {
            let (x_o, y_o) = (i as f32, j as f32);
            // 45-degree rotation + per-axis scale onto the hex lattice.
            let x = A * (x_o * C45 - y_o * S45);
            let z = B * (x_o * S45 + y_o * C45);
            if x.hypot(z) < radius {
                // Quantize: x doubled, z rescaled by sqrt(2)/B, both rounded.
                let q_x = (x * 2.0).round() as i16;
                let q_z = ((z / B) * std::f32::consts::SQRT_2).round() as i16;
                vertices.insert((i, j), [q_x, q_z]);
            }
        }
    }
    Self {
        vertices,
        half_size: half_width,
    }
}
///
/// Generates the points required to render the mesh.
///
/// Every third lattice center (where `(i - j) % 3 == 0`) emits triangles
/// with each adjacent pair of its neighbours; each triangle corner carries
/// the packed offsets to the other two corners.
///
pub fn generate_points(&self) -> Vec<WaterVertexAttributes> {
    let mut vertices = Vec::new();
    // Offsets from `base` to the other two triangle corners, packed as i8.
    fn offsets_from(base: [i16; 2], second: [i16; 2], third: [i16; 2]) -> [i8; 4] {
        [
            (second[0] - base[0]) as i8,
            (second[1] - base[1]) as i8,
            (third[0] - base[0]) as i8,
            (third[1] - base[1]) as i8,
        ]
    }
    let mut push_triangle = |a: [i16; 2], b: [i16; 2], c: [i16; 2]| {
        let corners = [a, b, c];
        let deltas = [
            offsets_from(a, b, c),
            offsets_from(b, c, a),
            offsets_from(c, a, b),
        ];
        for (&position, &offsets) in corners.iter().zip(deltas.iter()) {
            vertices.push(WaterVertexAttributes { position, offsets });
        }
    };
    for i in -self.half_size..=self.half_size {
        for j in -self.half_size..=self.half_size {
            // Only every third center spawns triangles; its neighbours
            // cover the rest of the surface.
            if (i - j) % 3 == 0 {
                if let Some(&p) = self.vertices.get(&(i, j)) {
                    surrounding_point_values_iter(&self.vertices, i, j, |(a, b)| {
                        push_triangle(*a, *b, p)
                    });
                }
            }
        }
    }
    vertices
}
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 196 KiB

View File

@ -0,0 +1,49 @@
// Uniforms shared by both terrain passes: the combined projection * view
// matrix and a user clipping plane (xyz = plane normal, w = offset) used
// to cut away geometry when rendering the under-water reflection.
[[block]]
struct Uniforms {
    projection_view: mat4x4<f32>;
    clipping_plane: vec4<f32>;
};
[[group(0), binding(0)]]
var<uniform> uniforms: Uniforms;
// Hard-coded light position and warm light colour.
let light: vec3<f32> = vec3<f32>(150.0, 70.0, 0.0);
let light_colour: vec3<f32> = vec3<f32>(1.0, 0.98, 0.82);
// Constant ambient term added to the diffuse brightness.
let ambient: f32 = 0.2;
struct VertexOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(0)]] colour: vec4<f32>;
    // Comment this out if using user-clipping planes:
    [[location(1)]] clip_dist: f32;
};
[[stage(vertex)]]
fn vs_main(
    [[location(0)]] position: vec3<f32>,
    [[location(1)]] normal: vec3<f32>,
    [[location(2)]] colour: vec4<f32>,
) -> VertexOutput {
    var out: VertexOutput;
    out.position = uniforms.projection_view * vec4<f32>(position, 1.0);
    // https://www.desmos.com/calculator/nqgyaf8uvo
    // NOTE(review): this vector points from the light toward the vertex;
    // the clamp to [0.2, 1.0] keeps back-facing surfaces lit — confirm the
    // intended sign convention.
    let normalized_light_direction = normalize(position - light);
    let brightness_diffuse = clamp(dot(normalized_light_direction, normal), 0.2, 1.0);
    out.colour = vec4<f32>(max((brightness_diffuse + ambient) * light_colour * colour.rgb, vec3<f32>(0.0, 0.0, 0.0)), colour.a);
    // Signed distance to the clipping plane, interpolated per fragment.
    out.clip_dist = dot(vec4<f32>(position, 1.0), uniforms.clipping_plane);
    return out;
}
// `early_depth_test` forces the depth test to run before this shader even
// though the body contains a `discard`.
[[stage(fragment), early_depth_test]]
fn fs_main(
    in: VertexOutput,
) -> [[location(0)]] vec4<f32> {
    // Comment this out if using user-clipping planes:
    if(in.clip_dist < 0.0) {
        discard;
    }
    // The terrain is fully opaque: alpha is forced to 1.0.
    return vec4<f32>(in.colour.xyz, 1.0);
}

View File

@ -0,0 +1,252 @@
// Per-frame uniforms for the water: camera matrices, animation state
// (time_size_width packs sin/cos of time, mesh size, and viewport width),
// and the viewport height used to normalize fragment coordinates.
[[block]]
struct Uniforms {
    view: mat4x4<f32>;
    projection: mat4x4<f32>;
    time_size_width: vec4<f32>;
    viewport_height: f32;
};
[[group(0), binding(0)]] var<uniform> uniforms: Uniforms;
// Hard-coded light position/colour, matching the terrain shader.
let light_point: vec3<f32> = vec3<f32>(150.0, 70.0, 0.0);
let light_colour: vec3<f32> = vec3<f32>(1.0, 0.98, 0.82);
let one: vec4<f32> = vec4<f32>(1.0, 1.0, 1.0, 1.0);
// sqrt(3)/2 — vertical scale of the hex lattice.
let Y_SCL: f32 = 0.86602540378443864676372317075294;
let CURVE_BIAS: f32 = -0.1;
let INV_1_CURVE_BIAS: f32 = 1.11111111111; //1.0 / (1.0 + CURVE_BIAS);
//
// The following code to calculate simplex 3D
// is from https://github.com/ashima/webgl-noise
//
// Simplex 3D Noise
// by Ian McEwan, Ashima Arts.
//
// Permutation polynomial mod 289, applied component-wise.
fn permute(x: vec4<f32>) -> vec4<f32> {
    var temp: vec4<f32> = 289.0 * one;
    return modf(((x*34.0) + one) * x, &temp);
}
// Fast inverse-sqrt approximation used to normalise gradients.
fn taylorInvSqrt(r: vec4<f32>) -> vec4<f32> {
    return 1.79284291400159 * one - 0.85373472095314 * r;
}
// 3D simplex noise in roughly [-1, 1]; see the attribution above.
fn snoise(v: vec3<f32>) -> f32 {
    let C = vec2<f32>(1.0/6.0, 1.0/3.0);
    let D = vec4<f32>(0.0, 0.5, 1.0, 2.0);
    // First corner
    //TODO: use the splat operations when available
    let vCy = dot(v, C.yyy);
    var i: vec3<f32> = floor(v + vec3<f32>(vCy, vCy, vCy));
    let iCx = dot(i, C.xxx);
    let x0 = v - i + vec3<f32>(iCx, iCx, iCx);
    // Other corners
    let g = step(x0.yzx, x0.xyz);
    let l = (vec3<f32>(1.0, 1.0, 1.0) - g).zxy;
    let i1 = min(g, l);
    let i2 = max(g, l);
    // x0 = x0 - 0.0 + 0.0 * C.xxx;
    // x1 = x0 - i1 + 1.0 * C.xxx;
    // x2 = x0 - i2 + 2.0 * C.xxx;
    // x3 = x0 - 1.0 + 3.0 * C.xxx;
    let x1 = x0 - i1 + C.xxx;
    let x2 = x0 - i2 + C.yyy; // 2.0*C.x = 1/3 = C.y
    let x3 = x0 - D.yyy; // -1.0+3.0*C.x = -0.5 = -D.y
    // Permutations
    var temp: vec3<f32> = 289.0 * one.xyz;
    i = modf(i, &temp);
    let p = permute(
        permute(
        permute(i.zzzz + vec4<f32>(0.0, i1.z, i2.z, 1.0))
            + i.yyyy + vec4<f32>(0.0, i1.y, i2.y, 1.0))
            + i.xxxx + vec4<f32>(0.0, i1.x, i2.x, 1.0));
    // Gradients: 7x7 points over a square, mapped onto an octahedron.
    // The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294)
    let n_ = 0.142857142857;// 1.0/7.0
    let ns = n_ * D.wyz - D.xzx;
    let j = p - 49.0 * floor(p * ns.z * ns.z);// mod(p,7*7)
    let x_ = floor(j * ns.z);
    let y_ = floor(j - 7.0 * x_);// mod(j,N)
    var x: vec4<f32> = x_ *ns.x + ns.yyyy;
    var y: vec4<f32> = y_ *ns.x + ns.yyyy;
    let h = one - abs(x) - abs(y);
    let b0 = vec4<f32>(x.xy, y.xy);
    let b1 = vec4<f32>(x.zw, y.zw);
    //vec4 s0 = vec4(lessThan(b0,0.0))*2.0 - one;
    //vec4 s1 = vec4(lessThan(b1,0.0))*2.0 - one;
    let s0 = floor(b0)*2.0 + one;
    let s1 = floor(b1)*2.0 + one;
    let sh = -step(h, 0.0 * one);
    let a0 = b0.xzyw + s0.xzyw*sh.xxyy;
    let a1 = b1.xzyw + s1.xzyw*sh.zzww;
    var p0: vec3<f32> = vec3<f32>(a0.xy, h.x);
    var p1: vec3<f32> = vec3<f32>(a0.zw, h.y);
    var p2: vec3<f32> = vec3<f32>(a1.xy, h.z);
    var p3: vec3<f32> = vec3<f32>(a1.zw, h.w);
    //Normalise gradients
    let norm = taylorInvSqrt(vec4<f32>(dot(p0, p0), dot(p1, p1), dot(p2, p2), dot(p3, p3)));
    p0 = p0 * norm.x;
    p1 = p1 * norm.y;
    p2 = p2 * norm.z;
    p3 = p3 * norm.w;
    // Mix final noise value
    var m: vec4<f32> = max(0.6 * one - vec4<f32>(dot(x0, x0), dot(x1, x1), dot(x2, x2), dot(x3, x3)), 0.0 * one);
    m = m * m;
    return 9.0 * dot(m*m, vec4<f32>(dot(p0, x0), dot(p1, x1), dot(p2, x2), dot(p3, x3)));
}
// End of 3D simplex code.
// Displaces a vertex with time-varying simplex noise so the water surface
// appears to ripple; each axis samples the noise at a different rotated
// position to avoid visible correlation.
fn apply_distortion(pos: vec3<f32>) -> vec3<f32> {
    var perlin_pos: vec3<f32> = pos;
    //Do noise transformation to permit for smooth,
    //continuous movement.
    //TODO: we should be able to name them `sin` and `cos`.
    let sn = uniforms.time_size_width.x;
    let cs = uniforms.time_size_width.y;
    let size = uniforms.time_size_width.z;
    // Rotate 90 Z, Move Left Size / 2
    perlin_pos = vec3<f32>(perlin_pos.y - perlin_pos.x - size, perlin_pos.x, perlin_pos.z);
    let xcos = perlin_pos.x * cs;
    let xsin = perlin_pos.x * sn;
    let ycos = perlin_pos.y * cs;
    let ysin = perlin_pos.y * sn;
    let zcos = perlin_pos.z * cs;
    let zsin = perlin_pos.z * sn;
    // Rotate Time Y
    // NOTE(review): `-xsin + xcos` looks like it was meant to be
    // `-xsin + zcos` for a proper Y rotation — confirm; as noise input any
    // mixing still animates, so this is cosmetic at most.
    let perlin_pos_y = vec3<f32>(xcos + zsin, perlin_pos.y, -xsin + xcos);
    // Rotate Time Z
    let perlin_pos_z = vec3<f32>(xcos - ysin, xsin + ycos, perlin_pos.x);
    // Rotate 90 Y
    perlin_pos = vec3<f32>(perlin_pos.z - perlin_pos.x, perlin_pos.y, perlin_pos.x);
    // Rotate Time X
    let perlin_pos_x = vec3<f32>(perlin_pos.x, ycos - zsin, ysin + zcos);
    // Sample at different places for x/y/z to get random-looking water.
    return vec3<f32>(
        //TODO: use splats
        pos.x + snoise(perlin_pos_x + 2.0*one.xxx) * 0.4,
        pos.y + snoise(perlin_pos_y - 2.0*one.xxx) * 1.8,
        pos.z + snoise(perlin_pos_z) * 0.4
    );
}
// Multiply the input by the scale values.
fn make_position(original: vec2<f32>) -> vec4<f32> {
    let interpreted = vec3<f32>(original.x * 0.5, 0.0, original.y * Y_SCL);
    return vec4<f32>(apply_distortion(interpreted), 1.0);
}
// Create the normal, and apply the curve. Change the Curve Bias above.
fn make_normal(a: vec3<f32>, b: vec3<f32>, c: vec3<f32>) -> vec3<f32> {
    let norm = normalize(cross(b - c, a - c));
    let center = (a + b + c) * (1.0 / 3.0); //TODO: use splat
    return (normalize(a - center) * CURVE_BIAS + norm) * INV_1_CURVE_BIAS;
}
// Calculate the fresnel effect.
fn calc_fresnel(view: vec3<f32>, normal: vec3<f32>) -> f32 {
    var refractive: f32 = abs(dot(view, normal));
    refractive = pow(refractive, 1.33333333333);
    return refractive;
}
// Calculate the specular lighting.
fn calc_specular(eye: vec3<f32>, normal: vec3<f32>, light: vec3<f32>) -> f32 {
    let light_reflected = reflect(light, normal);
    var specular: f32 = max(dot(eye, light_reflected), 0.0);
    specular = pow(specular, 10.0);
    return specular;
}
struct VertexOutput {
    [[builtin(position)]] position: vec4<f32>;
    [[location(0)]] f_WaterScreenPos: vec2<f32>;
    [[location(1)]] f_Fresnel: f32;
    [[location(2)]] f_Light: vec3<f32>;
};
// Vertex stage: reconstructs the triangle from the quantized position plus
// neighbour offsets, distorts it, and computes per-vertex lighting terms.
[[stage(vertex)]]
fn vs_main(
    [[location(0)]] position: vec2<i32>,
    [[location(1)]] offsets: vec4<i32>,
) -> VertexOutput {
    let p_pos = vec2<f32>(position);
    let b_pos = make_position(p_pos + vec2<f32>(offsets.xy));
    let c_pos = make_position(p_pos + vec2<f32>(offsets.zw));
    let a_pos = make_position(p_pos);
    let original_pos = vec4<f32>(p_pos.x * 0.5, 0.0, p_pos.y * Y_SCL, 1.0);
    let vm = uniforms.view;
    let transformed_pos = vm * a_pos;
    //TODO: use vector splats for division
    let water_pos = transformed_pos.xyz * (1.0 / transformed_pos.w);
    let normal = make_normal((vm * a_pos).xyz, (vm * b_pos).xyz, (vm * c_pos).xyz);
    let eye = normalize(-water_pos);
    let transformed_light = vm * vec4<f32>(light_point, 1.0);
    var out: VertexOutput;
    out.f_Light = light_colour * calc_specular(eye, normal, normalize(water_pos.xyz - (transformed_light.xyz * (1.0 / transformed_light.w))));
    out.f_Fresnel = calc_fresnel(eye, normal);
    // Project the UNDISTORTED position to get stable screen-space UVs for
    // sampling the reflection texture.
    let gridpos = uniforms.projection * vm * original_pos;
    out.f_WaterScreenPos = (0.5 * gridpos.xy * (1.0 / gridpos.w)) + vec2<f32>(0.5, 0.5);
    out.position = uniforms.projection * transformed_pos;
    return out;
}
let water_colour: vec3<f32> = vec3<f32>(0.0, 0.46, 0.95);
// Near/far planes used to linearize depth values.
let zNear: f32 = 10.0;
let zFar: f32 = 400.0;
[[group(0), binding(1)]] var reflection: texture_2d<f32>;
[[group(0), binding(2)]] var terrain_depth_tex: texture_2d<f32>;
[[group(0), binding(3)]] var colour_sampler: sampler;
// Converts a non-linear [0,1] depth-buffer value to eye-space distance.
fn to_linear_depth(depth: f32) -> f32 {
    let z_n: f32 = 2.0 * depth - 1.0;
    let z_e: f32 = 2.0 * zNear * zFar / (zFar + zNear - z_n * (zFar - zNear));
    return z_e;
}
// Fragment stage: combines specular light and the sampled reflection,
// tints by water depth, and fades alpha near the shore and with fresnel.
[[stage(fragment)]]
fn fs_main(in: VertexOutput) -> [[location(0)]] vec4<f32> {
    let reflection_colour = textureSample(reflection, colour_sampler, in.f_WaterScreenPos.xy).xyz;
    let pixel_depth = to_linear_depth(in.position.z);
    let normalized_coords = in.position.xy / vec2<f32>(uniforms.time_size_width.w, uniforms.viewport_height);
    let terrain_depth = to_linear_depth(textureSample(terrain_depth_tex, colour_sampler, normalized_coords).r);
    // Water column thickness between the surface and the terrain below.
    let dist = terrain_depth - pixel_depth;
    let clamped = pow(smoothStep(0.0, 1.5, dist), 4.8);
    let final_colour = in.f_Light + reflection_colour;
    let t = smoothStep(1.0, 5.0, dist) * 0.2; //TODO: splat for mix()?
    let depth_colour = mix(final_colour, water_colour, vec3<f32>(t, t, t));
    return vec4<f32>(depth_colour, clamped * (1.0 - in.f_Fresnel));
}

BIN
wgpu/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 37 KiB

0
wgpu/rustfmt.toml Normal file
View File

1972
wgpu/src/backend/direct.rs Normal file

File diff suppressed because it is too large Load Diff

333
wgpu/src/backend/error.rs Normal file
View File

@ -0,0 +1,333 @@
use std::{error::Error, fmt};
/// An error wrapped with the API operation it occurred in and the debug
/// label of the object involved, used for pretty-printed validation output.
#[derive(Debug)]
pub(super) struct ContextError {
    /// Name of the API operation that failed (rendered as "In {string}").
    pub string: &'static str,
    /// The underlying error; exposed through `Error::source`.
    pub cause: Box<dyn Error + Send + Sync + 'static>,
    /// Key under which `label` is printed (e.g. "label"); may be empty.
    pub label_key: &'static str,
    /// User-supplied debug label of the offending object; may be empty.
    pub label: String,
}
impl fmt::Display for ContextError {
    /// Renders as `In <operation>`, e.g. "In Device::create_buffer".
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("In ")?;
        formatter.write_str(self.string)
    }
}
impl Error for ContextError {
    /// Expose the wrapped error so `format_error` can walk the source chain.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(self.cause.as_ref())
    }
}
impl super::Context {
    /// Walk the `source` chain of `err`, pretty-printing each link, and
    /// assemble the full multi-line "Validation Error" report.
    pub(super) fn format_error(&self, err: &(impl Error + 'static)) -> String {
        let mut err_descs = vec![self.format_pretty_any(err)];
        let mut source_opt = err.source();
        while let Some(source) = source_opt {
            err_descs.push(self.format_pretty_any(source));
            source_opt = source.source();
        }
        format!("Validation Error\n\nCaused by:\n{}", err_descs.join(""))
    }
    /// Downcast `error` to each wgpu-core error type with a `PrettyError`
    /// impl so object labels can be resolved; otherwise fall back to the
    /// plain `Display` rendering.
    fn format_pretty_any(&self, error: &(dyn Error + 'static)) -> String {
        if let Some(pretty_err) = error.downcast_ref::<ContextError>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::RenderCommandError>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::binding_model::CreateBindGroupError>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) =
            error.downcast_ref::<wgc::binding_model::CreatePipelineLayoutError>()
        {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::ExecutionError>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::RenderPassErrorInner>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::RenderPassError>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::ComputePassErrorInner>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::ComputePassError>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::RenderBundleError>() {
            return pretty_err.fmt_pretty(self);
        }
        if let Some(pretty_err) = error.downcast_ref::<wgc::command::TransferError>() {
            return pretty_err.fmt_pretty(self);
        }
        // default
        format_error_line(error.as_display())
    }
}
/// Render one error as an indented line for the "Caused by:" section.
pub(super) fn format_error_line(err: &dyn fmt::Display) -> String {
    let mut line = String::from("    ");
    line.push_str(&err.to_string());
    line.push('\n');
    line
}
/// Render an auxiliary "note:" line, indented under its error line.
pub(super) fn format_note_line(note: &dyn fmt::Display) -> String {
    let mut line = String::from("      note: ");
    line.push_str(&note.to_string());
    line.push('\n');
    line
}
pub(super) fn format_label_line(label_key: &str, label_value: &str) -> String {
if label_key.is_empty() || label_value.is_empty() {
String::new()
} else {
format_note_line(&format!("{} = `{}`", label_key, label_value))
}
}
/// Helper to coerce any `Display` value into a `&dyn fmt::Display`
/// (used where `self` would not coerce implicitly).
trait AsDisplay {
    fn as_display(&self) -> &dyn fmt::Display;
}
impl<T: fmt::Display> AsDisplay for T {
    fn as_display(&self) -> &dyn fmt::Display {
        self
    }
}
/// Errors that can render themselves with extra context — object labels
/// looked up through the `Context`. The default rendering is the plain
/// one-line `Display` output.
pub trait PrettyError: Error {
    fn fmt_pretty(&self, _context: &super::Context) -> String {
        format_error_line(self.as_display())
    }
}
impl PrettyError for ContextError {
    fn fmt_pretty(&self, _context: &super::Context) -> String {
        // The error line plus a note naming the labelled object involved.
        format_error_line(self.as_display()) + &format_label_line(self.label_key, &self.label)
    }
}
// Each impl below renders the error line, then — for variants that carry
// an object id — appends a note with that object's debug label, resolved
// through the global hub via `gfx_select!`.
impl PrettyError for wgc::command::RenderCommandError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        let global = context.global();
        let mut ret = format_error_line(self);
        match *self {
            Self::InvalidBindGroup(id) => {
                let name = wgc::gfx_select!(id => global.bind_group_label(id));
                ret.push_str(&format_label_line("bind group", &name));
            }
            Self::InvalidPipeline(id) => {
                let name = wgc::gfx_select!(id => global.render_pipeline_label(id));
                ret.push_str(&format_label_line("render pipeline", &name));
            }
            Self::Buffer(id, ..) | Self::DestroyedBuffer(id) => {
                let name = wgc::gfx_select!(id => global.buffer_label(id));
                ret.push_str(&format_label_line("buffer", &name));
            }
            _ => {}
        };
        ret
    }
}
impl PrettyError for wgc::binding_model::CreateBindGroupError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        let global = context.global();
        let mut ret = format_error_line(self);
        match *self {
            Self::InvalidBuffer(id) => {
                let name = wgc::gfx_select!(id => global.buffer_label(id));
                ret.push_str(&format_label_line("buffer", &name));
            }
            Self::InvalidTextureView(id) => {
                let name = wgc::gfx_select!(id => global.texture_view_label(id));
                ret.push_str(&format_label_line("texture view", &name));
            }
            Self::InvalidSampler(id) => {
                let name = wgc::gfx_select!(id => global.sampler_label(id));
                ret.push_str(&format_label_line("sampler", &name));
            }
            _ => {}
        };
        ret
    }
}
impl PrettyError for wgc::binding_model::CreatePipelineLayoutError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        let global = context.global();
        let mut ret = format_error_line(self);
        if let Self::InvalidBindGroupLayout(id) = *self {
            let name = wgc::gfx_select!(id => global.bind_group_layout_label(id));
            ret.push_str(&format_label_line("bind group layout", &name));
        };
        ret
    }
}
impl PrettyError for wgc::command::ExecutionError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        let global = context.global();
        let mut ret = format_error_line(self);
        // Exhaustive match (no catch-all): adding a variant upstream will
        // surface here as a compile error, which is intentional.
        match *self {
            Self::DestroyedBuffer(id) => {
                let name = wgc::gfx_select!(id => global.buffer_label(id));
                ret.push_str(&format_label_line("buffer", &name));
            }
            Self::Unimplemented(_reason) => {}
        };
        ret
    }
}
impl PrettyError for wgc::command::RenderPassErrorInner {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        let global = context.global();
        let mut ret = format_error_line(self);
        if let Self::InvalidAttachment(id) = *self {
            let name = wgc::gfx_select!(id => global.texture_view_label(id));
            ret.push_str(&format_label_line("attachment", &name));
        };
        ret
    }
}
impl PrettyError for wgc::command::RenderPassError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        // This error is wrapper for the inner error,
        // but the scope has useful labels
        format_error_line(self) + &self.scope.fmt_pretty(context)
    }
}
impl PrettyError for wgc::command::ComputePassError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        // This error is wrapper for the inner error,
        // but the scope has useful labels
        format_error_line(self) + &self.scope.fmt_pretty(context)
    }
}
impl PrettyError for wgc::command::RenderBundleError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        // This error is wrapper for the inner error,
        // but the scope has useful labels
        format_error_line(self) + &self.scope.fmt_pretty(context)
    }
}
impl PrettyError for wgc::command::ComputePassErrorInner {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        let global = context.global();
        let mut ret = format_error_line(self);
        match *self {
            Self::InvalidBindGroup(id) => {
                let name = wgc::gfx_select!(id => global.bind_group_label(id));
                ret.push_str(&format_label_line("bind group", &name));
            }
            Self::InvalidPipeline(id) => {
                let name = wgc::gfx_select!(id => global.compute_pipeline_label(id));
                ret.push_str(&format_label_line("pipeline", &name));
            }
            Self::InvalidIndirectBuffer(id) => {
                let name = wgc::gfx_select!(id => global.buffer_label(id));
                ret.push_str(&format_label_line("indirect buffer", &name));
            }
            _ => {}
        };
        ret
    }
}
/// Pretty-printing for copy (transfer) errors: append the debug labels of
/// the buffers/textures involved so the user can identify them.
impl PrettyError for wgc::command::TransferError {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        let global = context.global();
        let mut ret = format_error_line(self);
        match *self {
            Self::InvalidBuffer(id) => {
                let name = wgc::gfx_select!(id => global.buffer_label(id));
                // Key the note by the object kind, consistent with every
                // other PrettyError impl (previously used "label").
                ret.push_str(&format_label_line("buffer", &name));
            }
            Self::InvalidTexture(id) => {
                let name = wgc::gfx_select!(id => global.texture_label(id));
                ret.push_str(&format_label_line("texture", &name));
            }
            // Kept for reference: the source-usage variant currently has no
            // label rendering (see MissingCopyDstUsageFlag below for shape).
            // Self::MissingCopySrcUsageFlag(buf_opt, tex_opt) => {
            //     if let Some(buf) = buf_opt {
            //         let name = wgc::gfx_select!(buf => global.buffer_label(buf));
            //         ret.push_str(&format_label_line("source", &name));
            //     }
            //     if let Some(tex) = tex_opt {
            //         let name = wgc::gfx_select!(tex => global.texture_label(tex));
            //         ret.push_str(&format_label_line("source", &name));
            //     }
            // }
            Self::MissingCopyDstUsageFlag(buf_opt, tex_opt) => {
                if let Some(buf) = buf_opt {
                    let name = wgc::gfx_select!(buf => global.buffer_label(buf));
                    ret.push_str(&format_label_line("destination", &name));
                }
                if let Some(tex) = tex_opt {
                    let name = wgc::gfx_select!(tex => global.texture_label(tex));
                    ret.push_str(&format_label_line("destination", &name));
                }
            }
            _ => {}
        };
        ret
    }
}
/// A scope is not itself part of the error chain; it only contributes the
/// label notes for the command/pipeline/buffer the failing pass was using.
impl PrettyError for wgc::command::PassErrorScope {
    fn fmt_pretty(&self, context: &super::Context) -> String {
        // This error is not in the error chain, only notes are needed
        let global = context.global();
        match *self {
            Self::Pass(id) => {
                let name = wgc::gfx_select!(id => global.command_buffer_label(id));
                format_label_line("command buffer", &name)
            }
            Self::SetBindGroup(id) => {
                let name = wgc::gfx_select!(id => global.bind_group_label(id));
                format_label_line("bind group", &name)
            }
            Self::SetPipelineRender(id) => {
                let name = wgc::gfx_select!(id => global.render_pipeline_label(id));
                format_label_line("render pipeline", &name)
            }
            Self::SetPipelineCompute(id) => {
                let name = wgc::gfx_select!(id => global.compute_pipeline_label(id));
                format_label_line("compute pipeline", &name)
            }
            Self::SetVertexBuffer(id) => {
                let name = wgc::gfx_select!(id => global.buffer_label(id));
                format_label_line("buffer", &name)
            }
            Self::SetIndexBuffer(id) => {
                let name = wgc::gfx_select!(id => global.buffer_label(id));
                format_label_line("buffer", &name)
            }
            Self::Draw { pipeline, .. } => {
                // The pipeline may not have been set yet when the draw failed.
                if let Some(id) = pipeline {
                    let name = wgc::gfx_select!(id => global.render_pipeline_label(id));
                    format_label_line("render pipeline", &name)
                } else {
                    String::new()
                }
            }
            Self::Dispatch { pipeline, .. } => {
                if let Some(id) = pipeline {
                    let name = wgc::gfx_select!(id => global.compute_pipeline_label(id));
                    format_label_line("compute pipeline", &name)
                } else {
                    String::new()
                }
            }
            _ => String::new(),
        }
    }
}

15
wgpu/src/backend/mod.rs Normal file
View File

@ -0,0 +1,15 @@
// Backend selection:
// - wasm32 WITHOUT the "webgl" feature -> the browser's WebGPU API ("web");
// - native, or wasm32 WITH "webgl"     -> wgpu-core ("direct").
// Both export the same `BufferMappedRange` and `Context` interface.
#[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
mod web;
#[cfg(all(target_arch = "wasm32", not(feature = "webgl")))]
pub(crate) use web::{BufferMappedRange, Context};
#[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
mod direct;
// Error pretty-printing and the polling-based future are only needed by
// the direct backend.
#[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
mod error;
#[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
pub(crate) use direct::{BufferMappedRange, Context};
#[cfg(any(not(target_arch = "wasm32"), feature = "webgl"))]
mod native_gpu_future;

View File

@ -0,0 +1,77 @@
use parking_lot::Mutex;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll, Waker};
/// State of the shared slot: either the consumer polled first (a waker is
/// parked) or the producer finished first (the result is stored).
enum WakerOrResult<T> {
    Waker(Waker),
    Result(T),
}
// The slot shared between a future and its completion handle.
type GpuFutureData<T> = Mutex<Option<WakerOrResult<T>>>;
/// A Future that can poll the wgpu::Device
pub struct GpuFuture<T> {
    data: Arc<GpuFutureData<T>>,
}
/// Uninhabited stand-in type for the type-erased pointer produced by
/// `GpuFutureCompletion::into_raw` and consumed by `from_raw`.
pub enum OpaqueData {}
//TODO: merge this with `GpuFuture` and avoid `Arc` on the data.
/// A completion handle to set the result on a GpuFuture
pub struct GpuFutureCompletion<T> {
    data: Arc<GpuFutureData<T>>,
}
impl<T> Future for GpuFuture<T> {
    type Output = T;
    fn poll(self: Pin<&mut Self>, context: &mut Context) -> Poll<Self::Output> {
        let mut waker_or_result = self.into_ref().get_ref().data.lock();
        match waker_or_result.take() {
            Some(WakerOrResult::Result(res)) => Poll::Ready(res),
            _ => {
                // Not completed yet: (re)store the latest waker so
                // `complete` can wake this task.
                *waker_or_result = Some(WakerOrResult::Waker(context.waker().clone()));
                Poll::Pending
            }
        }
    }
}
impl<T> GpuFutureCompletion<T> {
    /// Store the result and wake the future's task, if it has been polled.
    /// Completion is one-shot; a second result is an internal bug.
    pub fn complete(self, value: T) {
        let mut waker_or_result = self.data.lock();
        match waker_or_result.replace(WakerOrResult::Result(value)) {
            Some(WakerOrResult::Waker(waker)) => waker.wake(),
            None => {}
            Some(WakerOrResult::Result(_)) => {
                // Drop before panicking. Not sure if this is necessary, but it makes me feel better.
                drop(waker_or_result);
                unreachable!()
            }
        };
    }
    /// Leak the shared slot as a raw pointer, for threading through
    /// C-style callback user-data.
    pub(crate) fn into_raw(self) -> *mut OpaqueData {
        Arc::into_raw(self.data) as _
    }
    /// Reconstruct the completion from a pointer made by `into_raw`.
    ///
    /// # Safety
    /// `this` must originate from `into_raw` of the same `T`, and must not
    /// be used again afterwards (the Arc reference is consumed here).
    pub(crate) unsafe fn from_raw(this: *mut OpaqueData) -> Self {
        Self {
            data: Arc::from_raw(this as _),
        }
    }
}
/// Create a connected future/completion pair sharing one result slot.
pub(crate) fn new_gpu_future<T>() -> (GpuFuture<T>, GpuFutureCompletion<T>) {
    let shared = Arc::new(Mutex::new(None));
    let future = GpuFuture {
        data: Arc::clone(&shared),
    };
    let completion = GpuFutureCompletion { data: shared };
    (future, completion)
}

2063
wgpu/src/backend/web.rs Normal file

File diff suppressed because it is too large Load Diff

2971
wgpu/src/lib.rs Normal file

File diff suppressed because it is too large Load Diff

74
wgpu/src/macros.rs Normal file
View File

@ -0,0 +1,74 @@
//! Convenience macros
/// Macro to produce an array of [VertexAttribute](crate::VertexAttribute).
///
/// Output has type: `[VertexAttribute; _]`. Usage is as follows:
/// ```
/// # use wgpu::vertex_attr_array;
/// let attrs = vertex_attr_array![0 => Float32x2, 1 => Float32, 2 => Uint16x4];
/// ```
/// This example specifies a list of three [VertexAttribute](crate::VertexAttribute),
/// each with the given `shader_location` and `format`.
/// Offsets are calculated automatically.
#[macro_export]
macro_rules! vertex_attr_array {
($($loc:expr => $fmt:ident),* $(,)?) => {
$crate::vertex_attr_array!([] ; 0; $($loc => $fmt ,)*)
};
([$($t:expr,)*] ; $off:expr ;) => { [$($t,)*] };
([$($t:expr,)*] ; $off:expr ; $loc:expr => $item:ident, $($ll:expr => $ii:ident ,)*) => {
$crate::vertex_attr_array!(
[$($t,)*
$crate::VertexAttribute {
format: $crate::VertexFormat :: $item,
offset: $off,
shader_location: $loc,
},];
$off + $crate::VertexFormat :: $item.size();
$($ll => $ii ,)*
)
};
}
#[test]
fn test_vertex_attr_array() {
let attrs = vertex_attr_array![0 => Float32x2, 3 => Uint16x4];
// VertexAttribute does not support PartialEq, so we cannot test directly
assert_eq!(attrs.len(), 2);
assert_eq!(attrs[0].offset, 0);
assert_eq!(attrs[0].shader_location, 0);
assert_eq!(attrs[1].offset, std::mem::size_of::<(f32, f32)>() as u64);
assert_eq!(attrs[1].shader_location, 3);
}
/// Macro to load a SPIR-V module statically.
///
/// It ensures the word alignment as well as the magic number.
#[macro_export]
macro_rules! include_spirv {
($($token:tt)*) => {
{
//log::info!("including '{}'", $($token)*);
$crate::ShaderModuleDescriptor {
label: Some($($token)*),
source: $crate::util::make_spirv(include_bytes!($($token)*)),
flags: $crate::ShaderFlags::VALIDATION,
}
}
};
}
/// Macro to load a WGSL module statically.
#[macro_export]
macro_rules! include_wgsl {
($($token:tt)*) => {
{
//log::info!("including '{}'", $($token)*);
$crate::ShaderModuleDescriptor {
label: Some($($token)*),
source: $crate::ShaderSource::Wgsl(include_str!($($token)*).into()),
flags: $crate::ShaderFlags::all(),
}
}
};
}

184
wgpu/src/util/belt.rs Normal file
View File

@ -0,0 +1,184 @@
use crate::{
Buffer, BufferAddress, BufferDescriptor, BufferSize, BufferUsage, BufferViewMut,
CommandEncoder, Device, MapMode,
};
use std::pin::Pin;
use std::task::{self, Poll};
use std::{future::Future, sync::mpsc};
// Given a vector of futures, poll each in parallel until all are ready.
struct Join<F> {
    // `None` marks a future that has already completed.
    futures: Vec<Option<F>>,
}
impl<F: Future<Output = ()>> Future for Join<F> {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context) -> Poll<Self::Output> {
        // This is safe because we have no Drop implementation to violate the Pin requirements and
        // do not provide any means of moving the inner futures.
        let all_ready = unsafe {
            // Poll all remaining futures, removing all that are ready
            // NOTE(review): `all` short-circuits at the first pending
            // future, so later futures are not polled in that round; they
            // are only reached once earlier ones complete. Confirm this is
            // acceptable for the map_async futures driven through here.
            self.get_unchecked_mut().futures.iter_mut().all(|opt| {
                if let Some(future) = opt {
                    // SAFETY: the future is never moved out of the Vec
                    // while pinned (see the comment above).
                    if Pin::new_unchecked(future).poll(cx) == Poll::Ready(()) {
                        *opt = None;
                    }
                }
                opt.is_none()
            })
        };
        if all_ready {
            Poll::Ready(())
        } else {
            Poll::Pending
        }
    }
}
/// One staging buffer together with its sub-allocation state.
struct Chunk {
    buffer: Buffer,
    /// Total size of `buffer` in bytes.
    size: BufferAddress,
    /// Bytes already handed out from the start of `buffer`.
    offset: BufferAddress,
}
/// Staging belt is a machine that uploads data.
///
/// Internally it uses a ring-buffer of staging buffers that are sub-allocated.
/// It has an advantage over `Queue.write_buffer` in a way that it returns a mutable slice,
/// which you can fill to avoid an extra data copy.
///
/// Using a staging belt is slightly complicated, and generally goes as follows:
/// - Write to buffers that need writing to using `write_buffer`.
/// - Call `finish`.
/// - Submit all command encoders used with `write_buffer`.
/// - Call `recall`
pub struct StagingBelt {
    chunk_size: BufferAddress,
    /// Chunks that we are actively using for pending transfers at this moment.
    active_chunks: Vec<Chunk>,
    /// Chunks that have scheduled transfers already.
    closed_chunks: Vec<Chunk>,
    /// Chunks that are back from the GPU and ready to be used.
    free_chunks: Vec<Chunk>,
    /// Used by mapping callbacks (in `recall`) to return re-mapped chunks.
    sender: mpsc::Sender<Chunk>,
    /// Drained at the start of `recall` to refill `free_chunks`.
    receiver: mpsc::Receiver<Chunk>,
}
impl StagingBelt {
    /// Create a new staging belt.
    ///
    /// The `chunk_size` is the unit of internal buffer allocation.
    /// It's better when it's big, but ideally still 1-4 times less than
    /// the total amount of data uploaded per submission.
    pub fn new(chunk_size: BufferAddress) -> Self {
        let (sender, receiver) = mpsc::channel();
        StagingBelt {
            chunk_size,
            active_chunks: Vec::new(),
            closed_chunks: Vec::new(),
            free_chunks: Vec::new(),
            sender,
            receiver,
        }
    }
    /// Allocate the staging belt slice of `size` to be uploaded into the `target` buffer
    /// at the specified offset.
    ///
    /// The upload will be placed into the provided command encoder. This encoder
    /// must be submitted after `finish` is called and before `recall` is called.
    pub fn write_buffer(
        &mut self,
        encoder: &mut CommandEncoder,
        target: &Buffer,
        offset: BufferAddress,
        size: BufferSize,
        device: &Device,
    ) -> BufferViewMut {
        // Pick, in order of preference: an active chunk with enough room
        // left, a free (already re-mapped) chunk big enough, or a fresh
        // allocation of at least `chunk_size` bytes.
        let mut chunk = if let Some(index) = self
            .active_chunks
            .iter()
            .position(|chunk| chunk.offset + size.get() <= chunk.size)
        {
            self.active_chunks.swap_remove(index)
        } else if let Some(index) = self
            .free_chunks
            .iter()
            .position(|chunk| size.get() <= chunk.size)
        {
            self.free_chunks.swap_remove(index)
        } else {
            let size = self.chunk_size.max(size.get());
            Chunk {
                buffer: device.create_buffer(&BufferDescriptor {
                    label: Some("staging"),
                    size,
                    usage: BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC,
                    // Mapped up-front so the caller can write into the slice
                    // returned below without a separate map_async round-trip.
                    mapped_at_creation: true,
                }),
                size,
                offset: 0,
            }
        };
        encoder.copy_buffer_to_buffer(&chunk.buffer, chunk.offset, target, offset, size.get());
        let old_offset = chunk.offset;
        chunk.offset += size.get();
        // Round the next sub-allocation up to MAP_ALIGNMENT so every slice
        // handed out starts at a mappable offset.
        let remainder = chunk.offset % crate::MAP_ALIGNMENT;
        if remainder != 0 {
            chunk.offset += crate::MAP_ALIGNMENT - remainder;
        }
        self.active_chunks.push(chunk);
        self.active_chunks
            .last()
            .unwrap()
            .buffer
            .slice(old_offset..old_offset + size.get())
            .get_mapped_range_mut()
    }
    /// Prepare currently mapped buffers for use in a submission.
    ///
    /// At this point, all the partially used staging buffers are closed until
    /// the GPU is done copying the data from them.
    pub fn finish(&mut self) {
        for chunk in self.active_chunks.drain(..) {
            chunk.buffer.unmap();
            self.closed_chunks.push(chunk);
        }
    }
    /// Recall all of the closed buffers back to be reused.
    ///
    /// This has to be called after the command encoders written to `write_buffer` are submitted!
    pub fn recall(&mut self) -> impl Future<Output = ()> + Send {
        // First reclaim any chunks whose re-mapping finished since last call.
        while let Ok(mut chunk) = self.receiver.try_recv() {
            chunk.offset = 0;
            self.free_chunks.push(chunk);
        }
        let sender = &self.sender;
        // Kick off re-mapping of every closed chunk; each sends itself back
        // through the channel once its map_async completes.
        let futures = self
            .closed_chunks
            .drain(..)
            .map(|chunk| {
                let sender = sender.clone();
                let async_buffer = chunk.buffer.slice(..).map_async(MapMode::Write);
                Some(async move {
                    // The result is ignored
                    async_buffer.await.ok();
                    // The only possible error is the other side disconnecting, which is fine
                    let _ = sender.send(chunk);
                })
            })
            .collect::<Vec<_>>();
        Join { futures }
    }
}

150
wgpu/src/util/device.rs Normal file
View File

@ -0,0 +1,150 @@
use std::{convert::TryFrom, num::NonZeroU32};
/// Describes a [Buffer](crate::Buffer) when allocating.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct BufferInitDescriptor<'a> {
    /// Debug label of a buffer. This will show up in graphics debuggers for easy identification.
    pub label: crate::Label<'a>,
    /// Contents of a buffer on creation.
    pub contents: &'a [u8],
    /// Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation
    /// will panic.
    pub usage: crate::BufferUsage,
}
/// Utility methods not meant to be in the main API.
pub trait DeviceExt {
    /// Creates a [Buffer](crate::Buffer) with data to initialize it.
    ///
    /// The buffer size is padded to `COPY_BUFFER_ALIGNMENT`; empty contents
    /// produce a zero-sized, unmapped buffer.
    fn create_buffer_init(&self, desc: &BufferInitDescriptor) -> crate::Buffer;
    /// Upload an entire texture and its mipmaps from a source buffer.
    ///
    /// Expects all mipmaps to be tightly packed in the data buffer.
    ///
    /// If the texture is a 2DArray texture, uploads each layer in order, expecting
    /// each layer and its mips to be tightly packed.
    ///
    /// Example:
    /// Layer0Mip0 Layer0Mip1 Layer0Mip2 ... Layer1Mip0 Layer1Mip1 Layer1Mip2 ...
    fn create_texture_with_data(
        &self,
        queue: &crate::Queue,
        desc: &crate::TextureDescriptor,
        data: &[u8],
    ) -> crate::Texture;
}
impl DeviceExt for crate::Device {
    fn create_buffer_init(&self, descriptor: &BufferInitDescriptor<'_>) -> crate::Buffer {
        // Skip mapping if the buffer is zero sized
        if descriptor.contents.is_empty() {
            let wgt_descriptor = crate::BufferDescriptor {
                label: descriptor.label,
                size: 0,
                usage: descriptor.usage,
                mapped_at_creation: false,
            };
            self.create_buffer(&wgt_descriptor)
        } else {
            let unpadded_size = descriptor.contents.len() as crate::BufferAddress;
            // Valid vulkan usage is
            // 1. buffer size must be a multiple of COPY_BUFFER_ALIGNMENT.
            // 2. buffer size must be greater than 0.
            // Therefore we round the value up to the nearest multiple, and ensure it's at least COPY_BUFFER_ALIGNMENT.
            let align_mask = crate::COPY_BUFFER_ALIGNMENT - 1;
            let padded_size =
                ((unpadded_size + align_mask) & !align_mask).max(crate::COPY_BUFFER_ALIGNMENT);
            let wgt_descriptor = crate::BufferDescriptor {
                label: descriptor.label,
                size: padded_size,
                usage: descriptor.usage,
                mapped_at_creation: true,
            };
            // Write the contents through the pre-mapped range, then unmap.
            // Any padding bytes beyond `unpadded_size` are left untouched.
            let buffer = self.create_buffer(&wgt_descriptor);
            buffer.slice(..).get_mapped_range_mut()[..unpadded_size as usize]
                .copy_from_slice(descriptor.contents);
            buffer.unmap();
            buffer
        }
    }
    fn create_texture_with_data(
        &self,
        queue: &crate::Queue,
        desc: &crate::TextureDescriptor,
        data: &[u8],
    ) -> crate::Texture {
        let texture = self.create_texture(desc);
        let format_info = desc.format.describe();
        // 3D textures have a single "layer" whose depth shrinks per mip;
        // array textures iterate their layers with a depth-1 extent each.
        let (layer_iterations, mip_extent) = if desc.dimension == crate::TextureDimension::D3 {
            (1, desc.size)
        } else {
            (
                desc.size.depth_or_array_layers,
                crate::Extent3d {
                    depth_or_array_layers: 1,
                    ..desc.size
                },
            )
        };
        let mip_level_count =
            u8::try_from(desc.mip_level_count).expect("mip level count overflows a u8");
        let mut binary_offset = 0;
        for layer in 0..layer_iterations {
            for mip in 0..mip_level_count {
                let mip_size = mip_extent.at_mip_level(mip).unwrap();
                // When uploading mips of compressed textures and the mip is supposed to be
                // a size that isn't a multiple of the block size, the mip needs to be uploaded
                // as its "physical size" which is the size rounded up to the nearest block size.
                let mip_physical = mip_size.physical_size(desc.format);
                // All these calculations are performed on the physical size as that's the
                // data that exists in the buffer.
                let width_blocks = mip_physical.width / format_info.block_dimensions.0 as u32;
                let height_blocks = mip_physical.height / format_info.block_dimensions.1 as u32;
                let bytes_per_row = width_blocks * format_info.block_size as u32;
                // NOTE(review): for 3D textures this uses the *unmipped*
                // depth (`mip_extent`), not `mip_size` — confirm whether
                // depth is meant to shrink per mip level here.
                let data_size = bytes_per_row * height_blocks * mip_extent.depth_or_array_layers;
                let end_offset = binary_offset + data_size as usize;
                queue.write_texture(
                    crate::ImageCopyTexture {
                        texture: &texture,
                        mip_level: mip as u32,
                        origin: crate::Origin3d {
                            x: 0,
                            y: 0,
                            z: layer,
                        },
                    },
                    &data[binary_offset..end_offset],
                    crate::ImageDataLayout {
                        offset: 0,
                        bytes_per_row: Some(
                            NonZeroU32::new(bytes_per_row).expect("invalid bytes per row"),
                        ),
                        // NOTE(review): for block-compressed formats this is
                        // the texel height, while `height_blocks` is the row
                        // count actually present in `data` — verify the
                        // expected unit of rows_per_image.
                        rows_per_image: Some(
                            NonZeroU32::new(mip_physical.height).expect("invalid height"),
                        ),
                    },
                    mip_physical,
                );
                binary_offset = end_offset;
            }
        }
        texture
    }
}

223
wgpu/src/util/encoder.rs Normal file
View File

@ -0,0 +1,223 @@
use std::ops::Range;
use wgt::{BufferAddress, DynamicOffset, IndexFormat};
use crate::{BindGroup, Buffer, BufferSlice, RenderBundleEncoder, RenderPass, RenderPipeline};
/// Methods shared by `RenderPass` and `RenderBundleEncoder`
pub trait RenderEncoder<'a> {
/// Sets the active bind group for a given bind group index. The bind group layout
/// in the active pipeline when any `draw()` function is called must match the layout of this bind group.
///
/// If the bind group have dynamic offsets, provide them in order of their declaration.
fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]);
/// Sets the active render pipeline.
///
/// Subsequent draw calls will exhibit the behavior defined by `pipeline`.
fn set_pipeline(&mut self, pipeline: &'a RenderPipeline);
/// Sets the active index buffer.
///
/// Subsequent calls to [`draw_indexed`](RenderBundleEncoder::draw_indexed) on this [`RenderBundleEncoder`] will
/// use `buffer` as the source index buffer.
fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat);
/// Assign a vertex buffer to a slot.
///
/// Subsequent calls to [`draw`] and [`draw_indexed`] on this
/// [`RenderBundleEncoder`] will use `buffer` as one of the source vertex buffers.
///
/// The `slot` refers to the index of the matching descriptor in
/// [VertexStateDescriptor::vertex_buffers](crate::VertexStateDescriptor::vertex_buffers).
///
/// [`draw`]: RenderBundleEncoder::draw
/// [`draw_indexed`]: RenderBundleEncoder::draw_indexed
fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>);
/// Draws primitives from the active vertex buffer(s).
///
/// The active vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>);
/// Draws indexed primitives using the active index buffer and the active vertex buffers.
///
/// The active index buffer can be set with [`RenderBundleEncoder::set_index_buffer`], while the active
/// vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>);
/// Draws primitives from the active vertex buffer(s) based on the contents of the `indirect_buffer`.
///
/// The active vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
///
/// The structure expected in `indirect_buffer` is the following:
///
/// ```rust
/// #[repr(C)]
/// struct DrawIndirect {
/// vertex_count: u32, // The number of vertices to draw.
/// instance_count: u32, // The number of instances to draw.
/// base_vertex: u32, // The Index of the first vertex to draw.
/// base_instance: u32, // The instance ID of the first instance to draw.
/// }
/// ```
fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress);
/// Draws indexed primitives using the active index buffer and the active vertex buffers,
/// based on the contents of the `indirect_buffer`.
///
/// The active index buffer can be set with [`RenderBundleEncoder::set_index_buffer`], while the active
/// vertex buffers can be set with [`RenderBundleEncoder::set_vertex_buffer`].
///
/// The structure expected in `indirect_buffer` is the following:
///
/// ```rust
/// #[repr(C)]
/// struct DrawIndexedIndirect {
/// vertex_count: u32, // The number of vertices to draw.
/// instance_count: u32, // The number of instances to draw.
/// base_index: u32, // The base index within the index buffer.
/// vertex_offset: i32, // The value added to the vertex index before indexing into the vertex buffer.
/// base_instance: u32, // The instance ID of the first instance to draw.
/// }
/// ```
fn draw_indexed_indirect(
&mut self,
indirect_buffer: &'a Buffer,
indirect_offset: BufferAddress,
);
/// [`wgt::Features::PUSH_CONSTANTS`] must be enabled on the device in order to call this function.
///
/// Set push constant data.
///
/// Offset is measured in bytes, but must be a multiple of [`wgt::PUSH_CONSTANT_ALIGNMENT`].
///
/// Data size must be a multiple of 4 and must be aligned to the 4s, so we take an array of u32.
/// For example, with an offset of 4 and an array of `[u32; 3]`, that will write to the range
/// of 4..16.
///
/// For each byte in the range of push constant data written, the union of the stages of all push constant
/// ranges that covers that byte must be exactly `stages`. There's no good way of explaining this simply,
/// so here are some examples:
///
/// ```text
/// For the given ranges:
/// - 0..4 Vertex
/// - 4..8 Fragment
/// ```
///
/// You would need to upload this in two set_push_constants calls. First for the `Vertex` range, second for the `Fragment` range.
///
/// ```text
/// For the given ranges:
/// - 0..8 Vertex
/// - 4..12 Fragment
/// ```
///
/// You would need to upload this in three set_push_constants calls. First for the `Vertex` only range 0..4, second
/// for the `Vertex | Fragment` range 4..8, third for the `Fragment` range 8..12.
fn set_push_constants(&mut self, stages: wgt::ShaderStage, offset: u32, data: &[u8]);
}
/// Forwarding impl: every trait method delegates to the inherent
/// `RenderPass` method of the same name.
impl<'a> RenderEncoder<'a> for RenderPass<'a> {
    #[inline(always)]
    fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) {
        Self::set_bind_group(self, index, bind_group, offsets);
    }
    #[inline(always)]
    fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
        Self::set_pipeline(self, pipeline);
    }
    #[inline(always)]
    fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
        Self::set_index_buffer(self, buffer_slice, index_format);
    }
    #[inline(always)]
    fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
        Self::set_vertex_buffer(self, slot, buffer_slice);
    }
    #[inline(always)]
    fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
        Self::draw(self, vertices, instances);
    }
    #[inline(always)]
    fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
        Self::draw_indexed(self, indices, base_vertex, instances);
    }
    #[inline(always)]
    fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
        Self::draw_indirect(self, indirect_buffer, indirect_offset);
    }
    #[inline(always)]
    fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
    ) {
        Self::draw_indexed_indirect(self, indirect_buffer, indirect_offset);
    }
    #[inline(always)]
    fn set_push_constants(&mut self, stages: wgt::ShaderStage, offset: u32, data: &[u8]) {
        Self::set_push_constants(self, stages, offset, data);
    }
}
/// Forwarding impl: every trait method delegates to the inherent
/// `RenderBundleEncoder` method of the same name.
impl<'a> RenderEncoder<'a> for RenderBundleEncoder<'a> {
    #[inline(always)]
    fn set_bind_group(&mut self, index: u32, bind_group: &'a BindGroup, offsets: &[DynamicOffset]) {
        Self::set_bind_group(self, index, bind_group, offsets);
    }
    #[inline(always)]
    fn set_pipeline(&mut self, pipeline: &'a RenderPipeline) {
        Self::set_pipeline(self, pipeline);
    }
    #[inline(always)]
    fn set_index_buffer(&mut self, buffer_slice: BufferSlice<'a>, index_format: IndexFormat) {
        Self::set_index_buffer(self, buffer_slice, index_format);
    }
    #[inline(always)]
    fn set_vertex_buffer(&mut self, slot: u32, buffer_slice: BufferSlice<'a>) {
        Self::set_vertex_buffer(self, slot, buffer_slice);
    }
    #[inline(always)]
    fn draw(&mut self, vertices: Range<u32>, instances: Range<u32>) {
        Self::draw(self, vertices, instances);
    }
    #[inline(always)]
    fn draw_indexed(&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>) {
        Self::draw_indexed(self, indices, base_vertex, instances);
    }
    #[inline(always)]
    fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: BufferAddress) {
        Self::draw_indirect(self, indirect_buffer, indirect_offset);
    }
    #[inline(always)]
    fn draw_indexed_indirect(
        &mut self,
        indirect_buffer: &'a Buffer,
        indirect_offset: BufferAddress,
    ) {
        Self::draw_indexed_indirect(self, indirect_buffer, indirect_offset);
    }
    #[inline(always)]
    fn set_push_constants(&mut self, stages: wgt::ShaderStage, offset: u32, data: &[u8]) {
        Self::set_push_constants(self, stages, offset, data);
    }
}

102
wgpu/src/util/mod.rs Normal file
View File

@ -0,0 +1,102 @@
//! Utility structures and functions.
mod belt;
mod device;
mod encoder;
use std::{
borrow::Cow,
future::Future,
mem::{align_of, size_of},
ptr::copy_nonoverlapping,
};
pub use belt::StagingBelt;
pub use device::{BufferInitDescriptor, DeviceExt};
pub use encoder::RenderEncoder;
/// Treat the given byte slice as a SPIR-V module.
///
/// # Panics
///
/// This function panics if:
///
/// - Input length isn't a multiple of 4
/// - Input is empty
/// - SPIR-V magic number is missing from beginning of stream
pub fn make_spirv(data: &[u8]) -> super::ShaderSource {
    // Every SPIR-V binary begins with this word (in the producer's endianness).
    const MAGIC_NUMBER: u32 = 0x0723_0203;

    assert_eq!(
        data.len() % size_of::<u32>(),
        0,
        "data size is not a multiple of 4"
    );
    // Guard before indexing `words[0]` below so an empty input fails with a
    // clear message instead of an index-out-of-bounds panic.
    assert!(
        !data.is_empty(),
        "data is empty; a SPIR-V module contains at least the magic number"
    );

    // If the data happens to be aligned, directly use the byte array,
    // otherwise copy the byte array in an owned vector and use that instead.
    let words = if data.as_ptr().align_offset(align_of::<u32>()) == 0 {
        // SAFETY: alignment was checked above, and any bit pattern is a valid
        // `u32`, so reinterpreting the bytes as words is sound.
        let (pre, words, post) = unsafe { data.align_to::<u32>() };
        debug_assert!(pre.is_empty());
        debug_assert!(post.is_empty());
        Cow::from(words)
    } else {
        let mut words = vec![0u32; data.len() / size_of::<u32>()];
        // SAFETY: `words` owns exactly `data.len()` bytes of storage
        // (len * 4 == data.len()), and the two allocations cannot overlap.
        unsafe {
            copy_nonoverlapping(data.as_ptr(), words.as_mut_ptr() as *mut u8, data.len());
        }
        Cow::from(words)
    };

    assert_eq!(
        words[0], MAGIC_NUMBER,
        "wrong magic word {:x}. Make sure you are using a binary SPIRV file.",
        words[0]
    );
    super::ShaderSource::SpirV(words)
}
/// CPU accessible buffer used to download data back from the GPU.
///
/// Holds a `MAP_READ` staging buffer (field 0) together with the mapped range
/// covering its contents (field 1), so the downloaded bytes can be exposed via
/// `Deref<Target = [u8]>`.
pub struct DownloadBuffer(super::Buffer, super::BufferMappedRange);
impl DownloadBuffer {
    /// Asynchronously read the contents of a buffer.
    ///
    /// Copies `buffer` into a fresh `COPY_DST | MAP_READ` staging buffer,
    /// submits the copy to `queue`, and resolves once the staging buffer has
    /// been mapped for reading.
    pub fn read_buffer(
        device: &super::Device,
        queue: &super::Queue,
        buffer: &super::BufferSlice,
    ) -> impl Future<Output = Result<Self, super::BufferAsyncError>> + Send {
        // Length of the slice: either the explicit size, or everything from
        // `offset` to the end of the underlying buffer.
        let byte_count = if let Some(size) = buffer.size {
            size.into()
        } else {
            buffer.buffer.map_context.lock().total_size - buffer.offset
        };

        let staging = device.create_buffer(&super::BufferDescriptor {
            label: None,
            size: byte_count,
            usage: super::BufferUsage::COPY_DST | super::BufferUsage::MAP_READ,
            mapped_at_creation: false,
        });

        // Record and submit a copy from the source slice into the staging buffer.
        let mut encoder =
            device.create_command_encoder(&super::CommandEncoderDescriptor { label: None });
        encoder.copy_buffer_to_buffer(buffer.buffer, buffer.offset, &staging, 0, byte_count);
        queue.submit(Some(encoder.finish()));

        // Kick off the map request before entering the async block so the
        // returned future only needs to await completion.
        let mapping = staging.slice(..).map_async(super::MapMode::Read);
        async move {
            mapping.await?;
            let mapped_range = super::Context::buffer_get_mapped_range(
                &*staging.context,
                &staging.id,
                0..byte_count,
            );
            Ok(Self(staging, mapped_range))
        }
    }
}
impl std::ops::Deref for DownloadBuffer {
type Target = [u8];
fn deref(&self) -> &[u8] {
super::BufferMappedRangeSlice::slice(&self.1)
}
}

View File

@ -0,0 +1,52 @@
use naga::{front::wgsl, valid::Validator};
use std::{fs, path::PathBuf};
#[test]
fn parse_example_wgsl() {
    // Walk every example directory and validate each `.wgsl` shader found.
    let examples = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("examples");
    let read_dir = match examples.read_dir() {
        Ok(iter) => iter,
        Err(e) => {
            log::error!("Unable to open the examples folder: {:?}", e);
            return;
        }
    };
    for example_entry in read_dir {
        // Skip unreadable entries rather than failing the whole test.
        let example = match example_entry {
            Ok(dir_entry) => dir_entry,
            Err(e) => {
                log::warn!("Skipping example: {:?}", e);
                continue;
            }
        };
        let read_files = match example.path().read_dir() {
            Ok(iter) => iter,
            Err(_) => continue,
        };
        for file_entry in read_files {
            let file = match file_entry {
                Ok(entry) => entry,
                Err(e) => {
                    log::warn!("Skipping file: {:?}", e);
                    continue;
                }
            };
            let path = file.path();
            // Only WGSL sources are of interest here.
            match path.extension() {
                Some(ext) if &*ext == "wgsl" => {}
                _ => continue,
            }
            println!("Validating {:?}", path);
            let shader = fs::read_to_string(&path).unwrap_or_default();

            let module = wgsl::parse_str(&shader).unwrap();
            //TODO: re-use the validator
            Validator::new(
                naga::valid::ValidationFlags::all(),
                naga::valid::Capabilities::all(),
            )
            .validate(&module)
            .unwrap();
        }
    }
}