@bczhc
Last active December 25, 2025 14:47
WebGPU/wgpu learning notes #cg #webgpu #wgpu
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
<canvas width="500px" height="500px"></canvas>
<script>
async function loadShader(url) {
try {
const response = await fetch(url);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
return await response.text();
} catch (error) {
console.error(`Failed to load shader: ${url}`, error);
return null;
}
}
(async () => {
let adapter = await navigator.gpu.requestAdapter();
let device = await adapter.requestDevice();
let canvas = document.querySelector('canvas');
let context = canvas.getContext('webgpu');
let preferredFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: preferredFormat
})
// create shader module
let shaderCode = await loadShader("4.1.wgsl");
let shaderModule = device.createShaderModule({
label: 'shader 1',
code: shaderCode,
});
// create vertex buffer
let verticesData = new Float32Array([
// vertex (vec2f), color (vec3f)
0, 0.5, 1, 0, 0,
-0.5, -0.5, 0, 1, 0,
0.5, -0.5, 0, 0, 1,
]);
let vertexBuffer = device.createBuffer({
size: verticesData.byteLength,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.VERTEX,
mappedAtCreation: false,
})
device.queue.writeBuffer(vertexBuffer, 0, verticesData);
// create pipeline
let pipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module: shaderModule,
buffers: [
{
arrayStride: (2 + 3) * 4,
attributes: [
// shader location 0: position
{
shaderLocation: 0,
format: 'float32x2',
offset: 0,
},
// shader location 1: color
{
shaderLocation: 1,
format: 'float32x3',
offset: 2 * 4
}
]
}
]
},
fragment: {
module: shaderModule,
targets: [{format: preferredFormat}]
}
})
// create command encoder
let encoder = device.createCommandEncoder();
let pass = encoder.beginRenderPass({
colorAttachments: [
// corresponds to @location(0) of the fragment shader's return value
{
view: context.getCurrentTexture().createView(),
// clear color (gray)
clearValue: [0.3, 0.3, 0.3, 1],
loadOp: 'clear',
storeOp: 'store',
}
]
});
pass.setVertexBuffer(0, vertexBuffer);
pass.setPipeline(pipeline);
pass.draw(3);
pass.end();
let commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
})();
</script>
</body>
</html>
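A minimal sketch of the same initialization with feature detection and basic error handling added; the navigator.gpu check, the null-adapter check, and the device.lost promise are standard WebGPU API, and the rest is assumed to continue as in the file above:

(async () => {
    // Bail out early if the browser does not expose WebGPU at all.
    if (!navigator.gpu) {
        console.error('WebGPU is not supported in this browser');
        return;
    }
    // requestAdapter() resolves to null when no suitable adapter is available.
    const adapter = await navigator.gpu.requestAdapter();
    if (!adapter) {
        console.error('Failed to get a GPU adapter');
        return;
    }
    const device = await adapter.requestDevice();
    // The lost promise resolves if the device is lost (e.g. after a driver reset).
    device.lost.then((info) => console.error('GPU device lost:', info.message));
    // ...continue with canvas/context configuration and pipeline creation as above.
})();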
<!-- A slightly modified version that uses two vertex buffer slots -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title></title>
</head>
<body>
<canvas width="500px" height="500px"></canvas>
<script>
async function loadShader(url) {
try {
const response = await fetch(url);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
return await response.text();
} catch (error) {
console.error(`Failed to load shader: ${url}`, error);
return null;
}
}
(async () => {
let adapter = await navigator.gpu.requestAdapter();
let device = await adapter.requestDevice();
let canvas = document.querySelector('canvas');
let context = canvas.getContext('webgpu');
let preferredFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device,
format: preferredFormat
})
// create shader module
let shaderCode = await loadShader("4.1.wgsl");
let shaderModule = device.createShaderModule({
label: 'shader 1',
code: shaderCode,
});
// create vertex buffer
let verticesData = new Float32Array([
// vertex (vec2f), color (vec3f)
0, 0.5, 1, 0, 0,
-0.5, -0.5, 0, 1, 0,
0.5, -0.5, 0, 0, 1,
0.5 + 0.1, -0.5 + 0.1, 1, 0, 0,
0.5 + 0.1, 0.5 + 0.1, 0, 1, 0,
0.1, 0.5 + 0.1, 0, 0, 1,
-0.5 - 0.1, -0.5 + 0.1, 1, 0, 0,
-0.5 - 0.1, 0.5 + 0.1, 0, 1, 0,
-0.1, 0.5 + 0.1, 0, 0, 1,
]);
let alphaData = new Float32Array([
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
]);
for (let i = 0; i < alphaData.length; i++) {
alphaData[i] /= ((i + 1) / 3);
}
function createVertexBuffer(data) {
let buffer = device.createBuffer({
size: data.byteLength,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.VERTEX,
mappedAtCreation: false,
})
device.queue.writeBuffer(buffer, 0, data);
return buffer;
}
let vertexBuffer = createVertexBuffer(verticesData);
let vertexBuffer2 = createVertexBuffer(alphaData);
// create pipeline
let pipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module: shaderModule,
buffers: [
// vertex buffer slot 0
{
arrayStride: (2 + 3) * 4,
attributes: [
// shader location 0: position
{
shaderLocation: 0,
format: 'float32x2',
offset: 0,
},
// shader location 1: color
{
shaderLocation: 1,
format: 'float32x3',
offset: 2 * 4
}
]
},
// vertex buffer slot 1
{
arrayStride: 1 * 4,
attributes: [
{
shaderLocation: 2,
format: 'float32',
offset: 0,
}
]
}
]
},
fragment: {
module: shaderModule,
targets: [{format: preferredFormat}]
}
})
function render() {
// create command encoder
let encoder = device.createCommandEncoder();
let pass = encoder.beginRenderPass({
colorAttachments: [
// corresponds to @location(0) of the fragment shader's return value
{
view: context.getCurrentTexture().createView(),
// clear color (gray)
clearValue: [0.3, 0.3, 0.3, 1],
loadOp: 'clear',
storeOp: 'store',
}
]
});
pass.setVertexBuffer(0, vertexBuffer);
pass.setVertexBuffer(1, vertexBuffer2);
pass.setPipeline(pipeline);
pass.draw(3 * 3);
pass.end();
let commandBuffer = encoder.finish();
device.queue.submit([commandBuffer]);
}
// initial render
render();
canvas.addEventListener('click', () => {
// change colors
function randomColor() {
return [Math.random(), Math.random(), Math.random()];
}
let newVerticesData = verticesData.slice();
let vertexCount = newVerticesData.length / 5;
for (let i = 0; i < vertexCount; ++i) {
let r = randomColor();
newVerticesData[i * 5 + 2] = r[0];
newVerticesData[i * 5 + 3] = r[1];
newVerticesData[i * 5 + 4] = r[2];
}
vertexBuffer.destroy();
vertexBuffer = createVertexBuffer(newVerticesData);
render();
});
})();
</script>
</body>
</html>
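The fragment output carries an alpha value, but with no blend state on the color target the alpha is simply stored rather than blended against what is already in the texture. A sketch of the only change needed for standard source-over alpha blending (the factors below are the usual WebGPU GPUBlendState values):

fragment: {
    module: shaderModule,
    targets: [{
        format: preferredFormat,
        // "source-over" blending: out = src.rgb * src.a + dst.rgb * (1 - src.a)
        blend: {
            color: {srcFactor: 'src-alpha', dstFactor: 'one-minus-src-alpha', operation: 'add'},
            alpha: {srcFactor: 'one', dstFactor: 'one-minus-src-alpha', operation: 'add'},
        },
    }],
}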
struct VsBufferIn {
    @location(0) pos: vec2f,
    @location(1) color: vec3f,
}

struct VsOut {
    @builtin(position) vertex: vec4f,
    @location(0) rgb: vec3f,
    @location(1) alpha: f32,
}

@vertex fn vs(@builtin(vertex_index) _index: u32, bufferIn: VsBufferIn) -> VsOut {
    var out: VsOut;
    out.vertex = vec4f(bufferIn.pos, 0.0, 1.0);
    out.alpha = 0.5;
    out.rgb = bufferIn.color;
    return out;
}

@fragment fn fs(in: VsOut) -> @location(0) vec4f {
    return vec4f(in.rgb, in.alpha);
}
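If this is the 4.1.wgsl loaded by the HTML files above, note that the second file's pipeline also supplies a per-vertex alpha at shader location 2, which this shader ignores (it hardcodes out.alpha = 0.5; WebGPU permits pipeline attributes the shader does not consume). A sketch of a vertex-stage variant that reads it instead; the VsBufferIn2/vs2 names are placeholders:

struct VsBufferIn2 {
    @location(0) pos: vec2f,
    @location(1) color: vec3f,
    // per-vertex alpha supplied by vertex buffer slot 1
    @location(2) alpha: f32,
}

@vertex fn vs2(bufferIn: VsBufferIn2) -> VsOut {
    var out: VsOut;
    out.vertex = vec4f(bufferIn.pos, 0.0, 1.0);
    out.rgb = bufferIn.color;
    out.alpha = bufferIn.alpha;
    return out;
}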
// A rotating triangle that changes color.

use std::env;
use std::sync::Arc;
use std::time::Instant;
use wgpu::util::RenderEncoder;
use wgpu::{
    include_wgsl, Buffer, BufferDescriptor, BufferUsages, Color, ColorTargetState, Device,
    FragmentState, Instance, Queue, RenderPipeline, RenderPipelineDescriptor, ShaderModule,
    VertexAttribute, VertexBufferLayout, VertexFormat, VertexState,
};
use winit::event::{ElementState, MouseButton};
use winit::{
    application::ApplicationHandler,
    event::WindowEvent,
    event_loop::{ActiveEventLoop, ControlFlow, EventLoop, OwnedDisplayHandle},
    window::{Window, WindowId},
};

struct State {
    start: Instant,
    window: Arc<Window>,
    device: wgpu::Device,
    queue: wgpu::Queue,
    size: winit::dpi::PhysicalSize<u32>,
    surface: wgpu::Surface<'static>,
    surface_format: wgpu::TextureFormat,
    module: ShaderModule,
    pipeline: RenderPipeline,
    vertex_buffer: Buffer,
    timer_buffer: Buffer,
}

#[rustfmt::skip]
static VERTICES_DATA: [f32; 15] = {
    // Computed from f32 constants
    const SQRT_3: f32 = 1.732050808;  // √3
    const SIDE: f32 = 1.0;
    const HALF_SIDE: f32 = SIDE / 2.0;
    const HEIGHT: f32 = SQRT_3 * HALF_SIDE;  // √3/2 * side length

    [
        // top vertex (red)
        0.0, HEIGHT * 2.0 / 3.0, 1.0, 0.0, 0.0,
        // bottom-left vertex (green)
        -HALF_SIDE, -HEIGHT / 3.0, 0.0, 1.0, 0.0,
        // bottom-right vertex (blue)
        HALF_SIDE, -HEIGHT / 3.0, 0.0, 0.0, 1.0,
    ]
};

impl State {
    async fn new(display: OwnedDisplayHandle, window: Arc<Window>) -> State {
        // let instance = wgpu::Instance::new(
        //     wgpu::InstanceDescriptor::default().with_display_handle(Box::new(display)),
        // );
        let instance = Instance::default();
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions::default())
            .await
            .unwrap();
        let (device, queue) = adapter
            .request_device(&wgpu::DeviceDescriptor::default())
            .await
            .unwrap();

        let size = window.inner_size();

        let surface = instance.create_surface(window.clone()).unwrap();
        let cap = surface.get_capabilities(&adapter);

        let surface_format = cap.formats[0];

        let shader_module = device.create_shader_module(include_wgsl!("../../2.wgsl"));

        let pipeline = device.create_render_pipeline(&RenderPipelineDescriptor {
            vertex: VertexState {
                module: &shader_module,
                entry_point: None,
                compilation_options: Default::default(),
                buffers: &[
                    // slot 0
                    VertexBufferLayout {
                        array_stride: 5 * 4,
                        attributes: &[
                            // shader location 0: vertex position
                            VertexAttribute {
                                format: VertexFormat::Float32x2,
                                offset: 0,
                                shader_location: 0,
                            },
                            // shader location 1: vertex color
                            VertexAttribute {
                                format: VertexFormat::Float32x3,
                                offset: 2 * 4,
                                shader_location: 1,
                            },
                        ],
                        step_mode: Default::default(),
                    },
                    // slot 1
                    VertexBufferLayout {
                        array_stride: 1 * 4,
                        attributes: &[VertexAttribute {
                            format: VertexFormat::Float32,
                            offset: 0,
                            shader_location: 2,
                        }],
                        step_mode: Default::default(),
                    },
                ],
            },
            fragment: Some(FragmentState {
                module: &shader_module,
                entry_point: None,
                compilation_options: Default::default(),
                targets: &[Some(ColorTargetState {
                    format: surface_format.add_srgb_suffix(),
                    blend: None,
                    write_mask: Default::default(),
                })],
            }),
            label: None,
            layout: None,
            primitive: Default::default(),
            depth_stencil: None,
            multisample: Default::default(),
            multiview_mask: None,
            cache: None,
        });

        let vertex_buffer = Self::create_vertex_buffer(&device, &queue, &VERTICES_DATA);
        let timer_buffer = Self::create_vertex_buffer(&device, &queue, &[0.0, 0.0, 0.0]);
        let state = State {
            start: Instant::now(),
            window,
            device,
            queue,
            size,
            surface,
            surface_format,
            module: shader_module,
            pipeline,
            vertex_buffer,
            timer_buffer,
        };

        // Configure surface for the first time
        state.configure_surface();

        state
    }

    fn create_vertex_buffer(device: &Device, queue: &Queue, data: &[f32]) -> Buffer {
        let buffer = device.create_buffer(&BufferDescriptor {
            label: None,
            size: data.len() as u64 * 4,
            usage: BufferUsages::COPY_DST | BufferUsages::VERTEX,
            mapped_at_creation: false,
        });
        queue.write_buffer(&buffer, 0, bytemuck::cast_slice(data));
        buffer
    }

    fn update_vertex_buffer_colors(&mut self) {
        let mut new_data = VERTICES_DATA;
        let vertex_count = new_data.len() / 5;
        for i in 0..vertex_count {
            let start = i * 5 + 2;
            new_data[start..(start + 3)].copy_from_slice(&random_color()[..]);
        }
        self.vertex_buffer.destroy();
        self.vertex_buffer = Self::create_vertex_buffer(&self.device, &self.queue, &new_data);
    }

    fn get_window(&self) -> &Window {
        &self.window
    }

    fn configure_surface(&self) {
        let surface_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: self.surface_format,
            // Request compatibility with the sRGB-format texture view we're going to create later.
            view_formats: vec![self.surface_format.add_srgb_suffix()],
            alpha_mode: wgpu::CompositeAlphaMode::Auto,
            width: self.size.width,
            height: self.size.height,
            desired_maximum_frame_latency: 2,
            present_mode: wgpu::PresentMode::AutoVsync,
        };
        self.surface.configure(&self.device, &surface_config);
    }

    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        self.size = new_size;

        // reconfigure the surface
        self.configure_surface();
    }

    fn render(&mut self) {
        // Create texture view
        let surface_texture = self
            .surface
            .get_current_texture()
            .expect("failed to acquire next swapchain texture");
        let texture_view = surface_texture
            .texture
            .create_view(&wgpu::TextureViewDescriptor {
                // Without add_srgb_suffix() the image we will be working with
                // might not be "gamma correct".
                format: Some(self.surface_format.add_srgb_suffix()),
                ..Default::default()
            });

        // Clear to gray and draw the triangle
        let mut encoder = self.device.create_command_encoder(&Default::default());
        // Create the renderpass which will clear the screen.
        let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: None,
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: &texture_view,
                depth_slice: None,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Clear(Color::from_vec4d([0.3, 0.3, 0.3, 1.0])),
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
            multiview_mask: None,
        });

        let t = Instant::now().duration_since(self.start).as_secs_f64() as f32;
        assert!(self.timer_buffer.size() >= 4 * 3);
        self.queue
            .write_buffer(&self.timer_buffer, 0, bytemuck::cast_slice(&[t, t, t]));

        // Re-randomize the vertex colors whenever the elapsed whole-second count
        // is a multiple of 5 (note: this fires on every frame during that second,
        // so the colors churn for the whole second).
        if t as u32 % 5 == 0 {
            self.update_vertex_buffer_colors();
        }
        pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
        pass.set_vertex_buffer(1, self.timer_buffer.slice(..));
        pass.set_pipeline(&self.pipeline);
        pass.draw(0..3, 0..1);

        // End the renderpass.
        drop(pass);

        // Submit the command in the queue to execute
        self.queue.submit([encoder.finish()]);
        self.window.pre_present_notify();
        surface_texture.present();
    }
}

#[derive(Default)]
struct App {
    state: Option<State>,
}

impl ApplicationHandler for App {
    fn resumed(&mut self, event_loop: &ActiveEventLoop) {
        // Create window object
        let window = Arc::new(
            event_loop
                .create_window(Window::default_attributes())
                .unwrap(),
        );

        let state = pollster::block_on(State::new(
            event_loop.owned_display_handle(),
            window.clone(),
        ));
        self.state = Some(state);

        window.request_redraw();
    }

    fn window_event(&mut self, event_loop: &ActiveEventLoop, _id: WindowId, event: WindowEvent) {
        let state = self.state.as_mut().unwrap();
        match event {
            WindowEvent::CloseRequested => {
                println!("The close button was pressed; stopping");
                event_loop.exit();
            }
            WindowEvent::RedrawRequested => {
                state.render();
                // Emits a new redraw requested event.
                state.get_window().request_redraw();
            }
            WindowEvent::Resized(size) => {
                // Reconfigures the size of the surface. We do not re-render
                // here as this event is always followed up by redraw request.
                state.resize(size);
            }
            WindowEvent::MouseInput {
                state: e_state,
                button,
                ..
            } => {
                if e_state == ElementState::Pressed && button == MouseButton::Left {
                    // click; update the vertex colors
                    state.update_vertex_buffer_colors();
                    state.render();
                }
            }
            _ => {}
        }
    }
}

fn main() {
    // wgpu uses the `log` crate for its logging, so we initialize a logger with the `env_logger` crate.
    //
    // To change the log level, set the `RUST_LOG` environment variable. See the `env_logger`
    // documentation for more information.
    unsafe {
        env::set_var("RUST_LOG", "info");
    }
    env_logger::init();

    let event_loop = EventLoop::new().unwrap();

    // When the current loop iteration finishes, immediately begin a new
    // iteration regardless of whether or not new events are available to
    // process. Preferred for applications that want to render as fast as
    // possible, like games.
    event_loop.set_control_flow(ControlFlow::Poll);

    // When the current loop iteration finishes, suspend the thread until
    // another event arrives. Helps keeping CPU utilization low if nothing
    // is happening, which is preferred if the application might be idling in
    // the background.
    // event_loop.set_control_flow(ControlFlow::Wait);

    let mut app = App::default();
    event_loop.run_app(&mut app).unwrap();
}

trait ColorExt {
    fn from_vec4d(x: [f64; 4]) -> Self;
}

impl ColorExt for Color {
    fn from_vec4d(x: [f64; 4]) -> Self {
        Self {
            r: x[0],
            g: x[1],
            b: x[2],
            a: x[3],
        }
    }
}

fn random_color() -> [f32; 3] {
    [
        rand::random::<f32>(),
        rand::random::<f32>(),
        rand::random::<f32>(),
    ]
}
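The `t as u32 % 5 == 0` check in `render()` above fires on every frame throughout each qualifying second, so the colors churn rapidly rather than changing once. A minimal sketch of one way to change them exactly once per five-second interval, assuming a hypothetical `last_color_change: u64` field is added to `State` (initialized to 0 in `State::new`):

// Inside State::render(), replacing the `t as u32 % 5 == 0` block.
// `last_color_change: u64` is a hypothetical field on `State`, starting at 0.
let elapsed_secs = Instant::now().duration_since(self.start).as_secs();
let interval = elapsed_secs / 5;
if interval > self.last_color_change {
    // Only update once per interval, not on every frame of that second.
    self.last_color_change = interval;
    self.update_vertex_buffer_colors();
}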
struct VertexInput {
    @location(0) position: vec2f,
    @location(1) color: vec3f,
    @location(2) t: f32,
}

struct VertexOutput {
    @builtin(position) clip_position: vec4f,
    @location(0) color: vec3f,
}

@vertex
fn vs_main(in: VertexInput) -> VertexOutput {
    var out: VertexOutput;

    let r = in.t * 2.0 * 3.1415926536;
    let cos_a = cos(r);
    let sin_a = sin(r);

    // Rotate the position by angle r
    let rotated_position = vec2f(
        in.position.x * cos_a - in.position.y * sin_a,
        in.position.x * sin_a + in.position.y * cos_a
    );

    out.clip_position = vec4f(rotated_position, 0.0, 1.0);
    out.color = in.color;

    return out;
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4f {
    return vec4f(in.color, 1.0);
}
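For reference, the same rotation can be written with a 2x2 matrix; this sketch is behaviorally identical to the component-wise form above (mat2x2f takes its column vectors as arguments):

// columns are (cos, sin) and (-sin, cos)
let rot = mat2x2f(vec2f(cos_a, sin_a), vec2f(-sin_a, cos_a));
let rotated_position = rot * in.position;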