Device.createBuffer(usage: GPUBufferUsage.VERTEX) every frame: browser errors after a few minutes


I'm trying to write a simple drawing example with WebGPU, so the length of arrayData changes on every mousemove. I keep a single array that stores all the points (an array of vec2), and every frame I create a new GPUBuffer from that array, because GPUBuffer.size is read-only and I need the size to grow.

Eventually I get this error: RangeError: Failed to execute 'createBuffer' on 'GPUDevice': createBuffer failed, size is too large for the implementation when mappedAtCreation == true

const data = new Float32Array( [ 0.3, 0.3, 0.4, 0.4 ] );

async function frame () {

    const commandEncoder = device.createCommandEncoder();

    // ...

    const vertexBuffer = device.createBuffer( {
        size: data.byteLength,
        usage: GPUBufferUsage.VERTEX,
        mappedAtCreation: true
    } );

    //@ts-ignore
    const dst = new data.constructor( vertexBuffer.getMappedRange() );
    dst.set( data );
    vertexBuffer.unmap();

    const passEncoder = commandEncoder.beginRenderPass( renderPassDescriptor );
    passEncoder.setPipeline( pipeline );
    passEncoder.setVertexBuffer( 0, vertexBuffer );

    passEncoder.draw( data.length / 2 );
    passEncoder.end();

    device.queue.submit( [ commandEncoder.finish() ] );
    
    // I tried destroying the buffer after finish(); it doesn't help
    vertexBuffer.destroy();

    requestAnimationFrame( frame );
}

I want to update the vertexBuffer on mousemove (every frame). Right now I create a new GPUBuffer each time, and it doesn't seem to work.
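For reference, the same per-frame upload can be done without mappedAtCreation at all, using device.queue.writeBuffer (a standard WebGPU API). A minimal sketch, assuming the buffer usage is extended with COPY_DST (not in the original code):

// Sketch: upload without mapping; queue.writeBuffer requires the buffer
// to be created with GPUBufferUsage.COPY_DST.
const vertexBuffer = device.createBuffer( {
    size: data.byteLength,
    usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
} );
device.queue.writeBuffer( vertexBuffer, 0, data );

The answer below uses this same writeBuffer pattern, but also reuses buffers instead of recreating one per frame.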

webgpu
1 Answer

It's not clear exactly what you're trying to do, but if it were me, I wouldn't create a new buffer on every mousemove. Instead, I'd create one large buffer and write the new data into it on each mousemove. If the buffer fills up, I'd add another buffer.

/* global GPUBufferUsage */
/* global GPUTextureUsage */

async function main() {
  const adapter = await navigator.gpu?.requestAdapter();
  const device = await adapter?.requestDevice();
  if (!device) {
    alert('need a browser that supports WebGPU');
    return;
  }

  const canvas = document.querySelector('canvas');
  const context = canvas.getContext('webgpu');
  const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
  context.configure({
    device,
    format: presentationFormat,
    alphaMode: 'premultiplied',
  });

  const code = `
  struct MyVSInput {
      @location(0) position: vec4f,
  };

  @vertex
  fn myVSMain(v: MyVSInput) -> @builtin(position) vec4f {
    return v.position;
  }

  @fragment
  fn myFSMain() -> @location(0) vec4f {
    return vec4f(1, 1, 0, 1);
  }
  `;

  const module = device.createShaderModule({code});
  const pipeline = device.createRenderPipeline({
    layout: 'auto',
    vertex: {
      module,
      buffers: [
        {
          arrayStride: 2 * 4,
          attributes: [
            { shaderLocation: 0, offset: 0, format: 'float32x2' },
          ],
        },
      ],
    },
    fragment: {
      module,
      targets: [
        {format: presentationFormat},
      ],
    },
    primitive: {
      topology: 'point-list',
    },
  });

  const buffers = [];
  const pointsPerBuffer = 128;  // should make this much larger (16k) but keeping it small for testing.
  let numPoints = 0;

  const addPoint = (x, y) => {
    const bufferNdx = numPoints / pointsPerBuffer | 0;
    const ndx = numPoints % pointsPerBuffer;
    if (ndx === 0) {
      const buffer = device.createBuffer({
        size: pointsPerBuffer * 2 * 4,
        usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
      });
      buffers.push(buffer);
    }
    const buffer = buffers[bufferNdx];
    device.queue.writeBuffer(buffer, ndx * 2 * 4, new Float32Array([x, y]));
    ++numPoints;
  };

  const renderPassDescriptor = {
    colorAttachments: [
      {
        // view: undefined, // Assigned later
        clearValue: [ 0.2, 0.2, 0.2, 1.0 ],
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
  };

  function render() {
    const canvasTexture = context.getCurrentTexture();
    renderPassDescriptor.colorAttachments[0].view = canvasTexture.createView();

    const encoder = device.createCommandEncoder();
    const pass = encoder.beginRenderPass(renderPassDescriptor);
    pass.setPipeline(pipeline);
    buffers.forEach((buffer, i) => {
      pass.setVertexBuffer(0, buffer);
      const base = i * pointsPerBuffer;
      const numToDraw = Math.min(numPoints - base, pointsPerBuffer);
      pass.draw(numToDraw);
    });
    pass.end();
    device.queue.submit([encoder.finish()]);

    requestAnimationFrame(render);
  }
  requestAnimationFrame(render);

  window.addEventListener('mousemove', e => {
    const rect = canvas.getBoundingClientRect();
    const x = (e.clientX - rect.left) / rect.width * 2 - 1;
    const y = (e.clientY - rect.top) / rect.height * -2 + 1;
    addPoint(x, y);
  });

  const observer = new ResizeObserver(entries => {
    for (const entry of entries) {
      const canvas = entry.target;
      const width = entry.contentBoxSize[0].inlineSize;
      const height = entry.contentBoxSize[0].blockSize;
      canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
      canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
    }
  });
  observer.observe(canvas);
}

main();
/* CSS */
html, body { margin: 0; height: 100% }
canvas { width: 100%; height: 100%; display: block; }

<!-- HTML -->
<canvas></canvas>

The example above uses only 128 points per buffer, just to test that it still works once the point count goes past 128. To handle a lot of points I'd set pointsPerBuffer to something like 16k or 128k instead.
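As a rough sizing check (my arithmetic, assuming the vec2f vertex layout from the example): each point takes 2 × 4 bytes, so even 128k points per buffer is only 1 MiB, far below WebGPU's default maxBufferSize of 256 MiB.

// Sketch: buffer size for a hypothetical pointsPerBuffer of 128k.
const pointsPerBuffer = 128 * 1024;                 // 128k points
const bytesPerPoint = 2 * 4;                        // vec2f: two 32-bit floats
const bufferSize = pointsPerBuffer * bytesPerPoint; // 1048576 bytes = 1 MiB
// 'device' as obtained in main() above; the spec default for
// maxBufferSize is 268435456 bytes (256 MiB).
console.log(bufferSize <= device.limits.maxBufferSize);  // true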
