
Decode Images In Web Worker

In our WebGL application I'm trying to load and decode texture images in a web worker in order to avoid rendering hiccups in the main thread. Using createImageBitmap in the worker …

Solution 1:

There's no reason to use createImageBitmap in a worker (well, see the bottom of this answer). The browser already decodes the image in a separate thread, so doing it in a worker doesn't gain you anything. The bigger issue is that there's no way for ImageBitmap to know how you are going to use the image when you finally pass it to WebGL. If you ask for a format that's different from what ImageBitmap decoded to, WebGL has to convert and/or decode it again, and you can't give ImageBitmap enough info to tell it which format you want it decoded into.
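
To make that gap concrete, here's a minimal sketch (my own, not from the question or answer): the only decode-related knobs ImageBitmapOptions gives you are premultiplyAlpha and colorSpaceConversion (plus resizing), so there is no way to say "decode straight into the format/type I'm going to pass to texImage2D", and a mismatched upload may force a conversion.

// Sketch only: shows that ImageBitmapOptions can't express the format WebGL will want.
async function decodeForWebGL(gl, blob) {
  const bitmap = await createImageBitmap(blob, {
    premultiplyAlpha: 'none',       // the decode-related options stop here;
    colorSpaceConversion: 'none',   // there is no "format" or "type" option
  });
  // If the upload asks for something other than what the bitmap was decoded to,
  // the browser may have to convert (or effectively re-decode) it at this point.
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, gl.RGB, gl.UNSIGNED_SHORT_5_6_5, bitmap);
  return bitmap;
}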

On top of that, WebGL in Chrome has to transfer the image data from the render process to the GPU process, which for a large image is a relatively big copy (a 1024×1024 RGBA image is 4 MB).
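
For reference, that copy size is just width × height × 4 bytes per RGBA pixel:

// 1024 * 1024 pixels * 4 bytes (RGBA) = 4194304 bytes ≈ 4 MB per upload
const bytesToCopy = 1024 * 1024 * 4;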

A better API, IMO, would have let you tell ImageBitmap what format you want and where you want it (CPU or GPU). That way the browser could prep the image asynchronously and have it require no heavy work when done.
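
Purely as illustration, such an API might look something like this. None of these options exist in any browser; they are hypothetical, just to show the kind of information the browser would need up front:

// HYPOTHETICAL -- `format` and `storage` are NOT real options, just the shape being wished for
const bitmap = await createImageBitmap(blob, {
  format: 'RGBA8',      // the exact format the app will upload as (hypothetical)
  storage: 'gpu',       // where the decoded pixels should end up (hypothetical)
  premultiplyAlpha: 'none',
});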

In any case, here's a test. If you uncheck "update texture", it's still downloading and decoding textures; it's just not calling gl.texImage2D to upload them. In that case I see no jank (not proof that's the issue, but that's where I think it is):

const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

let update = true;
document.querySelector('#update').addEventListener('change', function() {
  update = this.checked;
});

const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`;

const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(
    gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
    new Uint8Array([0, 0, 255, 255]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;
let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

async function loadNextImage() {
  const url = `${imageUrls[imgNdx]}?cachebust=${performance.now()}`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const blob = await res.blob();
  const bitmap = await createImageBitmap(blob, {
    premultiplyAlpha: 'none',
    colorSpaceConversion: 'none',
  });
  if (update) {
    gl.bindTexture(gl.TEXTURE_2D, tex);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, bitmap);
    imgAspect = bitmap.width / bitmap.height;
  }
  setTimeout(loadNextImage, 1000);
}
loadNextImage();

function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; margin: 2px; }
#ui { position: absolute; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<div id="ui"><input type="checkbox" id="update" checked><label for="update">Update Texture</label></div>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>

I'm pretty sure the only way you could maybe guarantee no jank is to decode the images yourself in a worker, transfer the pixels to the main thread as an ArrayBuffer, and upload them to WebGL a few rows per frame with gl.texSubImage2D.

const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`;

const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

function createTexture(gl) {
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  gl.texImage2D(
      gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
      new Uint8Array([0, 0, 255, 255]));
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  return tex;
}

let drawingTex = createTexture(gl);
let loadingTex = createTexture(gl);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;

const workerScript = `
const ctx = new OffscreenCanvas(1, 1).getContext('2d');
let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

async function loadNextImage() {
  const url = \`\${imageUrls[imgNdx]}?cachebust=\${performance.now()}\`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const blob = await res.blob();
  const bitmap = await createImageBitmap(blob, {
    premultiplyAlpha: 'none',
    colorSpaceConversion: 'none',
  });
  ctx.canvas.width = bitmap.width;
  ctx.canvas.height = bitmap.height;
  ctx.drawImage(bitmap, 0, 0);
  const imgData = ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
  const data = new Uint8Array(imgData.data);
  postMessage({
    width: imgData.width,
    height: imgData.height,
    data: data.buffer,
  }, [data.buffer]);
}

onmessage = loadNextImage;
`;
const blob = new Blob([workerScript], {type: 'application/javascript'});
const worker = new Worker(URL.createObjectURL(blob));
let imgAspect = 1;
worker.onmessage = async(e) => {
  const {width, height, data} = e.data;
  
  gl.bindTexture(gl.TEXTURE_2D, loadingTex);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
  
  const maxRows = 20;
  for (let y = 0; y < height; y += maxRows) {
    const rows = Math.min(maxRows, height - y);
    gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, y, width, rows, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(data, y * width * 4, rows * width * 4));
    await waitRAF();
  }
  const temp = loadingTex;
  loadingTex = drawingTex;
  drawingTex = temp;
  imgAspect = width / height;
  await waitMS(1000);
  worker.postMessage('');
};
worker.postMessage('');

function waitRAF() {
  return new Promise(resolve => requestAnimationFrame(resolve));
}

function waitMS(ms = 0) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.bindTexture(gl.TEXTURE_2D, drawingTex);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; margin: 2px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>

Note: I don't know that this will work either. There are several places that are scary and browser-implementation-defined:

  1. What are the performance implications of resizing a canvas? The code resizes the OffscreenCanvas in the worker, which could be a heavy operation with GPU repercussions.

  2. What's the performance of drawing a bitmap into a canvas? Again, there are potentially big GPU perf issues, as the browser has to transfer the image to the GPU in order to draw it into a GPU-backed 2D canvas.

  3. What's the performance of getImageData? Yet again, the browser potentially has to stall the GPU and read back GPU memory to get the image data out.

  4. There's a possible perf hit from resizing the texture.

  5. Only Chrome currently supports OffscreenCanvas (see the feature-check sketch just after this list).
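
As a rough sketch (my own, not part of the answer), you could check for OffscreenCanvas before picking the worker path and fall back to main-thread decoding otherwise:

// Feature check: only take the worker/OffscreenCanvas path when it's supported.
const canUseOffscreenCanvas = typeof OffscreenCanvas !== 'undefined';
if (!canUseOffscreenCanvas) {
  console.warn('OffscreenCanvas not supported; falling back to the main-thread path');
}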

Issues 1, 2, 3, and 5 could all be solved by decoding the JPG/PNG yourself, though it really sucks that the browser already has the code to decode the image; you just can't access that decoding code in any useful way.

For 4, if it's an issue, it could be solved by allocating a texture at the largest image size and then copying smaller images into a rectangular sub-area of it, assuming that's actually an issue (see the sketch below).
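
A rough sketch of that idea (the sizes and helper name here are my own assumptions, not part of the answer's snippets): allocate one texture at the largest size you expect, once, then copy each decoded image into a sub-rectangle of it with texSubImage2D and scale the texture coordinates so only the used region is sampled.

// Sketch only: MAX_WIDTH, MAX_HEIGHT and uploadIntoSubRect are assumptions.
const MAX_WIDTH = 2048;
const MAX_HEIGHT = 2048;
gl.bindTexture(gl.TEXTURE_2D, loadingTex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, MAX_WIDTH, MAX_HEIGHT, 0,
              gl.RGBA, gl.UNSIGNED_BYTE, null);   // allocate once, never resize

function uploadIntoSubRect(gl, tex, width, height, data) {
  // copy the decoded pixels into the top-left corner of the pre-allocated texture
  gl.bindTexture(gl.TEXTURE_2D, tex);
  gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, width, height,
                   gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(data));
  // at draw time, scale the texcoords by [width / MAX_WIDTH, height / MAX_HEIGHT]
}

The full example below instead demonstrates decoding the JPG/PNG by hand in the worker: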

const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`;

const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

function createTexture(gl) {
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  gl.texImage2D(
      gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
      new Uint8Array([0, 0, 255, 255]));
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  return tex;
}

let drawingTex = createTexture(gl);
let loadingTex = createTexture(gl);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;

const workerScript = `
importScripts(
    // from https://github.com/eugeneware/jpeg-js
    'https://greggman.github.io/doodles/js/JPG-decoder.js',
    // from https://github.com/photopea/UPNG.js
    'https://greggman.github.io/doodles/js/UPNG.js',
);

let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

function decodePNG(arraybuffer) {
  return UPNG.decode(arraybuffer)
}

function decodeJPG(arrayBuffer) {
  return decode(new Uint8Array(arrayBuffer), true);
}

const decoders = {
  'image/png': decodePNG,
  'image/jpeg': decodeJPG,
  'image/jpg': decodeJPG,
};

async function loadNextImage() {
  const url = \`\${imageUrls[imgNdx]}?cachebust=\${performance.now()}\`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const arrayBuffer = await res.arrayBuffer();
  const type = res.headers.get('Content-Type');
  let decoder = decoders[type];
  if (!decoder) {
    console.error('unknown image type:', type);
  }
  const imgData = decoder(arrayBuffer);
  postMessage({
    width: imgData.width,
    height: imgData.height,
    arrayBuffer: imgData.data.buffer,
  }, [imgData.data.buffer]);
}

onmessage = loadNextImage;
`;
const blob = new Blob([workerScript], {type: 'application/javascript'});
const worker = new Worker(URL.createObjectURL(blob));
let imgAspect = 1;
worker.onmessage = async(e) => {
  const {width, height, arrayBuffer} = e.data;
  
  gl.bindTexture(gl.TEXTURE_2D, loadingTex);
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
  
  const maxRows = 20;
  for (let y = 0; y < height; y += maxRows) {
    const rows = Math.min(maxRows, height - y);
    gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, y, width, rows, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(arrayBuffer, y * width * 4, rows * width * 4));
    await waitRAF();
  }
  const temp = loadingTex;
  loadingTex = drawingTex;
  drawingTex = temp;
  imgAspect = width / height;
  await waitMS(1000);
  worker.postMessage('');
};
worker.postMessage('');

function waitRAF() {
  return new Promise(resolve => requestAnimationFrame(resolve));
}

function waitMS(ms = 0) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.bindTexture(gl.TEXTURE_2D, drawingTex);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; margin: 2px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>

Note: the JPEG decoder is slow. If you find or make a faster one, please post a comment.


Update

I just want to say that ImageBitmap should be fast enough and that some of my comments above about it not having enough info might not be exactly right.

My current understanding is that the entire point of ImageBitmap was to make uploads fast. It's supposed to work like this: you give it a blob, and asynchronously it loads that image into the GPU. When you call texImage2D with it, the browser can "blit" (render with the GPU) that image into your texture. I have no idea why there is jank in the first example given that, but I see jank every 6 or so images.

On the other hand, while uploading the image to the GPU was the point of ImageBitmap, browsers are not required to upload to the GPU; ImageBitmap is still supposed to work even if the user doesn't have a GPU. The point being, it's up to the browser to decide how to implement the feature, and whether it's fast, slow, or jank-free is entirely up to the browser.
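
In other words, the intended fast path (as I understand it; the function below is just a sketch of that flow, not code from the answer) is: decode asynchronously with createImageBitmap, hand the result straight to texImage2D so the browser can blit it, then close the bitmap.

// Sketch of the intended ImageBitmap fast path.
async function loadTexture(gl, tex, url) {
  const res = await fetch(url, {mode: 'cors'});
  const blob = await res.blob();
  // decoding (and, ideally, the upload to the GPU) happens asynchronously here
  const bitmap = await createImageBitmap(blob, {
    premultiplyAlpha: 'none',
    colorSpaceConversion: 'none',
  });
  gl.bindTexture(gl.TEXTURE_2D, tex);
  // the browser can blit the already-decoded bitmap into the texture
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, bitmap);
  bitmap.close();   // release the bitmap's backing memory once uploaded
}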
