You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
256 lines
7.5 KiB
256 lines
7.5 KiB
"use strict"; |
|
|
|
// Entry point: sets up a WebGL skybox (cubemap on a full-screen quad) and
// starts the mouse/touch-driven render loop.
function main() {
  // Vertical field of view for the projection matrix.
  var fieldOfViewRadians = degToRad(80);

  // Scales raw pointer-pixel deltas into camera motion. Derived from the
  // window width at startup so a full-width drag turns the camera by a
  // similar amount on any screen.
  // NOTE(review): computed once — not recomputed on window resize; confirm
  // that is intentional.
  var cameraLookSensitivity = 8/(window.innerWidth);

  // Get A WebGL context
  /** @type {HTMLCanvasElement} */
  var canvas = document.querySelector("#canvas");
  var gl = canvas.getContext("webgl");
  if (!gl) {
    // WebGL unavailable — nothing to render.
    return;
  }

  // setup GLSL program (shader sources live in <script> tags with these ids)
  var program = webglUtils.createProgramFromScripts(gl, ["vertex-shader-3d", "fragment-shader-3d"]);

  // look up where the vertex data needs to go.
  var positionLocation = gl.getAttribLocation(program, "a_position");

  // lookup uniforms
  var skyboxLocation = gl.getUniformLocation(program, "u_skybox");
  var viewDirectionProjectionInverseLocation =
      gl.getUniformLocation(program, "u_viewDirectionProjectionInverse");

  // Create a buffer for positions
  var positionBuffer = gl.createBuffer();
  // Bind it to ARRAY_BUFFER (think of it as ARRAY_BUFFER = positionBuffer)
  gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
  // Put the positions in the buffer
  setGeometry(gl);

  // Create a texture.
  var texture = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_CUBE_MAP, texture);

  // One entry per cubemap face: the GL face target and the image that fills it.
  const faceInfos = [
    {
      target: gl.TEXTURE_CUBE_MAP_POSITIVE_X,
      url: 'skybox/px.avif',
    },
    {
      target: gl.TEXTURE_CUBE_MAP_NEGATIVE_X,
      url: 'skybox/nx.avif',
    },
    {
      target: gl.TEXTURE_CUBE_MAP_POSITIVE_Y,
      url: 'skybox/py.avif',
    },
    {
      target: gl.TEXTURE_CUBE_MAP_NEGATIVE_Y,
      url: 'skybox/ny.avif',
    },
    {
      target: gl.TEXTURE_CUBE_MAP_POSITIVE_Z,
      url: 'skybox/pz.avif',
    },
    {
      target: gl.TEXTURE_CUBE_MAP_NEGATIVE_Z,
      url: 'skybox/nz.avif',
    },
  ];
  faceInfos.forEach((faceInfo) => {
    const {target, url} = faceInfo;

    // Upload the canvas to the cubemap face.
    const level = 0;
    const internalFormat = gl.RGBA;
    // assumes every face image is 2048x2048 — TODO confirm against the assets
    const width = 2048;
    const height = 2048;
    const format = gl.RGBA;
    const type = gl.UNSIGNED_BYTE;

    // setup each face so it's immediately renderable (blank placeholder data)
    gl.texImage2D(target, level, internalFormat, width, height, 0, format, type, null);

    // Asynchronously load an image
    const image = new Image();
    image.src = url;
    image.addEventListener('load', function() {
      // Now that the image has loaded, copy it to the texture.
      // Re-bind first: another texture may have been bound in the meantime.
      gl.bindTexture(gl.TEXTURE_CUBE_MAP, texture);
      gl.texImage2D(target, level, internalFormat, format, type, image);
      // Rebuild mipmaps each time a face arrives so the cubemap stays complete.
      gl.generateMipmap(gl.TEXTURE_CUBE_MAP);
    });
  });
  // Initial mipmaps from the placeholder faces, so the texture is renderable
  // before any image has finished loading.
  gl.generateMipmap(gl.TEXTURE_CUBE_MAP);

  gl.texParameteri(gl.TEXTURE_CUBE_MAP, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR);

  // Anisotropic filtering gives best results for skyboxes. Otherwise the mipmaps can muddy them a bit
  // https://developer.mozilla.org/en-US/docs/Web/API/EXT_texture_filter_anisotropic
  const ext =
      gl.getExtension("EXT_texture_filter_anisotropic") ||
      gl.getExtension("MOZ_EXT_texture_filter_anisotropic") ||
      gl.getExtension("WEBKIT_EXT_texture_filter_anisotropic");
  if (ext) {
    // Use the strongest anisotropy the hardware offers.
    const max = gl.getParameter(ext.MAX_TEXTURE_MAX_ANISOTROPY_EXT);
    gl.texParameterf(gl.TEXTURE_CUBE_MAP, ext.TEXTURE_MAX_ANISOTROPY_EXT, max);
  }
|
|
|
// Convert an angle from radians to degrees.
// (Not referenced elsewhere in this file view; kept as a utility.)
function radToDeg(r) {
  var inDegrees = r * 180 / Math.PI;
  return inDegrees;
}
|
|
|
// Convert an angle from degrees to radians.
// Must stay a hoisted function declaration: it is called earlier in main().
function degToRad(d) {
  var inRadians = d * Math.PI / 180;
  return inRadians;
}
|
|
|
  // Pointer-tracking state shared between mouseOrTouchMoved and drawScene.
  var lastMouseX = 0;  // pointer position sampled on the previous frame
  var lastMouseY = 0;
  var mouseMoveTime = new Date().getTime();      // timestamp of the most recent pointer move
  var lastMouseMoveTime = new Date().getTime();  // timestamp consumed by the previous frame
  var mouseX = 0;      // latest pointer position (written by the event handler)
  var mouseY = 0;
  // Camera position on the unit sphere around the origin; seeded on +X.
  var lastCameraPos = [1,0,0];
|
|
|
// Record the latest pointer position (mouse, or first touch) plus the time it
// moved; drawScene consumes these to orbit the camera. Touch coordinates are
// negated and scaled by 0.3 so dragging feels like grabbing the sky.
function mouseOrTouchMoved(mouseOrTouchEvent) {
  var touches = mouseOrTouchEvent.touches;
  var hasTouches = !!(touches && touches.length > 0);
  // Bail out only when the event carries no usable position at all.
  // Bug fix: the previous truthiness check (`!event.offsetX`) also discarded
  // legitimate mouse moves at the element's left edge, where offsetX === 0.
  if (mouseOrTouchEvent.offsetX == null && !hasTouches) {
    return;
  }
  if (hasTouches) {
    mouseX = -touches[0].screenX * 0.3;
    mouseY = -touches[0].screenY * 0.3;
  } else {
    mouseX = mouseOrTouchEvent.offsetX;
    mouseY = mouseOrTouchEvent.offsetY;
  }
  mouseMoveTime = new Date().getTime();
}
|
|
|
  // Track pointer movement anywhere on the page, for both mouse and touch.
  document.addEventListener('mousemove', mouseOrTouchMoved, false);
  document.body.addEventListener('touchmove', mouseOrTouchMoved, false);

  // Start the render loop.
  requestAnimationFrame(drawScene);
|
|
|
  // Draw the scene. Runs once per animation frame; updates the orbiting
  // camera from pointer deltas and renders the skybox quad.
  function drawScene(time) {
    // convert to seconds
    time *= 0.001;
    // NOTE(review): `time` is not used below — animation is driven purely by
    // pointer deltas and wall-clock timestamps.

    webglUtils.resizeCanvasToDisplaySize(gl.canvas);

    // Tell WebGL how to convert from clip space to pixels
    gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);

    gl.enable(gl.CULL_FACE);
    gl.enable(gl.DEPTH_TEST);

    // Clear the canvas AND the depth buffer.
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    // Tell it to use our program (pair of shaders)
    gl.useProgram(program);

    // Turn on the position attribute
    gl.enableVertexAttribArray(positionLocation);

    // Bind the position buffer.
    gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);

    // Tell the position attribute how to get data out of positionBuffer (ARRAY_BUFFER)
    var size = 2;           // 2 components per iteration
    var type = gl.FLOAT;    // the data is 32bit floats
    var normalize = false;  // don't normalize the data
    var stride = 0;         // 0 = move forward size * sizeof(type) each iteration to get the next position
    var offset = 0;         // start at the beginning of the buffer
    gl.vertexAttribPointer(
        positionLocation, size, type, normalize, stride, offset);

    // Compute the projection matrix
    var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
    var projectionMatrix =
        m4.perspective(fieldOfViewRadians, aspect, 1, 2000);

    // The camera orbits the origin, always looking at it.
    var target = [0, 0, 0];
    var up = [0, 1, 0];

    // Vector pointing "right" relative to the current camera position; used
    // to translate horizontal pointer motion into orbital movement.
    var right = m4.cross(lastCameraPos, up);
    var inputX = (mouseX - lastMouseX)*cameraLookSensitivity;
    var inputY = (mouseY - lastMouseY)*cameraLookSensitivity;
    // Suppress the delta on the first frame (no previous sample yet) and
    // after >200ms without pointer movement, so the camera does not jump.
    var msSinceLastMouseMove = new Date().getTime() - lastMouseMoveTime;
    if((lastMouseX == 0 && lastMouseY == 0) || msSinceLastMouseMove > 200) {
      inputX = 0;
      inputY = 0;
    }
    lastMouseMoveTime = mouseMoveTime;
    lastMouseX = mouseX;
    lastMouseY = mouseY;

    // Nudge the camera along its local right/up axes by the pointer delta...
    var cameraMovement = m4.addVectors(
        m4.scaleVector(right, inputX),
        m4.scaleVector(up, inputY)
    )
    // ...then normalize so the camera stays on the unit sphere.
    var cameraPosition = m4.normalize(m4.addVectors(lastCameraPos, cameraMovement));
    lastCameraPos = cameraPosition;

    // Compute the camera's matrix using look at.
    var cameraMatrix = m4.lookAt(cameraPosition, target, up);

    // Make a view matrix from the camera matrix.
    var viewMatrix = m4.inverse(cameraMatrix);

    // We only care about direction so remove the translation
    viewMatrix[12] = 0;
    viewMatrix[13] = 0;
    viewMatrix[14] = 0;

    var viewDirectionProjectionMatrix =
        m4.multiply(projectionMatrix, viewMatrix);
    var viewDirectionProjectionInverseMatrix =
        m4.inverse(viewDirectionProjectionMatrix);

    // Set the uniforms
    gl.uniformMatrix4fv(
        viewDirectionProjectionInverseLocation, false,
        viewDirectionProjectionInverseMatrix);

    // Tell the shader to use texture unit 0 for u_skybox
    gl.uniform1i(skyboxLocation, 0);

    // let our quad pass the depth test at 1.0
    gl.depthFunc(gl.LEQUAL);

    // Draw the geometry (one full-screen quad: 2 triangles, 6 vertices).
    gl.drawArrays(gl.TRIANGLES, 0, 1 * 6);

    // Schedule the next frame.
    requestAnimationFrame(drawScene);
  }
}
|
|
|
// Fill the currently bound ARRAY_BUFFER with a full-screen quad.
// The quad is two triangles spanning clip space from (-1,-1) to (1,1),
// with 2 components (x, y) per vertex.
function setGeometry(gl) {
  const quadVertices = [
    -1, -1,
     1, -1,
    -1,  1,
    -1,  1,
     1, -1,
     1,  1,
  ];
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(quadVertices), gl.STATIC_DRAW);
}
|
|
|
// Kick off setup and the render loop.
main();
|
|
|