I would non-ironically recommend trying to develop a web tool with ChatGPT or another AI model. Here is what it was able to pull off in about 15 minutes. Just take the code below and either save it as an HTML file or copy-paste it into an online compiler like
https://onecompiler.com/html/

Instructions:
1. Set up your LightMix in Corona
2. Use the "save all" option and pick PNG as the format (24-bit) - this will save all your LightSelect elements. The tool specifically expects the PNG format!
3. Click "choose files" and pick your LightSelect layers
4. The tool blends all those layers together additively and displays the result, with a slider to control the overall exposure. It even does the mumbo jumbo required for proper linear blending (there is a short example of that math right below these instructions).
Feel free to take the code and develop it further with or without some help. :)
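If you're curious what that linear-blending "mumbo jumbo" from step 4 actually is, here is the core of the per-pixel math on its own, using the same gamma-2.2 approximation as the tool. This fragment is purely illustrative and not part of the tool; the full page to copy starts at <!DOCTYPE html> below.

// Additive blend of one pixel channel from two layers, with a global exposure multiplier.
function blendTwoSamples(a, b, exposure) {
  // a and b are sRGB channel values in 0..255; the result is again an sRGB value in 0..255
  const linearSum = (Math.pow(a / 255, 2.2) + Math.pow(b / 255, 2.2)) * exposure; // sRGB -> linear, add, expose
  return Math.round(Math.pow(Math.min(linearSum, 1), 1 / 2.2) * 255); // clamp and convert back to sRGB
}
// Example: blendTwoSamples(128, 128, 1.0) gives about 175, not the 255 you would get by
// adding the raw pixel values directly - that difference is exactly why the linear step matters.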
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<title>Additive Blend Tool – Gamma Correction & Exposure</title>
<style>
/* Basic styles for layout and clarity */
body { font-family: sans-serif; background: #f9f9f9; padding: 1em; }
h2 { margin-top: 0; }
#controls { margin-bottom: 1em; }
label { font-weight: bold; margin-right: 0.5em; }
input[type="range"] { vertical-align: middle; }
input[type="number"] { width: 4em; text-align: right; }
canvas { display: block; border: 1px solid #ccc; }
</style>
</head>
<body>
<h2>Additive Image Blending Tool</h2>
<div id="controls">
<!-- File input for multiple images -->
<input type="file" id="fileInput" multiple accept="image/png" />
<br/><br/>
<!-- Exposure slider and number input -->
<label for="exposure">Exposure:</label>
<input type="range" id="exposure" min="0.1" max="5.0" step="0.1" value="1.0">
<input type="number" id="exposureNum" min="0.1" max="5.0" step="0.1" value="1.0">
</div>
<!-- Canvas for the blended output -->
<canvas id="canvas"></canvas>
<script>
// ==== Global Variables and Setup ====
const fileInput = document.getElementById('fileInput');
const canvas = document.getElementById('canvas');
const ctx = canvas.getContext('2d');
const exposureSlider = document.getElementById('exposure');
const exposureNum = document.getElementById('exposureNum');
let imagesData = []; // Array holding one ImageData object per loaded layer
let imgWidth = 0, imgHeight = 0; // Dimensions of the images (assumed all the same)
// Helper: sRGB -> Linear (gamma 2.2)
function srgbToLinear(value) {
// value is in [0,1] range (normalized sRGB)
return Math.pow(value, 2.2);
}
// Helper: Linear -> sRGB (gamma 1/2.2)
function linearToSrgb(value) {
// value is in [0,1] range (linear)
return Math.pow(value, 1/2.2);
}
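// Optional alternative (not called anywhere in this page): the exact piecewise sRGB transfer
// functions, in case you prefer them over the plain gamma-2.2 approximation used above.
// Swap them into blendImages() if you want the precise curve; the difference shows up mainly in darker tones.
function srgbToLinearExact(value) {
// value in [0,1]; the sRGB curve is linear below 0.04045 and a 2.4 power above it
return value <= 0.04045 ? value / 12.92 : Math.pow((value + 0.055) / 1.055, 2.4);
}
function linearToSrgbExact(value) {
// inverse of the above
return value <= 0.0031308 ? value * 12.92 : 1.055 * Math.pow(value, 1 / 2.4) - 0.055;
}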
// Main blending function: converts all images to linear, blends, then draws to canvas
function blendImages() {
if (imagesData.length === 0) {
return; // No images loaded yet
}
const exposure = parseFloat(exposureSlider.value) || 1.0; // current exposure multiplier
// Create an output ImageData to hold the blended result
let outputImage = ctx.createImageData(imgWidth, imgHeight);
let outputPixels = outputImage.data; // Uint8ClampedArray for output pixel bytes
// We will accumulate linear color in a separate array of floats (to avoid precision loss during addition)
let linearAccum = new Float32Array(imgWidth * imgHeight * 3);
// Note: We accumulate 3 components per pixel (R,G,B). Each layer's alpha weights its contribution; the output alpha is set to fully opaque.
// Loop over each image’s data and accumulate linear values
imagesData.forEach(imageData => {
const data = imageData.data; // Uint8ClampedArray of [r,g,b,a,...] for this image
const len = data.length;
for (let i = 0; i < len; i += 4) { // iterate over every pixel (4 bytes per pixel in ImageData)
const r_srgb = data[i] / 255; // normalized sRGB red
const g_srgb = data[i+1] / 255; // normalized sRGB green
const b_srgb = data[i+2] / 255; // normalized sRGB blue
const alpha = data[i+3] / 255; // normalized alpha (1 = fully opaque)
// Convert to linear and apply exposure scaling
const r_lin = srgbToLinear(r_srgb) * exposure * alpha;
const g_lin = srgbToLinear(g_srgb) * exposure * alpha;
const b_lin = srgbToLinear(b_srgb) * exposure * alpha;
// Accumulate linear values (note: index/4 gives pixel index)
let pixelIndex = (i / 4) * 3;
linearAccum[pixelIndex] += r_lin;
linearAccum[pixelIndex + 1] += g_lin;
linearAccum[pixelIndex + 2] += b_lin;
}
});
// Now linearAccum contains the sum of all images in linear space for each pixel.
// Convert this accumulated linear color back to sRGB and store in outputPixels.
const numPixels = imgWidth * imgHeight;
for (let p = 0; p < numPixels; p++) {
let r_lin_sum = linearAccum[p*3]; // accumulated linear red
let g_lin_sum = linearAccum[p*3 + 1]; // accumulated linear green
let b_lin_sum = linearAccum[p*3 + 2]; // accumulated linear blue
// Clamp linear values to 1.0 (to avoid overflow beyond displayable range)
if (r_lin_sum > 1) r_lin_sum = 1;
if (g_lin_sum > 1) g_lin_sum = 1;
if (b_lin_sum > 1) b_lin_sum = 1;
// Convert back to sRGB space
const r_srgb_out = linearToSrgb(r_lin_sum);
const g_srgb_out = linearToSrgb(g_lin_sum);
const b_srgb_out = linearToSrgb(b_lin_sum);
// Convert to 0-255 and write to output pixel array
const outIndex = p * 4;
outputPixels[outIndex] = Math.round(r_srgb_out * 255);
outputPixels[outIndex + 1] = Math.round(g_srgb_out * 255);
outputPixels[outIndex + 2] = Math.round(b_srgb_out * 255);
outputPixels[outIndex + 3] = 255; // Set alpha to fully opaque in output
}
// Draw the output ImageData onto the canvas
ctx.putImageData(outputImage, 0, 0);
}
// ==== Event Handlers ====
// Handle file input (when user selects images)
fileInput.addEventListener('change', () => {
const files = fileInput.files;
if (!files || files.length === 0) return;
imagesData = []; // reset previous images
imgWidth = 0; // also reset dimensions so a new selection resizes the canvas
imgHeight = 0;
// Load each file as an image
let loadCount = 0;
for (let file of files) {
const reader = new FileReader();
reader.onload = function(event) {
const img = new Image();
img.onload = function() {
// Set canvas size based on first image (assuming all images same size)
if (imgWidth === 0 && imgHeight === 0) {
imgWidth = img.width;
imgHeight = img.height;
canvas.width = imgWidth;
canvas.height = imgHeight;
}
// Draw image to an off-screen canvas to get pixel data
const offCanvas = document.createElement('canvas');
offCanvas.width = imgWidth;
offCanvas.height = imgHeight;
const offCtx = offCanvas.getContext('2d');
offCtx.drawImage(img, 0, 0, imgWidth, imgHeight);
// Get image data (this captures the RGBA pixel values of the image)
const imageData = offCtx.getImageData(0, 0, imgWidth, imgHeight);
imagesData.push(imageData);
loadCount++;
// When all images have been loaded and processed, perform the blending
if (loadCount === files.length) {
blendImages();
}
};
img.src = event.target.result; // start loading the image (data URL)
};
// Read the file as a Data URL (so we can use it as image source)
reader.readAsDataURL(file);
}
});
// Handle exposure slider change (update number field and re-blend)
exposureSlider.addEventListener('input', () => {
exposureNum.value = exposureSlider.value;
blendImages();
});
// Handle exposure number input change (update slider field and re-blend)
exposureNum.addEventListener('input', () => {
// Ensure the number is clamped within min-max
if (exposureNum.value === "") return; // ignore empty input
let val = parseFloat(exposureNum.value);
if (isNaN(val)) return; // ignore incomplete input such as "-" or "." while typing
if (val < 0.1) val = 0.1;
if (val > 5.0) val = 5.0;
exposureNum.value = val.toFixed(1);
exposureSlider.value = exposureNum.value;
blendImages();
});
</script>
</body>
</html>
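If you want a concrete direction for developing it further: LightMix is all about per-light intensity, so the obvious next step is a gain slider per LightSelect layer instead of just the single global exposure. Here is a hypothetical sketch of what the weighted per-pixel blend could look like; the function name and its gains parameter are just illustrations, nothing in the page above uses them yet.

// Weighted additive blend of one pixel across N layers.
// srgbSamples: one [r, g, b] array (0..255) per layer; gains: one multiplier per layer.
function blendPixelWeighted(srgbSamples, gains, exposure) {
  const linearSum = [0, 0, 0];
  srgbSamples.forEach(function (rgb, layer) {
    const gain = (gains[layer] !== undefined) ? gains[layer] : 1.0; // missing gain defaults to 1.0
    for (let c = 0; c < 3; c++) {
      linearSum[c] += Math.pow(rgb[c] / 255, 2.2) * exposure * gain; // sRGB -> linear, scale, accumulate
    }
  });
  // Clamp, convert back to sRGB and to 0..255, just like blendImages() does
  return linearSum.map(function (v) {
    return Math.round(Math.pow(Math.min(v, 1), 1 / 2.2) * 255);
  });
}

Hooking that up would mean adding one slider per loaded file and feeding its value in as that layer's gain inside blendImages().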